Dataset schema (column, dtype, observed value range):

| column | dtype | observed range |
| --- | --- | --- |
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value ("rasdani/github-patches") |
| task_type | string | 1 distinct value ("git_diff") |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.1k-25.4k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 582-39.1k |
| num_tokens | int64 | 271-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
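The sample rows below are easier to handle programmatically than to read inline. The following is a minimal loading sketch, not an official recipe: it assumes the dump corresponds to the `rasdani/github-patches` dataset id named in the `source` column, that it is hosted on the Hugging Face Hub with its records in a `train` split, and that `verification_info` is a JSON string of the shape visible in the sample rows.

```python
import json

from datasets import load_dataset

# Hypothetical loading path: the `source` column reads "rasdani/github-patches",
# and this sketch assumes that Hub dataset id and a "train" split exist.
ds = load_dataset("rasdani/github-patches", split="train")

record = ds[0]
print(record["problem_id"], record["in_source_id"], record["num_tokens"])

# `verification_info` is serialized JSON in the sample rows; parsing it
# exposes the golden diff plus the before/after file contents it bundles.
info = json.loads(record["verification_info"])
print(info["golden_diff"].splitlines()[0])        # e.g. "diff --git a/..."
print([f["path"] for f in info["before_files"]])  # files the patch touches
```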
problem_id: gh_patches_debug_35142
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-Universe__lightning-flash-380
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Flash `Trainer.from_argparse_args` not working ## 🐛 Bug Looks like from_argparse_args isn't working for flash trainer. Likely related to #343 cc @aribornstein --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flash/core/trainer.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import warnings 15 from argparse import ArgumentParser 16 from functools import wraps 17 from typing import Callable, List, Optional, Union 18 19 import torch 20 from pytorch_lightning import LightningDataModule, LightningModule 21 from pytorch_lightning import Trainer as PlTrainer 22 from pytorch_lightning.callbacks import BaseFinetuning 23 from pytorch_lightning.utilities import rank_zero_warn 24 from pytorch_lightning.utilities.argparse import add_argparse_args, get_init_arguments_and_types, parse_env_variables 25 from pytorch_lightning.utilities.exceptions import MisconfigurationException 26 from torch.utils.data import DataLoader 27 28 import flash 29 from flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks 30 31 32 def _defaults_from_env_vars(fn: Callable) -> Callable: 33 """Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix 34 build error in readthedocs.""" 35 36 @wraps(fn) 37 def insert_env_defaults(self, *args, **kwargs): 38 cls = self.__class__ # get the class 39 if args: # inace any args passed move them to kwargs 40 # parse only the argument names 41 cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)] 42 # convert args to kwargs 43 kwargs.update({k: v for k, v in zip(cls_arg_names, args)}) 44 env_variables = vars(parse_env_variables(cls)) 45 # update the kwargs by env variables 46 kwargs = dict(list(env_variables.items()) + list(kwargs.items())) 47 48 # all args were already moved to kwargs 49 return fn(self, **kwargs) 50 51 return insert_env_defaults 52 53 54 class Trainer(PlTrainer): 55 56 @_defaults_from_env_vars 57 def __init__(self, *args, **kwargs): 58 if flash._IS_TESTING: 59 if torch.cuda.is_available(): 60 kwargs["gpus"] = 1 61 kwargs["max_epochs"] = 3 62 kwargs["limit_train_batches"] = 1.0 63 kwargs["limit_val_batches"] = 1.0 64 kwargs["limit_test_batches"] = 1.0 65 kwargs["fast_dev_run"] = False 66 else: 67 kwargs["fast_dev_run"] = True 68 super().__init__(*args, **kwargs) 69 70 def fit( 71 self, 72 model: LightningModule, 73 train_dataloader: Optional[DataLoader] = None, 74 val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None, 75 datamodule: Optional[LightningDataModule] = None, 76 ): 77 r""" 78 Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit` 79 80 Args: 81 datamodule: A instance of :class:`LightningDataModule`. 82 83 model: Model to fit. 
84 85 train_dataloader: A Pytorch DataLoader with training samples. If the model has 86 a predefined train_dataloader method this will be skipped. 87 88 val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples. 89 If the model has a predefined val_dataloaders method this will be skipped 90 """ 91 if any(isinstance(c, BaseFinetuning) for c in self.callbacks): 92 # TODO: if we find a finetuning callback in the trainer should we remove it? or just warn the user? 93 warnings.warn("Warning: You are calling fit(), but your trainer is using a fine-tuning callback") 94 return super().fit(model, train_dataloader, val_dataloaders, datamodule) 95 96 def finetune( 97 self, 98 model: LightningModule, 99 train_dataloader: Optional[DataLoader] = None, 100 val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None, 101 datamodule: Optional[LightningDataModule] = None, 102 strategy: Optional[Union[str, BaseFinetuning]] = None, 103 ): 104 r""" 105 106 Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers 107 of the backbone throughout training layers of the backbone throughout training. 108 109 Args: 110 datamodule: A instance of :class:`LightningDataModule`. 111 112 model: Model to fit. 113 114 train_dataloader: A PyTorch DataLoader with training samples. If the model has 115 a predefined train_dataloader method this will be skipped. 116 117 val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples. 118 If the model has a predefined val_dataloaders method this will be skipped 119 120 strategy: Should either be a string or a finetuning callback subclassing 121 :class:`pytorch_lightning.callbacks.BaseFinetuning`. 122 123 Default strategies can be enabled with these strings: 124 125 - ``"no_freeze"``, 126 - ``"freeze"``, 127 - ``"freeze_unfreeze"``, 128 - ``"unfreeze_milestones"``. 129 """ 130 self._resolve_callbacks(model, strategy) 131 return super().fit(model, train_dataloader, val_dataloaders, datamodule) 132 133 def _resolve_callbacks(self, model, strategy): 134 """ 135 This function is used to select the `BaseFinetuning` to be used for finetuning. 136 """ 137 if strategy is not None and not isinstance(strategy, (str, BaseFinetuning)): 138 raise MisconfigurationException( 139 "strategy should be a ``pytorch_lightning.callbacks.BaseFinetuning``" 140 f"callback or a str within {list(_DEFAULTS_FINETUNE_STRATEGIES.keys())}" 141 ) 142 143 if isinstance(strategy, BaseFinetuning): 144 callback = [strategy] 145 else: 146 # todo: change to ``configure_callbacks`` when merged to Lightning. 147 model_callback = model.configure_finetune_callback() 148 if len(model_callback) > 1: 149 raise MisconfigurationException( 150 f"{model} configure_finetune_callback should create a list with only 1 callback" 151 ) 152 if len(model_callback) == 1: 153 if strategy is not None: 154 rank_zero_warn( 155 "The model contains a default finetune callback. The provided {strategy} will be overriden.\n" 156 " HINT: Provide a `BaseFinetuning` callback as strategy to make it prioritized. 
", UserWarning 157 ) 158 callback = model_callback 159 else: 160 callback = instantiate_default_finetuning_callbacks(strategy) 161 162 self.callbacks = self._merge_callbacks(self.callbacks, callback) 163 164 @staticmethod 165 def _merge_callbacks(old_callbacks: List, new_callbacks: List) -> List: 166 """ 167 This function keeps only 1 instance of each callback type, 168 extending new_callbacks with old_callbacks 169 """ 170 if len(new_callbacks) == 0: 171 return old_callbacks 172 new_callbacks_types = set(type(c) for c in new_callbacks) 173 old_callbacks_types = set(type(c) for c in old_callbacks) 174 override_types = new_callbacks_types.intersection(old_callbacks_types) 175 new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types) 176 return new_callbacks 177 178 @classmethod 179 def add_argparse_args(cls, *args, **kwargs) -> ArgumentParser: 180 # the lightning trainer implementation does not support subclasses. 181 # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447 182 return add_argparse_args(PlTrainer, *args, **kwargs) 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flash/core/trainer.py b/flash/core/trainer.py --- a/flash/core/trainer.py +++ b/flash/core/trainer.py @@ -11,8 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import inspect import warnings -from argparse import ArgumentParser +from argparse import ArgumentParser, Namespace from functools import wraps from typing import Callable, List, Optional, Union @@ -29,6 +30,22 @@ from flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks +def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs): + """Modified version of ``pytorch_lightning.utilities.argparse.from_argparse_args`` which populates ``valid_kwargs`` + from ``pytorch_lightning.Trainer``.""" + if isinstance(args, ArgumentParser): + args = cls.parse_argparser(args) + + params = vars(args) + + # we only want to pass in valid PLTrainer args, the rest may be user specific + valid_kwargs = inspect.signature(PlTrainer.__init__).parameters + trainer_kwargs = dict((name, params[name]) for name in valid_kwargs if name in params) + trainer_kwargs.update(**kwargs) + + return cls(**trainer_kwargs) + + def _defaults_from_env_vars(fn: Callable) -> Callable: """Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix build error in readthedocs.""" @@ -180,3 +197,9 @@ # the lightning trainer implementation does not support subclasses. # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447 return add_argparse_args(PlTrainer, *args, **kwargs) + + @classmethod + def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs) -> 'Trainer': + # the lightning trainer implementation does not support subclasses. + # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447 + return from_argparse_args(PlTrainer, args, **kwargs)
{"golden_diff": "diff --git a/flash/core/trainer.py b/flash/core/trainer.py\n--- a/flash/core/trainer.py\n+++ b/flash/core/trainer.py\n@@ -11,8 +11,9 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import inspect\n import warnings\n-from argparse import ArgumentParser\n+from argparse import ArgumentParser, Namespace\n from functools import wraps\n from typing import Callable, List, Optional, Union\n \n@@ -29,6 +30,22 @@\n from flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks\n \n \n+def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs):\n+ \"\"\"Modified version of ``pytorch_lightning.utilities.argparse.from_argparse_args`` which populates ``valid_kwargs``\n+ from ``pytorch_lightning.Trainer``.\"\"\"\n+ if isinstance(args, ArgumentParser):\n+ args = cls.parse_argparser(args)\n+\n+ params = vars(args)\n+\n+ # we only want to pass in valid PLTrainer args, the rest may be user specific\n+ valid_kwargs = inspect.signature(PlTrainer.__init__).parameters\n+ trainer_kwargs = dict((name, params[name]) for name in valid_kwargs if name in params)\n+ trainer_kwargs.update(**kwargs)\n+\n+ return cls(**trainer_kwargs)\n+\n+\n def _defaults_from_env_vars(fn: Callable) -> Callable:\n \"\"\"Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix\n build error in readthedocs.\"\"\"\n@@ -180,3 +197,9 @@\n # the lightning trainer implementation does not support subclasses.\n # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n return add_argparse_args(PlTrainer, *args, **kwargs)\n+\n+ @classmethod\n+ def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs) -> 'Trainer':\n+ # the lightning trainer implementation does not support subclasses.\n+ # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n+ return from_argparse_args(PlTrainer, args, **kwargs)\n", "issue": "Flash `Trainer.from_argparse_args` not working\n## \ud83d\udc1b Bug\r\n\r\nLooks like from_argparse_args isn't working for flash trainer. 
Likely related to #343\r\n\r\ncc @aribornstein \n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom argparse import ArgumentParser\nfrom functools import wraps\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom pytorch_lightning import LightningDataModule, LightningModule\nfrom pytorch_lightning import Trainer as PlTrainer\nfrom pytorch_lightning.callbacks import BaseFinetuning\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.argparse import add_argparse_args, get_init_arguments_and_types, parse_env_variables\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import DataLoader\n\nimport flash\nfrom flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks\n\n\ndef _defaults_from_env_vars(fn: Callable) -> Callable:\n \"\"\"Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix\n build error in readthedocs.\"\"\"\n\n @wraps(fn)\n def insert_env_defaults(self, *args, **kwargs):\n cls = self.__class__ # get the class\n if args: # inace any args passed move them to kwargs\n # parse only the argument names\n cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]\n # convert args to kwargs\n kwargs.update({k: v for k, v in zip(cls_arg_names, args)})\n env_variables = vars(parse_env_variables(cls))\n # update the kwargs by env variables\n kwargs = dict(list(env_variables.items()) + list(kwargs.items()))\n\n # all args were already moved to kwargs\n return fn(self, **kwargs)\n\n return insert_env_defaults\n\n\nclass Trainer(PlTrainer):\n\n @_defaults_from_env_vars\n def __init__(self, *args, **kwargs):\n if flash._IS_TESTING:\n if torch.cuda.is_available():\n kwargs[\"gpus\"] = 1\n kwargs[\"max_epochs\"] = 3\n kwargs[\"limit_train_batches\"] = 1.0\n kwargs[\"limit_val_batches\"] = 1.0\n kwargs[\"limit_test_batches\"] = 1.0\n kwargs[\"fast_dev_run\"] = False\n else:\n kwargs[\"fast_dev_run\"] = True\n super().__init__(*args, **kwargs)\n\n def fit(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n ):\n r\"\"\"\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A Pytorch DataLoader with training samples. 
If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n \"\"\"\n if any(isinstance(c, BaseFinetuning) for c in self.callbacks):\n # TODO: if we find a finetuning callback in the trainer should we remove it? or just warn the user?\n warnings.warn(\"Warning: You are calling fit(), but your trainer is using a fine-tuning callback\")\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def finetune(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n strategy: Optional[Union[str, BaseFinetuning]] = None,\n ):\n r\"\"\"\n\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers\n of the backbone throughout training layers of the backbone throughout training.\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A PyTorch DataLoader with training samples. If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n\n strategy: Should either be a string or a finetuning callback subclassing\n :class:`pytorch_lightning.callbacks.BaseFinetuning`.\n\n Default strategies can be enabled with these strings:\n\n - ``\"no_freeze\"``,\n - ``\"freeze\"``,\n - ``\"freeze_unfreeze\"``,\n - ``\"unfreeze_milestones\"``.\n \"\"\"\n self._resolve_callbacks(model, strategy)\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def _resolve_callbacks(self, model, strategy):\n \"\"\"\n This function is used to select the `BaseFinetuning` to be used for finetuning.\n \"\"\"\n if strategy is not None and not isinstance(strategy, (str, BaseFinetuning)):\n raise MisconfigurationException(\n \"strategy should be a ``pytorch_lightning.callbacks.BaseFinetuning``\"\n f\"callback or a str within {list(_DEFAULTS_FINETUNE_STRATEGIES.keys())}\"\n )\n\n if isinstance(strategy, BaseFinetuning):\n callback = [strategy]\n else:\n # todo: change to ``configure_callbacks`` when merged to Lightning.\n model_callback = model.configure_finetune_callback()\n if len(model_callback) > 1:\n raise MisconfigurationException(\n f\"{model} configure_finetune_callback should create a list with only 1 callback\"\n )\n if len(model_callback) == 1:\n if strategy is not None:\n rank_zero_warn(\n \"The model contains a default finetune callback. The provided {strategy} will be overriden.\\n\"\n \" HINT: Provide a `BaseFinetuning` callback as strategy to make it prioritized. 
\", UserWarning\n )\n callback = model_callback\n else:\n callback = instantiate_default_finetuning_callbacks(strategy)\n\n self.callbacks = self._merge_callbacks(self.callbacks, callback)\n\n @staticmethod\n def _merge_callbacks(old_callbacks: List, new_callbacks: List) -> List:\n \"\"\"\n This function keeps only 1 instance of each callback type,\n extending new_callbacks with old_callbacks\n \"\"\"\n if len(new_callbacks) == 0:\n return old_callbacks\n new_callbacks_types = set(type(c) for c in new_callbacks)\n old_callbacks_types = set(type(c) for c in old_callbacks)\n override_types = new_callbacks_types.intersection(old_callbacks_types)\n new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)\n return new_callbacks\n\n @classmethod\n def add_argparse_args(cls, *args, **kwargs) -> ArgumentParser:\n # the lightning trainer implementation does not support subclasses.\n # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n return add_argparse_args(PlTrainer, *args, **kwargs)\n", "path": "flash/core/trainer.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport inspect\nimport warnings\nfrom argparse import ArgumentParser, Namespace\nfrom functools import wraps\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom pytorch_lightning import LightningDataModule, LightningModule\nfrom pytorch_lightning import Trainer as PlTrainer\nfrom pytorch_lightning.callbacks import BaseFinetuning\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.argparse import add_argparse_args, get_init_arguments_and_types, parse_env_variables\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import DataLoader\n\nimport flash\nfrom flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks\n\n\ndef from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs):\n \"\"\"Modified version of ``pytorch_lightning.utilities.argparse.from_argparse_args`` which populates ``valid_kwargs``\n from ``pytorch_lightning.Trainer``.\"\"\"\n if isinstance(args, ArgumentParser):\n args = cls.parse_argparser(args)\n\n params = vars(args)\n\n # we only want to pass in valid PLTrainer args, the rest may be user specific\n valid_kwargs = inspect.signature(PlTrainer.__init__).parameters\n trainer_kwargs = dict((name, params[name]) for name in valid_kwargs if name in params)\n trainer_kwargs.update(**kwargs)\n\n return cls(**trainer_kwargs)\n\n\ndef _defaults_from_env_vars(fn: Callable) -> Callable:\n \"\"\"Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. 
Required to fix\n build error in readthedocs.\"\"\"\n\n @wraps(fn)\n def insert_env_defaults(self, *args, **kwargs):\n cls = self.__class__ # get the class\n if args: # inace any args passed move them to kwargs\n # parse only the argument names\n cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]\n # convert args to kwargs\n kwargs.update({k: v for k, v in zip(cls_arg_names, args)})\n env_variables = vars(parse_env_variables(cls))\n # update the kwargs by env variables\n kwargs = dict(list(env_variables.items()) + list(kwargs.items()))\n\n # all args were already moved to kwargs\n return fn(self, **kwargs)\n\n return insert_env_defaults\n\n\nclass Trainer(PlTrainer):\n\n @_defaults_from_env_vars\n def __init__(self, *args, **kwargs):\n if flash._IS_TESTING:\n if torch.cuda.is_available():\n kwargs[\"gpus\"] = 1\n kwargs[\"max_epochs\"] = 3\n kwargs[\"limit_train_batches\"] = 1.0\n kwargs[\"limit_val_batches\"] = 1.0\n kwargs[\"limit_test_batches\"] = 1.0\n kwargs[\"fast_dev_run\"] = False\n else:\n kwargs[\"fast_dev_run\"] = True\n super().__init__(*args, **kwargs)\n\n def fit(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n ):\n r\"\"\"\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A Pytorch DataLoader with training samples. If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n \"\"\"\n if any(isinstance(c, BaseFinetuning) for c in self.callbacks):\n # TODO: if we find a finetuning callback in the trainer should we remove it? or just warn the user?\n warnings.warn(\"Warning: You are calling fit(), but your trainer is using a fine-tuning callback\")\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def finetune(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n strategy: Optional[Union[str, BaseFinetuning]] = None,\n ):\n r\"\"\"\n\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers\n of the backbone throughout training layers of the backbone throughout training.\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A PyTorch DataLoader with training samples. 
If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n\n strategy: Should either be a string or a finetuning callback subclassing\n :class:`pytorch_lightning.callbacks.BaseFinetuning`.\n\n Default strategies can be enabled with these strings:\n\n - ``\"no_freeze\"``,\n - ``\"freeze\"``,\n - ``\"freeze_unfreeze\"``,\n - ``\"unfreeze_milestones\"``.\n \"\"\"\n self._resolve_callbacks(model, strategy)\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def _resolve_callbacks(self, model, strategy):\n \"\"\"\n This function is used to select the `BaseFinetuning` to be used for finetuning.\n \"\"\"\n if strategy is not None and not isinstance(strategy, (str, BaseFinetuning)):\n raise MisconfigurationException(\n \"strategy should be a ``pytorch_lightning.callbacks.BaseFinetuning``\"\n f\"callback or a str within {list(_DEFAULTS_FINETUNE_STRATEGIES.keys())}\"\n )\n\n if isinstance(strategy, BaseFinetuning):\n callback = [strategy]\n else:\n # todo: change to ``configure_callbacks`` when merged to Lightning.\n model_callback = model.configure_finetune_callback()\n if len(model_callback) > 1:\n raise MisconfigurationException(\n f\"{model} configure_finetune_callback should create a list with only 1 callback\"\n )\n if len(model_callback) == 1:\n if strategy is not None:\n rank_zero_warn(\n \"The model contains a default finetune callback. The provided {strategy} will be overriden.\\n\"\n \" HINT: Provide a `BaseFinetuning` callback as strategy to make it prioritized. \", UserWarning\n )\n callback = model_callback\n else:\n callback = instantiate_default_finetuning_callbacks(strategy)\n\n self.callbacks = self._merge_callbacks(self.callbacks, callback)\n\n @staticmethod\n def _merge_callbacks(old_callbacks: List, new_callbacks: List) -> List:\n \"\"\"\n This function keeps only 1 instance of each callback type,\n extending new_callbacks with old_callbacks\n \"\"\"\n if len(new_callbacks) == 0:\n return old_callbacks\n new_callbacks_types = set(type(c) for c in new_callbacks)\n old_callbacks_types = set(type(c) for c in old_callbacks)\n override_types = new_callbacks_types.intersection(old_callbacks_types)\n new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)\n return new_callbacks\n\n @classmethod\n def add_argparse_args(cls, *args, **kwargs) -> ArgumentParser:\n # the lightning trainer implementation does not support subclasses.\n # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n return add_argparse_args(PlTrainer, *args, **kwargs)\n\n @classmethod\n def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs) -> 'Trainer':\n # the lightning trainer implementation does not support subclasses.\n # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n return from_argparse_args(PlTrainer, args, **kwargs)\n", "path": "flash/core/trainer.py"}]}
num_tokens: 2,511
num_tokens_diff: 545
problem_id: gh_patches_debug_34311
source: rasdani/github-patches
task_type: git_diff
in_source_id: bridgecrewio__checkov-2084
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- False positive for CKV_AZURE_43: check storage account name I'm building my Storage Account names like this ``` name = "${local.saname_prefix}diagnostics${module.tf-var-project.random_id} ``` With https://github.com/bridgecrewio/checkov/pull/429 merged I now get a Check failure on the SA name: ``` Check: CKV_AZURE_43: "Ensure the Storage Account naming rules" FAILED for resource: azurerm_storage_account.diagnostics File: /az_diag_sa.tf:8-22 8 | resource "azurerm_storage_account" "diagnostics" { 9 | #checkov:skip=CKV_AZURE_35:Public access is allowed 10 | name = "${local.saname_prefix}diagnostics${module.tf-var-project.random_id}" ```` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/azure/StorageAccountName.py` Content: ``` 1 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 2 from checkov.common.models.enums import CheckResult, CheckCategories 3 import re 4 from typing import List 5 6 STO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$') 7 8 9 class StorageAccountName(BaseResourceCheck): 10 def __init__(self): 11 name = "Ensure Storage Accounts adhere to the naming rules" 12 id = "CKV_AZURE_43" 13 supported_resources = ['azurerm_storage_account'] 14 categories = [CheckCategories.CONVENTION] 15 super().__init__(name=name, id=id, categories=categories, 16 supported_resources=supported_resources) 17 18 def scan_resource_conf(self, conf): 19 """ 20 The Storage Account naming reference: 21 https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts 22 :param conf: azurerm_storage_account configuration 23 :return: <CheckResult> 24 """ 25 return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED 26 27 def get_evaluated_keys(self) -> List[str]: 28 return ['name'] 29 30 31 check = StorageAccountName() 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/azure/StorageAccountName.py b/checkov/terraform/checks/resource/azure/StorageAccountName.py --- a/checkov/terraform/checks/resource/azure/StorageAccountName.py +++ b/checkov/terraform/checks/resource/azure/StorageAccountName.py @@ -1,31 +1,41 @@ +import re +from typing import List, Dict, Any + from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck from checkov.common.models.enums import CheckResult, CheckCategories -import re -from typing import List -STO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$') +STO_NAME_REGEX = re.compile(r"^[a-z0-9]{3,24}$") +VARIABLE_REFS = ("local.", "module.", "var.") class StorageAccountName(BaseResourceCheck): - def __init__(self): + def __init__(self) -> None: name = "Ensure Storage Accounts adhere to the naming rules" id = "CKV_AZURE_43" - supported_resources = ['azurerm_storage_account'] + supported_resources = ["azurerm_storage_account"] categories = [CheckCategories.CONVENTION] - super().__init__(name=name, id=id, categories=categories, - supported_resources=supported_resources) + super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def scan_resource_conf(self, conf): + def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult: """ The Storage Account naming reference: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts :param conf: azurerm_storage_account configuration :return: <CheckResult> """ - return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED + name = conf.get("name") + if name: + name = name[0] + if any(x in name for x in VARIABLE_REFS): + # in the case we couldn't evaluate the name, just ignore + return CheckResult.UNKNOWN + if re.findall(STO_NAME_REGEX, str(conf["name"][0])): + return CheckResult.PASSED + + return CheckResult.FAILED def get_evaluated_keys(self) -> List[str]: - return ['name'] + return ["name"] check = StorageAccountName()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/StorageAccountName.py b/checkov/terraform/checks/resource/azure/StorageAccountName.py\n--- a/checkov/terraform/checks/resource/azure/StorageAccountName.py\n+++ b/checkov/terraform/checks/resource/azure/StorageAccountName.py\n@@ -1,31 +1,41 @@\n+import re\n+from typing import List, Dict, Any\n+\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckResult, CheckCategories\n-import re\n-from typing import List\n \n-STO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$')\n+STO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\n+VARIABLE_REFS = (\"local.\", \"module.\", \"var.\")\n \n \n class StorageAccountName(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n- supported_resources = ['azurerm_storage_account']\n+ supported_resources = [\"azurerm_storage_account\"]\n categories = [CheckCategories.CONVENTION]\n- super().__init__(name=name, id=id, categories=categories,\n- supported_resources=supported_resources)\n+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n- return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED\n+ name = conf.get(\"name\")\n+ if name:\n+ name = name[0]\n+ if any(x in name for x in VARIABLE_REFS):\n+ # in the case we couldn't evaluate the name, just ignore\n+ return CheckResult.UNKNOWN\n+ if re.findall(STO_NAME_REGEX, str(conf[\"name\"][0])):\n+ return CheckResult.PASSED\n+\n+ return CheckResult.FAILED\n \n def get_evaluated_keys(self) -> List[str]:\n- return ['name']\n+ return [\"name\"]\n \n \n check = StorageAccountName()\n", "issue": "False positive for CKV_AZURE_43: check storage account name\nI'm building my Storage Account names like this\r\n```\r\nname = \"${local.saname_prefix}diagnostics${module.tf-var-project.random_id}\r\n```\r\n\r\nWith https://github.com/bridgecrewio/checkov/pull/429 merged I now get a Check failure on the SA name:\r\n\r\n```\r\nCheck: CKV_AZURE_43: \"Ensure the Storage Account naming rules\"\r\n\tFAILED for resource: azurerm_storage_account.diagnostics\r\n\tFile: /az_diag_sa.tf:8-22\r\n\r\n\t\t8 | resource \"azurerm_storage_account\" \"diagnostics\" {\r\n\t\t9 | #checkov:skip=CKV_AZURE_35:Public access is allowed\r\n\t\t10 | name = \"${local.saname_prefix}diagnostics${module.tf-var-project.random_id}\"\r\n\r\n````\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nimport re\nfrom typing import List\n\nSTO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$')\n\n\nclass StorageAccountName(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n supported_resources = ['azurerm_storage_account']\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories,\n 
supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['name']\n\n\ncheck = StorageAccountName()\n", "path": "checkov/terraform/checks/resource/azure/StorageAccountName.py"}], "after_files": [{"content": "import re\nfrom typing import List, Dict, Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\nSTO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\nVARIABLE_REFS = (\"local.\", \"module.\", \"var.\")\n\n\nclass StorageAccountName(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n supported_resources = [\"azurerm_storage_account\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n name = conf.get(\"name\")\n if name:\n name = name[0]\n if any(x in name for x in VARIABLE_REFS):\n # in the case we couldn't evaluate the name, just ignore\n return CheckResult.UNKNOWN\n if re.findall(STO_NAME_REGEX, str(conf[\"name\"][0])):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"name\"]\n\n\ncheck = StorageAccountName()\n", "path": "checkov/terraform/checks/resource/azure/StorageAccountName.py"}]}
num_tokens: 792
num_tokens_diff: 571
problem_id: gh_patches_debug_23360
source: rasdani/github-patches
task_type: git_diff
in_source_id: allegro__ralph-3159
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Doc fixes Some minor doc fixes with a bit of style change --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/ralph/dashboards/management/commands/push_graphs_to_statsd.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import logging 3 import textwrap 4 5 from django.conf import settings 6 from django.core.management.base import BaseCommand 7 from django.utils.text import slugify 8 9 from ralph.dashboards.models import Graph 10 from ralph.lib.metrics import build_statsd_client 11 12 logger = logging.getLogger(__name__) 13 PREFIX = settings.STATSD_GRAPHS_PREFIX 14 STATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX) 15 16 17 def normalize(s): 18 s = slugify(s) 19 return s.replace('-', '_') 20 21 22 class Command(BaseCommand): 23 """Push to statsd data generated by graphs.""" 24 help = textwrap.dedent(__doc__).strip() 25 26 def handle(self, *args, **kwargs): 27 statsd = build_statsd_client(prefix=STATSD_PATH) 28 graphs = Graph.objects.filter(push_to_statsd=True) 29 for graph in graphs: 30 graph_data = graph.get_data() 31 graph_name = normalize(graph.name) 32 for label, value in zip(graph_data['labels'], graph_data['series']): 33 path = STATSD_PATH.format(graph_name, normalize(label)) 34 statsd.gauge(path, value) 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py --- a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py +++ b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py @@ -10,8 +10,6 @@ from ralph.lib.metrics import build_statsd_client logger = logging.getLogger(__name__) -PREFIX = settings.STATSD_GRAPHS_PREFIX -STATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX) def normalize(s): @@ -24,11 +22,11 @@ help = textwrap.dedent(__doc__).strip() def handle(self, *args, **kwargs): - statsd = build_statsd_client(prefix=STATSD_PATH) + statsd = build_statsd_client(prefix=settings.STATSD_GRAPHS_PREFIX) graphs = Graph.objects.filter(push_to_statsd=True) for graph in graphs: graph_data = graph.get_data() graph_name = normalize(graph.name) for label, value in zip(graph_data['labels'], graph_data['series']): - path = STATSD_PATH.format(graph_name, normalize(label)) + path = '.'.join((graph_name, normalize(label))) statsd.gauge(path, value)
{"golden_diff": "diff --git a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py\n--- a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py\n+++ b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py\n@@ -10,8 +10,6 @@\n from ralph.lib.metrics import build_statsd_client\n \n logger = logging.getLogger(__name__)\n-PREFIX = settings.STATSD_GRAPHS_PREFIX\n-STATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX)\n \n \n def normalize(s):\n@@ -24,11 +22,11 @@\n help = textwrap.dedent(__doc__).strip()\n \n def handle(self, *args, **kwargs):\n- statsd = build_statsd_client(prefix=STATSD_PATH)\n+ statsd = build_statsd_client(prefix=settings.STATSD_GRAPHS_PREFIX)\n graphs = Graph.objects.filter(push_to_statsd=True)\n for graph in graphs:\n graph_data = graph.get_data()\n graph_name = normalize(graph.name)\n for label, value in zip(graph_data['labels'], graph_data['series']):\n- path = STATSD_PATH.format(graph_name, normalize(label))\n+ path = '.'.join((graph_name, normalize(label)))\n statsd.gauge(path, value)\n", "issue": "Doc fixes\nSome minor doc fixes with a bit of style change\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport logging\nimport textwrap\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils.text import slugify\n\nfrom ralph.dashboards.models import Graph\nfrom ralph.lib.metrics import build_statsd_client\n\nlogger = logging.getLogger(__name__)\nPREFIX = settings.STATSD_GRAPHS_PREFIX\nSTATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX)\n\n\ndef normalize(s):\n s = slugify(s)\n return s.replace('-', '_')\n\n\nclass Command(BaseCommand):\n \"\"\"Push to statsd data generated by graphs.\"\"\"\n help = textwrap.dedent(__doc__).strip()\n\n def handle(self, *args, **kwargs):\n statsd = build_statsd_client(prefix=STATSD_PATH)\n graphs = Graph.objects.filter(push_to_statsd=True)\n for graph in graphs:\n graph_data = graph.get_data()\n graph_name = normalize(graph.name)\n for label, value in zip(graph_data['labels'], graph_data['series']):\n path = STATSD_PATH.format(graph_name, normalize(label))\n statsd.gauge(path, value)\n", "path": "src/ralph/dashboards/management/commands/push_graphs_to_statsd.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport logging\nimport textwrap\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils.text import slugify\n\nfrom ralph.dashboards.models import Graph\nfrom ralph.lib.metrics import build_statsd_client\n\nlogger = logging.getLogger(__name__)\n\n\ndef normalize(s):\n s = slugify(s)\n return s.replace('-', '_')\n\n\nclass Command(BaseCommand):\n \"\"\"Push to statsd data generated by graphs.\"\"\"\n help = textwrap.dedent(__doc__).strip()\n\n def handle(self, *args, **kwargs):\n statsd = build_statsd_client(prefix=settings.STATSD_GRAPHS_PREFIX)\n graphs = Graph.objects.filter(push_to_statsd=True)\n for graph in graphs:\n graph_data = graph.get_data()\n graph_name = normalize(graph.name)\n for label, value in zip(graph_data['labels'], graph_data['series']):\n path = '.'.join((graph_name, normalize(label)))\n statsd.gauge(path, value)\n", "path": "src/ralph/dashboards/management/commands/push_graphs_to_statsd.py"}]}
num_tokens: 594
num_tokens_diff: 313
problem_id: gh_patches_debug_14848
source: rasdani/github-patches
task_type: git_diff
in_source_id: MongoEngine__mongoengine-1858
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- select_related() ConnectionError Hi, I am having an interesting situation. We have been using MongoEngine in our project for a good few years now and this issue appeared out of nowhere (no update, no change to the code). Calling `select_related()` on the results of a query raises a `mongoengine.connection.ConnectionError: You have not defined a default connection` Exception The call that causes the issue: ```credentials = Credential.objects(id__in=ids).select_related()``` I noticed that I am getting the exception only the first time time I call `select_related()` and only on this object. Any subsequent calls (on the same collection) work as expected. While trying to debug, I got to this: ``` > /usr/local/lib/python2.7/dist-packages/mongoengine/dereference.py(129)_fetch_objects() 127 object_map = {} 128 for collection, dbrefs in self.reference_map.iteritems(): --> 129 if hasattr(collection, 'objects'): # We have a document class for the refs 130 col_name = collection._get_collection_name() 131 refs = [dbref for dbref in dbrefs ipdb> collection <class 'insights.models.company.Company'> ipdb> hasattr(collection, 'objects') False ipdb> hasattr(collection, 'objects') True ``` Can this be caused by some lazy loading or some configuration issue? Thank you! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mongoengine/dereference.py` Content: ``` 1 from bson import DBRef, SON 2 import six 3 4 from mongoengine.base import (BaseDict, BaseList, EmbeddedDocumentList, 5 TopLevelDocumentMetaclass, get_document) 6 from mongoengine.base.datastructures import LazyReference 7 from mongoengine.connection import get_db 8 from mongoengine.document import Document, EmbeddedDocument 9 from mongoengine.fields import DictField, ListField, MapField, ReferenceField 10 from mongoengine.queryset import QuerySet 11 12 13 class DeReference(object): 14 def __call__(self, items, max_depth=1, instance=None, name=None): 15 """ 16 Cheaply dereferences the items to a set depth. 17 Also handles the conversion of complex data types. 18 19 :param items: The iterable (dict, list, queryset) to be dereferenced. 
20 :param max_depth: The maximum depth to recurse to 21 :param instance: The owning instance used for tracking changes by 22 :class:`~mongoengine.base.ComplexBaseField` 23 :param name: The name of the field, used for tracking changes by 24 :class:`~mongoengine.base.ComplexBaseField` 25 :param get: A boolean determining if being called by __get__ 26 """ 27 if items is None or isinstance(items, six.string_types): 28 return items 29 30 # cheapest way to convert a queryset to a list 31 # list(queryset) uses a count() query to determine length 32 if isinstance(items, QuerySet): 33 items = [i for i in items] 34 35 self.max_depth = max_depth 36 doc_type = None 37 38 if instance and isinstance(instance, (Document, EmbeddedDocument, 39 TopLevelDocumentMetaclass)): 40 doc_type = instance._fields.get(name) 41 while hasattr(doc_type, 'field'): 42 doc_type = doc_type.field 43 44 if isinstance(doc_type, ReferenceField): 45 field = doc_type 46 doc_type = doc_type.document_type 47 is_list = not hasattr(items, 'items') 48 49 if is_list and all([i.__class__ == doc_type for i in items]): 50 return items 51 elif not is_list and all( 52 [i.__class__ == doc_type for i in items.values()]): 53 return items 54 elif not field.dbref: 55 if not hasattr(items, 'items'): 56 57 def _get_items(items): 58 new_items = [] 59 for v in items: 60 if isinstance(v, list): 61 new_items.append(_get_items(v)) 62 elif not isinstance(v, (DBRef, Document)): 63 new_items.append(field.to_python(v)) 64 else: 65 new_items.append(v) 66 return new_items 67 68 items = _get_items(items) 69 else: 70 items = { 71 k: (v if isinstance(v, (DBRef, Document)) 72 else field.to_python(v)) 73 for k, v in items.iteritems() 74 } 75 76 self.reference_map = self._find_references(items) 77 self.object_map = self._fetch_objects(doc_type=doc_type) 78 return self._attach_objects(items, 0, instance, name) 79 80 def _find_references(self, items, depth=0): 81 """ 82 Recursively finds all db references to be dereferenced 83 84 :param items: The iterable (dict, list, queryset) 85 :param depth: The current depth of recursion 86 """ 87 reference_map = {} 88 if not items or depth >= self.max_depth: 89 return reference_map 90 91 # Determine the iterator to use 92 if isinstance(items, dict): 93 iterator = items.values() 94 else: 95 iterator = items 96 97 # Recursively find dbreferences 98 depth += 1 99 for item in iterator: 100 if isinstance(item, (Document, EmbeddedDocument)): 101 for field_name, field in item._fields.iteritems(): 102 v = item._data.get(field_name, None) 103 if isinstance(v, LazyReference): 104 # LazyReference inherits DBRef but should not be dereferenced here ! 105 continue 106 elif isinstance(v, DBRef): 107 reference_map.setdefault(field.document_type, set()).add(v.id) 108 elif isinstance(v, (dict, SON)) and '_ref' in v: 109 reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id) 110 elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth: 111 field_cls = getattr(getattr(field, 'field', None), 'document_type', None) 112 references = self._find_references(v, depth) 113 for key, refs in references.iteritems(): 114 if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)): 115 key = field_cls 116 reference_map.setdefault(key, set()).update(refs) 117 elif isinstance(item, LazyReference): 118 # LazyReference inherits DBRef but should not be dereferenced here ! 
119 continue 120 elif isinstance(item, DBRef): 121 reference_map.setdefault(item.collection, set()).add(item.id) 122 elif isinstance(item, (dict, SON)) and '_ref' in item: 123 reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id) 124 elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth: 125 references = self._find_references(item, depth - 1) 126 for key, refs in references.iteritems(): 127 reference_map.setdefault(key, set()).update(refs) 128 129 return reference_map 130 131 def _fetch_objects(self, doc_type=None): 132 """Fetch all references and convert to their document objects 133 """ 134 object_map = {} 135 for collection, dbrefs in self.reference_map.iteritems(): 136 if hasattr(collection, 'objects'): # We have a document class for the refs 137 col_name = collection._get_collection_name() 138 refs = [dbref for dbref in dbrefs 139 if (col_name, dbref) not in object_map] 140 references = collection.objects.in_bulk(refs) 141 for key, doc in references.iteritems(): 142 object_map[(col_name, key)] = doc 143 else: # Generic reference: use the refs data to convert to document 144 if isinstance(doc_type, (ListField, DictField, MapField,)): 145 continue 146 147 refs = [dbref for dbref in dbrefs 148 if (collection, dbref) not in object_map] 149 150 if doc_type: 151 references = doc_type._get_db()[collection].find({'_id': {'$in': refs}}) 152 for ref in references: 153 doc = doc_type._from_son(ref) 154 object_map[(collection, doc.id)] = doc 155 else: 156 references = get_db()[collection].find({'_id': {'$in': refs}}) 157 for ref in references: 158 if '_cls' in ref: 159 doc = get_document(ref['_cls'])._from_son(ref) 160 elif doc_type is None: 161 doc = get_document( 162 ''.join(x.capitalize() 163 for x in collection.split('_')))._from_son(ref) 164 else: 165 doc = doc_type._from_son(ref) 166 object_map[(collection, doc.id)] = doc 167 return object_map 168 169 def _attach_objects(self, items, depth=0, instance=None, name=None): 170 """ 171 Recursively finds all db references to be dereferenced 172 173 :param items: The iterable (dict, list, queryset) 174 :param depth: The current depth of recursion 175 :param instance: The owning instance used for tracking changes by 176 :class:`~mongoengine.base.ComplexBaseField` 177 :param name: The name of the field, used for tracking changes by 178 :class:`~mongoengine.base.ComplexBaseField` 179 """ 180 if not items: 181 if isinstance(items, (BaseDict, BaseList)): 182 return items 183 184 if instance: 185 if isinstance(items, dict): 186 return BaseDict(items, instance, name) 187 else: 188 return BaseList(items, instance, name) 189 190 if isinstance(items, (dict, SON)): 191 if '_ref' in items: 192 return self.object_map.get( 193 (items['_ref'].collection, items['_ref'].id), items) 194 elif '_cls' in items: 195 doc = get_document(items['_cls'])._from_son(items) 196 _cls = doc._data.pop('_cls', None) 197 del items['_cls'] 198 doc._data = self._attach_objects(doc._data, depth, doc, None) 199 if _cls is not None: 200 doc._data['_cls'] = _cls 201 return doc 202 203 if not hasattr(items, 'items'): 204 is_list = True 205 list_type = BaseList 206 if isinstance(items, EmbeddedDocumentList): 207 list_type = EmbeddedDocumentList 208 as_tuple = isinstance(items, tuple) 209 iterator = enumerate(items) 210 data = [] 211 else: 212 is_list = False 213 iterator = items.iteritems() 214 data = {} 215 216 depth += 1 217 for k, v in iterator: 218 if is_list: 219 data.append(v) 220 else: 221 data[k] = v 222 223 if k in self.object_map 
and not is_list: 224 data[k] = self.object_map[k] 225 elif isinstance(v, (Document, EmbeddedDocument)): 226 for field_name in v._fields: 227 v = data[k]._data.get(field_name, None) 228 if isinstance(v, DBRef): 229 data[k]._data[field_name] = self.object_map.get( 230 (v.collection, v.id), v) 231 elif isinstance(v, (dict, SON)) and '_ref' in v: 232 data[k]._data[field_name] = self.object_map.get( 233 (v['_ref'].collection, v['_ref'].id), v) 234 elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth: 235 item_name = six.text_type('{0}.{1}.{2}').format(name, k, field_name) 236 data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name) 237 elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth: 238 item_name = '%s.%s' % (name, k) if name else name 239 data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name) 240 elif isinstance(v, DBRef) and hasattr(v, 'id'): 241 data[k] = self.object_map.get((v.collection, v.id), v) 242 243 if instance and name: 244 if is_list: 245 return tuple(data) if as_tuple else list_type(data, instance, name) 246 return BaseDict(data, instance, name) 247 depth += 1 248 return data 249 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mongoengine/dereference.py b/mongoengine/dereference.py --- a/mongoengine/dereference.py +++ b/mongoengine/dereference.py @@ -133,7 +133,12 @@ """ object_map = {} for collection, dbrefs in self.reference_map.iteritems(): - if hasattr(collection, 'objects'): # We have a document class for the refs + + # we use getattr instead of hasattr because as hasattr swallows any exception under python2 + # so it could hide nasty things without raising exceptions (cfr bug #1688)) + ref_document_cls_exists = (getattr(collection, 'objects', None) is not None) + + if ref_document_cls_exists: col_name = collection._get_collection_name() refs = [dbref for dbref in dbrefs if (col_name, dbref) not in object_map]
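As an aside on the fix above: on Python 2, `hasattr()` returns False whenever the attribute lookup raises *any* exception, so a transient failure inside mongoengine's `objects` manager (such as the missing-default-connection error from the issue) silently routes dereferencing into the generic-reference branch. `getattr()` with a default only absorbs `AttributeError` and lets other exceptions propagate. A minimal, self-contained sketch of that difference (the classes are hypothetical stand-ins, not from mongoengine):

```python
# Hypothetical stand-ins for mongoengine's Document/QuerySetManager, used
# only to illustrate the hasattr-vs-getattr distinction behind the patch.
class FlakyManager(object):
    def __get__(self, instance, owner):
        # Simulates the manager failing transiently, e.g. because no
        # default connection has been registered yet (bug #1688).
        raise RuntimeError("You have not defined a default connection")


class Company(object):
    objects = FlakyManager()


# Python 2: hasattr(Company, "objects") catches the RuntimeError and
# returns False, making the class look like it has no manager at all.
# getattr() with a sentinel only swallows AttributeError, so the real
# error surfaces instead of being masked:
try:
    ref_document_cls_exists = getattr(Company, "objects", None) is not None
except RuntimeError as exc:
    print("lookup failed loudly: %s" % exc)
```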
{"golden_diff": "diff --git a/mongoengine/dereference.py b/mongoengine/dereference.py\n--- a/mongoengine/dereference.py\n+++ b/mongoengine/dereference.py\n@@ -133,7 +133,12 @@\n \"\"\"\n object_map = {}\n for collection, dbrefs in self.reference_map.iteritems():\n- if hasattr(collection, 'objects'): # We have a document class for the refs\n+\n+ # we use getattr instead of hasattr because as hasattr swallows any exception under python2\n+ # so it could hide nasty things without raising exceptions (cfr bug #1688))\n+ ref_document_cls_exists = (getattr(collection, 'objects', None) is not None)\n+\n+ if ref_document_cls_exists:\n col_name = collection._get_collection_name()\n refs = [dbref for dbref in dbrefs\n if (col_name, dbref) not in object_map]\n", "issue": "select_related() ConnectionError \nHi,\r\n\r\nI am having an interesting situation. We have been using MongoEngine in our project for a good few years now and this issue appeared out of nowhere (no update, no change to the code).\r\n\r\nCalling `select_related()` on the results of a query raises a `mongoengine.connection.ConnectionError: You have not defined a default connection` Exception\r\n\r\nThe call that causes the issue:\r\n```credentials = Credential.objects(id__in=ids).select_related()```\r\n\r\nI noticed that I am getting the exception only the first time time I call `select_related()` and only on this object. Any subsequent calls (on the same collection) work as expected. \r\n\r\nWhile trying to debug, I got to this:\r\n```\r\n> /usr/local/lib/python2.7/dist-packages/mongoengine/dereference.py(129)_fetch_objects()\r\n 127 object_map = {}\r\n 128 for collection, dbrefs in self.reference_map.iteritems():\r\n--> 129 if hasattr(collection, 'objects'): # We have a document class for the refs\r\n 130 col_name = collection._get_collection_name()\r\n 131 refs = [dbref for dbref in dbrefs\r\n\r\nipdb> collection\r\n<class 'insights.models.company.Company'>\r\nipdb> hasattr(collection, 'objects')\r\nFalse\r\nipdb> hasattr(collection, 'objects')\r\nTrue\r\n```\r\n\r\nCan this be caused by some lazy loading or some configuration issue?\r\n\r\nThank you!\n", "before_files": [{"content": "from bson import DBRef, SON\nimport six\n\nfrom mongoengine.base import (BaseDict, BaseList, EmbeddedDocumentList,\n TopLevelDocumentMetaclass, get_document)\nfrom mongoengine.base.datastructures import LazyReference\nfrom mongoengine.connection import get_db\nfrom mongoengine.document import Document, EmbeddedDocument\nfrom mongoengine.fields import DictField, ListField, MapField, ReferenceField\nfrom mongoengine.queryset import QuerySet\n\n\nclass DeReference(object):\n def __call__(self, items, max_depth=1, instance=None, name=None):\n \"\"\"\n Cheaply dereferences the items to a set depth.\n Also handles the conversion of complex data types.\n\n :param items: The iterable (dict, list, queryset) to be dereferenced.\n :param max_depth: The maximum depth to recurse to\n :param instance: The owning instance used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param name: The name of the field, used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param get: A boolean determining if being called by __get__\n \"\"\"\n if items is None or isinstance(items, six.string_types):\n return items\n\n # cheapest way to convert a queryset to a list\n # list(queryset) uses a count() query to determine length\n if isinstance(items, QuerySet):\n items = [i for i in items]\n\n self.max_depth = max_depth\n doc_type = 
None\n\n if instance and isinstance(instance, (Document, EmbeddedDocument,\n TopLevelDocumentMetaclass)):\n doc_type = instance._fields.get(name)\n while hasattr(doc_type, 'field'):\n doc_type = doc_type.field\n\n if isinstance(doc_type, ReferenceField):\n field = doc_type\n doc_type = doc_type.document_type\n is_list = not hasattr(items, 'items')\n\n if is_list and all([i.__class__ == doc_type for i in items]):\n return items\n elif not is_list and all(\n [i.__class__ == doc_type for i in items.values()]):\n return items\n elif not field.dbref:\n if not hasattr(items, 'items'):\n\n def _get_items(items):\n new_items = []\n for v in items:\n if isinstance(v, list):\n new_items.append(_get_items(v))\n elif not isinstance(v, (DBRef, Document)):\n new_items.append(field.to_python(v))\n else:\n new_items.append(v)\n return new_items\n\n items = _get_items(items)\n else:\n items = {\n k: (v if isinstance(v, (DBRef, Document))\n else field.to_python(v))\n for k, v in items.iteritems()\n }\n\n self.reference_map = self._find_references(items)\n self.object_map = self._fetch_objects(doc_type=doc_type)\n return self._attach_objects(items, 0, instance, name)\n\n def _find_references(self, items, depth=0):\n \"\"\"\n Recursively finds all db references to be dereferenced\n\n :param items: The iterable (dict, list, queryset)\n :param depth: The current depth of recursion\n \"\"\"\n reference_map = {}\n if not items or depth >= self.max_depth:\n return reference_map\n\n # Determine the iterator to use\n if isinstance(items, dict):\n iterator = items.values()\n else:\n iterator = items\n\n # Recursively find dbreferences\n depth += 1\n for item in iterator:\n if isinstance(item, (Document, EmbeddedDocument)):\n for field_name, field in item._fields.iteritems():\n v = item._data.get(field_name, None)\n if isinstance(v, LazyReference):\n # LazyReference inherits DBRef but should not be dereferenced here !\n continue\n elif isinstance(v, DBRef):\n reference_map.setdefault(field.document_type, set()).add(v.id)\n elif isinstance(v, (dict, SON)) and '_ref' in v:\n reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n field_cls = getattr(getattr(field, 'field', None), 'document_type', None)\n references = self._find_references(v, depth)\n for key, refs in references.iteritems():\n if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):\n key = field_cls\n reference_map.setdefault(key, set()).update(refs)\n elif isinstance(item, LazyReference):\n # LazyReference inherits DBRef but should not be dereferenced here !\n continue\n elif isinstance(item, DBRef):\n reference_map.setdefault(item.collection, set()).add(item.id)\n elif isinstance(item, (dict, SON)) and '_ref' in item:\n reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)\n elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:\n references = self._find_references(item, depth - 1)\n for key, refs in references.iteritems():\n reference_map.setdefault(key, set()).update(refs)\n\n return reference_map\n\n def _fetch_objects(self, doc_type=None):\n \"\"\"Fetch all references and convert to their document objects\n \"\"\"\n object_map = {}\n for collection, dbrefs in self.reference_map.iteritems():\n if hasattr(collection, 'objects'): # We have a document class for the refs\n col_name = collection._get_collection_name()\n refs = [dbref for dbref in dbrefs\n if (col_name, dbref) not in object_map]\n references 
= collection.objects.in_bulk(refs)\n for key, doc in references.iteritems():\n object_map[(col_name, key)] = doc\n else: # Generic reference: use the refs data to convert to document\n if isinstance(doc_type, (ListField, DictField, MapField,)):\n continue\n\n refs = [dbref for dbref in dbrefs\n if (collection, dbref) not in object_map]\n\n if doc_type:\n references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n doc = doc_type._from_son(ref)\n object_map[(collection, doc.id)] = doc\n else:\n references = get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n if '_cls' in ref:\n doc = get_document(ref['_cls'])._from_son(ref)\n elif doc_type is None:\n doc = get_document(\n ''.join(x.capitalize()\n for x in collection.split('_')))._from_son(ref)\n else:\n doc = doc_type._from_son(ref)\n object_map[(collection, doc.id)] = doc\n return object_map\n\n def _attach_objects(self, items, depth=0, instance=None, name=None):\n \"\"\"\n Recursively finds all db references to be dereferenced\n\n :param items: The iterable (dict, list, queryset)\n :param depth: The current depth of recursion\n :param instance: The owning instance used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param name: The name of the field, used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n \"\"\"\n if not items:\n if isinstance(items, (BaseDict, BaseList)):\n return items\n\n if instance:\n if isinstance(items, dict):\n return BaseDict(items, instance, name)\n else:\n return BaseList(items, instance, name)\n\n if isinstance(items, (dict, SON)):\n if '_ref' in items:\n return self.object_map.get(\n (items['_ref'].collection, items['_ref'].id), items)\n elif '_cls' in items:\n doc = get_document(items['_cls'])._from_son(items)\n _cls = doc._data.pop('_cls', None)\n del items['_cls']\n doc._data = self._attach_objects(doc._data, depth, doc, None)\n if _cls is not None:\n doc._data['_cls'] = _cls\n return doc\n\n if not hasattr(items, 'items'):\n is_list = True\n list_type = BaseList\n if isinstance(items, EmbeddedDocumentList):\n list_type = EmbeddedDocumentList\n as_tuple = isinstance(items, tuple)\n iterator = enumerate(items)\n data = []\n else:\n is_list = False\n iterator = items.iteritems()\n data = {}\n\n depth += 1\n for k, v in iterator:\n if is_list:\n data.append(v)\n else:\n data[k] = v\n\n if k in self.object_map and not is_list:\n data[k] = self.object_map[k]\n elif isinstance(v, (Document, EmbeddedDocument)):\n for field_name in v._fields:\n v = data[k]._data.get(field_name, None)\n if isinstance(v, DBRef):\n data[k]._data[field_name] = self.object_map.get(\n (v.collection, v.id), v)\n elif isinstance(v, (dict, SON)) and '_ref' in v:\n data[k]._data[field_name] = self.object_map.get(\n (v['_ref'].collection, v['_ref'].id), v)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = six.text_type('{0}.{1}.{2}').format(name, k, field_name)\n data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = '%s.%s' % (name, k) if name else name\n data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)\n elif isinstance(v, DBRef) and hasattr(v, 'id'):\n data[k] = self.object_map.get((v.collection, v.id), v)\n\n if instance and name:\n if is_list:\n return tuple(data) if as_tuple else list_type(data, instance, name)\n return BaseDict(data, 
instance, name)\n depth += 1\n return data\n", "path": "mongoengine/dereference.py"}], "after_files": [{"content": "from bson import DBRef, SON\nimport six\n\nfrom mongoengine.base import (BaseDict, BaseList, EmbeddedDocumentList,\n TopLevelDocumentMetaclass, get_document)\nfrom mongoengine.base.datastructures import LazyReference\nfrom mongoengine.connection import get_db\nfrom mongoengine.document import Document, EmbeddedDocument\nfrom mongoengine.fields import DictField, ListField, MapField, ReferenceField\nfrom mongoengine.queryset import QuerySet\n\n\nclass DeReference(object):\n def __call__(self, items, max_depth=1, instance=None, name=None):\n \"\"\"\n Cheaply dereferences the items to a set depth.\n Also handles the conversion of complex data types.\n\n :param items: The iterable (dict, list, queryset) to be dereferenced.\n :param max_depth: The maximum depth to recurse to\n :param instance: The owning instance used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param name: The name of the field, used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param get: A boolean determining if being called by __get__\n \"\"\"\n if items is None or isinstance(items, six.string_types):\n return items\n\n # cheapest way to convert a queryset to a list\n # list(queryset) uses a count() query to determine length\n if isinstance(items, QuerySet):\n items = [i for i in items]\n\n self.max_depth = max_depth\n doc_type = None\n\n if instance and isinstance(instance, (Document, EmbeddedDocument,\n TopLevelDocumentMetaclass)):\n doc_type = instance._fields.get(name)\n while hasattr(doc_type, 'field'):\n doc_type = doc_type.field\n\n if isinstance(doc_type, ReferenceField):\n field = doc_type\n doc_type = doc_type.document_type\n is_list = not hasattr(items, 'items')\n\n if is_list and all([i.__class__ == doc_type for i in items]):\n return items\n elif not is_list and all(\n [i.__class__ == doc_type for i in items.values()]):\n return items\n elif not field.dbref:\n if not hasattr(items, 'items'):\n\n def _get_items(items):\n new_items = []\n for v in items:\n if isinstance(v, list):\n new_items.append(_get_items(v))\n elif not isinstance(v, (DBRef, Document)):\n new_items.append(field.to_python(v))\n else:\n new_items.append(v)\n return new_items\n\n items = _get_items(items)\n else:\n items = {\n k: (v if isinstance(v, (DBRef, Document))\n else field.to_python(v))\n for k, v in items.iteritems()\n }\n\n self.reference_map = self._find_references(items)\n self.object_map = self._fetch_objects(doc_type=doc_type)\n return self._attach_objects(items, 0, instance, name)\n\n def _find_references(self, items, depth=0):\n \"\"\"\n Recursively finds all db references to be dereferenced\n\n :param items: The iterable (dict, list, queryset)\n :param depth: The current depth of recursion\n \"\"\"\n reference_map = {}\n if not items or depth >= self.max_depth:\n return reference_map\n\n # Determine the iterator to use\n if isinstance(items, dict):\n iterator = items.values()\n else:\n iterator = items\n\n # Recursively find dbreferences\n depth += 1\n for item in iterator:\n if isinstance(item, (Document, EmbeddedDocument)):\n for field_name, field in item._fields.iteritems():\n v = item._data.get(field_name, None)\n if isinstance(v, LazyReference):\n # LazyReference inherits DBRef but should not be dereferenced here !\n continue\n elif isinstance(v, DBRef):\n reference_map.setdefault(field.document_type, set()).add(v.id)\n elif isinstance(v, (dict, SON)) and 
'_ref' in v:\n reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n field_cls = getattr(getattr(field, 'field', None), 'document_type', None)\n references = self._find_references(v, depth)\n for key, refs in references.iteritems():\n if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):\n key = field_cls\n reference_map.setdefault(key, set()).update(refs)\n elif isinstance(item, LazyReference):\n # LazyReference inherits DBRef but should not be dereferenced here !\n continue\n elif isinstance(item, DBRef):\n reference_map.setdefault(item.collection, set()).add(item.id)\n elif isinstance(item, (dict, SON)) and '_ref' in item:\n reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)\n elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:\n references = self._find_references(item, depth - 1)\n for key, refs in references.iteritems():\n reference_map.setdefault(key, set()).update(refs)\n\n return reference_map\n\n def _fetch_objects(self, doc_type=None):\n \"\"\"Fetch all references and convert to their document objects\n \"\"\"\n object_map = {}\n for collection, dbrefs in self.reference_map.iteritems():\n\n # we use getattr instead of hasattr because as hasattr swallows any exception under python2\n # so it could hide nasty things without raising exceptions (cfr bug #1688))\n ref_document_cls_exists = (getattr(collection, 'objects', None) is not None)\n\n if ref_document_cls_exists:\n col_name = collection._get_collection_name()\n refs = [dbref for dbref in dbrefs\n if (col_name, dbref) not in object_map]\n references = collection.objects.in_bulk(refs)\n for key, doc in references.iteritems():\n object_map[(col_name, key)] = doc\n else: # Generic reference: use the refs data to convert to document\n if isinstance(doc_type, (ListField, DictField, MapField,)):\n continue\n\n refs = [dbref for dbref in dbrefs\n if (collection, dbref) not in object_map]\n\n if doc_type:\n references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n doc = doc_type._from_son(ref)\n object_map[(collection, doc.id)] = doc\n else:\n references = get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n if '_cls' in ref:\n doc = get_document(ref['_cls'])._from_son(ref)\n elif doc_type is None:\n doc = get_document(\n ''.join(x.capitalize()\n for x in collection.split('_')))._from_son(ref)\n else:\n doc = doc_type._from_son(ref)\n object_map[(collection, doc.id)] = doc\n return object_map\n\n def _attach_objects(self, items, depth=0, instance=None, name=None):\n \"\"\"\n Recursively finds all db references to be dereferenced\n\n :param items: The iterable (dict, list, queryset)\n :param depth: The current depth of recursion\n :param instance: The owning instance used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param name: The name of the field, used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n \"\"\"\n if not items:\n if isinstance(items, (BaseDict, BaseList)):\n return items\n\n if instance:\n if isinstance(items, dict):\n return BaseDict(items, instance, name)\n else:\n return BaseList(items, instance, name)\n\n if isinstance(items, (dict, SON)):\n if '_ref' in items:\n return self.object_map.get(\n (items['_ref'].collection, items['_ref'].id), items)\n elif '_cls' in items:\n doc = get_document(items['_cls'])._from_son(items)\n _cls = 
doc._data.pop('_cls', None)\n del items['_cls']\n doc._data = self._attach_objects(doc._data, depth, doc, None)\n if _cls is not None:\n doc._data['_cls'] = _cls\n return doc\n\n if not hasattr(items, 'items'):\n is_list = True\n list_type = BaseList\n if isinstance(items, EmbeddedDocumentList):\n list_type = EmbeddedDocumentList\n as_tuple = isinstance(items, tuple)\n iterator = enumerate(items)\n data = []\n else:\n is_list = False\n iterator = items.iteritems()\n data = {}\n\n depth += 1\n for k, v in iterator:\n if is_list:\n data.append(v)\n else:\n data[k] = v\n\n if k in self.object_map and not is_list:\n data[k] = self.object_map[k]\n elif isinstance(v, (Document, EmbeddedDocument)):\n for field_name in v._fields:\n v = data[k]._data.get(field_name, None)\n if isinstance(v, DBRef):\n data[k]._data[field_name] = self.object_map.get(\n (v.collection, v.id), v)\n elif isinstance(v, (dict, SON)) and '_ref' in v:\n data[k]._data[field_name] = self.object_map.get(\n (v['_ref'].collection, v['_ref'].id), v)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = six.text_type('{0}.{1}.{2}').format(name, k, field_name)\n data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = '%s.%s' % (name, k) if name else name\n data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)\n elif isinstance(v, DBRef) and hasattr(v, 'id'):\n data[k] = self.object_map.get((v.collection, v.id), v)\n\n if instance and name:\n if is_list:\n return tuple(data) if as_tuple else list_type(data, instance, name)\n return BaseDict(data, instance, name)\n depth += 1\n return data\n", "path": "mongoengine/dereference.py"}]}
3,469
203
gh_patches_debug_23048
rasdani/github-patches
git_diff
cupy__cupy-5759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `cupy.concatenate()` misses arguments `dtype` and `casting` Refs: - NumPy: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html - CuPy: https://docs.cupy.dev/en/stable/reference/generated/cupy.concatenate.html The `dtype` argument is needed by the Array API standard (#5698, #4789). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/_manipulation/join.py` Content: ``` 1 import cupy 2 from cupy import _core 3 4 5 def column_stack(tup): 6 """Stacks 1-D and 2-D arrays as columns into a 2-D array. 7 8 A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays 9 are concatenated along the second axis. 10 11 Args: 12 tup (sequence of arrays): 1-D or 2-D arrays to be stacked. 13 14 Returns: 15 cupy.ndarray: A new 2-D array of stacked columns. 16 17 .. seealso:: :func:`numpy.column_stack` 18 19 """ 20 if any(not isinstance(a, cupy.ndarray) for a in tup): 21 raise TypeError('Only cupy arrays can be column stacked') 22 23 lst = list(tup) 24 for i, a in enumerate(lst): 25 if a.ndim == 1: 26 a = a[:, cupy.newaxis] 27 lst[i] = a 28 elif a.ndim != 2: 29 raise ValueError( 30 'Only 1 or 2 dimensional arrays can be column stacked') 31 32 return concatenate(lst, axis=1) 33 34 35 def concatenate(tup, axis=0, out=None): 36 """Joins arrays along an axis. 37 38 Args: 39 tup (sequence of arrays): Arrays to be joined. All of these should have 40 same dimensionalities except the specified axis. 41 axis (int or None): The axis to join arrays along. 42 If axis is None, arrays are flattened before use. 43 Default is 0. 44 out (cupy.ndarray): Output array. 45 46 Returns: 47 cupy.ndarray: Joined array. 48 49 .. seealso:: :func:`numpy.concatenate` 50 51 """ 52 if axis is None: 53 tup = [m.ravel() for m in tup] 54 axis = 0 55 return _core.concatenate_method(tup, axis, out) 56 57 58 def dstack(tup): 59 """Stacks arrays along the third axis. 60 61 Args: 62 tup (sequence of arrays): Arrays to be stacked. Each array is converted 63 by :func:`cupy.atleast_3d` before stacking. 64 65 Returns: 66 cupy.ndarray: Stacked array. 67 68 .. seealso:: :func:`numpy.dstack` 69 70 """ 71 return concatenate([cupy.atleast_3d(m) for m in tup], 2) 72 73 74 def hstack(tup): 75 """Stacks arrays horizontally. 76 77 If an input array has one dimension, then the array is treated as a 78 horizontal vector and stacked along the first axis. Otherwise, the array is 79 stacked along the second axis. 80 81 Args: 82 tup (sequence of arrays): Arrays to be stacked. 83 84 Returns: 85 cupy.ndarray: Stacked array. 86 87 .. seealso:: :func:`numpy.hstack` 88 89 """ 90 arrs = [cupy.atleast_1d(a) for a in tup] 91 axis = 1 92 if arrs[0].ndim == 1: 93 axis = 0 94 return concatenate(arrs, axis) 95 96 97 def vstack(tup): 98 """Stacks arrays vertically. 99 100 If an input array has one dimension, then the array is treated as a 101 horizontal vector and stacked along the additional axis at the head. 102 Otherwise, the array is stacked along the first axis. 103 104 Args: 105 tup (sequence of arrays): Arrays to be stacked. Each array is converted 106 by :func:`cupy.atleast_2d` before stacking. 107 108 Returns: 109 cupy.ndarray: Stacked array. 110 111 .. 
seealso:: :func:`numpy.dstack` 112 113 """ 114 return concatenate([cupy.atleast_2d(m) for m in tup], 0) 115 116 117 def stack(tup, axis=0, out=None): 118 """Stacks arrays along a new axis. 119 120 Args: 121 tup (sequence of arrays): Arrays to be stacked. 122 axis (int): Axis along which the arrays are stacked. 123 out (cupy.ndarray): Output array. 124 125 Returns: 126 cupy.ndarray: Stacked array. 127 128 .. seealso:: :func:`numpy.stack` 129 """ 130 return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/_manipulation/join.py b/cupy/_manipulation/join.py --- a/cupy/_manipulation/join.py +++ b/cupy/_manipulation/join.py @@ -32,7 +32,7 @@ return concatenate(lst, axis=1) -def concatenate(tup, axis=0, out=None): +def concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'): """Joins arrays along an axis. Args: @@ -42,6 +42,11 @@ If axis is None, arrays are flattened before use. Default is 0. out (cupy.ndarray): Output array. + dtype (str or dtype): If provided, the destination array will have this + dtype. Cannot be provided together with ``out``. + casting ({‘no’, ‘equiv’, ‘safe’, ‘same_kind’, ‘unsafe’}, optional): + Controls what kind of data casting may occur. Defaults to + ``'same_kind'``. Returns: cupy.ndarray: Joined array. @@ -52,7 +57,7 @@ if axis is None: tup = [m.ravel() for m in tup] axis = 0 - return _core.concatenate_method(tup, axis, out) + return _core.concatenate_method(tup, axis, out, dtype, casting) def dstack(tup):
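For reference, a short usage sketch of the two new keyword-only arguments once this patch is applied. It assumes a CuPy build that already contains the change; the semantics are meant to mirror `numpy.concatenate`:

```python
# Assumes a CuPy build containing the patched signature above.
import cupy

a = cupy.array([1, 2, 3], dtype=cupy.int32)
b = cupy.array([4, 5, 6], dtype=cupy.int32)

# `dtype` fixes the destination type up front (cannot be combined with `out`).
r = cupy.concatenate((a, b), dtype=cupy.float64)
print(r.dtype)  # float64

# `casting` controls whether that implied conversion is allowed; under the
# strict 'no' rule the int32 -> float64 cast should be rejected, as in NumPy.
try:
    cupy.concatenate((a, b), dtype=cupy.float64, casting="no")
except TypeError as exc:
    print("rejected:", exc)
```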
{"golden_diff": "diff --git a/cupy/_manipulation/join.py b/cupy/_manipulation/join.py\n--- a/cupy/_manipulation/join.py\n+++ b/cupy/_manipulation/join.py\n@@ -32,7 +32,7 @@\n return concatenate(lst, axis=1)\n \n \n-def concatenate(tup, axis=0, out=None):\n+def concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):\n \"\"\"Joins arrays along an axis.\n \n Args:\n@@ -42,6 +42,11 @@\n If axis is None, arrays are flattened before use.\n Default is 0.\n out (cupy.ndarray): Output array.\n+ dtype (str or dtype): If provided, the destination array will have this\n+ dtype. Cannot be provided together with ``out``.\n+ casting ({\u2018no\u2019, \u2018equiv\u2019, \u2018safe\u2019, \u2018same_kind\u2019, \u2018unsafe\u2019}, optional):\n+ Controls what kind of data casting may occur. Defaults to\n+ ``'same_kind'``.\n \n Returns:\n cupy.ndarray: Joined array.\n@@ -52,7 +57,7 @@\n if axis is None:\n tup = [m.ravel() for m in tup]\n axis = 0\n- return _core.concatenate_method(tup, axis, out)\n+ return _core.concatenate_method(tup, axis, out, dtype, casting)\n \n \n def dstack(tup):\n", "issue": "`cupy.concatenate()` misses arguments `dtype` and `casting`\nRefs:\r\n- NumPy: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html\r\n- CuPy: https://docs.cupy.dev/en/stable/reference/generated/cupy.concatenate.html\r\n\r\nThe `dtype` argument is needed by the Array API standard (#5698, #4789).\n", "before_files": [{"content": "import cupy\nfrom cupy import _core\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0, out=None):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int or None): The axis to join arrays along.\n If axis is None, arrays are flattened before use.\n Default is 0.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n if axis is None:\n tup = [m.ravel() for m in tup]\n axis = 0\n return _core.concatenate_method(tup, axis, out)\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0, out=None):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out)\n", "path": "cupy/_manipulation/join.py"}], "after_files": [{"content": "import cupy\nfrom cupy import _core\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int or None): The axis to join arrays along.\n If axis is None, arrays are flattened before use.\n Default is 0.\n out (cupy.ndarray): Output array.\n dtype (str or dtype): If provided, the destination array will have this\n dtype. Cannot be provided together with ``out``.\n casting ({\u2018no\u2019, \u2018equiv\u2019, \u2018safe\u2019, \u2018same_kind\u2019, \u2018unsafe\u2019}, optional):\n Controls what kind of data casting may occur. Defaults to\n ``'same_kind'``.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n if axis is None:\n tup = [m.ravel() for m in tup]\n axis = 0\n return _core.concatenate_method(tup, axis, out, dtype, casting)\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. 
Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0, out=None):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out)\n", "path": "cupy/_manipulation/join.py"}]}
1,538
322
gh_patches_debug_25497
rasdani/github-patches
git_diff
tensorflow__addons-2048
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate GELU Per https://github.com/tensorflow/community/pull/252, oonce https://github.com/tensorflow/tensorflow/pull/41178 merges we'll need to deprecate our GELU for versions of TensorFlow that include it within core. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tensorflow_addons/layers/gelu.py` Content: ``` 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Implements GELU activation.""" 16 17 import tensorflow as tf 18 from tensorflow_addons.activations import gelu 19 from typeguard import typechecked 20 21 22 @tf.keras.utils.register_keras_serializable(package="Addons") 23 class GELU(tf.keras.layers.Layer): 24 """Gaussian Error Linear Unit. 25 26 A smoother version of ReLU generally used 27 in the BERT or BERT architecture based models. 28 Original paper: https://arxiv.org/abs/1606.08415 29 30 Input shape: 31 Arbitrary. Use the keyword argument `input_shape` 32 (tuple of integers, does not include the samples axis) 33 when using this layer as the first layer in a model. 34 35 Output shape: 36 Same shape as the input. 37 """ 38 39 @typechecked 40 def __init__(self, approximate: bool = True, **kwargs): 41 super().__init__(**kwargs) 42 self.approximate = approximate 43 self.supports_masking = True 44 45 def call(self, inputs): 46 return gelu(inputs, approximate=self.approximate) 47 48 def get_config(self): 49 config = {"approximate": self.approximate} 50 base_config = super().get_config() 51 return {**base_config, **config} 52 53 def compute_output_shape(self, input_shape): 54 return input_shape 55 ``` Path: `tensorflow_addons/activations/gelu.py` Content: ``` 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 # ============================================================================== 15 16 import tensorflow as tf 17 import math 18 import warnings 19 20 from tensorflow_addons.utils import types 21 from tensorflow_addons.utils.resource_loader import LazySO 22 from tensorflow_addons import options 23 24 _activation_so = LazySO("custom_ops/activations/_activation_ops.so") 25 26 27 @tf.keras.utils.register_keras_serializable(package="Addons") 28 def gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor: 29 """Gaussian Error Linear Unit. 30 31 Computes gaussian error linear: 32 `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or 33 `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1), 34 depending on whether approximation is enabled. 35 36 See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415) 37 and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805). 38 39 Args: 40 x: A `Tensor`. Must be one of the following types: 41 `float16`, `float32`, `float64`. 42 approximate: bool, whether to enable approximation. 43 Returns: 44 A `Tensor`. Has the same type as `x`. 45 """ 46 x = tf.convert_to_tensor(x) 47 48 if not options.TF_ADDONS_PY_OPS: 49 try: 50 return _gelu_custom_op(x, approximate) 51 except tf.errors.NotFoundError: 52 options.warn_fallback("gelu") 53 54 return _gelu_py(x, approximate) 55 56 57 def _gelu_custom_op(x, approximate): 58 warnings.warn( 59 "The activations custom ops are deprecated and will be removed in TensorFlow Addons " 60 "v0.12.0. \nPlease use the pure python version of Gelu instead by using the " 61 "`TF_ADDONS_PY_OPS` flag. \nFor more info about this flag, see " 62 "https://github.com/tensorflow/addons#gpucpu-custom-ops ", 63 DeprecationWarning, 64 ) 65 return _activation_so.ops.addons_gelu(x, approximate) 66 67 68 @tf.RegisterGradient("Addons>Gelu") 69 def _gelu_grad(op, grad): 70 return _activation_so.ops.addons_gelu_grad( 71 grad, op.inputs[0], op.get_attr("approximate") 72 ) 73 74 75 def _gelu_py(x: types.TensorLike, approximate: bool = True) -> tf.Tensor: 76 x = tf.convert_to_tensor(x) 77 if approximate: 78 pi = tf.cast(math.pi, x.dtype) 79 coeff = tf.cast(0.044715, x.dtype) 80 return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3)))) 81 else: 82 return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype))) 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tensorflow_addons/activations/gelu.py b/tensorflow_addons/activations/gelu.py --- a/tensorflow_addons/activations/gelu.py +++ b/tensorflow_addons/activations/gelu.py @@ -43,6 +43,12 @@ Returns: A `Tensor`. Has the same type as `x`. """ + warnings.warn( + "gelu activation has been migrated to core TensorFlow, " + "and will be deprecated in Addons 0.12.", + DeprecationWarning, + ) + x = tf.convert_to_tensor(x) if not options.TF_ADDONS_PY_OPS: diff --git a/tensorflow_addons/layers/gelu.py b/tensorflow_addons/layers/gelu.py --- a/tensorflow_addons/layers/gelu.py +++ b/tensorflow_addons/layers/gelu.py @@ -14,6 +14,8 @@ # ============================================================================== """Implements GELU activation.""" +import warnings + import tensorflow as tf from tensorflow_addons.activations import gelu from typeguard import typechecked @@ -38,6 +40,12 @@ @typechecked def __init__(self, approximate: bool = True, **kwargs): + warnings.warn( + "gelu activation has been migrated to core TensorFlow, " + "and will be deprecated in Addons 0.12.", + DeprecationWarning, + ) + super().__init__(**kwargs) self.approximate = approximate self.supports_masking = True
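To see what the deprecation means for callers, here is a sketch of observing the new warning and migrating to the core op. It assumes TensorFlow >= 2.4 (where `tf.nn.gelu` exists) and an Addons build with this patch; note the differing defaults: Addons uses the tanh approximation by default, while the core op defaults to the exact erf form.

```python
# Assumes tensorflow >= 2.4 and a tensorflow-addons build with this patch.
import warnings

import tensorflow as tf
import tensorflow_addons as tfa

x = tf.constant([-1.0, 0.0, 1.0])

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    y_addons = tfa.activations.gelu(x)  # emits DeprecationWarning after the patch
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# Migration target: pass approximate=True to match the Addons default
# (tanh form); the core default is the exact erf-based variant.
y_core = tf.nn.gelu(x, approximate=True)
```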
{"golden_diff": "diff --git a/tensorflow_addons/activations/gelu.py b/tensorflow_addons/activations/gelu.py\n--- a/tensorflow_addons/activations/gelu.py\n+++ b/tensorflow_addons/activations/gelu.py\n@@ -43,6 +43,12 @@\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n+ warnings.warn(\n+ \"gelu activation has been migrated to core TensorFlow, \"\n+ \"and will be deprecated in Addons 0.12.\",\n+ DeprecationWarning,\n+ )\n+\n x = tf.convert_to_tensor(x)\n \n if not options.TF_ADDONS_PY_OPS:\ndiff --git a/tensorflow_addons/layers/gelu.py b/tensorflow_addons/layers/gelu.py\n--- a/tensorflow_addons/layers/gelu.py\n+++ b/tensorflow_addons/layers/gelu.py\n@@ -14,6 +14,8 @@\n # ==============================================================================\n \"\"\"Implements GELU activation.\"\"\"\n \n+import warnings\n+\n import tensorflow as tf\n from tensorflow_addons.activations import gelu\n from typeguard import typechecked\n@@ -38,6 +40,12 @@\n \n @typechecked\n def __init__(self, approximate: bool = True, **kwargs):\n+ warnings.warn(\n+ \"gelu activation has been migrated to core TensorFlow, \"\n+ \"and will be deprecated in Addons 0.12.\",\n+ DeprecationWarning,\n+ )\n+\n super().__init__(**kwargs)\n self.approximate = approximate\n self.supports_masking = True\n", "issue": "Deprecate GELU \nPer https://github.com/tensorflow/community/pull/252, oonce https://github.com/tensorflow/tensorflow/pull/41178 merges we'll need to deprecate our GELU for versions of TensorFlow that include it within core.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements GELU activation.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_addons.activations import gelu\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass GELU(tf.keras.layers.Layer):\n \"\"\"Gaussian Error Linear Unit.\n\n A smoother version of ReLU generally used\n in the BERT or BERT architecture based models.\n Original paper: https://arxiv.org/abs/1606.08415\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n \"\"\"\n\n @typechecked\n def __init__(self, approximate: bool = True, **kwargs):\n super().__init__(**kwargs)\n self.approximate = approximate\n self.supports_masking = True\n\n def call(self, inputs):\n return gelu(inputs, approximate=self.approximate)\n\n def get_config(self):\n config = {\"approximate\": self.approximate}\n base_config = super().get_config()\n return {**base_config, **config}\n\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "tensorflow_addons/layers/gelu.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nimport math\nimport warnings\n\nfrom tensorflow_addons.utils import types\nfrom tensorflow_addons.utils.resource_loader import LazySO\nfrom tensorflow_addons import options\n\n_activation_so = LazySO(\"custom_ops/activations/_activation_ops.so\")\n\n\[email protected]_keras_serializable(package=\"Addons\")\ndef gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n \"\"\"Gaussian Error Linear Unit.\n\n Computes gaussian error linear:\n `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or\n `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),\n depending on whether approximation is enabled.\n\n See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)\n and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).\n\n Args:\n x: A `Tensor`. Must be one of the following types:\n `float16`, `float32`, `float64`.\n approximate: bool, whether to enable approximation.\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n x = tf.convert_to_tensor(x)\n\n if not options.TF_ADDONS_PY_OPS:\n try:\n return _gelu_custom_op(x, approximate)\n except tf.errors.NotFoundError:\n options.warn_fallback(\"gelu\")\n\n return _gelu_py(x, approximate)\n\n\ndef _gelu_custom_op(x, approximate):\n warnings.warn(\n \"The activations custom ops are deprecated and will be removed in TensorFlow Addons \"\n \"v0.12.0. \\nPlease use the pure python version of Gelu instead by using the \"\n \"`TF_ADDONS_PY_OPS` flag. \\nFor more info about this flag, see \"\n \"https://github.com/tensorflow/addons#gpucpu-custom-ops \",\n DeprecationWarning,\n )\n return _activation_so.ops.addons_gelu(x, approximate)\n\n\[email protected](\"Addons>Gelu\")\ndef _gelu_grad(op, grad):\n return _activation_so.ops.addons_gelu_grad(\n grad, op.inputs[0], op.get_attr(\"approximate\")\n )\n\n\ndef _gelu_py(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n x = tf.convert_to_tensor(x)\n if approximate:\n pi = tf.cast(math.pi, x.dtype)\n coeff = tf.cast(0.044715, x.dtype)\n return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))\n else:\n return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))\n", "path": "tensorflow_addons/activations/gelu.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements GELU activation.\"\"\"\n\nimport warnings\n\nimport tensorflow as tf\nfrom tensorflow_addons.activations import gelu\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass GELU(tf.keras.layers.Layer):\n \"\"\"Gaussian Error Linear Unit.\n\n A smoother version of ReLU generally used\n in the BERT or BERT architecture based models.\n Original paper: https://arxiv.org/abs/1606.08415\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n \"\"\"\n\n @typechecked\n def __init__(self, approximate: bool = True, **kwargs):\n warnings.warn(\n \"gelu activation has been migrated to core TensorFlow, \"\n \"and will be deprecated in Addons 0.12.\",\n DeprecationWarning,\n )\n\n super().__init__(**kwargs)\n self.approximate = approximate\n self.supports_masking = True\n\n def call(self, inputs):\n return gelu(inputs, approximate=self.approximate)\n\n def get_config(self):\n config = {\"approximate\": self.approximate}\n base_config = super().get_config()\n return {**base_config, **config}\n\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "tensorflow_addons/layers/gelu.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nimport math\nimport warnings\n\nfrom tensorflow_addons.utils import types\nfrom tensorflow_addons.utils.resource_loader import LazySO\nfrom tensorflow_addons import options\n\n_activation_so = LazySO(\"custom_ops/activations/_activation_ops.so\")\n\n\[email protected]_keras_serializable(package=\"Addons\")\ndef gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n \"\"\"Gaussian Error Linear Unit.\n\n Computes gaussian error linear:\n `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or\n `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),\n depending on whether approximation is enabled.\n\n See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)\n and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).\n\n Args:\n x: A `Tensor`. Must be one of the following types:\n `float16`, `float32`, `float64`.\n approximate: bool, whether to enable approximation.\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n warnings.warn(\n \"gelu activation has been migrated to core TensorFlow, \"\n \"and will be deprecated in Addons 0.12.\",\n DeprecationWarning,\n )\n\n x = tf.convert_to_tensor(x)\n\n if not options.TF_ADDONS_PY_OPS:\n try:\n return _gelu_custom_op(x, approximate)\n except tf.errors.NotFoundError:\n options.warn_fallback(\"gelu\")\n\n return _gelu_py(x, approximate)\n\n\ndef _gelu_custom_op(x, approximate):\n warnings.warn(\n \"The activations custom ops are deprecated and will be removed in TensorFlow Addons \"\n \"v0.12.0. \\nPlease use the pure python version of Gelu instead by using the \"\n \"`TF_ADDONS_PY_OPS` flag. \\nFor more info about this flag, see \"\n \"https://github.com/tensorflow/addons#gpucpu-custom-ops \",\n DeprecationWarning,\n )\n return _activation_so.ops.addons_gelu(x, approximate)\n\n\[email protected](\"Addons>Gelu\")\ndef _gelu_grad(op, grad):\n return _activation_so.ops.addons_gelu_grad(\n grad, op.inputs[0], op.get_attr(\"approximate\")\n )\n\n\ndef _gelu_py(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n x = tf.convert_to_tensor(x)\n if approximate:\n pi = tf.cast(math.pi, x.dtype)\n coeff = tf.cast(0.044715, x.dtype)\n return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))\n else:\n return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))\n", "path": "tensorflow_addons/activations/gelu.py"}]}
1,870
364
gh_patches_debug_13722
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-935
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support for pymemcache 2.x and 3.x The `pymemcache` integration is currently locked to version 1.x. Current version is ~=3.5. https://github.com/open-telemetry/opentelemetry-python-contrib/blob/0b9e96dae0bfcafa1e0b28dc695e3697fbcf2864/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py#L16 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 _instruments = ("pymemcache ~= 1.3",) 17 ``` Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES. 16 # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. 
17 18 libraries = { 19 "aiohttp": { 20 "library": "aiohttp ~= 3.0", 21 "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.29b0", 22 }, 23 "aiopg": { 24 "library": "aiopg >= 0.13.0, < 1.3.0", 25 "instrumentation": "opentelemetry-instrumentation-aiopg==0.29b0", 26 }, 27 "asgiref": { 28 "library": "asgiref ~= 3.0", 29 "instrumentation": "opentelemetry-instrumentation-asgi==0.29b0", 30 }, 31 "asyncpg": { 32 "library": "asyncpg >= 0.12.0", 33 "instrumentation": "opentelemetry-instrumentation-asyncpg==0.29b0", 34 }, 35 "boto": { 36 "library": "boto~=2.0", 37 "instrumentation": "opentelemetry-instrumentation-boto==0.29b0", 38 }, 39 "botocore": { 40 "library": "botocore ~= 1.0", 41 "instrumentation": "opentelemetry-instrumentation-botocore==0.29b0", 42 }, 43 "celery": { 44 "library": "celery >= 4.0, < 6.0", 45 "instrumentation": "opentelemetry-instrumentation-celery==0.29b0", 46 }, 47 "django": { 48 "library": "django >= 1.10", 49 "instrumentation": "opentelemetry-instrumentation-django==0.29b0", 50 }, 51 "elasticsearch": { 52 "library": "elasticsearch >= 2.0", 53 "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.29b0", 54 }, 55 "falcon": { 56 "library": "falcon >= 2.0.0, < 4.0.0", 57 "instrumentation": "opentelemetry-instrumentation-falcon==0.29b0", 58 }, 59 "fastapi": { 60 "library": "fastapi ~= 0.58", 61 "instrumentation": "opentelemetry-instrumentation-fastapi==0.29b0", 62 }, 63 "flask": { 64 "library": "flask >= 1.0, < 3.0", 65 "instrumentation": "opentelemetry-instrumentation-flask==0.29b0", 66 }, 67 "grpcio": { 68 "library": "grpcio ~= 1.27", 69 "instrumentation": "opentelemetry-instrumentation-grpc==0.29b0", 70 }, 71 "httpx": { 72 "library": "httpx >= 0.18.0", 73 "instrumentation": "opentelemetry-instrumentation-httpx==0.29b0", 74 }, 75 "jinja2": { 76 "library": "jinja2 >= 2.7, < 4.0", 77 "instrumentation": "opentelemetry-instrumentation-jinja2==0.29b0", 78 }, 79 "kafka-python": { 80 "library": "kafka-python >= 2.0", 81 "instrumentation": "opentelemetry-instrumentation-kafka-python==0.29b0", 82 }, 83 "mysql-connector-python": { 84 "library": "mysql-connector-python ~= 8.0", 85 "instrumentation": "opentelemetry-instrumentation-mysql==0.29b0", 86 }, 87 "pika": { 88 "library": "pika >= 0.12.0", 89 "instrumentation": "opentelemetry-instrumentation-pika==0.29b0", 90 }, 91 "psycopg2": { 92 "library": "psycopg2 >= 2.7.3.1", 93 "instrumentation": "opentelemetry-instrumentation-psycopg2==0.29b0", 94 }, 95 "pymemcache": { 96 "library": "pymemcache ~= 1.3", 97 "instrumentation": "opentelemetry-instrumentation-pymemcache==0.29b0", 98 }, 99 "pymongo": { 100 "library": "pymongo >= 3.1, < 5.0", 101 "instrumentation": "opentelemetry-instrumentation-pymongo==0.29b0", 102 }, 103 "PyMySQL": { 104 "library": "PyMySQL < 2", 105 "instrumentation": "opentelemetry-instrumentation-pymysql==0.29b0", 106 }, 107 "pyramid": { 108 "library": "pyramid >= 1.7", 109 "instrumentation": "opentelemetry-instrumentation-pyramid==0.29b0", 110 }, 111 "redis": { 112 "library": "redis >= 2.6", 113 "instrumentation": "opentelemetry-instrumentation-redis==0.29b0", 114 }, 115 "requests": { 116 "library": "requests ~= 2.0", 117 "instrumentation": "opentelemetry-instrumentation-requests==0.29b0", 118 }, 119 "scikit-learn": { 120 "library": "scikit-learn ~= 0.24.0", 121 "instrumentation": "opentelemetry-instrumentation-sklearn==0.29b0", 122 }, 123 "sqlalchemy": { 124 "library": "sqlalchemy", 125 "instrumentation": "opentelemetry-instrumentation-sqlalchemy==0.29b0", 126 }, 127 "starlette": { 128 
"library": "starlette ~= 0.13.0", 129 "instrumentation": "opentelemetry-instrumentation-starlette==0.29b0", 130 }, 131 "tornado": { 132 "library": "tornado >= 5.1.1", 133 "instrumentation": "opentelemetry-instrumentation-tornado==0.29b0", 134 }, 135 "urllib3": { 136 "library": "urllib3 >= 1.0.0, < 2.0.0", 137 "instrumentation": "opentelemetry-instrumentation-urllib3==0.29b0", 138 }, 139 } 140 default_instrumentations = [ 141 "opentelemetry-instrumentation-aws-lambda==0.29b0", 142 "opentelemetry-instrumentation-dbapi==0.29b0", 143 "opentelemetry-instrumentation-logging==0.29b0", 144 "opentelemetry-instrumentation-sqlite3==0.29b0", 145 "opentelemetry-instrumentation-urllib==0.29b0", 146 "opentelemetry-instrumentation-wsgi==0.29b0", 147 ] 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py b/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py --- a/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py +++ b/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py @@ -13,4 +13,4 @@ # limitations under the License. -_instruments = ("pymemcache ~= 1.3",) +_instruments = ("pymemcache >= 1.3.5, < 4",) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -93,7 +93,7 @@ "instrumentation": "opentelemetry-instrumentation-psycopg2==0.29b0", }, "pymemcache": { - "library": "pymemcache ~= 1.3", + "library": "pymemcache >= 1.3.5, < 4", "instrumentation": "opentelemetry-instrumentation-pymemcache==0.29b0", }, "pymongo": {
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py b/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py\n--- a/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py\n+++ b/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py\n@@ -13,4 +13,4 @@\n # limitations under the License.\n \n \n-_instruments = (\"pymemcache ~= 1.3\",)\n+_instruments = (\"pymemcache >= 1.3.5, < 4\",)\ndiff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n@@ -93,7 +93,7 @@\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.29b0\",\n },\n \"pymemcache\": {\n- \"library\": \"pymemcache ~= 1.3\",\n+ \"library\": \"pymemcache >= 1.3.5, < 4\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.29b0\",\n },\n \"pymongo\": {\n", "issue": "Support for pymemcache 2.x and 3.x\nThe `pymemcache` integration is currently locked to version 1.x. Current version is ~=3.5.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/0b9e96dae0bfcafa1e0b28dc695e3697fbcf2864/instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py#L16\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n_instruments = (\"pymemcache ~= 1.3\",)\n", "path": "instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. 
THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.29b0\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 1.3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.29b0\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.29b0\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.29b0\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.29b0\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.29b0\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.29b0\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.29b0\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.29b0\",\n },\n \"falcon\": {\n \"library\": \"falcon >= 2.0.0, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.29b0\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.29b0\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.29b0\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.29b0\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.29b0\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.29b0\",\n },\n \"kafka-python\": {\n \"library\": \"kafka-python >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-kafka-python==0.29b0\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.29b0\",\n },\n \"pika\": {\n \"library\": \"pika >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.29b0\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.29b0\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache ~= 1.3\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.29b0\",\n },\n \"pymongo\": {\n \"library\": \"pymongo >= 3.1, < 5.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.29b0\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.29b0\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.29b0\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.29b0\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.29b0\",\n },\n \"scikit-learn\": {\n \"library\": 
\"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.29b0\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.29b0\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.29b0\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 5.1.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.29b0\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.29b0\",\n },\n}\ndefault_instrumentations = [\n \"opentelemetry-instrumentation-aws-lambda==0.29b0\",\n \"opentelemetry-instrumentation-dbapi==0.29b0\",\n \"opentelemetry-instrumentation-logging==0.29b0\",\n \"opentelemetry-instrumentation-sqlite3==0.29b0\",\n \"opentelemetry-instrumentation-urllib==0.29b0\",\n \"opentelemetry-instrumentation-wsgi==0.29b0\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n_instruments = (\"pymemcache >= 1.3.5, < 4\",)\n", "path": "instrumentation/opentelemetry-instrumentation-pymemcache/src/opentelemetry/instrumentation/pymemcache/package.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. 
THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.29b0\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 1.3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.29b0\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.29b0\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.29b0\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.29b0\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.29b0\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.29b0\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.29b0\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.29b0\",\n },\n \"falcon\": {\n \"library\": \"falcon >= 2.0.0, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.29b0\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.29b0\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.29b0\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.29b0\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.29b0\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.29b0\",\n },\n \"kafka-python\": {\n \"library\": \"kafka-python >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-kafka-python==0.29b0\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.29b0\",\n },\n \"pika\": {\n \"library\": \"pika >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.29b0\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.29b0\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache >= 1.3.5, < 4\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.29b0\",\n },\n \"pymongo\": {\n \"library\": \"pymongo >= 3.1, < 5.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.29b0\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.29b0\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.29b0\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.29b0\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.29b0\",\n },\n \"scikit-learn\": {\n 
\"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.29b0\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.29b0\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.29b0\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 5.1.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.29b0\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.29b0\",\n },\n}\ndefault_instrumentations = [\n \"opentelemetry-instrumentation-aws-lambda==0.29b0\",\n \"opentelemetry-instrumentation-dbapi==0.29b0\",\n \"opentelemetry-instrumentation-logging==0.29b0\",\n \"opentelemetry-instrumentation-sqlite3==0.29b0\",\n \"opentelemetry-instrumentation-urllib==0.29b0\",\n \"opentelemetry-instrumentation-wsgi==0.29b0\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}]}
num_tokens: 2,625
num_tokens_diff: 365
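The golden diff in the pymemcache row above replaces the compatible-release pin `~= 1.3` (which caps pymemcache at the 1.x series) with an explicit range. A minimal sketch, assuming the third-party `packaging` library is available (it is not part of the instrumentation package itself), shows which versions each specifier admits:

```python
# Sketch: compare the old and new pymemcache specifiers from the golden diff.
# Assumes the third-party "packaging" library is installed (pip install packaging);
# it is not part of the instrumentation package itself.
from packaging.specifiers import SpecifierSet

old = SpecifierSet("~= 1.3")         # compatible release: >= 1.3, == 1.*
new = SpecifierSet(">= 1.3.5, < 4")  # explicit range from the patch

for version in ("1.3.5", "2.2.4", "3.5.0", "4.0.0"):
    print(version, old.contains(version), new.contains(version))

# 1.3.5 True True
# 2.2.4 False True   <- pymemcache 2.x, rejected by the old pin
# 3.5.0 False True   <- pymemcache 3.x, the series the issue asks for
# 4.0.0 False False  <- still excluded, guarding against a future major bump
```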
problem_id: gh_patches_debug_7541
source: rasdani/github-patches
task_type: git_diff
in_source_id: twisted__twisted-12106
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 24.1.0 to unbreak users who use the latest PyPy #12084 is breaking CI for Tahoe-LAFS, so probably is breaking real-world usage for someone somewhere too. So it'd be good to have a release sooner rather than later. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/twisted/_version.py` Content: ``` 1 """ 2 Provides Twisted version information. 3 """ 4 5 # This file is auto-generated! Do not edit! 6 # Use `python -m incremental.update Twisted` to change this file. 7 8 from incremental import Version 9 10 __version__ = Version("Twisted", 23, 10, 0, post=0) 11 __all__ = ["__version__"] 12 ``` Path: `src/twisted/copyright.py` Content: ``` 1 # Copyright (c) Twisted Matrix Laboratories. 2 # See LICENSE for details. 3 4 """ 5 Copyright information for Twisted. 6 """ 7 8 9 __all__ = ["copyright", "disclaimer", "longversion", "version"] 10 11 from twisted import __version__ as version, version as _longversion 12 13 longversion = str(_longversion) 14 15 copyright = """\ 16 Copyright (c) 2001-2023 Twisted Matrix Laboratories. 17 See LICENSE for details.""" 18 19 disclaimer = """ 20 Twisted, the Framework of Your Internet 21 {} 22 23 Permission is hereby granted, free of charge, to any person obtaining 24 a copy of this software and associated documentation files (the 25 "Software"), to deal in the Software without restriction, including 26 without limitation the rights to use, copy, modify, merge, publish, 27 distribute, sublicense, and/or sell copies of the Software, and to 28 permit persons to whom the Software is furnished to do so, subject to 29 the following conditions: 30 31 The above copyright notice and this permission notice shall be 32 included in all copies or substantial portions of the Software. 33 34 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 35 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 36 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 37 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 38 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 39 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 40 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 41 42 """.format( 43 copyright, 44 ) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/twisted/_version.py b/src/twisted/_version.py --- a/src/twisted/_version.py +++ b/src/twisted/_version.py @@ -7,5 +7,5 @@ from incremental import Version -__version__ = Version("Twisted", 23, 10, 0, post=0) +__version__ = Version("Twisted", 24, 3, 0, post=0) __all__ = ["__version__"] diff --git a/src/twisted/copyright.py b/src/twisted/copyright.py --- a/src/twisted/copyright.py +++ b/src/twisted/copyright.py @@ -13,7 +13,7 @@ longversion = str(_longversion) copyright = """\ -Copyright (c) 2001-2023 Twisted Matrix Laboratories. +Copyright (c) 2001-2024 Twisted Matrix Laboratories. See LICENSE for details.""" disclaimer = """
{"golden_diff": "diff --git a/src/twisted/_version.py b/src/twisted/_version.py\n--- a/src/twisted/_version.py\n+++ b/src/twisted/_version.py\n@@ -7,5 +7,5 @@\n \n from incremental import Version\n \n-__version__ = Version(\"Twisted\", 23, 10, 0, post=0)\n+__version__ = Version(\"Twisted\", 24, 3, 0, post=0)\n __all__ = [\"__version__\"]\ndiff --git a/src/twisted/copyright.py b/src/twisted/copyright.py\n--- a/src/twisted/copyright.py\n+++ b/src/twisted/copyright.py\n@@ -13,7 +13,7 @@\n longversion = str(_longversion)\n \n copyright = \"\"\"\\\n-Copyright (c) 2001-2023 Twisted Matrix Laboratories.\n+Copyright (c) 2001-2024 Twisted Matrix Laboratories.\n See LICENSE for details.\"\"\"\n \n disclaimer = \"\"\"\n", "issue": "Release 24.1.0 to unbreak users who use the latest PyPy\n#12084 is breaking CI for Tahoe-LAFS, so probably is breaking real-world usage for someone somewhere too. So it'd be good to have a release sooner rather than later.\n", "before_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 23, 10, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}, {"content": "# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nCopyright information for Twisted.\n\"\"\"\n\n\n__all__ = [\"copyright\", \"disclaimer\", \"longversion\", \"version\"]\n\nfrom twisted import __version__ as version, version as _longversion\n\nlongversion = str(_longversion)\n\ncopyright = \"\"\"\\\nCopyright (c) 2001-2023 Twisted Matrix Laboratories.\nSee LICENSE for details.\"\"\"\n\ndisclaimer = \"\"\"\nTwisted, the Framework of Your Internet\n{}\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\".format(\n copyright,\n)\n", "path": "src/twisted/copyright.py"}], "after_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! 
Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 24, 3, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}, {"content": "# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nCopyright information for Twisted.\n\"\"\"\n\n\n__all__ = [\"copyright\", \"disclaimer\", \"longversion\", \"version\"]\n\nfrom twisted import __version__ as version, version as _longversion\n\nlongversion = str(_longversion)\n\ncopyright = \"\"\"\\\nCopyright (c) 2001-2024 Twisted Matrix Laboratories.\nSee LICENSE for details.\"\"\"\n\ndisclaimer = \"\"\"\nTwisted, the Framework of Your Internet\n{}\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\".format(\n copyright,\n)\n", "path": "src/twisted/copyright.py"}]}
num_tokens: 838
num_tokens_diff: 228
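The Twisted row above bumps both the incremental version tuple and the copyright year, and the two have to move together. A minimal sketch of a consistency check follows; the regex and the idea of wiring it into release tooling are assumptions for illustration, not part of Twisted's actual release process:

```python
# Sketch: a hypothetical consistency check between the release year and the
# copyright line. The regex and the idea of running this before tagging a
# release are assumptions for illustration only.
import re

COPYRIGHT_RE = re.compile(r"Copyright \(c\) 2001-(\d{4}) Twisted Matrix Laboratories")

def copyright_matches_release(copyright_text: str, release_year: int) -> bool:
    match = COPYRIGHT_RE.search(copyright_text)
    return match is not None and int(match.group(1)) == release_year

sample = "Copyright (c) 2001-2024 Twisted Matrix Laboratories."
print(copyright_matches_release(sample, 2024))  # True: 24.3.0 and 2024 agree
```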
problem_id: gh_patches_debug_6887
source: rasdani/github-patches
task_type: git_diff
in_source_id: sherlock-project__sherlock-911
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [site_list.py] change numbering to reduce commit size letting the markdown renderer do the counting lets us reduce commit size and avoide possible merge conflicts. --- ``` 1. 1. 1. ``` renders to: 1. 1. 1. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `site_list.py` Content: ``` 1 """Sherlock: Supported Site Listing 2 This module generates the listing of supported sites 3 which can be found in sites.md 4 It also organizes all the sites in alphanumeric order 5 """ 6 import json 7 8 pool = list() 9 10 with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file: 11 data = json.load(data_file) 12 13 with open("sites.md", "w") as site_file: 14 data_length = len(data) 15 site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\n') 16 17 for social_network in data: 18 url_main = data.get(social_network).get("urlMain") 19 pool.append((social_network, url_main)) 20 21 index = 1 22 for social_network, url_main in pool: 23 site_file.write(f'{index}. [{social_network}]({url_main})\n') 24 index = index + 1 25 26 27 sorted_json_data = json.dumps(data, indent=2, sort_keys=True) 28 29 with open("sherlock/resources/data.json", "w") as data_file: 30 data_file.write(sorted_json_data) 31 32 print("Finished updating supported site listing!") 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/site_list.py b/site_list.py --- a/site_list.py +++ b/site_list.py @@ -18,11 +18,8 @@ url_main = data.get(social_network).get("urlMain") pool.append((social_network, url_main)) - index = 1 for social_network, url_main in pool: - site_file.write(f'{index}. [{social_network}]({url_main})\n') - index = index + 1 - + site_file.write(f'1. [{social_network}]({url_main})\n') sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
{"golden_diff": "diff --git a/site_list.py b/site_list.py\n--- a/site_list.py\n+++ b/site_list.py\n@@ -18,11 +18,8 @@\n url_main = data.get(social_network).get(\"urlMain\")\n pool.append((social_network, url_main))\n \n- index = 1\n for social_network, url_main in pool:\n- site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n- index = index + 1\n-\n+ site_file.write(f'1. [{social_network}]({url_main})\\n')\n \n sorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n", "issue": "[site_list.py] change numbering to reduce commit size\nletting the markdown renderer do the counting lets us reduce commit size and avoide possible merge conflicts.\r\n\r\n---\r\n\r\n```\r\n1.\r\n1.\r\n1.\r\n```\r\nrenders to:\r\n\r\n1.\r\n1.\r\n1.\n", "before_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\nThis module generates the listing of supported sites\nwhich can be found in sites.md\nIt also organizes all the sites in alphanumeric order\n\"\"\"\nimport json\n\npool = list()\n\nwith open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n data_length = len(data)\n site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\\n')\n\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n pool.append((social_network, url_main))\n\n index = 1\n for social_network, url_main in pool:\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\n\nsorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n\nwith open(\"sherlock/resources/data.json\", \"w\") as data_file:\n data_file.write(sorted_json_data)\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}], "after_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\nThis module generates the listing of supported sites\nwhich can be found in sites.md\nIt also organizes all the sites in alphanumeric order\n\"\"\"\nimport json\n\npool = list()\n\nwith open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n data_length = len(data)\n site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\\n')\n\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n pool.append((social_network, url_main))\n\n for social_network, url_main in pool:\n site_file.write(f'1. [{social_network}]({url_main})\\n')\n\nsorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n\nwith open(\"sherlock/resources/data.json\", \"w\") as data_file:\n data_file.write(sorted_json_data)\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}]}
num_tokens: 616
num_tokens_diff: 147
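The sherlock patch above leans on Markdown's ordered-list numbering: when every item is written as `1.`, the renderer counts for you, so adding a site no longer rewrites every subsequent line of sites.md. A small sketch contrasting the two generation strategies, with placeholder site names rather than entries from the real data.json:

```python
# Sketch: explicit counters versus constant "1." prefixes. Site names and
# URLs are placeholders, not entries from the real data.json.
sites = [("ExampleA", "https://a.example"), ("ExampleB", "https://b.example")]

# Old approach: every later line changes whenever a site is inserted.
with_counters = [f"{i}. [{name}]({url})" for i, (name, url) in enumerate(sites, 1)]

# Patched approach: existing lines never change; Markdown renders 1., 2., ...
constant_ones = [f"1. [{name}]({url})" for name, url in sites]

print("\n".join(with_counters))
print("\n".join(constant_ones))
```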
problem_id: gh_patches_debug_10290
source: rasdani/github-patches
task_type: git_diff
in_source_id: goauthentik__authentik-8139
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 2023.10.6 - "Please select a username" after Azure AD login **Describe your question/** Is it now a expected behavior in 2023.10.6 version to ask every user for username input after logging in with azure ad? ![image](https://github.com/goauthentik/authentik/assets/7706996/7c35f530-62a2-4afd-92aa-c290ad221c5f) In previous versions it was simply authenticating without any prompt, using email address from Azure AD as username. Now it expects user to input username (and it leads to duplicated accounts, because users with mail as username already exist), and if you enter already existing mail as username it shows error: ![image](https://github.com/goauthentik/authentik/assets/7706996/a8aaaf90-b309-425c-92a1-174739d01fc6) I think it can be related to this fix: https://github.com/goauthentik/authentik/pull/7970 Is it possible somehow to set this username automatically, or revert back to using email address so old user accounts will work again? **Version and Deployment (please complete the following information):** - authentik version: 2023.10.6 - Deployment: helm --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `authentik/sources/oauth/types/azure_ad.py` Content: ``` 1 """AzureAD OAuth2 Views""" 2 from typing import Any 3 4 from structlog.stdlib import get_logger 5 6 from authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient 7 from authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback 8 from authentik.sources.oauth.types.registry import SourceType, registry 9 from authentik.sources.oauth.views.redirect import OAuthRedirect 10 11 LOGGER = get_logger() 12 13 14 class AzureADOAuthRedirect(OAuthRedirect): 15 """Azure AD OAuth2 Redirect""" 16 17 def get_additional_parameters(self, source): # pragma: no cover 18 return { 19 "scope": ["openid", "https://graph.microsoft.com/User.Read"], 20 } 21 22 23 class AzureADOAuthCallback(OpenIDConnectOAuth2Callback): 24 """AzureAD OAuth2 Callback""" 25 26 client_class = UserprofileHeaderAuthClient 27 28 def get_user_enroll_context( 29 self, 30 info: dict[str, Any], 31 ) -> dict[str, Any]: 32 mail = info.get("mail", None) or info.get("otherMails", [None])[0] 33 return { 34 "username": info.get("userPrincipalName"), 35 "email": mail, 36 "name": info.get("displayName"), 37 } 38 39 40 @registry.register() 41 class AzureADType(SourceType): 42 """Azure AD Type definition""" 43 44 callback_view = AzureADOAuthCallback 45 redirect_view = AzureADOAuthRedirect 46 verbose_name = "Azure AD" 47 name = "azuread" 48 49 urls_customizable = True 50 51 authorization_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize" 52 access_token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token" # nosec 53 profile_url = "https://login.microsoftonline.com/common/openid/userinfo" 54 oidc_well_known_url = ( 55 "https://login.microsoftonline.com/common/.well-known/openid-configuration" 56 ) 57 oidc_jwks_url = "https://login.microsoftonline.com/common/discovery/keys" 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/authentik/sources/oauth/types/azure_ad.py b/authentik/sources/oauth/types/azure_ad.py --- a/authentik/sources/oauth/types/azure_ad.py +++ b/authentik/sources/oauth/types/azure_ad.py @@ -50,7 +50,7 @@ authorization_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize" access_token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token" # nosec - profile_url = "https://login.microsoftonline.com/common/openid/userinfo" + profile_url = "https://graph.microsoft.com/v1.0/me" oidc_well_known_url = ( "https://login.microsoftonline.com/common/.well-known/openid-configuration" )
{"golden_diff": "diff --git a/authentik/sources/oauth/types/azure_ad.py b/authentik/sources/oauth/types/azure_ad.py\n--- a/authentik/sources/oauth/types/azure_ad.py\n+++ b/authentik/sources/oauth/types/azure_ad.py\n@@ -50,7 +50,7 @@\n \n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n- profile_url = \"https://login.microsoftonline.com/common/openid/userinfo\"\n+ profile_url = \"https://graph.microsoft.com/v1.0/me\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n", "issue": "2023.10.6 - \"Please select a username\" after Azure AD login\n**Describe your question/**\r\n\r\nIs it now a expected behavior in 2023.10.6 version to ask every user for username input after logging in with azure ad?\r\n![image](https://github.com/goauthentik/authentik/assets/7706996/7c35f530-62a2-4afd-92aa-c290ad221c5f)\r\n\r\nIn previous versions it was simply authenticating without any prompt, using email address from Azure AD as username.\r\n\r\nNow it expects user to input username (and it leads to duplicated accounts, because users with mail as username already exist), and if you enter already existing mail as username it shows error:\r\n![image](https://github.com/goauthentik/authentik/assets/7706996/a8aaaf90-b309-425c-92a1-174739d01fc6)\r\n\r\nI think it can be related to this fix:\r\nhttps://github.com/goauthentik/authentik/pull/7970\r\n\r\nIs it possible somehow to set this username automatically, or revert back to using email address so old user accounts will work again?\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.10.6\r\n- Deployment: helm\r\n\r\n\n", "before_files": [{"content": "\"\"\"AzureAD OAuth2 Views\"\"\"\nfrom typing import Any\n\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass AzureADOAuthRedirect(OAuthRedirect):\n \"\"\"Azure AD OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"https://graph.microsoft.com/User.Read\"],\n }\n\n\nclass AzureADOAuthCallback(OpenIDConnectOAuth2Callback):\n \"\"\"AzureAD OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n mail = info.get(\"mail\", None) or info.get(\"otherMails\", [None])[0]\n return {\n \"username\": info.get(\"userPrincipalName\"),\n \"email\": mail,\n \"name\": info.get(\"displayName\"),\n }\n\n\[email protected]()\nclass AzureADType(SourceType):\n \"\"\"Azure AD Type definition\"\"\"\n\n callback_view = AzureADOAuthCallback\n redirect_view = AzureADOAuthRedirect\n verbose_name = \"Azure AD\"\n name = \"azuread\"\n\n urls_customizable = True\n\n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n profile_url = \"https://login.microsoftonline.com/common/openid/userinfo\"\n oidc_well_known_url = (\n 
\"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n oidc_jwks_url = \"https://login.microsoftonline.com/common/discovery/keys\"\n", "path": "authentik/sources/oauth/types/azure_ad.py"}], "after_files": [{"content": "\"\"\"AzureAD OAuth2 Views\"\"\"\nfrom typing import Any\n\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass AzureADOAuthRedirect(OAuthRedirect):\n \"\"\"Azure AD OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"https://graph.microsoft.com/User.Read\"],\n }\n\n\nclass AzureADOAuthCallback(OpenIDConnectOAuth2Callback):\n \"\"\"AzureAD OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n mail = info.get(\"mail\", None) or info.get(\"otherMails\", [None])[0]\n return {\n \"username\": info.get(\"userPrincipalName\"),\n \"email\": mail,\n \"name\": info.get(\"displayName\"),\n }\n\n\[email protected]()\nclass AzureADType(SourceType):\n \"\"\"Azure AD Type definition\"\"\"\n\n callback_view = AzureADOAuthCallback\n redirect_view = AzureADOAuthRedirect\n verbose_name = \"Azure AD\"\n name = \"azuread\"\n\n urls_customizable = True\n\n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n profile_url = \"https://graph.microsoft.com/v1.0/me\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n oidc_jwks_url = \"https://login.microsoftonline.com/common/discovery/keys\"\n", "path": "authentik/sources/oauth/types/azure_ad.py"}]}
num_tokens: 1,132
num_tokens_diff: 175
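The authentik fix above repoints `profile_url` at Microsoft Graph's `/me` endpoint, whose response carries the `userPrincipalName`, `displayName`, and `mail` fields that `get_user_enroll_context()` reads. A hedged sketch of what that request looks like in isolation; the token value is a placeholder, and the third-party `requests` library is assumed to be installed:

```python
# Sketch: a standalone call to the endpoint the patch points profile_url at.
# The token value is a placeholder obtained elsewhere via OAuth2, and the
# third-party "requests" library is assumed to be installed.
import requests

ACCESS_TOKEN = "<azure-ad-access-token>"  # placeholder

resp = requests.get(
    "https://graph.microsoft.com/v1.0/me",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
info = resp.json()
# These are the keys get_user_enroll_context() reads:
print(info.get("userPrincipalName"), info.get("displayName"), info.get("mail"))
```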
problem_id: gh_patches_debug_9688
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__google-cloud-python-4170
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spanner: IndexError: pop from empty list when merge_array ``` return merger(lhs, rhs, type_) File ".../google/cloud/spanner_v1/streamed.py", line 259, in _merge_array first = rhs.pop(0) IndexError: pop from empty list ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `spanner/google/cloud/spanner_v1/streamed.py` Content: ``` 1 # Copyright 2016 Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Wrapper for streaming results.""" 16 17 from google.protobuf.struct_pb2 import ListValue 18 from google.protobuf.struct_pb2 import Value 19 from google.cloud import exceptions 20 from google.cloud.spanner_v1.proto import type_pb2 21 import six 22 23 # pylint: disable=ungrouped-imports 24 from google.cloud.spanner_v1._helpers import _parse_value_pb 25 # pylint: enable=ungrouped-imports 26 27 28 class StreamedResultSet(object): 29 """Process a sequence of partial result sets into a single set of row data. 30 31 :type response_iterator: 32 :param response_iterator: 33 Iterator yielding 34 :class:`google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet` 35 instances. 36 37 :type source: :class:`~google.cloud.spanner_v1.snapshot.Snapshot` 38 :param source: Snapshot from which the result set was fetched. 39 """ 40 def __init__(self, response_iterator, source=None): 41 self._response_iterator = response_iterator 42 self._rows = [] # Fully-processed rows 43 self._counter = 0 # Counter for processed responses 44 self._metadata = None # Until set from first PRS 45 self._stats = None # Until set from last PRS 46 self._current_row = [] # Accumulated values for incomplete row 47 self._pending_chunk = None # Incomplete value 48 self._source = source # Source snapshot 49 50 @property 51 def rows(self): 52 """Fully-processed rows. 53 54 :rtype: list of row-data lists. 55 :returns: list of completed row data, from proceesd PRS responses. 56 """ 57 return self._rows 58 59 @property 60 def fields(self): 61 """Field descriptors for result set columns. 62 63 :rtype: list of :class:`~google.cloud.spanner_v1.proto.type_pb2.Field` 64 :returns: list of fields describing column names / types. 65 """ 66 return self._metadata.row_type.fields 67 68 @property 69 def metadata(self): 70 """Result set metadata 71 72 :rtype: :class:`~.result_set_pb2.ResultSetMetadata` 73 :returns: structure describing the results 74 """ 75 return self._metadata 76 77 @property 78 def stats(self): 79 """Result set statistics 80 81 :rtype: 82 :class:`~google.cloud.spanner_v1.proto.result_set_pb2.ResultSetStats` 83 :returns: structure describing status about the response 84 """ 85 return self._stats 86 87 def _merge_chunk(self, value): 88 """Merge pending chunk with next value. 
89 90 :type value: :class:`~google.protobuf.struct_pb2.Value` 91 :param value: continuation of chunked value from previous 92 partial result set. 93 94 :rtype: :class:`~google.protobuf.struct_pb2.Value` 95 :returns: the merged value 96 """ 97 current_column = len(self._current_row) 98 field = self.fields[current_column] 99 merged = _merge_by_type(self._pending_chunk, value, field.type) 100 self._pending_chunk = None 101 return merged 102 103 def _merge_values(self, values): 104 """Merge values into rows. 105 106 :type values: list of :class:`~google.protobuf.struct_pb2.Value` 107 :param values: non-chunked values from partial result set. 108 """ 109 width = len(self.fields) 110 for value in values: 111 index = len(self._current_row) 112 field = self.fields[index] 113 self._current_row.append(_parse_value_pb(value, field.type)) 114 if len(self._current_row) == width: 115 self._rows.append(self._current_row) 116 self._current_row = [] 117 118 def consume_next(self): 119 """Consume the next partial result set from the stream. 120 121 Parse the result set into new/existing rows in :attr:`_rows` 122 """ 123 response = six.next(self._response_iterator) 124 self._counter += 1 125 126 if self._metadata is None: # first response 127 metadata = self._metadata = response.metadata 128 129 source = self._source 130 if source is not None and source._transaction_id is None: 131 source._transaction_id = metadata.transaction.id 132 133 if response.HasField('stats'): # last response 134 self._stats = response.stats 135 136 values = list(response.values) 137 if self._pending_chunk is not None: 138 values[0] = self._merge_chunk(values[0]) 139 140 if response.chunked_value: 141 self._pending_chunk = values.pop() 142 143 self._merge_values(values) 144 145 def consume_all(self): 146 """Consume the streamed responses until there are no more.""" 147 while True: 148 try: 149 self.consume_next() 150 except StopIteration: 151 break 152 153 def __iter__(self): 154 iter_rows, self._rows[:] = self._rows[:], () 155 while True: 156 if not iter_rows: 157 self.consume_next() # raises StopIteration 158 iter_rows, self._rows[:] = self._rows[:], () 159 while iter_rows: 160 yield iter_rows.pop(0) 161 162 def one(self): 163 """Return exactly one result, or raise an exception. 164 165 :raises: :exc:`NotFound`: If there are no results. 166 :raises: :exc:`ValueError`: If there are multiple results. 167 :raises: :exc:`RuntimeError`: If consumption has already occurred, 168 in whole or in part. 169 """ 170 answer = self.one_or_none() 171 if answer is None: 172 raise exceptions.NotFound('No rows matched the given query.') 173 return answer 174 175 def one_or_none(self): 176 """Return exactly one result, or None if there are no results. 177 178 :raises: :exc:`ValueError`: If there are multiple results. 179 :raises: :exc:`RuntimeError`: If consumption has already occurred, 180 in whole or in part. 181 """ 182 # Sanity check: Has consumption of this query already started? 183 # If it has, then this is an exception. 184 if self._metadata is not None: 185 raise RuntimeError('Can not call `.one` or `.one_or_none` after ' 186 'stream consumption has already started.') 187 188 # Consume the first result of the stream. 189 # If there is no first result, then return None. 190 iterator = iter(self) 191 try: 192 answer = next(iterator) 193 except StopIteration: 194 return None 195 196 # Attempt to consume more. This should no-op; if we get additional 197 # rows, then this is an error case. 
198 try: 199 next(iterator) 200 raise ValueError('Expected one result; got more.') 201 except StopIteration: 202 return answer 203 204 205 class Unmergeable(ValueError): 206 """Unable to merge two values. 207 208 :type lhs: :class:`google.protobuf.struct_pb2.Value` 209 :param lhs: pending value to be merged 210 211 :type rhs: :class:`google.protobuf.struct_pb2.Value` 212 :param rhs: remaining value to be merged 213 214 :type type_: :class:`google.cloud.spanner_v1.proto.type_pb2.Type` 215 :param type_: field type of values being merged 216 """ 217 def __init__(self, lhs, rhs, type_): 218 message = "Cannot merge %s values: %s %s" % ( 219 type_pb2.TypeCode.Name(type_.code), lhs, rhs) 220 super(Unmergeable, self).__init__(message) 221 222 223 def _unmergeable(lhs, rhs, type_): 224 """Helper for '_merge_by_type'.""" 225 raise Unmergeable(lhs, rhs, type_) 226 227 228 def _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument 229 """Helper for '_merge_by_type'.""" 230 lhs_kind = lhs.WhichOneof('kind') 231 if lhs_kind == 'string_value': 232 return Value(string_value=lhs.string_value + rhs.string_value) 233 rhs_kind = rhs.WhichOneof('kind') 234 array_continuation = ( 235 lhs_kind == 'number_value' and 236 rhs_kind == 'string_value' and 237 rhs.string_value == '') 238 if array_continuation: 239 return lhs 240 raise Unmergeable(lhs, rhs, type_) 241 242 243 def _merge_string(lhs, rhs, type_): # pylint: disable=unused-argument 244 """Helper for '_merge_by_type'.""" 245 return Value(string_value=lhs.string_value + rhs.string_value) 246 247 248 _UNMERGEABLE_TYPES = (type_pb2.BOOL,) 249 250 251 def _merge_array(lhs, rhs, type_): 252 """Helper for '_merge_by_type'.""" 253 element_type = type_.array_element_type 254 if element_type.code in _UNMERGEABLE_TYPES: 255 # Individual values cannot be merged, just concatenate 256 lhs.list_value.values.extend(rhs.list_value.values) 257 return lhs 258 lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) 259 first = rhs.pop(0) 260 if first.HasField('null_value'): # can't merge 261 lhs.append(first) 262 else: 263 last = lhs.pop() 264 try: 265 merged = _merge_by_type(last, first, element_type) 266 except Unmergeable: 267 lhs.append(last) 268 lhs.append(first) 269 else: 270 lhs.append(merged) 271 return Value(list_value=ListValue(values=(lhs + rhs))) 272 273 274 def _merge_struct(lhs, rhs, type_): 275 """Helper for '_merge_by_type'.""" 276 fields = type_.struct_type.fields 277 lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) 278 candidate_type = fields[len(lhs) - 1].type 279 first = rhs.pop(0) 280 if (first.HasField('null_value') or 281 candidate_type.code in _UNMERGEABLE_TYPES): 282 lhs.append(first) 283 else: 284 last = lhs.pop() 285 lhs.append(_merge_by_type(last, first, candidate_type)) 286 return Value(list_value=ListValue(values=lhs + rhs)) 287 288 289 _MERGE_BY_TYPE = { 290 type_pb2.ARRAY: _merge_array, 291 type_pb2.BOOL: _unmergeable, 292 type_pb2.BYTES: _merge_string, 293 type_pb2.DATE: _merge_string, 294 type_pb2.FLOAT64: _merge_float64, 295 type_pb2.INT64: _merge_string, 296 type_pb2.STRING: _merge_string, 297 type_pb2.STRUCT: _merge_struct, 298 type_pb2.TIMESTAMP: _merge_string, 299 } 300 301 302 def _merge_by_type(lhs, rhs, type_): 303 """Helper for '_merge_chunk'.""" 304 merger = _MERGE_BY_TYPE[type_.code] 305 return merger(lhs, rhs, type_) 306 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/spanner/google/cloud/spanner_v1/streamed.py b/spanner/google/cloud/spanner_v1/streamed.py --- a/spanner/google/cloud/spanner_v1/streamed.py +++ b/spanner/google/cloud/spanner_v1/streamed.py @@ -256,6 +256,12 @@ lhs.list_value.values.extend(rhs.list_value.values) return lhs lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) + + # Sanity check: If either list is empty, short-circuit. + # This is effectively a no-op. + if not len(lhs) or not len(rhs): + return Value(list_value=ListValue(values=(lhs + rhs))) + first = rhs.pop(0) if first.HasField('null_value'): # can't merge lhs.append(first)
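The diff above adds a guard to `_merge_array` so an empty list on either side short-circuits instead of reaching `rhs.pop(0)`. A self-contained sketch of just that failure mode and the guard; plain Python lists stand in for the protobuf `ListValue` contents, and none of the real chunk-merging logic is reproduced:

```python
# Sketch: the crash and the guard, in miniature. Plain lists stand in for
# the values inside a protobuf ListValue; no chunk-merging logic is shown.
def merge_without_guard(lhs, rhs):
    first = rhs.pop(0)  # IndexError when a partial result set yields an empty list
    lhs.append(first)
    return lhs + rhs

def merge_with_guard(lhs, rhs):
    if not lhs or not rhs:  # the short-circuit the golden diff adds
        return lhs + rhs
    return merge_without_guard(lhs, rhs)

print(merge_with_guard(["a"], []))  # ['a'] -- no crash
try:
    merge_without_guard(["a"], [])
except IndexError as exc:
    print("unpatched:", exc)        # pop from empty list
```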
{"golden_diff": "diff --git a/spanner/google/cloud/spanner_v1/streamed.py b/spanner/google/cloud/spanner_v1/streamed.py\n--- a/spanner/google/cloud/spanner_v1/streamed.py\n+++ b/spanner/google/cloud/spanner_v1/streamed.py\n@@ -256,6 +256,12 @@\n lhs.list_value.values.extend(rhs.list_value.values)\n return lhs\n lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)\n+\n+ # Sanity check: If either list is empty, short-circuit.\n+ # This is effectively a no-op.\n+ if not len(lhs) or not len(rhs):\n+ return Value(list_value=ListValue(values=(lhs + rhs)))\n+\n first = rhs.pop(0)\n if first.HasField('null_value'): # can't merge\n lhs.append(first)\n", "issue": "Spanner: IndexError: pop from empty list when merge_array\n```\r\nreturn merger(lhs, rhs, type_)\r\n File \".../google/cloud/spanner_v1/streamed.py\", line 259, in _merge_array\r\n first = rhs.pop(0)\r\nIndexError: pop from empty list\r\n```\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Wrapper for streaming results.\"\"\"\n\nfrom google.protobuf.struct_pb2 import ListValue\nfrom google.protobuf.struct_pb2 import Value\nfrom google.cloud import exceptions\nfrom google.cloud.spanner_v1.proto import type_pb2\nimport six\n\n# pylint: disable=ungrouped-imports\nfrom google.cloud.spanner_v1._helpers import _parse_value_pb\n# pylint: enable=ungrouped-imports\n\n\nclass StreamedResultSet(object):\n \"\"\"Process a sequence of partial result sets into a single set of row data.\n\n :type response_iterator:\n :param response_iterator:\n Iterator yielding\n :class:`google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet`\n instances.\n\n :type source: :class:`~google.cloud.spanner_v1.snapshot.Snapshot`\n :param source: Snapshot from which the result set was fetched.\n \"\"\"\n def __init__(self, response_iterator, source=None):\n self._response_iterator = response_iterator\n self._rows = [] # Fully-processed rows\n self._counter = 0 # Counter for processed responses\n self._metadata = None # Until set from first PRS\n self._stats = None # Until set from last PRS\n self._current_row = [] # Accumulated values for incomplete row\n self._pending_chunk = None # Incomplete value\n self._source = source # Source snapshot\n\n @property\n def rows(self):\n \"\"\"Fully-processed rows.\n\n :rtype: list of row-data lists.\n :returns: list of completed row data, from proceesd PRS responses.\n \"\"\"\n return self._rows\n\n @property\n def fields(self):\n \"\"\"Field descriptors for result set columns.\n\n :rtype: list of :class:`~google.cloud.spanner_v1.proto.type_pb2.Field`\n :returns: list of fields describing column names / types.\n \"\"\"\n return self._metadata.row_type.fields\n\n @property\n def metadata(self):\n \"\"\"Result set metadata\n\n :rtype: :class:`~.result_set_pb2.ResultSetMetadata`\n :returns: structure describing the results\n \"\"\"\n return self._metadata\n\n @property\n def stats(self):\n \"\"\"Result set statistics\n\n 
:rtype:\n :class:`~google.cloud.spanner_v1.proto.result_set_pb2.ResultSetStats`\n :returns: structure describing status about the response\n \"\"\"\n return self._stats\n\n def _merge_chunk(self, value):\n \"\"\"Merge pending chunk with next value.\n\n :type value: :class:`~google.protobuf.struct_pb2.Value`\n :param value: continuation of chunked value from previous\n partial result set.\n\n :rtype: :class:`~google.protobuf.struct_pb2.Value`\n :returns: the merged value\n \"\"\"\n current_column = len(self._current_row)\n field = self.fields[current_column]\n merged = _merge_by_type(self._pending_chunk, value, field.type)\n self._pending_chunk = None\n return merged\n\n def _merge_values(self, values):\n \"\"\"Merge values into rows.\n\n :type values: list of :class:`~google.protobuf.struct_pb2.Value`\n :param values: non-chunked values from partial result set.\n \"\"\"\n width = len(self.fields)\n for value in values:\n index = len(self._current_row)\n field = self.fields[index]\n self._current_row.append(_parse_value_pb(value, field.type))\n if len(self._current_row) == width:\n self._rows.append(self._current_row)\n self._current_row = []\n\n def consume_next(self):\n \"\"\"Consume the next partial result set from the stream.\n\n Parse the result set into new/existing rows in :attr:`_rows`\n \"\"\"\n response = six.next(self._response_iterator)\n self._counter += 1\n\n if self._metadata is None: # first response\n metadata = self._metadata = response.metadata\n\n source = self._source\n if source is not None and source._transaction_id is None:\n source._transaction_id = metadata.transaction.id\n\n if response.HasField('stats'): # last response\n self._stats = response.stats\n\n values = list(response.values)\n if self._pending_chunk is not None:\n values[0] = self._merge_chunk(values[0])\n\n if response.chunked_value:\n self._pending_chunk = values.pop()\n\n self._merge_values(values)\n\n def consume_all(self):\n \"\"\"Consume the streamed responses until there are no more.\"\"\"\n while True:\n try:\n self.consume_next()\n except StopIteration:\n break\n\n def __iter__(self):\n iter_rows, self._rows[:] = self._rows[:], ()\n while True:\n if not iter_rows:\n self.consume_next() # raises StopIteration\n iter_rows, self._rows[:] = self._rows[:], ()\n while iter_rows:\n yield iter_rows.pop(0)\n\n def one(self):\n \"\"\"Return exactly one result, or raise an exception.\n\n :raises: :exc:`NotFound`: If there are no results.\n :raises: :exc:`ValueError`: If there are multiple results.\n :raises: :exc:`RuntimeError`: If consumption has already occurred,\n in whole or in part.\n \"\"\"\n answer = self.one_or_none()\n if answer is None:\n raise exceptions.NotFound('No rows matched the given query.')\n return answer\n\n def one_or_none(self):\n \"\"\"Return exactly one result, or None if there are no results.\n\n :raises: :exc:`ValueError`: If there are multiple results.\n :raises: :exc:`RuntimeError`: If consumption has already occurred,\n in whole or in part.\n \"\"\"\n # Sanity check: Has consumption of this query already started?\n # If it has, then this is an exception.\n if self._metadata is not None:\n raise RuntimeError('Can not call `.one` or `.one_or_none` after '\n 'stream consumption has already started.')\n\n # Consume the first result of the stream.\n # If there is no first result, then return None.\n iterator = iter(self)\n try:\n answer = next(iterator)\n except StopIteration:\n return None\n\n # Attempt to consume more. 
This should no-op; if we get additional\n # rows, then this is an error case.\n try:\n next(iterator)\n raise ValueError('Expected one result; got more.')\n except StopIteration:\n return answer\n\n\nclass Unmergeable(ValueError):\n \"\"\"Unable to merge two values.\n\n :type lhs: :class:`google.protobuf.struct_pb2.Value`\n :param lhs: pending value to be merged\n\n :type rhs: :class:`google.protobuf.struct_pb2.Value`\n :param rhs: remaining value to be merged\n\n :type type_: :class:`google.cloud.spanner_v1.proto.type_pb2.Type`\n :param type_: field type of values being merged\n \"\"\"\n def __init__(self, lhs, rhs, type_):\n message = \"Cannot merge %s values: %s %s\" % (\n type_pb2.TypeCode.Name(type_.code), lhs, rhs)\n super(Unmergeable, self).__init__(message)\n\n\ndef _unmergeable(lhs, rhs, type_):\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n raise Unmergeable(lhs, rhs, type_)\n\n\ndef _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n lhs_kind = lhs.WhichOneof('kind')\n if lhs_kind == 'string_value':\n return Value(string_value=lhs.string_value + rhs.string_value)\n rhs_kind = rhs.WhichOneof('kind')\n array_continuation = (\n lhs_kind == 'number_value' and\n rhs_kind == 'string_value' and\n rhs.string_value == '')\n if array_continuation:\n return lhs\n raise Unmergeable(lhs, rhs, type_)\n\n\ndef _merge_string(lhs, rhs, type_): # pylint: disable=unused-argument\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n return Value(string_value=lhs.string_value + rhs.string_value)\n\n\n_UNMERGEABLE_TYPES = (type_pb2.BOOL,)\n\n\ndef _merge_array(lhs, rhs, type_):\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n element_type = type_.array_element_type\n if element_type.code in _UNMERGEABLE_TYPES:\n # Individual values cannot be merged, just concatenate\n lhs.list_value.values.extend(rhs.list_value.values)\n return lhs\n lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)\n first = rhs.pop(0)\n if first.HasField('null_value'): # can't merge\n lhs.append(first)\n else:\n last = lhs.pop()\n try:\n merged = _merge_by_type(last, first, element_type)\n except Unmergeable:\n lhs.append(last)\n lhs.append(first)\n else:\n lhs.append(merged)\n return Value(list_value=ListValue(values=(lhs + rhs)))\n\n\ndef _merge_struct(lhs, rhs, type_):\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n fields = type_.struct_type.fields\n lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)\n candidate_type = fields[len(lhs) - 1].type\n first = rhs.pop(0)\n if (first.HasField('null_value') or\n candidate_type.code in _UNMERGEABLE_TYPES):\n lhs.append(first)\n else:\n last = lhs.pop()\n lhs.append(_merge_by_type(last, first, candidate_type))\n return Value(list_value=ListValue(values=lhs + rhs))\n\n\n_MERGE_BY_TYPE = {\n type_pb2.ARRAY: _merge_array,\n type_pb2.BOOL: _unmergeable,\n type_pb2.BYTES: _merge_string,\n type_pb2.DATE: _merge_string,\n type_pb2.FLOAT64: _merge_float64,\n type_pb2.INT64: _merge_string,\n type_pb2.STRING: _merge_string,\n type_pb2.STRUCT: _merge_struct,\n type_pb2.TIMESTAMP: _merge_string,\n}\n\n\ndef _merge_by_type(lhs, rhs, type_):\n \"\"\"Helper for '_merge_chunk'.\"\"\"\n merger = _MERGE_BY_TYPE[type_.code]\n return merger(lhs, rhs, type_)\n", "path": "spanner/google/cloud/spanner_v1/streamed.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Wrapper for streaming results.\"\"\"\n\nfrom google.protobuf.struct_pb2 import ListValue\nfrom google.protobuf.struct_pb2 import Value\nfrom google.cloud import exceptions\nfrom google.cloud.spanner_v1.proto import type_pb2\nimport six\n\n# pylint: disable=ungrouped-imports\nfrom google.cloud.spanner_v1._helpers import _parse_value_pb\n# pylint: enable=ungrouped-imports\n\n\nclass StreamedResultSet(object):\n \"\"\"Process a sequence of partial result sets into a single set of row data.\n\n :type response_iterator:\n :param response_iterator:\n Iterator yielding\n :class:`google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet`\n instances.\n\n :type source: :class:`~google.cloud.spanner_v1.snapshot.Snapshot`\n :param source: Snapshot from which the result set was fetched.\n \"\"\"\n def __init__(self, response_iterator, source=None):\n self._response_iterator = response_iterator\n self._rows = [] # Fully-processed rows\n self._counter = 0 # Counter for processed responses\n self._metadata = None # Until set from first PRS\n self._stats = None # Until set from last PRS\n self._current_row = [] # Accumulated values for incomplete row\n self._pending_chunk = None # Incomplete value\n self._source = source # Source snapshot\n\n @property\n def rows(self):\n \"\"\"Fully-processed rows.\n\n :rtype: list of row-data lists.\n :returns: list of completed row data, from proceesd PRS responses.\n \"\"\"\n return self._rows\n\n @property\n def fields(self):\n \"\"\"Field descriptors for result set columns.\n\n :rtype: list of :class:`~google.cloud.spanner_v1.proto.type_pb2.Field`\n :returns: list of fields describing column names / types.\n \"\"\"\n return self._metadata.row_type.fields\n\n @property\n def metadata(self):\n \"\"\"Result set metadata\n\n :rtype: :class:`~.result_set_pb2.ResultSetMetadata`\n :returns: structure describing the results\n \"\"\"\n return self._metadata\n\n @property\n def stats(self):\n \"\"\"Result set statistics\n\n :rtype:\n :class:`~google.cloud.spanner_v1.proto.result_set_pb2.ResultSetStats`\n :returns: structure describing status about the response\n \"\"\"\n return self._stats\n\n def _merge_chunk(self, value):\n \"\"\"Merge pending chunk with next value.\n\n :type value: :class:`~google.protobuf.struct_pb2.Value`\n :param value: continuation of chunked value from previous\n partial result set.\n\n :rtype: :class:`~google.protobuf.struct_pb2.Value`\n :returns: the merged value\n \"\"\"\n current_column = len(self._current_row)\n field = self.fields[current_column]\n merged = _merge_by_type(self._pending_chunk, value, field.type)\n self._pending_chunk = None\n return merged\n\n def _merge_values(self, values):\n \"\"\"Merge values into rows.\n\n :type values: list of :class:`~google.protobuf.struct_pb2.Value`\n :param values: non-chunked values from partial result set.\n \"\"\"\n width = len(self.fields)\n for value in values:\n index = len(self._current_row)\n field = 
self.fields[index]\n self._current_row.append(_parse_value_pb(value, field.type))\n if len(self._current_row) == width:\n self._rows.append(self._current_row)\n self._current_row = []\n\n def consume_next(self):\n \"\"\"Consume the next partial result set from the stream.\n\n Parse the result set into new/existing rows in :attr:`_rows`\n \"\"\"\n response = six.next(self._response_iterator)\n self._counter += 1\n\n if self._metadata is None: # first response\n metadata = self._metadata = response.metadata\n\n source = self._source\n if source is not None and source._transaction_id is None:\n source._transaction_id = metadata.transaction.id\n\n if response.HasField('stats'): # last response\n self._stats = response.stats\n\n values = list(response.values)\n if self._pending_chunk is not None:\n values[0] = self._merge_chunk(values[0])\n\n if response.chunked_value:\n self._pending_chunk = values.pop()\n\n self._merge_values(values)\n\n def consume_all(self):\n \"\"\"Consume the streamed responses until there are no more.\"\"\"\n while True:\n try:\n self.consume_next()\n except StopIteration:\n break\n\n def __iter__(self):\n iter_rows, self._rows[:] = self._rows[:], ()\n while True:\n if not iter_rows:\n self.consume_next() # raises StopIteration\n iter_rows, self._rows[:] = self._rows[:], ()\n while iter_rows:\n yield iter_rows.pop(0)\n\n def one(self):\n \"\"\"Return exactly one result, or raise an exception.\n\n :raises: :exc:`NotFound`: If there are no results.\n :raises: :exc:`ValueError`: If there are multiple results.\n :raises: :exc:`RuntimeError`: If consumption has already occurred,\n in whole or in part.\n \"\"\"\n answer = self.one_or_none()\n if answer is None:\n raise exceptions.NotFound('No rows matched the given query.')\n return answer\n\n def one_or_none(self):\n \"\"\"Return exactly one result, or None if there are no results.\n\n :raises: :exc:`ValueError`: If there are multiple results.\n :raises: :exc:`RuntimeError`: If consumption has already occurred,\n in whole or in part.\n \"\"\"\n # Sanity check: Has consumption of this query already started?\n # If it has, then this is an exception.\n if self._metadata is not None:\n raise RuntimeError('Can not call `.one` or `.one_or_none` after '\n 'stream consumption has already started.')\n\n # Consume the first result of the stream.\n # If there is no first result, then return None.\n iterator = iter(self)\n try:\n answer = next(iterator)\n except StopIteration:\n return None\n\n # Attempt to consume more. 
This should no-op; if we get additional\n # rows, then this is an error case.\n try:\n next(iterator)\n raise ValueError('Expected one result; got more.')\n except StopIteration:\n return answer\n\n\nclass Unmergeable(ValueError):\n \"\"\"Unable to merge two values.\n\n :type lhs: :class:`google.protobuf.struct_pb2.Value`\n :param lhs: pending value to be merged\n\n :type rhs: :class:`google.protobuf.struct_pb2.Value`\n :param rhs: remaining value to be merged\n\n :type type_: :class:`google.cloud.spanner_v1.proto.type_pb2.Type`\n :param type_: field type of values being merged\n \"\"\"\n def __init__(self, lhs, rhs, type_):\n message = \"Cannot merge %s values: %s %s\" % (\n type_pb2.TypeCode.Name(type_.code), lhs, rhs)\n super(Unmergeable, self).__init__(message)\n\n\ndef _unmergeable(lhs, rhs, type_):\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n raise Unmergeable(lhs, rhs, type_)\n\n\ndef _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n lhs_kind = lhs.WhichOneof('kind')\n if lhs_kind == 'string_value':\n return Value(string_value=lhs.string_value + rhs.string_value)\n rhs_kind = rhs.WhichOneof('kind')\n array_continuation = (\n lhs_kind == 'number_value' and\n rhs_kind == 'string_value' and\n rhs.string_value == '')\n if array_continuation:\n return lhs\n raise Unmergeable(lhs, rhs, type_)\n\n\ndef _merge_string(lhs, rhs, type_): # pylint: disable=unused-argument\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n return Value(string_value=lhs.string_value + rhs.string_value)\n\n\n_UNMERGEABLE_TYPES = (type_pb2.BOOL,)\n\n\ndef _merge_array(lhs, rhs, type_):\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n element_type = type_.array_element_type\n if element_type.code in _UNMERGEABLE_TYPES:\n # Individual values cannot be merged, just concatenate\n lhs.list_value.values.extend(rhs.list_value.values)\n return lhs\n lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)\n\n # Sanity check: If either list is empty, short-circuit.\n # This is effectively a no-op.\n if not len(lhs) or not len(rhs):\n return Value(list_value=ListValue(values=(lhs + rhs)))\n\n first = rhs.pop(0)\n if first.HasField('null_value'): # can't merge\n lhs.append(first)\n else:\n last = lhs.pop()\n try:\n merged = _merge_by_type(last, first, element_type)\n except Unmergeable:\n lhs.append(last)\n lhs.append(first)\n else:\n lhs.append(merged)\n return Value(list_value=ListValue(values=(lhs + rhs)))\n\n\ndef _merge_struct(lhs, rhs, type_):\n \"\"\"Helper for '_merge_by_type'.\"\"\"\n fields = type_.struct_type.fields\n lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)\n candidate_type = fields[len(lhs) - 1].type\n first = rhs.pop(0)\n if (first.HasField('null_value') or\n candidate_type.code in _UNMERGEABLE_TYPES):\n lhs.append(first)\n else:\n last = lhs.pop()\n lhs.append(_merge_by_type(last, first, candidate_type))\n return Value(list_value=ListValue(values=lhs + rhs))\n\n\n_MERGE_BY_TYPE = {\n type_pb2.ARRAY: _merge_array,\n type_pb2.BOOL: _unmergeable,\n type_pb2.BYTES: _merge_string,\n type_pb2.DATE: _merge_string,\n type_pb2.FLOAT64: _merge_float64,\n type_pb2.INT64: _merge_string,\n type_pb2.STRING: _merge_string,\n type_pb2.STRUCT: _merge_struct,\n type_pb2.TIMESTAMP: _merge_string,\n}\n\n\ndef _merge_by_type(lhs, rhs, type_):\n \"\"\"Helper for '_merge_chunk'.\"\"\"\n merger = _MERGE_BY_TYPE[type_.code]\n return merger(lhs, rhs, type_)\n", "path": "spanner/google/cloud/spanner_v1/streamed.py"}]}
3568
186
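The Spanner record above fixes an `IndexError` in `_merge_array` when a chunk boundary yields an empty value list. Below is a minimal, self-contained sketch of that short-circuit guard, using only the protobuf struct types; `merge_array_chunks` is an illustrative name, and the element-wise merge of the boundary pair is reduced to a plain append.

```python
# A minimal sketch of the empty-chunk guard from the golden diff above.
# Only the protobuf struct types are assumed; the real _merge_array also
# dispatches on the array's element type, which is elided here.
from google.protobuf.struct_pb2 import ListValue, Value


def merge_array_chunks(lhs, rhs):
    """Concatenate two ARRAY chunks, short-circuiting on empty lists."""
    lhs_vals = list(lhs.list_value.values)
    rhs_vals = list(rhs.list_value.values)
    # Without this guard, rhs_vals.pop(0) below raises IndexError whenever
    # the server sends an empty continuation chunk.
    if not lhs_vals or not rhs_vals:
        return Value(list_value=ListValue(values=lhs_vals + rhs_vals))
    first = rhs_vals.pop(0)  # element-wise merge of the boundary pair ...
    lhs_vals.append(first)   # ... reduced to a plain append in this sketch
    return Value(list_value=ListValue(values=lhs_vals + rhs_vals))


chunk = Value(list_value=ListValue(values=[Value(string_value="a")]))
empty = Value(list_value=ListValue(values=[]))
merged = merge_array_chunks(chunk, empty)  # no IndexError with the guard
assert [v.string_value for v in merged.list_value.values] == ["a"]
```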
gh_patches_debug_15565
rasdani/github-patches
git_diff
deepset-ai__haystack-7796
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [V2.2.0] ChatPromptBuilder is not export **Describe the bug** v2.2.0 => ChatPromptBuilder is not export **Error message** <img width="1102" alt="image" src="https://github.com/deepset-ai/haystack/assets/15232298/b9372767-42f5-464c-832f-cca38a00cf60"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `haystack/components/builders/__init__.py` Content: ``` 1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]> 2 # 3 # SPDX-License-Identifier: Apache-2.0 4 5 from haystack.components.builders.answer_builder import AnswerBuilder 6 from haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder 7 from haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder 8 from haystack.components.builders.prompt_builder import PromptBuilder 9 10 __all__ = ["AnswerBuilder", "PromptBuilder", "DynamicPromptBuilder", "DynamicChatPromptBuilder"] 11 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/haystack/components/builders/__init__.py b/haystack/components/builders/__init__.py --- a/haystack/components/builders/__init__.py +++ b/haystack/components/builders/__init__.py @@ -3,8 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 from haystack.components.builders.answer_builder import AnswerBuilder +from haystack.components.builders.chat_prompt_builder import ChatPromptBuilder from haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder from haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder from haystack.components.builders.prompt_builder import PromptBuilder -__all__ = ["AnswerBuilder", "PromptBuilder", "DynamicPromptBuilder", "DynamicChatPromptBuilder"] +__all__ = ["AnswerBuilder", "PromptBuilder", "DynamicPromptBuilder", "DynamicChatPromptBuilder", "ChatPromptBuilder"]
{"golden_diff": "diff --git a/haystack/components/builders/__init__.py b/haystack/components/builders/__init__.py\n--- a/haystack/components/builders/__init__.py\n+++ b/haystack/components/builders/__init__.py\n@@ -3,8 +3,9 @@\n # SPDX-License-Identifier: Apache-2.0\n \n from haystack.components.builders.answer_builder import AnswerBuilder\n+from haystack.components.builders.chat_prompt_builder import ChatPromptBuilder\n from haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder\n from haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder\n from haystack.components.builders.prompt_builder import PromptBuilder\n \n-__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\"]\n+__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\", \"ChatPromptBuilder\"]\n", "issue": "[V2.2.0] ChatPromptBuilder is not export\n**Describe the bug**\r\nv2.2.0 => ChatPromptBuilder is not export\r\n\r\n**Error message**\r\n<img width=\"1102\" alt=\"image\" src=\"https://github.com/deepset-ai/haystack/assets/15232298/b9372767-42f5-464c-832f-cca38a00cf60\">\r\n\r\n\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom haystack.components.builders.answer_builder import AnswerBuilder\nfrom haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder\nfrom haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder\nfrom haystack.components.builders.prompt_builder import PromptBuilder\n\n__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\"]\n", "path": "haystack/components/builders/__init__.py"}], "after_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom haystack.components.builders.answer_builder import AnswerBuilder\nfrom haystack.components.builders.chat_prompt_builder import ChatPromptBuilder\nfrom haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder\nfrom haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder\nfrom haystack.components.builders.prompt_builder import PromptBuilder\n\n__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\", \"ChatPromptBuilder\"]\n", "path": "haystack/components/builders/__init__.py"}]}
491
185
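The Haystack record above is a pure re-export fix: the class existed but was never imported into the package `__init__`. A quick smoke test for the export, assuming an installed haystack build that already contains the patch; this is a verification snippet, not part of the diff itself.

```python
# Hypothetical smoke test for the re-export; assumes a patched haystack.
import importlib

builders = importlib.import_module("haystack.components.builders")
assert "ChatPromptBuilder" in builders.__all__
cls = builders.ChatPromptBuilder
print(cls.__module__)  # haystack.components.builders.chat_prompt_builder
```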
gh_patches_debug_36530
rasdani/github-patches
git_diff
getsentry__sentry-python-541
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 0.12.0 breaks Django function-based middleware Similar to #504, but a different stack trace: AttributeError: 'method-wrapper' object has no attribute '__module__' File "django/core/handlers/exception.py", line 41, in inner response = get_response(request) File "functools.py", line 33, in update_wrapper setattr(wrapper, attr, getattr(wrapped, attr)) According to sentry (kind-of neat how I get this in this case...), the `get_response` object at that point in time is `<sentry_sdk.integrations.django.middleware.AuditMiddleware object at 0x7f37d64d4450>`. This problem only occurs in 0.12.0 and newer, and with Django 1.11.x --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry_sdk/integrations/django/middleware.py` Content: ``` 1 """ 2 Create spans from Django middleware invocations 3 """ 4 5 from functools import wraps 6 7 from django import VERSION as DJANGO_VERSION 8 9 from sentry_sdk import Hub 10 from sentry_sdk.utils import ContextVar, transaction_from_function 11 12 from sentry_sdk._types import MYPY 13 14 if MYPY: 15 from typing import Any 16 from typing import Callable 17 from typing import TypeVar 18 19 F = TypeVar("F", bound=Callable[..., Any]) 20 21 _import_string_should_wrap_middleware = ContextVar( 22 "import_string_should_wrap_middleware" 23 ) 24 25 if DJANGO_VERSION < (1, 7): 26 import_string_name = "import_by_path" 27 else: 28 import_string_name = "import_string" 29 30 31 def patch_django_middlewares(): 32 # type: () -> None 33 from django.core.handlers import base 34 35 old_import_string = getattr(base, import_string_name) 36 37 def sentry_patched_import_string(dotted_path): 38 # type: (str) -> Any 39 rv = old_import_string(dotted_path) 40 41 if _import_string_should_wrap_middleware.get(None): 42 rv = _wrap_middleware(rv, dotted_path) 43 44 return rv 45 46 setattr(base, import_string_name, sentry_patched_import_string) 47 48 old_load_middleware = base.BaseHandler.load_middleware 49 50 def sentry_patched_load_middleware(self): 51 # type: (base.BaseHandler) -> Any 52 _import_string_should_wrap_middleware.set(True) 53 try: 54 return old_load_middleware(self) 55 finally: 56 _import_string_should_wrap_middleware.set(False) 57 58 base.BaseHandler.load_middleware = sentry_patched_load_middleware 59 60 61 def _wrap_middleware(middleware, middleware_name): 62 # type: (Any, str) -> Any 63 from sentry_sdk.integrations.django import DjangoIntegration 64 65 def _get_wrapped_method(old_method): 66 # type: (F) -> F 67 @wraps(old_method) 68 def sentry_wrapped_method(*args, **kwargs): 69 # type: (*Any, **Any) -> Any 70 hub = Hub.current 71 integration = hub.get_integration(DjangoIntegration) 72 if integration is None or not integration.middleware_spans: 73 return old_method(*args, **kwargs) 74 75 function_name = transaction_from_function(old_method) 76 77 description = middleware_name 78 function_basename = getattr(old_method, "__name__", None) 79 if function_basename: 80 description = "{}.{}".format(description, function_basename) 81 82 with hub.start_span( 83 op="django.middleware", description=description 84 ) as span: 85 span.set_tag("django.function_name", function_name) 86 span.set_tag("django.middleware_name", middleware_name) 87 return old_method(*args, **kwargs) 88 89 return sentry_wrapped_method # type: ignore 90 91 class SentryWrappingMiddleware(object): 92 def __init__(self, 
*args, **kwargs): 93 # type: (*Any, **Any) -> None 94 self._inner = middleware(*args, **kwargs) 95 self._call_method = None 96 97 # We need correct behavior for `hasattr()`, which we can only determine 98 # when we have an instance of the middleware we're wrapping. 99 def __getattr__(self, method_name): 100 # type: (str) -> Any 101 if method_name not in ( 102 "process_request", 103 "process_view", 104 "process_template_response", 105 "process_response", 106 "process_exception", 107 ): 108 raise AttributeError() 109 110 old_method = getattr(self._inner, method_name) 111 rv = _get_wrapped_method(old_method) 112 self.__dict__[method_name] = rv 113 return rv 114 115 def __call__(self, *args, **kwargs): 116 # type: (*Any, **Any) -> Any 117 f = self._call_method 118 if f is None: 119 self._call_method = f = _get_wrapped_method(self._inner.__call__) 120 return f(*args, **kwargs) 121 122 if hasattr(middleware, "__name__"): 123 SentryWrappingMiddleware.__name__ = middleware.__name__ 124 125 return SentryWrappingMiddleware 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py --- a/sentry_sdk/integrations/django/middleware.py +++ b/sentry_sdk/integrations/django/middleware.py @@ -7,7 +7,11 @@ from django import VERSION as DJANGO_VERSION from sentry_sdk import Hub -from sentry_sdk.utils import ContextVar, transaction_from_function +from sentry_sdk.utils import ( + ContextVar, + transaction_from_function, + capture_internal_exceptions, +) from sentry_sdk._types import MYPY @@ -64,29 +68,36 @@ def _get_wrapped_method(old_method): # type: (F) -> F - @wraps(old_method) - def sentry_wrapped_method(*args, **kwargs): - # type: (*Any, **Any) -> Any - hub = Hub.current - integration = hub.get_integration(DjangoIntegration) - if integration is None or not integration.middleware_spans: - return old_method(*args, **kwargs) - - function_name = transaction_from_function(old_method) - - description = middleware_name - function_basename = getattr(old_method, "__name__", None) - if function_basename: - description = "{}.{}".format(description, function_basename) - - with hub.start_span( - op="django.middleware", description=description - ) as span: - span.set_tag("django.function_name", function_name) - span.set_tag("django.middleware_name", middleware_name) - return old_method(*args, **kwargs) - - return sentry_wrapped_method # type: ignore + with capture_internal_exceptions(): + + def sentry_wrapped_method(*args, **kwargs): + # type: (*Any, **Any) -> Any + hub = Hub.current + integration = hub.get_integration(DjangoIntegration) + if integration is None or not integration.middleware_spans: + return old_method(*args, **kwargs) + + function_name = transaction_from_function(old_method) + + description = middleware_name + function_basename = getattr(old_method, "__name__", None) + if function_basename: + description = "{}.{}".format(description, function_basename) + + with hub.start_span( + op="django.middleware", description=description + ) as span: + span.set_tag("django.function_name", function_name) + span.set_tag("django.middleware_name", middleware_name) + return old_method(*args, **kwargs) + + try: + # fails for __call__ of function on Python 2 (see py2.7-django-1.11) + return wraps(old_method)(sentry_wrapped_method) # type: ignore + except Exception: + return sentry_wrapped_method # type: ignore + + return old_method class SentryWrappingMiddleware(object): def __init__(self, *args, **kwargs):
{"golden_diff": "diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py\n--- a/sentry_sdk/integrations/django/middleware.py\n+++ b/sentry_sdk/integrations/django/middleware.py\n@@ -7,7 +7,11 @@\n from django import VERSION as DJANGO_VERSION\n \n from sentry_sdk import Hub\n-from sentry_sdk.utils import ContextVar, transaction_from_function\n+from sentry_sdk.utils import (\n+ ContextVar,\n+ transaction_from_function,\n+ capture_internal_exceptions,\n+)\n \n from sentry_sdk._types import MYPY\n \n@@ -64,29 +68,36 @@\n \n def _get_wrapped_method(old_method):\n # type: (F) -> F\n- @wraps(old_method)\n- def sentry_wrapped_method(*args, **kwargs):\n- # type: (*Any, **Any) -> Any\n- hub = Hub.current\n- integration = hub.get_integration(DjangoIntegration)\n- if integration is None or not integration.middleware_spans:\n- return old_method(*args, **kwargs)\n-\n- function_name = transaction_from_function(old_method)\n-\n- description = middleware_name\n- function_basename = getattr(old_method, \"__name__\", None)\n- if function_basename:\n- description = \"{}.{}\".format(description, function_basename)\n-\n- with hub.start_span(\n- op=\"django.middleware\", description=description\n- ) as span:\n- span.set_tag(\"django.function_name\", function_name)\n- span.set_tag(\"django.middleware_name\", middleware_name)\n- return old_method(*args, **kwargs)\n-\n- return sentry_wrapped_method # type: ignore\n+ with capture_internal_exceptions():\n+\n+ def sentry_wrapped_method(*args, **kwargs):\n+ # type: (*Any, **Any) -> Any\n+ hub = Hub.current\n+ integration = hub.get_integration(DjangoIntegration)\n+ if integration is None or not integration.middleware_spans:\n+ return old_method(*args, **kwargs)\n+\n+ function_name = transaction_from_function(old_method)\n+\n+ description = middleware_name\n+ function_basename = getattr(old_method, \"__name__\", None)\n+ if function_basename:\n+ description = \"{}.{}\".format(description, function_basename)\n+\n+ with hub.start_span(\n+ op=\"django.middleware\", description=description\n+ ) as span:\n+ span.set_tag(\"django.function_name\", function_name)\n+ span.set_tag(\"django.middleware_name\", middleware_name)\n+ return old_method(*args, **kwargs)\n+\n+ try:\n+ # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n+ return wraps(old_method)(sentry_wrapped_method) # type: ignore\n+ except Exception:\n+ return sentry_wrapped_method # type: ignore\n+\n+ return old_method\n \n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n", "issue": "0.12.0 breaks Django function-based middleware\nSimilar to #504, but a different stack trace:\r\n\r\n AttributeError: 'method-wrapper' object has no attribute '__module__'\r\n File \"django/core/handlers/exception.py\", line 41, in inner\r\n response = get_response(request)\r\n File \"functools.py\", line 33, in update_wrapper\r\n setattr(wrapper, attr, getattr(wrapped, attr))\r\n\r\nAccording to sentry (kind-of neat how I get this in this case...), the `get_response` object at that point in time is `<sentry_sdk.integrations.django.middleware.AuditMiddleware object at 0x7f37d64d4450>`.\r\n\r\nThis problem only occurs in 0.12.0 and newer, and with Django 1.11.x\n", "before_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom functools import wraps\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk.utils import ContextVar, transaction_from_function\n\nfrom 
sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(self):\n # type: (base.BaseHandler) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(self)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n @wraps(old_method)\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n return sentry_wrapped_method # type: ignore\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}], "after_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom functools import wraps\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom 
sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(self):\n # type: (base.BaseHandler) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(self)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n return wraps(old_method)(sentry_wrapped_method) # type: ignore\n except Exception:\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n 
return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}]}
1616
672
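The sentry-sdk record above works around `functools.wraps` failing on objects, such as Python 2 method-wrappers, that lack `__module__`. Here is a stdlib-only sketch of the same defensive pattern; `safe_wraps` and `traced` are illustrative names, not sentry-sdk API.

```python
# A stdlib-only sketch of the try/except wraps() pattern in the diff above.
from functools import wraps


def safe_wraps(old_method, new_method):
    """Copy metadata from old_method when possible, fall back otherwise.

    On Python 2, functools.wraps raised AttributeError for method-wrapper
    objects missing __module__ -- the exact crash in the issue above.
    """
    try:
        return wraps(old_method)(new_method)
    except Exception:
        return new_method


def traced(old_method):
    def wrapped(*args, **kwargs):
        # span bookkeeping would go here in the real integration
        return old_method(*args, **kwargs)

    return safe_wraps(old_method, wrapped)


handler = traced(object().__str__)  # a method-wrapper, the problematic input
print(handler())
```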
gh_patches_debug_24599
rasdani/github-patches
git_diff
edgedb__edgedb-1207
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error creating role using CLI tool: please specify an attribute to alter EdgeDB version: `1.0-alpha.2+dev.505.g5b68ec24` Steps to reproduce: ``` # edgedb --admin create-role test Usage: edgedb create-role [OPTIONS] ROLE_NAME Error: please specify an attribute to alter ``` Command fails with weird error message if `OPTIONS` were no provided, even though they're supposed to be optional according to help message. It works if 1 or more options are present: `edgedb --admin create-role --allow-login test` Creating role inside interactive session without options works as expected: ``` modbay_beta> CREATE ROLE test; CREATE ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `edb/cli/mng.py` Content: ``` 1 # 2 # This source file is part of the EdgeDB open source project. 3 # 4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 # 18 19 20 from __future__ import annotations 21 22 import re 23 import sys 24 import textwrap 25 26 import click 27 28 import edgedb 29 30 from edb.cli import cli 31 from edb.cli import utils 32 from edb.edgeql.quote import quote_literal as ql, quote_ident as qi 33 34 35 @cli.group(help='Modify database configuration') 36 @click.pass_context 37 def configure(ctx): 38 utils.connect(ctx) 39 40 41 @configure.command(context_settings=dict( 42 ignore_unknown_options=True, 43 )) 44 @click.pass_context 45 @click.argument('parameter', type=str) 46 @click.argument('values', nargs=-1, type=click.UNPROCESSED) 47 def insert(ctx, parameter: str, values): 48 if not values: 49 raise click.UsageError( 50 'missing configuration value properties', ctx=ctx) 51 52 try: 53 cfg_obj_name, props = _process_configure_composite_options( 54 ctx, parameter, values) 55 except NotAnObjectError as e: 56 raise click.UsageError(str(e), ctx=ctx) from None 57 58 attrs = [] 59 60 for pn, (pval, ptype) in props.items(): 61 if ptype.__type__.name == 'schema::ObjectType': 62 pval = f'(INSERT {pval})' 63 else: 64 pval = f'<{ptype.name}>{ql(pval)}' 65 66 attrs.append(f'{qi(pn)} := {pval}') 67 68 attrs = ',\n'.join(attrs) 69 70 qry = textwrap.dedent(f'''\ 71 CONFIGURE SYSTEM INSERT {cfg_obj_name} {{ 72 {textwrap.indent(attrs, ' ' * 12).strip()} 73 }} 74 ''') 75 76 try: 77 ctx.obj['conn'].execute(qry) 78 except edgedb.EdgeDBError as e: 79 raise click.ClickException(str(e)) from e 80 else: 81 click.echo(ctx.obj['conn']._get_last_status()) 82 83 84 @configure.command() 85 @click.pass_context 86 @click.argument('parameter', type=str) 87 @click.argument('value', nargs=-1, type=str) 88 def set(ctx, parameter: str, value): 89 cfg_obj_name, cfg_type, cfg_card = _process_configure_scalar( 90 ctx, parameter, []) 91 92 if cfg_card == 'ONE': 93 if len(value) > 1: 94 raise click.ClickException('too many values', ctx=ctx) 95 value = value[0] 96 
val_expr = ql(value) 97 else: 98 val_expr = f'{{{", ".join(ql(v) for v in value)}}}' 99 100 # Canonicalize the values by casting them. 101 vals = ctx.obj['conn'].fetchall(f''' 102 SELECT <str><{cfg_type.name}>{val_expr} 103 ''') 104 105 args = [] 106 for val in vals: 107 if cfg_type.is_numeric or cfg_type.is_bool: 108 args.append(val) 109 elif cfg_type.is_str: 110 args.append(ql(val)) 111 else: 112 raise click.ClickException( 113 f'cannot set {parameter}: it is not a string, numeric or bool' 114 ) 115 116 args_list = ', '.join(args) 117 args_expr = f'{{{args_list}}}' 118 119 qry = textwrap.dedent(f''' 120 CONFIGURE SYSTEM SET {cfg_obj_name} := {args_expr} 121 ''') 122 123 try: 124 ctx.obj['conn'].execute(qry) 125 except edgedb.EdgeDBError as e: 126 raise click.ClickException(str(e)) from e 127 else: 128 click.echo(ctx.obj['conn']._get_last_status()) 129 130 131 @configure.command(context_settings=dict( 132 ignore_unknown_options=True, 133 )) 134 @click.pass_context 135 @click.argument('parameter', type=str) 136 @click.argument('values', nargs=-1, type=click.UNPROCESSED) 137 def reset(ctx, parameter: str, values): 138 try: 139 cfg_obj_name, props = _process_configure_composite_options( 140 ctx, parameter, values) 141 except NotAnObjectError: 142 is_scalar = True 143 else: 144 is_scalar = False 145 146 if is_scalar: 147 cfg_obj_name, cfg_type, cfg_card = _process_configure_scalar( 148 ctx, parameter, values) 149 150 qry = f'CONFIGURE SYSTEM RESET {cfg_obj_name}' 151 else: 152 attrs = [] 153 154 for pn, (pval, ptype) in props.items(): 155 if ptype.__type__.name == 'schema::ObjectType': 156 pval = f'.{pn} IS {pval}' 157 else: 158 pval = f'.{pn} = <{ptype.name}>{ql(pval)}' 159 160 attrs.append(pval) 161 162 if attrs: 163 flt = f"FILTER {' AND '.join(attrs)}" 164 else: 165 flt = '' 166 167 qry = textwrap.dedent(f''' 168 CONFIGURE SYSTEM RESET {cfg_obj_name} {flt} 169 ''') 170 171 try: 172 ctx.obj['conn'].execute(qry) 173 except edgedb.EdgeDBError as e: 174 raise click.ClickException(str(e)) from e 175 else: 176 click.echo(ctx.obj['conn']._get_last_status()) 177 178 179 class NotAnObjectError(Exception): 180 pass 181 182 183 def _process_configure_composite_options(ctx, parameter, values): 184 props = {} 185 186 cfg_objects = ctx.obj['conn'].fetchall(''' 187 WITH MODULE schema 188 SELECT ObjectType { 189 name 190 } FILTER .name LIKE 'cfg::%' 191 ''') 192 193 cfg_objmap = {} 194 195 for obj in cfg_objects: 196 _, _, obj_name = obj.name.partition('::') 197 cfg_objmap[obj_name] = obj_name 198 cfg_objmap[obj_name.lower()] = obj_name 199 200 cfg_obj_name = cfg_objmap.get(parameter) 201 if not cfg_obj_name: 202 raise NotAnObjectError( 203 f'{parameter} is not a valid configuration object') 204 205 cfg_props = ctx.obj['conn'].fetchall(''' 206 WITH 207 MODULE schema, 208 Cfg := (SELECT ObjectType FILTER .name = <str>$typename) 209 SELECT Cfg.pointers { 210 name, 211 target: {name, __type__: {name}} 212 }; 213 ''', typename=f'cfg::{cfg_obj_name}') 214 215 cfg_prop_map = {p.name: p.target for p in cfg_props} 216 217 for value in values: 218 v = re.match(r'--(\w+)(?: |=)(.*)', value) 219 if not v: 220 raise click.UsageError(f'unrecognized option: {value}', ctx=ctx) 221 222 propname = v.group(1) 223 propval = v.group(2) 224 225 proptype = cfg_prop_map.get(propname) 226 if proptype is None: 227 raise click.UsageError( 228 f'{propname!r} is not a valid {cfg_obj_name} property', 229 ctx=ctx) 230 231 if propval in cfg_objmap: 232 propval = cfg_objmap[propval] 233 234 props[propname] = (propval, proptype) 235 236 
return cfg_obj_name, props 237 238 239 def _process_configure_scalar(ctx, parameter, values): 240 if values: 241 raise click.UsageError(f'unexpected option: {next(iter(values))}') 242 243 cfg_props = ctx.obj['conn'].fetchall(''' 244 WITH 245 MODULE schema, 246 Cfg := (SELECT ObjectType FILTER .name = <str>$typename) 247 SELECT Cfg.pointers { 248 name, 249 cardinality, 250 target: { 251 name, 252 __type__: {name}, 253 is_numeric := ( 254 'std::anyreal' IN 255 Cfg.pointers.target[IS ScalarType].ancestors.name), 256 is_bool := ( 257 Cfg.pointers.target.name = 'std::bool' 258 OR 'std::bool' IN 259 Cfg.pointers.target[IS ScalarType].ancestors.name), 260 is_str := ( 261 Cfg.pointers.target.name = 'std::str' 262 OR any({'std::str', 'std::anyenum'} IN 263 Cfg.pointers.target[IS ScalarType].ancestors.name)), 264 } 265 } FILTER .name = <str>$propname; 266 ''', typename=f'cfg::Config', propname=parameter) 267 268 if len(cfg_props) == 0: 269 raise click.UsageError( 270 f'{parameter!r} is not a valid configuration parameter', 271 ctx=ctx) 272 273 return parameter, cfg_props[0].target, cfg_props[0].cardinality 274 275 276 def options(options): 277 def _decorator(func): 278 for option in reversed(options): 279 func = option(func) 280 return func 281 282 return _decorator 283 284 285 _role_options = [ 286 click.option('--password/--no-password', default=None), 287 click.option('--password-from-stdin', is_flag=True, default=False), 288 ] 289 290 291 def _process_role_options(ctx, password, password_from_stdin): 292 if password is None and password_from_stdin: 293 password = True 294 295 if password is not None: 296 if password: 297 if password_from_stdin: 298 password_value = ql(sys.stdin.readline().strip('\r\n')) 299 elif sys.stdin.isatty(): 300 password_value = ql(click.prompt( 301 'Password', 302 hide_input=True, 303 confirmation_prompt=True, 304 type=str, 305 )) 306 else: 307 raise click.UsageError( 308 'input is not a TTY, please use --password-from-stdin ' 309 'to provide the password value' 310 ) 311 else: 312 password_value = '{}' 313 else: 314 password_value = None 315 316 alters = [] 317 if password_value is not None: 318 alters.append(f'SET password := {password_value}') 319 320 if not alters: 321 raise click.UsageError( 322 'please specify an attribute to alter', ctx=ctx, 323 ) 324 325 return alters 326 327 328 @cli.command(name='create-superuser-role', 329 help='Create a new database superuser') 330 @click.argument('role-name', type=str) 331 @options(_role_options) 332 @click.pass_context 333 def create_role(ctx, role_name, **kwargs): 334 utils.connect(ctx) 335 336 attrs = ";\n".join(_process_role_options(ctx, **kwargs)) 337 338 qry = f''' 339 CREATE SUPERUSER ROLE {qi(role_name)} {{ 340 {attrs} 341 }} 342 ''' 343 344 try: 345 ctx.obj['conn'].execute(qry) 346 except edgedb.EdgeDBError as e: 347 raise click.ClickException(str(e)) from e 348 349 350 @cli.command(name='alter-role', help='Modify role') 351 @click.argument('role-name', type=str) 352 @options(_role_options) 353 @click.pass_context 354 def alter_role(ctx, role_name, **kwargs): 355 utils.connect(ctx) 356 357 attrs = ";\n".join(_process_role_options(ctx, **kwargs)) 358 359 qry = f''' 360 ALTER ROLE {qi(role_name)} {{ 361 {attrs} 362 }} 363 ''' 364 365 try: 366 ctx.obj['conn'].execute(qry) 367 except edgedb.EdgeDBError as e: 368 raise click.ClickException(str(e)) from e 369 370 371 @cli.command(name='drop-role', help="Remove role") 372 @click.argument('role-name', type=str) 373 @click.pass_context 374 def drop_role(ctx, role_name, 
**kwargs): 375 utils.connect(ctx) 376 qry = f''' 377 DROP ROLE {qi(role_name)}; 378 ''' 379 380 try: 381 ctx.obj['conn'].execute(qry) 382 except edgedb.EdgeDBError as e: 383 raise click.ClickException(str(e)) from e 384 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/edb/cli/mng.py b/edb/cli/mng.py --- a/edb/cli/mng.py +++ b/edb/cli/mng.py @@ -288,7 +288,13 @@ ] -def _process_role_options(ctx, password, password_from_stdin): +def _process_role_options( + ctx, + password, + password_from_stdin, + allow_empty=False +): + if password is None and password_from_stdin: password = True @@ -318,6 +324,9 @@ alters.append(f'SET password := {password_value}') if not alters: + if allow_empty: + return alters + raise click.UsageError( 'please specify an attribute to alter', ctx=ctx, ) @@ -333,12 +342,19 @@ def create_role(ctx, role_name, **kwargs): utils.connect(ctx) - attrs = ";\n".join(_process_role_options(ctx, **kwargs)) + attrs = ";\n".join(_process_role_options(ctx, allow_empty=True, **kwargs)) - qry = f''' - CREATE SUPERUSER ROLE {qi(role_name)} {{ + if attrs: + formatted_attrs = f''' + {{ {attrs} }} + ''' + else: + formatted_attrs = '' + + qry = f''' + CREATE SUPERUSER ROLE {qi(role_name)} {formatted_attrs} ''' try:
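The diff above makes the attribute block optional for `create-role` while keeping it mandatory for `alter-role`. A reduced sketch of that control flow, assuming click is installed, with the role options trimmed to a single password value; `process_role_options` and `create_role_query` here are illustrative names, not the CLI's exact signatures.

```python
# A reduced sketch of the allow_empty control flow from the diff above;
# assumes click is installed, options trimmed to a single password value.
import click


def process_role_options(ctx, password=None, allow_empty=False):
    alters = []
    if password is not None:
        alters.append(f"SET password := {password!r}")
    if not alters and not allow_empty:
        # alter-role still requires at least one attribute ...
        raise click.UsageError("please specify an attribute to alter", ctx=ctx)
    return alters  # ... but create-role may legally return an empty list


def create_role_query(role_name, alters):
    # Emit the braces only when there is something to put inside them.
    body = " {\n    " + ";\n    ".join(alters) + "\n}" if alters else ""
    return f"CREATE SUPERUSER ROLE {role_name}{body}"


print(create_role_query("test", process_role_options(None, allow_empty=True)))
# -> CREATE SUPERUSER ROLE test
```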
{"golden_diff": "diff --git a/edb/cli/mng.py b/edb/cli/mng.py\n--- a/edb/cli/mng.py\n+++ b/edb/cli/mng.py\n@@ -288,7 +288,13 @@\n ]\n \n \n-def _process_role_options(ctx, password, password_from_stdin):\n+def _process_role_options(\n+ ctx,\n+ password,\n+ password_from_stdin,\n+ allow_empty=False\n+):\n+\n if password is None and password_from_stdin:\n password = True\n \n@@ -318,6 +324,9 @@\n alters.append(f'SET password := {password_value}')\n \n if not alters:\n+ if allow_empty:\n+ return alters\n+\n raise click.UsageError(\n 'please specify an attribute to alter', ctx=ctx,\n )\n@@ -333,12 +342,19 @@\n def create_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n \n- attrs = \";\\n\".join(_process_role_options(ctx, **kwargs))\n+ attrs = \";\\n\".join(_process_role_options(ctx, allow_empty=True, **kwargs))\n \n- qry = f'''\n- CREATE SUPERUSER ROLE {qi(role_name)} {{\n+ if attrs:\n+ formatted_attrs = f'''\n+ {{\n {attrs}\n }}\n+ '''\n+ else:\n+ formatted_attrs = ''\n+\n+ qry = f'''\n+ CREATE SUPERUSER ROLE {qi(role_name)} {formatted_attrs}\n '''\n \n try:\n", "issue": "Error creating role using CLI tool: please specify an attribute to alter\nEdgeDB version: `1.0-alpha.2+dev.505.g5b68ec24`\r\n\r\nSteps to reproduce:\r\n```\r\n# edgedb --admin create-role test\r\nUsage: edgedb create-role [OPTIONS] ROLE_NAME\r\n\r\nError: please specify an attribute to alter\r\n```\r\nCommand fails with weird error message if `OPTIONS` were no provided, even though they're supposed to be optional according to help message.\r\nIt works if 1 or more options are present: `edgedb --admin create-role --allow-login test`\r\n\r\nCreating role inside interactive session without options works as expected:\r\n```\r\nmodbay_beta> CREATE ROLE test; \r\nCREATE\r\n```\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport textwrap\n\nimport click\n\nimport edgedb\n\nfrom edb.cli import cli\nfrom edb.cli import utils\nfrom edb.edgeql.quote import quote_literal as ql, quote_ident as qi\n\n\[email protected](help='Modify database configuration')\[email protected]_context\ndef configure(ctx):\n utils.connect(ctx)\n\n\[email protected](context_settings=dict(\n ignore_unknown_options=True,\n))\[email protected]_context\[email protected]('parameter', type=str)\[email protected]('values', nargs=-1, type=click.UNPROCESSED)\ndef insert(ctx, parameter: str, values):\n if not values:\n raise click.UsageError(\n 'missing configuration value properties', ctx=ctx)\n\n try:\n cfg_obj_name, props = _process_configure_composite_options(\n ctx, parameter, values)\n except NotAnObjectError as e:\n raise click.UsageError(str(e), ctx=ctx) from None\n\n attrs = []\n\n for pn, (pval, ptype) in props.items():\n if ptype.__type__.name == 'schema::ObjectType':\n pval = f'(INSERT {pval})'\n else:\n pval = f'<{ptype.name}>{ql(pval)}'\n\n attrs.append(f'{qi(pn)} := {pval}')\n\n attrs = ',\\n'.join(attrs)\n\n qry = textwrap.dedent(f'''\\\n CONFIGURE SYSTEM INSERT {cfg_obj_name} {{\n {textwrap.indent(attrs, ' ' * 12).strip()}\n }}\n ''')\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n else:\n click.echo(ctx.obj['conn']._get_last_status())\n\n\[email protected]()\[email protected]_context\[email protected]('parameter', type=str)\[email protected]('value', nargs=-1, type=str)\ndef set(ctx, parameter: str, value):\n cfg_obj_name, cfg_type, cfg_card = _process_configure_scalar(\n ctx, parameter, [])\n\n if cfg_card == 'ONE':\n if len(value) > 1:\n raise click.ClickException('too many values', ctx=ctx)\n value = value[0]\n val_expr = ql(value)\n else:\n val_expr = f'{{{\", \".join(ql(v) for v in value)}}}'\n\n # Canonicalize the values by casting them.\n vals = ctx.obj['conn'].fetchall(f'''\n SELECT <str><{cfg_type.name}>{val_expr}\n ''')\n\n args = []\n for val in vals:\n if cfg_type.is_numeric or cfg_type.is_bool:\n args.append(val)\n elif cfg_type.is_str:\n args.append(ql(val))\n else:\n raise click.ClickException(\n f'cannot set {parameter}: it is not a string, numeric or bool'\n )\n\n args_list = ', '.join(args)\n args_expr = f'{{{args_list}}}'\n\n qry = textwrap.dedent(f'''\n CONFIGURE SYSTEM SET {cfg_obj_name} := {args_expr}\n ''')\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n else:\n click.echo(ctx.obj['conn']._get_last_status())\n\n\[email protected](context_settings=dict(\n ignore_unknown_options=True,\n))\[email protected]_context\[email protected]('parameter', type=str)\[email protected]('values', nargs=-1, type=click.UNPROCESSED)\ndef reset(ctx, parameter: str, values):\n try:\n cfg_obj_name, props = _process_configure_composite_options(\n 
ctx, parameter, values)\n except NotAnObjectError:\n is_scalar = True\n else:\n is_scalar = False\n\n if is_scalar:\n cfg_obj_name, cfg_type, cfg_card = _process_configure_scalar(\n ctx, parameter, values)\n\n qry = f'CONFIGURE SYSTEM RESET {cfg_obj_name}'\n else:\n attrs = []\n\n for pn, (pval, ptype) in props.items():\n if ptype.__type__.name == 'schema::ObjectType':\n pval = f'.{pn} IS {pval}'\n else:\n pval = f'.{pn} = <{ptype.name}>{ql(pval)}'\n\n attrs.append(pval)\n\n if attrs:\n flt = f\"FILTER {' AND '.join(attrs)}\"\n else:\n flt = ''\n\n qry = textwrap.dedent(f'''\n CONFIGURE SYSTEM RESET {cfg_obj_name} {flt}\n ''')\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n else:\n click.echo(ctx.obj['conn']._get_last_status())\n\n\nclass NotAnObjectError(Exception):\n pass\n\n\ndef _process_configure_composite_options(ctx, parameter, values):\n props = {}\n\n cfg_objects = ctx.obj['conn'].fetchall('''\n WITH MODULE schema\n SELECT ObjectType {\n name\n } FILTER .name LIKE 'cfg::%'\n ''')\n\n cfg_objmap = {}\n\n for obj in cfg_objects:\n _, _, obj_name = obj.name.partition('::')\n cfg_objmap[obj_name] = obj_name\n cfg_objmap[obj_name.lower()] = obj_name\n\n cfg_obj_name = cfg_objmap.get(parameter)\n if not cfg_obj_name:\n raise NotAnObjectError(\n f'{parameter} is not a valid configuration object')\n\n cfg_props = ctx.obj['conn'].fetchall('''\n WITH\n MODULE schema,\n Cfg := (SELECT ObjectType FILTER .name = <str>$typename)\n SELECT Cfg.pointers {\n name,\n target: {name, __type__: {name}}\n };\n ''', typename=f'cfg::{cfg_obj_name}')\n\n cfg_prop_map = {p.name: p.target for p in cfg_props}\n\n for value in values:\n v = re.match(r'--(\\w+)(?: |=)(.*)', value)\n if not v:\n raise click.UsageError(f'unrecognized option: {value}', ctx=ctx)\n\n propname = v.group(1)\n propval = v.group(2)\n\n proptype = cfg_prop_map.get(propname)\n if proptype is None:\n raise click.UsageError(\n f'{propname!r} is not a valid {cfg_obj_name} property',\n ctx=ctx)\n\n if propval in cfg_objmap:\n propval = cfg_objmap[propval]\n\n props[propname] = (propval, proptype)\n\n return cfg_obj_name, props\n\n\ndef _process_configure_scalar(ctx, parameter, values):\n if values:\n raise click.UsageError(f'unexpected option: {next(iter(values))}')\n\n cfg_props = ctx.obj['conn'].fetchall('''\n WITH\n MODULE schema,\n Cfg := (SELECT ObjectType FILTER .name = <str>$typename)\n SELECT Cfg.pointers {\n name,\n cardinality,\n target: {\n name,\n __type__: {name},\n is_numeric := (\n 'std::anyreal' IN\n Cfg.pointers.target[IS ScalarType].ancestors.name),\n is_bool := (\n Cfg.pointers.target.name = 'std::bool'\n OR 'std::bool' IN\n Cfg.pointers.target[IS ScalarType].ancestors.name),\n is_str := (\n Cfg.pointers.target.name = 'std::str'\n OR any({'std::str', 'std::anyenum'} IN\n Cfg.pointers.target[IS ScalarType].ancestors.name)),\n }\n } FILTER .name = <str>$propname;\n ''', typename=f'cfg::Config', propname=parameter)\n\n if len(cfg_props) == 0:\n raise click.UsageError(\n f'{parameter!r} is not a valid configuration parameter',\n ctx=ctx)\n\n return parameter, cfg_props[0].target, cfg_props[0].cardinality\n\n\ndef options(options):\n def _decorator(func):\n for option in reversed(options):\n func = option(func)\n return func\n\n return _decorator\n\n\n_role_options = [\n click.option('--password/--no-password', default=None),\n click.option('--password-from-stdin', is_flag=True, default=False),\n]\n\n\ndef _process_role_options(ctx, password, 
password_from_stdin):\n if password is None and password_from_stdin:\n password = True\n\n if password is not None:\n if password:\n if password_from_stdin:\n password_value = ql(sys.stdin.readline().strip('\\r\\n'))\n elif sys.stdin.isatty():\n password_value = ql(click.prompt(\n 'Password',\n hide_input=True,\n confirmation_prompt=True,\n type=str,\n ))\n else:\n raise click.UsageError(\n 'input is not a TTY, please use --password-from-stdin '\n 'to provide the password value'\n )\n else:\n password_value = '{}'\n else:\n password_value = None\n\n alters = []\n if password_value is not None:\n alters.append(f'SET password := {password_value}')\n\n if not alters:\n raise click.UsageError(\n 'please specify an attribute to alter', ctx=ctx,\n )\n\n return alters\n\n\[email protected](name='create-superuser-role',\n help='Create a new database superuser')\[email protected]('role-name', type=str)\n@options(_role_options)\[email protected]_context\ndef create_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n\n attrs = \";\\n\".join(_process_role_options(ctx, **kwargs))\n\n qry = f'''\n CREATE SUPERUSER ROLE {qi(role_name)} {{\n {attrs}\n }}\n '''\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n\n\[email protected](name='alter-role', help='Modify role')\[email protected]('role-name', type=str)\n@options(_role_options)\[email protected]_context\ndef alter_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n\n attrs = \";\\n\".join(_process_role_options(ctx, **kwargs))\n\n qry = f'''\n ALTER ROLE {qi(role_name)} {{\n {attrs}\n }}\n '''\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n\n\[email protected](name='drop-role', help=\"Remove role\")\[email protected]('role-name', type=str)\[email protected]_context\ndef drop_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n qry = f'''\n DROP ROLE {qi(role_name)};\n '''\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n", "path": "edb/cli/mng.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport textwrap\n\nimport click\n\nimport edgedb\n\nfrom edb.cli import cli\nfrom edb.cli import utils\nfrom edb.edgeql.quote import quote_literal as ql, quote_ident as qi\n\n\[email protected](help='Modify database configuration')\[email protected]_context\ndef configure(ctx):\n utils.connect(ctx)\n\n\[email protected](context_settings=dict(\n ignore_unknown_options=True,\n))\[email protected]_context\[email protected]('parameter', type=str)\[email protected]('values', nargs=-1, type=click.UNPROCESSED)\ndef insert(ctx, parameter: str, values):\n if not values:\n raise click.UsageError(\n 'missing configuration value properties', ctx=ctx)\n\n try:\n cfg_obj_name, props = _process_configure_composite_options(\n ctx, parameter, values)\n except NotAnObjectError as e:\n raise click.UsageError(str(e), ctx=ctx) from None\n\n attrs = []\n\n for pn, (pval, ptype) in props.items():\n if ptype.__type__.name == 'schema::ObjectType':\n pval = f'(INSERT {pval})'\n else:\n pval = f'<{ptype.name}>{ql(pval)}'\n\n attrs.append(f'{qi(pn)} := {pval}')\n\n attrs = ',\\n'.join(attrs)\n\n qry = textwrap.dedent(f'''\\\n CONFIGURE SYSTEM INSERT {cfg_obj_name} {{\n {textwrap.indent(attrs, ' ' * 12).strip()}\n }}\n ''')\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n else:\n click.echo(ctx.obj['conn']._get_last_status())\n\n\[email protected]()\[email protected]_context\[email protected]('parameter', type=str)\[email protected]('value', nargs=-1, type=str)\ndef set(ctx, parameter: str, value):\n cfg_obj_name, cfg_type, cfg_card = _process_configure_scalar(\n ctx, parameter, [])\n\n if cfg_card == 'ONE':\n if len(value) > 1:\n raise click.ClickException('too many values', ctx=ctx)\n value = value[0]\n val_expr = ql(value)\n else:\n val_expr = f'{{{\", \".join(ql(v) for v in value)}}}'\n\n # Canonicalize the values by casting them.\n vals = ctx.obj['conn'].fetchall(f'''\n SELECT <str><{cfg_type.name}>{val_expr}\n ''')\n\n args = []\n for val in vals:\n if cfg_type.is_numeric or cfg_type.is_bool:\n args.append(val)\n elif cfg_type.is_str:\n args.append(ql(val))\n else:\n raise click.ClickException(\n f'cannot set {parameter}: it is not a string, numeric or bool'\n )\n\n args_list = ', '.join(args)\n args_expr = f'{{{args_list}}}'\n\n qry = textwrap.dedent(f'''\n CONFIGURE SYSTEM SET {cfg_obj_name} := {args_expr}\n ''')\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n else:\n click.echo(ctx.obj['conn']._get_last_status())\n\n\[email protected](context_settings=dict(\n ignore_unknown_options=True,\n))\[email protected]_context\[email protected]('parameter', type=str)\[email protected]('values', nargs=-1, type=click.UNPROCESSED)\ndef reset(ctx, parameter: str, values):\n try:\n cfg_obj_name, props = _process_configure_composite_options(\n 
ctx, parameter, values)\n except NotAnObjectError:\n is_scalar = True\n else:\n is_scalar = False\n\n if is_scalar:\n cfg_obj_name, cfg_type, cfg_card = _process_configure_scalar(\n ctx, parameter, values)\n\n qry = f'CONFIGURE SYSTEM RESET {cfg_obj_name}'\n else:\n attrs = []\n\n for pn, (pval, ptype) in props.items():\n if ptype.__type__.name == 'schema::ObjectType':\n pval = f'.{pn} IS {pval}'\n else:\n pval = f'.{pn} = <{ptype.name}>{ql(pval)}'\n\n attrs.append(pval)\n\n if attrs:\n flt = f\"FILTER {' AND '.join(attrs)}\"\n else:\n flt = ''\n\n qry = textwrap.dedent(f'''\n CONFIGURE SYSTEM RESET {cfg_obj_name} {flt}\n ''')\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n else:\n click.echo(ctx.obj['conn']._get_last_status())\n\n\nclass NotAnObjectError(Exception):\n pass\n\n\ndef _process_configure_composite_options(ctx, parameter, values):\n props = {}\n\n cfg_objects = ctx.obj['conn'].fetchall('''\n WITH MODULE schema\n SELECT ObjectType {\n name\n } FILTER .name LIKE 'cfg::%'\n ''')\n\n cfg_objmap = {}\n\n for obj in cfg_objects:\n _, _, obj_name = obj.name.partition('::')\n cfg_objmap[obj_name] = obj_name\n cfg_objmap[obj_name.lower()] = obj_name\n\n cfg_obj_name = cfg_objmap.get(parameter)\n if not cfg_obj_name:\n raise NotAnObjectError(\n f'{parameter} is not a valid configuration object')\n\n cfg_props = ctx.obj['conn'].fetchall('''\n WITH\n MODULE schema,\n Cfg := (SELECT ObjectType FILTER .name = <str>$typename)\n SELECT Cfg.pointers {\n name,\n target: {name, __type__: {name}}\n };\n ''', typename=f'cfg::{cfg_obj_name}')\n\n cfg_prop_map = {p.name: p.target for p in cfg_props}\n\n for value in values:\n v = re.match(r'--(\\w+)(?: |=)(.*)', value)\n if not v:\n raise click.UsageError(f'unrecognized option: {value}', ctx=ctx)\n\n propname = v.group(1)\n propval = v.group(2)\n\n proptype = cfg_prop_map.get(propname)\n if proptype is None:\n raise click.UsageError(\n f'{propname!r} is not a valid {cfg_obj_name} property',\n ctx=ctx)\n\n if propval in cfg_objmap:\n propval = cfg_objmap[propval]\n\n props[propname] = (propval, proptype)\n\n return cfg_obj_name, props\n\n\ndef _process_configure_scalar(ctx, parameter, values):\n if values:\n raise click.UsageError(f'unexpected option: {next(iter(values))}')\n\n cfg_props = ctx.obj['conn'].fetchall('''\n WITH\n MODULE schema,\n Cfg := (SELECT ObjectType FILTER .name = <str>$typename)\n SELECT Cfg.pointers {\n name,\n cardinality,\n target: {\n name,\n __type__: {name},\n is_numeric := (\n 'std::anyreal' IN\n Cfg.pointers.target[IS ScalarType].ancestors.name),\n is_bool := (\n Cfg.pointers.target.name = 'std::bool'\n OR 'std::bool' IN\n Cfg.pointers.target[IS ScalarType].ancestors.name),\n is_str := (\n Cfg.pointers.target.name = 'std::str'\n OR any({'std::str', 'std::anyenum'} IN\n Cfg.pointers.target[IS ScalarType].ancestors.name)),\n }\n } FILTER .name = <str>$propname;\n ''', typename=f'cfg::Config', propname=parameter)\n\n if len(cfg_props) == 0:\n raise click.UsageError(\n f'{parameter!r} is not a valid configuration parameter',\n ctx=ctx)\n\n return parameter, cfg_props[0].target, cfg_props[0].cardinality\n\n\ndef options(options):\n def _decorator(func):\n for option in reversed(options):\n func = option(func)\n return func\n\n return _decorator\n\n\n_role_options = [\n click.option('--password/--no-password', default=None),\n click.option('--password-from-stdin', is_flag=True, default=False),\n]\n\n\ndef _process_role_options(\n ctx,\n password,\n 
password_from_stdin,\n allow_empty=False\n):\n\n if password is None and password_from_stdin:\n password = True\n\n if password is not None:\n if password:\n if password_from_stdin:\n password_value = ql(sys.stdin.readline().strip('\\r\\n'))\n elif sys.stdin.isatty():\n password_value = ql(click.prompt(\n 'Password',\n hide_input=True,\n confirmation_prompt=True,\n type=str,\n ))\n else:\n raise click.UsageError(\n 'input is not a TTY, please use --password-from-stdin '\n 'to provide the password value'\n )\n else:\n password_value = '{}'\n else:\n password_value = None\n\n alters = []\n if password_value is not None:\n alters.append(f'SET password := {password_value}')\n\n if not alters:\n if allow_empty:\n return alters\n\n raise click.UsageError(\n 'please specify an attribute to alter', ctx=ctx,\n )\n\n return alters\n\n\[email protected](name='create-superuser-role',\n help='Create a new database superuser')\[email protected]('role-name', type=str)\n@options(_role_options)\[email protected]_context\ndef create_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n\n attrs = \";\\n\".join(_process_role_options(ctx, allow_empty=True, **kwargs))\n\n if attrs:\n formatted_attrs = f'''\n {{\n {attrs}\n }}\n '''\n else:\n formatted_attrs = ''\n\n qry = f'''\n CREATE SUPERUSER ROLE {qi(role_name)} {formatted_attrs}\n '''\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n\n\[email protected](name='alter-role', help='Modify role')\[email protected]('role-name', type=str)\n@options(_role_options)\[email protected]_context\ndef alter_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n\n attrs = \";\\n\".join(_process_role_options(ctx, **kwargs))\n\n qry = f'''\n ALTER ROLE {qi(role_name)} {{\n {attrs}\n }}\n '''\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n\n\[email protected](name='drop-role', help=\"Remove role\")\[email protected]('role-name', type=str)\[email protected]_context\ndef drop_role(ctx, role_name, **kwargs):\n utils.connect(ctx)\n qry = f'''\n DROP ROLE {qi(role_name)};\n '''\n\n try:\n ctx.obj['conn'].execute(qry)\n except edgedb.EdgeDBError as e:\n raise click.ClickException(str(e)) from e\n", "path": "edb/cli/mng.py"}]}
4,062
333
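The substantive change in the `after_files` above is the new `allow_empty` flag on `_process_role_options`, which lets `create-superuser-role` succeed with no role attributes while `alter-role` keeps requiring at least one. A minimal sketch of that pattern, reduced to the password attribute only — the `ctx` plumbing and EdgeQL quoting are simplified, and this is not the project's actual helper:

```py
# Sketch of the allow_empty pattern from the record above: CREATE ROLE may
# legitimately pass no attributes, so it opts out of the usage error, while
# ALTER ROLE keeps the strict behavior.
import click

def process_role_options(ctx, password=None, allow_empty=False):
    alters = []
    if password is not None:
        alters.append(f"SET password := {password!r}")
    if not alters:
        if allow_empty:
            return alters
        raise click.UsageError("please specify an attribute to alter", ctx=ctx)
    return alters
```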
gh_patches_debug_28409
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-182
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- recycleapp_be not working for some addresses when I enter my address into the configuration.yaml I receive this error on restart: ``` fetch failed for source Recycle!: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py", line 116, in fetch entries = self._source.fetch() File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py", line 79, in fetch entries.append(Collection(date, item["fraction"]["name"]["en"])) KeyError: 'name' ``` when I use the example address or some other addresses everything works fine. Is it a problem with my city? Because other addresses of this city also don't work, even though those addresses work on [Recycle!](https://recycleapp.be/home). this is what I have in configuration.yaml ``` waste_collection_schedule: sources: - name: recycleapp_be args: postcode: 3001 street: Waversebaan house_number: 276 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py` Content: ``` 1 import logging 2 from datetime import datetime, timedelta 3 4 import requests 5 from waste_collection_schedule import Collection # type: ignore[attr-defined] 6 7 TITLE = "Recycle!" 8 DESCRIPTION = "Source for RecycleApp.be" 9 URL = "https://www.recycleapp.be" 10 TEST_CASES = { 11 "1140 Evere, Bazellaan 1": { 12 "postcode": 1140, 13 "street": "Bazellaan", 14 "house_number": 1, 15 } 16 } 17 18 _LOGGER = logging.getLogger(__name__) 19 20 21 class Source: 22 def __init__(self, postcode, street, house_number): 23 self._postcode = postcode 24 self._street = street 25 self._house_number = house_number 26 27 def fetch(self): 28 url = "https://recycleapp.be/api/app/v1" 29 headers = { 30 "x-secret": "Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW", 31 "x-consumer": "recycleapp.be", 32 "User-Agent": "", 33 "Authorization": "", 34 } 35 r = requests.get(f"{url}/access-token", headers=headers) 36 headers["Authorization"] = r.json()["accessToken"] 37 38 params = {"q": self._postcode} 39 r = requests.get(f"{url}/zipcodes", params=params, headers=headers) 40 if r.status_code != 200: 41 _LOGGER.error("Get zip code failed") 42 return [] 43 zipcodeId = r.json()["items"][0]["id"] 44 45 params = {"q": self._street, "zipcodes": zipcodeId} 46 r = requests.get(f"{url}/streets", params=params, headers=headers) 47 if r.status_code != 200: 48 _LOGGER.error("Get street id failed") 49 return [] 50 51 for item in r.json()["items"]: 52 if item["name"] == self._street: 53 streetId = item["id"] 54 if streetId is None: 55 streetId = r.json()["items"][0]["id"] 56 57 now = datetime.now() 58 fromDate = now.strftime("%Y-%m-%d") 59 untilDate = (now + timedelta(days=365)).strftime("%Y-%m-%d") 60 params = { 61 "zipcodeId": zipcodeId, 62 "streetId": streetId, 63 "houseNumber": self._house_number, 64 "fromDate": fromDate, 65 "untilDate": untilDate, 66 # "size":100, 67 } 68 r = requests.get(f"{url}/collections", params=params, headers=headers) 69 if r.status_code != 200: 70 _LOGGER.error("Get data failed") 71 return [] 72 73 entries = [] 74 for item in r.json()["items"]: 
75 if "exception" in item and "replacedBy" in item["exception"]: 76 continue 77 78 date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date() 79 entries.append(Collection(date, item["fraction"]["name"]["en"])) 80 return entries 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py @@ -12,17 +12,29 @@ "postcode": 1140, "street": "Bazellaan", "house_number": 1, - } + }, + "3001, Waversebaan 276 with events": { + "postcode": 3001, + "street": "Waversebaan", + "house_number": 276, + }, + "3001, Waversebaan 276 without events": { + "postcode": 3001, + "street": "Waversebaan", + "house_number": 276, + "add_events": False, + }, } _LOGGER = logging.getLogger(__name__) class Source: - def __init__(self, postcode, street, house_number): + def __init__(self, postcode, street, house_number, add_events=True): self._postcode = postcode self._street = street self._house_number = house_number + self._add_events = add_events def fetch(self): url = "https://recycleapp.be/api/app/v1" @@ -76,5 +88,9 @@ continue date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date() - entries.append(Collection(date, item["fraction"]["name"]["en"])) + if item["type"] == "collection": + entries.append(Collection(date, item["fraction"]["name"]["en"])) + elif item["type"] == "event" and self._add_events: + entries.append(Collection(date, item["event"]["title"]["en"])) + return entries
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n@@ -12,17 +12,29 @@\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n- }\n+ },\n+ \"3001, Waversebaan 276 with events\": {\n+ \"postcode\": 3001,\n+ \"street\": \"Waversebaan\",\n+ \"house_number\": 276,\n+ },\n+ \"3001, Waversebaan 276 without events\": {\n+ \"postcode\": 3001,\n+ \"street\": \"Waversebaan\",\n+ \"house_number\": 276,\n+ \"add_events\": False,\n+ },\n }\n \n _LOGGER = logging.getLogger(__name__)\n \n \n class Source:\n- def __init__(self, postcode, street, house_number):\n+ def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n+ self._add_events = add_events\n \n def fetch(self):\n url = \"https://recycleapp.be/api/app/v1\"\n@@ -76,5 +88,9 @@\n continue\n \n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n- entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n+ if item[\"type\"] == \"collection\":\n+ entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n+ elif item[\"type\"] == \"event\" and self._add_events:\n+ entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n+\n return entries\n", "issue": "recycleapp_be not working for some addresses\nwhen I enter my address into the configuration.yaml I receive this error on restart:\r\n```\r\nfetch failed for source Recycle!: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py\", \r\nline 116, in fetch entries = self._source.fetch() File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\", \r\nline 79, in fetch entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"])) KeyError: 'name'\r\n```\r\nwhen I use the example address or some other addresses everything works fine. Is it a problem with my city? 
Because other addresses of this city also don't work, even though those addresses work on [Recycle!](https://recycleapp.be/home).\r\nthis is what I have in configuration.yaml\r\n```\r\nwaste_collection_schedule:\r\n sources:\r\n - name: recycleapp_be\r\n args:\r\n postcode: 3001\r\n street: Waversebaan\r\n house_number: 276\r\n```\n", "before_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n }\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n\n def fetch(self):\n url = \"https://recycleapp.be/api/app/v1\"\n headers = {\n \"x-secret\": \"Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get zip code failed\")\n return []\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.get(f\"{url}/streets\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get street id failed\")\n return []\n\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get data failed\")\n return []\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n },\n \"3001, Waversebaan 276 with events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n },\n \"3001, Waversebaan 276 without events\": 
{\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n \"add_events\": False,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n self._add_events = add_events\n\n def fetch(self):\n url = \"https://recycleapp.be/api/app/v1\"\n headers = {\n \"x-secret\": \"Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get zip code failed\")\n return []\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.get(f\"{url}/streets\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get street id failed\")\n return []\n\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get data failed\")\n return []\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n if item[\"type\"] == \"collection\":\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n elif item[\"type\"] == \"event\" and self._add_events:\n entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}]}
1,434
466
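The KeyError in this record comes from treating every item returned by the Recycle! API as a collection; items of type "event" carry an `event.title` rather than a `fraction.name`, which is exactly what the golden diff branches on. A self-contained sketch of that dispatch — `Collection` here is a hypothetical stand-in for the component's entry class, and the item shapes are assumed from the payload handling quoted above:

```py
# Dispatch on item["type"] so calendar events no longer hit the
# fraction-name lookup that raised KeyError; mirrors the golden diff.
from datetime import datetime

class Collection:  # stand-in for waste_collection_schedule.Collection
    def __init__(self, date, waste_type):
        self.date, self.type = date, waste_type

def parse_items(items, add_events=True):
    entries = []
    for item in items:
        date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date()
        if item["type"] == "collection":
            entries.append(Collection(date, item["fraction"]["name"]["en"]))
        elif item["type"] == "event" and add_events:
            entries.append(Collection(date, item["event"]["title"]["en"]))
    return entries
```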
gh_patches_debug_31901
rasdani/github-patches
git_diff
fidals__shopelectro-753
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add sending mail action for given orders to admin panel We have got connection issue with smtp and didn't notify 5 client https://sentry.fidals.com/fidals/shopelectro/issues/36/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/admin.py` Content: ``` 1 from django.contrib import admin 2 from django.contrib.admin.widgets import FilteredSelectMultiple 3 from django.contrib.redirects.models import Redirect 4 from django.db import models as django_models 5 from django.urls import reverse 6 from django.utils.html import format_html 7 from django.utils.translation import ugettext_lazy as _ 8 9 from django_select2.forms import ModelSelect2Widget 10 11 from ecommerce.models import Position 12 from pages.models import CustomPage, FlatPage, PageTemplate 13 from generic_admin import inlines, mixins, models, sites 14 15 from shopelectro import models as se_models 16 from shopelectro.views.admin import TableEditor 17 18 19 class SEAdminSite(sites.SiteWithTableEditor): 20 21 site_header = 'Shopelectro administration' 22 table_editor_view = TableEditor 23 24 25 def prepare_has_filter_queryset(value, db_table, queryset): 26 if not value: 27 return 28 29 query = '{}__tags__isnull'.format(db_table) 30 31 # Use brackets, because `Explicit is better than implicit`. 32 return queryset.filter(**{query: value != 'yes'}) 33 34 35 class HasTagsFilter(admin.SimpleListFilter): 36 37 product_model = se_models.Product 38 title = _('has tags') 39 parameter_name = 'has_tags' 40 41 def lookups(self, request, model_admin): 42 return ( 43 ('yes', _('Has tags')), 44 ('no', _('Has no tags')), 45 ) 46 47 def queryset(self, request, queryset): 48 return prepare_has_filter_queryset( 49 self.value(), 50 self.product_model._meta.db_table, 51 queryset 52 ) 53 54 55 class HasCategoryFilter(admin.SimpleListFilter): 56 57 product_model = se_models.Product 58 title = _('has category') 59 parameter_name = 'has_category' 60 61 def lookups(self, request, model_admin): 62 return ( 63 ('yes', _('Has category')), 64 ('no', _('Has no category')), 65 ) 66 67 def queryset(self, request, queryset): 68 return prepare_has_filter_queryset( 69 self.value(), 70 self.product_model._meta.db_table, 71 queryset 72 ) 73 74 75 class TagInline(admin.StackedInline): 76 77 model = se_models.Tag 78 extra = 0 79 80 81 class CategoryInline(inlines.CategoryInline): 82 83 model = se_models.Category 84 85 fieldsets = ((None, { 86 'classes': ('primary-chars', ), 87 'fields': ( 88 ('name', 'id'), 89 ('parent', 'correct_parent_id'), 90 ) 91 }),) 92 93 94 class ProductInline(inlines.ProductInline): 95 96 model = se_models.Product 97 98 formfield_overrides = { 99 django_models.ManyToManyField: { 100 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False) 101 }, 102 } 103 104 fieldsets = ((None, { 105 'classes': ('primary-chars', ), 106 'fields': ( 107 ('name', 'id', 'vendor_code'), 108 ('category', 'correct_category_id'), 109 ('price', 'in_stock'), 110 'is_popular', 111 ('purchase_price', 'wholesale_small'), 112 ('wholesale_medium', 'wholesale_large'), 113 'tags', 114 ) 115 }),) 116 117 def formfield_for_dbfield(self, db_field, **kwargs): 118 if db_field.name == 'category': 119 kwargs['widget'] = ModelSelect2Widget( 120 model=se_models.Category, 121 search_fields=[ 122 'name__icontains', 123 'pk__startswith', 124 ], 125 ) 126 return 
super(ProductInline, self).formfield_for_dbfield( 127 db_field, 128 **kwargs, 129 ) 130 131 132 class PositionInline(admin.StackedInline): 133 134 model = Position 135 136 137 class CategoryPageAdmin(models.CategoryPageAdmin): 138 139 add = True 140 delete = False 141 inlines = [CategoryInline, inlines.ImageInline] 142 143 def get_queryset(self, request): 144 return ( 145 super(CategoryPageAdmin, self) 146 .get_queryset(request) 147 .select_related('shopelectro_category') 148 ) 149 150 151 class ProductPageAdmin(models.ProductPageAdmin): 152 153 add = False 154 delete = False 155 category_page_model = se_models.CategoryPage 156 list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter] 157 inlines = [ProductInline, inlines.ImageInline] 158 search_fields = [ 159 'shopelectro_product__vendor_code', 'name', 'slug', 160 ] 161 162 def model_id(self, obj): 163 return obj.model.vendor_code 164 165 model_id.short_description = _('Vendor code') 166 model_id.admin_order_field = 'shopelectro_product__vendor_code' 167 168 def get_queryset(self, request): 169 return ( 170 super(ProductPageAdmin, self) 171 .get_queryset(request) 172 .select_related('shopelectro_product') 173 ) 174 175 176 class ProductFeedbackPageAdmin(admin.ModelAdmin): 177 178 add = False 179 delete = False 180 list_filter = ['rating'] 181 list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links'] 182 183 def links(self, obj): 184 return format_html( 185 ''' 186 <a href="{url}" class="field-link" title="Посмотреть на сайте" target="_blank"> 187 <i class="fa fa-link" aria-hidden="true"></i> 188 </a> 189 '''.format(url=obj.product.url)) 190 191 links.short_description = _('Link') 192 193 def get_queryset(self, request): 194 return ( 195 super(ProductFeedbackPageAdmin, self) 196 .get_queryset(request) 197 .select_related('product') 198 ) 199 200 201 class TagGroupAdmin(admin.ModelAdmin): 202 203 list_display = ['id', 'name', 'position', 'count_tags'] 204 list_display_links = ['name'] 205 206 inlines = [TagInline] 207 208 def get_queryset(self, request): 209 return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags') 210 211 def count_tags(self, obj): 212 return obj.tags.count() 213 214 215 class TagAdmin(admin.ModelAdmin): 216 217 search_fields = ['id', 'name'] 218 list_display = ['id', 'name', 'position', 'custom_group'] 219 list_display_links = ['name'] 220 221 def get_queryset(self, request): 222 return super(TagAdmin, self).get_queryset(request).prefetch_related('group') 223 224 def custom_group(self, obj): 225 group = obj.group 226 227 return format_html( 228 '<a href="{url}">{group}</a>', 229 group=group, 230 url=reverse('admin:shopelectro_taggroup_change', args=(group.id, )) 231 ) 232 233 custom_group.admin_order_field = 'group' 234 custom_group.short_description = _('Group') 235 236 237 class OrderAdmin(mixins.PermissionsControl): 238 239 add = False 240 inlines = [PositionInline] 241 list_display = ['id_', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid'] 242 search_fields = ['name', 'email', 'phone'] 243 list_display_links = ['name'] 244 245 def id_(self, obj): 246 return obj.fake_order_number 247 248 249 se_admin = SEAdminSite(name='se_admin') 250 se_admin.register(CustomPage, models.CustomPageAdmin) 251 se_admin.register(FlatPage, models.FlatPageAdmin) 252 se_admin.register(PageTemplate, models.CustomPageTemplateAdmin) 253 254 se_admin.register(se_models.CategoryPage, CategoryPageAdmin) 255 se_admin.register(se_models.Order, OrderAdmin) 256 
se_admin.register(se_models.ProductPage, ProductPageAdmin) 257 se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin) 258 se_admin.register(se_models.TagGroup, TagGroupAdmin) 259 se_admin.register(se_models.Tag, TagAdmin) 260 261 se_admin.register(Redirect) 262 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/admin.py b/shopelectro/admin.py --- a/shopelectro/admin.py +++ b/shopelectro/admin.py @@ -1,3 +1,4 @@ +from django.conf import settings from django.contrib import admin from django.contrib.admin.widgets import FilteredSelectMultiple from django.contrib.redirects.models import Redirect @@ -8,6 +9,7 @@ from django_select2.forms import ModelSelect2Widget +from ecommerce import mailer from ecommerce.models import Position from pages.models import CustomPage, FlatPage, PageTemplate from generic_admin import inlines, mixins, models, sites @@ -234,6 +236,27 @@ custom_group.short_description = _('Group') +def send_order_emails(admin_model, request, order_qs): + for order in order_qs: + context = {'shop': settings.SHOP} + if order.email: + mailer.send_order( + subject=settings.EMAIL_SUBJECTS['order'], + order=order, + extra_context=context, + ) + else: + mailer.send_order( + subject=settings.EMAIL_SUBJECTS['one_click'], + order=order, + to_customer=False, + extra_context=context, + ) + + +send_order_emails.short_description = _('Sends email notifications about placed orders') + + class OrderAdmin(mixins.PermissionsControl): add = False @@ -241,6 +264,7 @@ list_display = ['id_', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid'] search_fields = ['name', 'email', 'phone'] list_display_links = ['name'] + actions = [send_order_emails] def id_(self, obj): return obj.fake_order_number
{"golden_diff": "diff --git a/shopelectro/admin.py b/shopelectro/admin.py\n--- a/shopelectro/admin.py\n+++ b/shopelectro/admin.py\n@@ -1,3 +1,4 @@\n+from django.conf import settings\n from django.contrib import admin\n from django.contrib.admin.widgets import FilteredSelectMultiple\n from django.contrib.redirects.models import Redirect\n@@ -8,6 +9,7 @@\n \n from django_select2.forms import ModelSelect2Widget\n \n+from ecommerce import mailer\n from ecommerce.models import Position\n from pages.models import CustomPage, FlatPage, PageTemplate\n from generic_admin import inlines, mixins, models, sites\n@@ -234,6 +236,27 @@\n custom_group.short_description = _('Group')\n \n \n+def send_order_emails(admin_model, request, order_qs):\n+ for order in order_qs:\n+ context = {'shop': settings.SHOP}\n+ if order.email:\n+ mailer.send_order(\n+ subject=settings.EMAIL_SUBJECTS['order'],\n+ order=order,\n+ extra_context=context,\n+ )\n+ else:\n+ mailer.send_order(\n+ subject=settings.EMAIL_SUBJECTS['one_click'],\n+ order=order,\n+ to_customer=False,\n+ extra_context=context,\n+ )\n+\n+\n+send_order_emails.short_description = _('Sends email notifications about placed orders')\n+\n+\n class OrderAdmin(mixins.PermissionsControl):\n \n add = False\n@@ -241,6 +264,7 @@\n list_display = ['id_', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n+ actions = [send_order_emails]\n \n def id_(self, obj):\n return obj.fake_order_number\n", "issue": "Add sending mail action for given orders to admin panel\nWe have got connection issue with smtp and didn't notify 5 client\r\nhttps://sentry.fidals.com/fidals/shopelectro/issues/36/\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom ecommerce.models import Position\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, mixins, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = '{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n 
self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id', 'vendor_code'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass PositionInline(admin.StackedInline):\n\n model = Position\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = True\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = [ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, 
self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\nclass OrderAdmin(mixins.PermissionsControl):\n\n add = False\n inlines = [PositionInline]\n list_display = ['id_', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n\n def id_(self, obj):\n return obj.fake_order_number\n\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.Order, OrderAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom ecommerce import mailer\nfrom ecommerce.models import Position\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, mixins, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = '{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass 
ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id', 'vendor_code'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass PositionInline(admin.StackedInline):\n\n model = Position\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = True\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = [ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\ndef send_order_emails(admin_model, request, order_qs):\n for order in 
order_qs:\n context = {'shop': settings.SHOP}\n if order.email:\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['order'],\n order=order,\n extra_context=context,\n )\n else:\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['one_click'],\n order=order,\n to_customer=False,\n extra_context=context,\n )\n\n\nsend_order_emails.short_description = _('Sends email notifications about placed orders')\n\n\nclass OrderAdmin(mixins.PermissionsControl):\n\n add = False\n inlines = [PositionInline]\n list_display = ['id_', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n actions = [send_order_emails]\n\n def id_(self, obj):\n return obj.fake_order_number\n\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.Order, OrderAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}]}
2,616
401
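The shopelectro patch is an instance of the standard Django admin-action pattern: a module-level callable with the `(modeladmin, request, queryset)` signature, registered via the ModelAdmin's `actions` list. Its skeleton, with a hypothetical `send_order` standing in for the project's `ecommerce.mailer` call:

```py
# Skeleton of a Django admin action like send_order_emails above;
# send_order is a placeholder for the project's mailer helper.
from django.contrib import admin

def send_order(order):  # hypothetical stand-in for mailer.send_order(...)
    ...

def send_order_emails(modeladmin, request, queryset):
    for order in queryset:  # queryset holds the orders ticked in the changelist
        send_order(order)

send_order_emails.short_description = "Send email notifications for selected orders"

class OrderAdmin(admin.ModelAdmin):
    actions = [send_order_emails]  # exposes the action in the admin dropdown
```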
gh_patches_debug_27458
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-4889
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Py3.6: Unable to find .../site-packages/importlib_resources/version.txt" Hello, On latest version of pyinstaller, the hook for importlib_resource seems to look for a non existing version.txt file. It is not provided by the latest version 1.2.0 of the backport: https://gitlab.com/python-devs/importlib_resources --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-importlib_resources.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2019-2020, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 """ 12 `importlib_resources` is a backport of the 3.7+ module `importlib.resources` 13 """ 14 15 import os 16 from PyInstaller.compat import is_py37 17 from PyInstaller.utils.hooks import get_module_file_attribute 18 19 # Include the version.txt file, used to set __version__ 20 res_loc = os.path.dirname(get_module_file_attribute('importlib_resources')) 21 datas = [ 22 (os.path.join(res_loc, 'version.txt'), 'importlib_resources'), 23 ] 24 25 # Replicate the module's version checks to exclude unused modules. 26 if is_py37: 27 # Stdlib now has the implmentation of this, so the backports 28 # aren't used at all 29 excludedmodules = [ 30 'importlib_resources._py2', 31 'importlib_resources._py3', 32 ] 33 else: 34 excludedmodules = ['importlib_resources._py2'] 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-importlib_resources.py b/PyInstaller/hooks/hook-importlib_resources.py --- a/PyInstaller/hooks/hook-importlib_resources.py +++ b/PyInstaller/hooks/hook-importlib_resources.py @@ -9,26 +9,25 @@ # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) #----------------------------------------------------------------------------- """ -`importlib_resources` is a backport of the 3.7+ module `importlib.resources` +`importlib_resources` is a backport of the 3.9+ module `importlib.resources` """ import os -from PyInstaller.compat import is_py37 -from PyInstaller.utils.hooks import get_module_file_attribute +from PyInstaller.utils.hooks import get_module_file_attribute, \ + is_module_satisfies, copy_metadata -# Include the version.txt file, used to set __version__ -res_loc = os.path.dirname(get_module_file_attribute('importlib_resources')) -datas = [ - (os.path.join(res_loc, 'version.txt'), 'importlib_resources'), -] - -# Replicate the module's version checks to exclude unused modules. -if is_py37: - # Stdlib now has the implmentation of this, so the backports - # aren't used at all - excludedmodules = [ - 'importlib_resources._py2', - 'importlib_resources._py3', - ] +if is_module_satisfies("importlib_resources >= 1.2.0"): + # since 1.2.0 importlib.metadata is used + datas = copy_metadata('importlib_resources') else: - excludedmodules = ['importlib_resources._py2'] + # include the version.txt file, used to set __version__ + res_loc = os.path.dirname(get_module_file_attribute('importlib_resources')) + datas = [ + (os.path.join(res_loc, 'version.txt'), 'importlib_resources'), + ] + +if is_module_satisfies("importlib_resources >= 1.3.1"): + hiddenimports = ['importlib_resources.trees'] + +# this is only required for python2 support +excludedimports = ['importlib_resources._py2']
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-importlib_resources.py b/PyInstaller/hooks/hook-importlib_resources.py\n--- a/PyInstaller/hooks/hook-importlib_resources.py\n+++ b/PyInstaller/hooks/hook-importlib_resources.py\n@@ -9,26 +9,25 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \"\"\"\n-`importlib_resources` is a backport of the 3.7+ module `importlib.resources`\n+`importlib_resources` is a backport of the 3.9+ module `importlib.resources`\n \"\"\"\n \n import os\n-from PyInstaller.compat import is_py37\n-from PyInstaller.utils.hooks import get_module_file_attribute\n+from PyInstaller.utils.hooks import get_module_file_attribute, \\\n+ is_module_satisfies, copy_metadata\n \n-# Include the version.txt file, used to set __version__\n-res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\n-datas = [\n- (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n-]\n-\n-# Replicate the module's version checks to exclude unused modules.\n-if is_py37:\n- # Stdlib now has the implmentation of this, so the backports\n- # aren't used at all\n- excludedmodules = [\n- 'importlib_resources._py2',\n- 'importlib_resources._py3',\n- ]\n+if is_module_satisfies(\"importlib_resources >= 1.2.0\"):\n+ # since 1.2.0 importlib.metadata is used\n+ datas = copy_metadata('importlib_resources')\n else:\n- excludedmodules = ['importlib_resources._py2']\n+ # include the version.txt file, used to set __version__\n+ res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\n+ datas = [\n+ (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n+ ]\n+\n+if is_module_satisfies(\"importlib_resources >= 1.3.1\"):\n+ hiddenimports = ['importlib_resources.trees']\n+\n+# this is only required for python2 support\n+excludedimports = ['importlib_resources._py2']\n", "issue": "Py3.6: Unable to find .../site-packages/importlib_resources/version.txt\"\nHello,\r\n\r\nOn latest version of pyinstaller, the hook for importlib_resource seems to look for a non existing version.txt file. 
It is not provided by the latest version 1.2.0 of the backport: https://gitlab.com/python-devs/importlib_resources\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2019-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\n`importlib_resources` is a backport of the 3.7+ module `importlib.resources`\n\"\"\"\n\nimport os\nfrom PyInstaller.compat import is_py37\nfrom PyInstaller.utils.hooks import get_module_file_attribute\n\n# Include the version.txt file, used to set __version__\nres_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\ndatas = [\n (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n]\n\n# Replicate the module's version checks to exclude unused modules.\nif is_py37:\n # Stdlib now has the implmentation of this, so the backports\n # aren't used at all\n excludedmodules = [\n 'importlib_resources._py2',\n 'importlib_resources._py3',\n ]\nelse:\n excludedmodules = ['importlib_resources._py2']\n", "path": "PyInstaller/hooks/hook-importlib_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2019-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\n`importlib_resources` is a backport of the 3.9+ module `importlib.resources`\n\"\"\"\n\nimport os\nfrom PyInstaller.utils.hooks import get_module_file_attribute, \\\n is_module_satisfies, copy_metadata\n\nif is_module_satisfies(\"importlib_resources >= 1.2.0\"):\n # since 1.2.0 importlib.metadata is used\n datas = copy_metadata('importlib_resources')\nelse:\n # include the version.txt file, used to set __version__\n res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\n datas = [\n (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n ]\n\nif is_module_satisfies(\"importlib_resources >= 1.3.1\"):\n hiddenimports = ['importlib_resources.trees']\n\n# this is only required for python2 support\nexcludedimports = ['importlib_resources._py2']\n", "path": "PyInstaller/hooks/hook-importlib_resources.py"}]}
676
491
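The record above patches PyInstaller's importlib_resources hook to stop bundling a version.txt that disappeared in 1.2.0: from that release the package resolves `__version__` through dist-info metadata, so the hook must ship the metadata instead of the loose file. A minimal sketch of that version-gated hook pattern, using PyInstaller's documented hook utilities (this restates the golden diff's core branch rather than adding new behaviour):

```python
import os

from PyInstaller.utils.hooks import (
    copy_metadata,
    get_module_file_attribute,
    is_module_satisfies,
)

if is_module_satisfies("importlib_resources >= 1.2.0"):
    # 1.2.0+ reads its version via importlib.metadata, so bundle the
    # package's *.dist-info directory instead of a loose file.
    datas = copy_metadata("importlib_resources")
else:
    # Older releases ship and read version.txt next to the package.
    res_loc = os.path.dirname(get_module_file_attribute("importlib_resources"))
    datas = [(os.path.join(res_loc, "version.txt"), "importlib_resources")]
```

`copy_metadata` returns `(source, dest)` tuples pointing at the installed distribution's metadata directory, which is why the new branch needs no explicit path handling.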
gh_patches_debug_27546
rasdani/github-patches
git_diff
TheAlgorithms__Python-10822
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve our test coverage ### Feature description Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase. ### How to find low-coverage files Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage: ``` ---------- coverage: platform linux, python 3.12.0-final-0 ----------- Name Stmts Miss Cover Missing ----------------------------------------------------------------------------------------------------------- quantum/q_fourier_transform.py 30 30 0% 14-93 scripts/validate_solutions.py 54 54 0% 2-94 strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129 ... ``` The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests. Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage. _**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue. ### How to add doctests A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring: ```py def add(a: int, b: int) -> int: """ Adds two non-negative numbers. >>> add(1, 1) 2 >>> add(2, 5) 7 >>> add(1, 0) 1 >>> add(-1, -1) Traceback (most recent last): ... ValueError: Numbers must be non-negative """ ``` For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc). Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it. 
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `maths/binomial_coefficient.py` Content: ``` 1 def binomial_coefficient(n: int, r: int) -> int: 2 """ 3 Find binomial coefficient using pascals triangle. 4 5 >>> binomial_coefficient(10, 5) 6 252 7 """ 8 c = [0 for i in range(r + 1)] 9 # nc0 = 1 10 c[0] = 1 11 for i in range(1, n + 1): 12 # to compute current row from previous row. 13 j = min(i, r) 14 while j > 0: 15 c[j] += c[j - 1] 16 j -= 1 17 return c[r] 18 19 20 print(binomial_coefficient(n=10, r=5)) 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py --- a/maths/binomial_coefficient.py +++ b/maths/binomial_coefficient.py @@ -1,10 +1,48 @@ def binomial_coefficient(n: int, r: int) -> int: """ - Find binomial coefficient using pascals triangle. + Find binomial coefficient using Pascal's triangle. + + Calculate C(n, r) using Pascal's triangle. + + :param n: The total number of items. + :param r: The number of items to choose. + :return: The binomial coefficient C(n, r). >>> binomial_coefficient(10, 5) 252 + >>> binomial_coefficient(10, 0) + 1 + >>> binomial_coefficient(0, 10) + 1 + >>> binomial_coefficient(10, 10) + 1 + >>> binomial_coefficient(5, 2) + 10 + >>> binomial_coefficient(5, 6) + 0 + >>> binomial_coefficient(3, 5) + 0 + >>> binomial_coefficient(-2, 3) + Traceback (most recent call last): + ... + ValueError: n and r must be non-negative integers + >>> binomial_coefficient(5, -1) + Traceback (most recent call last): + ... + ValueError: n and r must be non-negative integers + >>> binomial_coefficient(10.1, 5) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer + >>> binomial_coefficient(10, 5.1) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer """ + if n < 0 or r < 0: + raise ValueError("n and r must be non-negative integers") + if 0 in (n, r): + return 1 c = [0 for i in range(r + 1)] # nc0 = 1 c[0] = 1 @@ -17,4 +55,8 @@ return c[r] -print(binomial_coefficient(n=10, r=5)) +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(binomial_coefficient(n=10, r=5))
{"golden_diff": "diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py\n--- a/maths/binomial_coefficient.py\n+++ b/maths/binomial_coefficient.py\n@@ -1,10 +1,48 @@\n def binomial_coefficient(n: int, r: int) -> int:\n \"\"\"\n- Find binomial coefficient using pascals triangle.\n+ Find binomial coefficient using Pascal's triangle.\n+\n+ Calculate C(n, r) using Pascal's triangle.\n+\n+ :param n: The total number of items.\n+ :param r: The number of items to choose.\n+ :return: The binomial coefficient C(n, r).\n \n >>> binomial_coefficient(10, 5)\n 252\n+ >>> binomial_coefficient(10, 0)\n+ 1\n+ >>> binomial_coefficient(0, 10)\n+ 1\n+ >>> binomial_coefficient(10, 10)\n+ 1\n+ >>> binomial_coefficient(5, 2)\n+ 10\n+ >>> binomial_coefficient(5, 6)\n+ 0\n+ >>> binomial_coefficient(3, 5)\n+ 0\n+ >>> binomial_coefficient(-2, 3)\n+ Traceback (most recent call last):\n+ ...\n+ ValueError: n and r must be non-negative integers\n+ >>> binomial_coefficient(5, -1)\n+ Traceback (most recent call last):\n+ ...\n+ ValueError: n and r must be non-negative integers\n+ >>> binomial_coefficient(10.1, 5)\n+ Traceback (most recent call last):\n+ ...\n+ TypeError: 'float' object cannot be interpreted as an integer\n+ >>> binomial_coefficient(10, 5.1)\n+ Traceback (most recent call last):\n+ ...\n+ TypeError: 'float' object cannot be interpreted as an integer\n \"\"\"\n+ if n < 0 or r < 0:\n+ raise ValueError(\"n and r must be non-negative integers\")\n+ if 0 in (n, r):\n+ return 1\n c = [0 for i in range(r + 1)]\n # nc0 = 1\n c[0] = 1\n@@ -17,4 +55,8 @@\n return c[r]\n \n \n-print(binomial_coefficient(n=10, r=5))\n+if __name__ == \"__main__\":\n+ from doctest import testmod\n+\n+ testmod()\n+ print(binomial_coefficient(n=10, r=5))\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. 
Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "def binomial_coefficient(n: int, r: int) -> int:\n \"\"\"\n Find binomial coefficient using pascals triangle.\n\n >>> binomial_coefficient(10, 5)\n 252\n \"\"\"\n c = [0 for i in range(r + 1)]\n # nc0 = 1\n c[0] = 1\n for i in range(1, n + 1):\n # to compute current row from previous row.\n j = min(i, r)\n while j > 0:\n c[j] += c[j - 1]\n j -= 1\n return c[r]\n\n\nprint(binomial_coefficient(n=10, r=5))\n", "path": "maths/binomial_coefficient.py"}], "after_files": [{"content": "def binomial_coefficient(n: int, r: int) -> int:\n \"\"\"\n Find binomial coefficient using Pascal's triangle.\n\n Calculate C(n, r) using Pascal's triangle.\n\n :param n: The total number of items.\n :param r: The number of items to choose.\n :return: The binomial coefficient C(n, r).\n\n >>> binomial_coefficient(10, 5)\n 252\n >>> binomial_coefficient(10, 0)\n 1\n >>> binomial_coefficient(0, 10)\n 1\n >>> binomial_coefficient(10, 10)\n 1\n >>> binomial_coefficient(5, 2)\n 10\n >>> binomial_coefficient(5, 6)\n 0\n >>> binomial_coefficient(3, 5)\n 0\n >>> binomial_coefficient(-2, 3)\n Traceback (most recent call last):\n ...\n ValueError: n and r must be non-negative integers\n >>> binomial_coefficient(5, -1)\n Traceback (most recent call last):\n ...\n ValueError: n and r must be non-negative integers\n >>> binomial_coefficient(10.1, 5)\n Traceback (most recent call last):\n ...\n TypeError: 'float' object cannot be interpreted as an integer\n >>> binomial_coefficient(10, 5.1)\n Traceback (most recent call last):\n ...\n TypeError: 'float' object cannot be interpreted as an integer\n \"\"\"\n if n < 0 or r < 0:\n raise ValueError(\"n and r must be 
non-negative integers\")\n if 0 in (n, r):\n return 1\n c = [0 for i in range(r + 1)]\n # nc0 = 1\n c[0] = 1\n for i in range(1, n + 1):\n # to compute current row from previous row.\n j = min(i, r)\n while j > 0:\n c[j] += c[j - 1]\n j -= 1\n return c[r]\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n print(binomial_coefficient(n=10, r=5))\n", "path": "maths/binomial_coefficient.py"}]}
1293
595
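A quick independent check of the doctests added in the binomial-coefficient record above is to compare the Pascal's-triangle implementation against the stdlib closed form `math.comb`. One deliberate divergence to keep in mind: the patched `0 in (n, r)` shortcut returns 1 for `n = 0, r > 0`, while `math.comb(0, r)` returns 0, so this sketch only compares `n >= 1` (the import path assumes you run it from the repository root):

```python
from math import comb

from maths.binomial_coefficient import binomial_coefficient

# Matches C(n, r) = n! / (r! * (n - r)!) for n >= 1, including the
# r > n cases, where both sides evaluate to 0.
assert all(
    binomial_coefficient(n, r) == comb(n, r)
    for n in range(1, 15)
    for r in range(0, n + 3)
)
print("Pascal's-triangle implementation agrees with math.comb")
```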
gh_patches_debug_16578
rasdani/github-patches
git_diff
doccano__doccano-1668
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pagination of the project list When fetching projects in the project list page, is it intentional that all projects are fetched at once even though there is pagination? Endpoint of project list fetching: `/v1/projects` When there are a lot of projects, it takes a long time to display them. Your Environment --------- doccano v1.5.5 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/api/views/project.py` Content: ``` 1 from django.conf import settings 2 from rest_framework import generics, status 3 from rest_framework.permissions import IsAdminUser, IsAuthenticated 4 from rest_framework.response import Response 5 6 from members.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly 7 8 from ..models import Project 9 from ..serializers import ProjectPolymorphicSerializer 10 11 12 class ProjectList(generics.ListCreateAPIView): 13 serializer_class = ProjectPolymorphicSerializer 14 pagination_class = None 15 16 def get_permissions(self): 17 if self.request.method == 'GET': 18 self.permission_classes = [IsAuthenticated, ] 19 else: 20 self.permission_classes = [IsAuthenticated & IsAdminUser] 21 return super().get_permissions() 22 23 def get_queryset(self): 24 return Project.objects.filter(role_mappings__user=self.request.user) 25 26 def perform_create(self, serializer): 27 serializer.save(created_by=self.request.user) 28 29 def delete(self, request, *args, **kwargs): 30 delete_ids = request.data['ids'] 31 projects = Project.objects.filter( 32 role_mappings__user=self.request.user, 33 role_mappings__role__name=settings.ROLE_PROJECT_ADMIN, 34 pk__in=delete_ids 35 ) 36 # Todo: I want to use bulk delete. 37 # But it causes the constraint error. 38 # See https://github.com/django-polymorphic/django-polymorphic/issues/229 39 for project in projects: 40 project.delete() 41 return Response(status=status.HTTP_204_NO_CONTENT) 42 43 44 class ProjectDetail(generics.RetrieveUpdateDestroyAPIView): 45 queryset = Project.objects.all() 46 serializer_class = ProjectPolymorphicSerializer 47 lookup_url_kwarg = 'project_id' 48 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)] 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/api/views/project.py b/backend/api/views/project.py --- a/backend/api/views/project.py +++ b/backend/api/views/project.py @@ -1,5 +1,6 @@ from django.conf import settings -from rest_framework import generics, status +from django_filters.rest_framework import DjangoFilterBackend +from rest_framework import filters, generics, status from rest_framework.permissions import IsAdminUser, IsAuthenticated from rest_framework.response import Response @@ -11,7 +12,8 @@ class ProjectList(generics.ListCreateAPIView): serializer_class = ProjectPolymorphicSerializer - pagination_class = None + filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter) + search_fields = ('name', 'description') def get_permissions(self): if self.request.method == 'GET':
{"golden_diff": "diff --git a/backend/api/views/project.py b/backend/api/views/project.py\n--- a/backend/api/views/project.py\n+++ b/backend/api/views/project.py\n@@ -1,5 +1,6 @@\n from django.conf import settings\n-from rest_framework import generics, status\n+from django_filters.rest_framework import DjangoFilterBackend\n+from rest_framework import filters, generics, status\n from rest_framework.permissions import IsAdminUser, IsAuthenticated\n from rest_framework.response import Response\n \n@@ -11,7 +12,8 @@\n \n class ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n- pagination_class = None\n+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n+ search_fields = ('name', 'description')\n \n def get_permissions(self):\n if self.request.method == 'GET':\n", "issue": "Pagination of the project list\nWhen fetching projects in the project list page, is it intentional that all projects are fetched at once even though there is pagination?\r\n\r\nEndpoint of project list fetching: `/v1/projects`\r\n\r\nWhen there are a lot of projects, it takes a long time to display them.\r\n\r\nYour Environment\r\n---------\r\ndoccano v1.5.5\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom members.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\nfrom ..models import Project\nfrom ..serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n pagination_class = None\n\n def get_permissions(self):\n if self.request.method == 'GET':\n self.permission_classes = [IsAuthenticated, ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data['ids']\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectPolymorphicSerializer\n lookup_url_kwarg = 'project_id'\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/api/views/project.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom members.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\nfrom ..models import Project\nfrom ..serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = 
(DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('name', 'description')\n\n def get_permissions(self):\n if self.request.method == 'GET':\n self.permission_classes = [IsAuthenticated, ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data['ids']\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectPolymorphicSerializer\n lookup_url_kwarg = 'project_id'\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/api/views/project.py"}]}
792
185
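Two changes in the doccano patch combine to resolve the report: deleting `pagination_class = None` lets DRF's globally configured paginator apply to `/v1/projects` again, and the new filter backends add server-side narrowing via query parameters. A sketch of the client-visible behaviour, assuming a conventional `REST_FRAMEWORK` pagination default (doccano's actual settings may differ):

```python
# settings.py excerpt -- without some default like this, removing
# `pagination_class = None` alone would still return the full list.
REST_FRAMEWORK = {
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 20,
}

# Requests against the patched endpoint:
#   GET /v1/projects?limit=20&offset=40   -> one page of 20 projects
#   GET /v1/projects?search=invoice       -> icontains match on name/description
#   GET /v1/projects?ordering=-id         -> sorted by OrderingFilter
```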
gh_patches_debug_38925
rasdani/github-patches
git_diff
sktime__sktime-5942
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] TDistribution pdf does not consider location/scale **Describe the bug** The location/scale (mu/sigma) are not considered at all in methods: pdf, log_pdf, cdf or ppf For example, the case pdf: ``` def pdf(self, x): """Probability density function.""" d = self.loc[x.index, x.columns] pdf_arr = gamma((d._df + 1) / 2) pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2)) pdf_arr = pdf_arr * (1 + x**2 / d._df) ** (-(d._df + 1) / 2) return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns) ``` Here, in line 6, instead of x**2 one should use ((x - d._mu)/d._sigma)**2 sktime version 0.25.1 [BUG] TDistribution computes incorrectly variance **Describe the bug** TDistribution does not use scale (sigma) for variance computation ``` def var(self): ... df_arr = self._df.copy() df_arr = df_arr.astype(np.float32) df_arr[df_arr <= 2] = np.inf mask = (df_arr > 2) & (df_arr != np.inf) df_arr[mask] = df_arr[mask] / (df_arr[mask] - 2) return pd.DataFrame(df_arr, index=self.index, columns=self.columns) ``` one should correct the previous to last line to: ``` df_arr[mask] = self._sigma** 2 * df_arr[mask] / (df_arr[mask] - 2) ``` sktime version 0.25.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sktime/proba/t.py` Content: ``` 1 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) 2 """Student's t-distribution.""" 3 4 __author__ = ["Alex-JG3"] 5 6 import numpy as np 7 import pandas as pd 8 from scipy.special import betaincinv, gamma, hyp2f1, loggamma 9 10 from sktime.proba.base import BaseDistribution 11 12 13 class TDistribution(BaseDistribution): 14 """Student's t-distribution (sktime native). 15 16 Parameters 17 ---------- 18 mean : float or array of float (1D or 2D) 19 mean of the t-distribution distribution 20 sd : float or array of float (1D or 2D), must be positive 21 standard deviation of the t-distribution distribution 22 df : float or array of float (1D or 2D), must be positive 23 Degrees of freedom of the t-distribution 24 index : pd.Index, optional, default = RangeIndex 25 columns : pd.Index, optional, default = RangeIndex 26 27 Example 28 ------- 29 >>> from sktime.proba.t import TDistribution 30 31 >>> n = TDistribution(mu=[[0, 1], [2, 3], [4, 5]], sigma=1, df=10) 32 """ 33 34 _tags = { 35 "authors": ["Alex-JG3"], 36 "maintainers": ["Alex-JG3"], 37 "capabilities:approx": ["pdfnorm", "energy"], 38 "capabilities:exact": ["mean", "var", "pdf", "log_pdf", "cdf", "ppf"], 39 "distr:measuretype": "continuous", 40 } 41 42 def __init__(self, mu, sigma, df=1, index=None, columns=None): 43 self.mu = mu 44 self.sigma = sigma 45 self.df = df 46 self.index = index 47 self.columns = columns 48 49 self._mu, self._sigma, self._df = self._get_bc_params( 50 self.mu, self.sigma, self.df 51 ) 52 shape = self._mu.shape 53 54 if index is None: 55 index = pd.RangeIndex(shape[0]) 56 57 if columns is None: 58 columns = pd.RangeIndex(shape[1]) 59 60 super().__init__(index=index, columns=columns) 61 62 def mean(self): 63 r"""Return expected value of the distribution. 64 65 Let :math:`X` be a random variable with the distribution of `self`. 66 Returns the expectation :math:`\mathbb{E}[X]`. The expectation, 67 :math:`\mathbb{E}[X]`, as infinite if :math:`\nu \le 1`. 
68 69 Returns 70 ------- 71 pd.DataFrame with same rows, columns as `self` 72 expected value of distribution (entry-wise) 73 """ 74 mean_arr = self._mu.copy() 75 if (self._df <= 1).any(): 76 mean_arr = mean_arr.astype(np.float32) 77 mean_arr[self._df <= 1] = np.inf 78 return pd.DataFrame(mean_arr, index=self.index, columns=self.columns) 79 80 def var(self): 81 r"""Return element/entry-wise variance of the distribution. 82 83 Let :math:`X` be a random variable with the distribution of `self`. 84 Returns, 85 86 .. math:: 87 \mathbb{V}[X] = \begin{cases} 88 \frac{\nu}{\nu - 2} & \text{if} \nu > 2, \\ 89 \infty & \text{if} \nu \le 2, \\ 90 \begin{cases} 91 92 Where :math:`\nu` is the degrees of freedom of the t-distribution. 93 94 Returns 95 ------- 96 pd.DataFrame with same rows, columns as `self` 97 variance of distribution (entry-wise) 98 """ 99 df_arr = self._df.copy() 100 df_arr = df_arr.astype(np.float32) 101 df_arr[df_arr <= 2] = np.inf 102 mask = (df_arr > 2) & (df_arr != np.inf) 103 df_arr[mask] = df_arr[mask] / (df_arr[mask] - 2) 104 return pd.DataFrame(df_arr, index=self.index, columns=self.columns) 105 106 def pdf(self, x): 107 """Probability density function.""" 108 d = self.loc[x.index, x.columns] 109 pdf_arr = gamma((d._df + 1) / 2) 110 pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2)) 111 pdf_arr = pdf_arr * (1 + x**2 / d._df) ** (-(d._df + 1) / 2) 112 return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns) 113 114 def log_pdf(self, x): 115 """Logarithmic probability density function.""" 116 d = self.loc[x.index, x.columns] 117 lpdf_arr = loggamma((d._df + 1) / 2) 118 lpdf_arr = lpdf_arr - 0.5 * np.log(d._df * np.pi) 119 lpdf_arr = lpdf_arr - loggamma(d._df / 2) 120 lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log(1 + x**2 / d._df) 121 return pd.DataFrame(lpdf_arr, index=x.index, columns=x.columns) 122 123 def cdf(self, x): 124 """Cumulative distribution function.""" 125 d = self.loc[x.index, x.columns] 126 cdf_arr = x * gamma((d._df + 1) / 2) 127 cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x**2) / d._df) 128 cdf_arr = 0.5 + cdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2)) 129 return pd.DataFrame(cdf_arr, index=x.index, columns=x.columns) 130 131 def ppf(self, p): 132 """Quantile function = percent point function = inverse cdf.""" 133 d = self.loc[p.index, p.columns] 134 ppf_arr = p.to_numpy(copy=True) 135 ppf_arr[p.values == 0.5] = 0.0 136 ppf_arr[p.values <= 0] = -np.inf 137 ppf_arr[p.values >= 1] = np.inf 138 139 mask1 = (p.values < 0.5) & (p.values > 0) 140 mask2 = (p.values < 1) & (p.values > 0.5) 141 ppf_arr[mask1] = 1 / betaincinv(0.5 * d._df[mask1], 0.5, 2 * ppf_arr[mask1]) 142 ppf_arr[mask2] = 1 / betaincinv( 143 0.5 * d._df[mask2], 0.5, 2 * (1 - ppf_arr[mask2]) 144 ) 145 ppf_arr[mask1 | mask2] = np.sqrt(ppf_arr[mask1 | mask2] - 1) 146 ppf_arr[mask1 | mask2] = np.sqrt(d._df[mask1 | mask2]) * ppf_arr[mask1 | mask2] 147 ppf_arr[mask1] = -ppf_arr[mask1] 148 return pd.DataFrame(ppf_arr, index=p.index, columns=p.columns) 149 150 @classmethod 151 def get_test_params(cls, parameter_set="default"): 152 """Return testing parameter settings for the estimator.""" 153 params1 = {"mu": [[0, 1], [2, 3], [4, 5]], "sigma": 1} 154 params2 = { 155 "mu": 0, 156 "sigma": 1, 157 "index": pd.Index([1, 2, 5]), 158 "columns": pd.Index(["a", "b"]), 159 } 160 return [params1, params2] 161 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sktime/proba/t.py b/sktime/proba/t.py --- a/sktime/proba/t.py +++ b/sktime/proba/t.py @@ -100,7 +100,7 @@ df_arr = df_arr.astype(np.float32) df_arr[df_arr <= 2] = np.inf mask = (df_arr > 2) & (df_arr != np.inf) - df_arr[mask] = df_arr[mask] / (df_arr[mask] - 2) + df_arr[mask] = self._sigma[mask] ** 2 * df_arr[mask] / (df_arr[mask] - 2) return pd.DataFrame(df_arr, index=self.index, columns=self.columns) def pdf(self, x): @@ -108,7 +108,10 @@ d = self.loc[x.index, x.columns] pdf_arr = gamma((d._df + 1) / 2) pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2)) - pdf_arr = pdf_arr * (1 + x**2 / d._df) ** (-(d._df + 1) / 2) + pdf_arr = pdf_arr * (1 + ((x - d._mu) / d._sigma) ** 2 / d._df) ** ( + -(d._df + 1) / 2 + ) + pdf_arr = pdf_arr / d._sigma return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns) def log_pdf(self, x): @@ -117,14 +120,18 @@ lpdf_arr = loggamma((d._df + 1) / 2) lpdf_arr = lpdf_arr - 0.5 * np.log(d._df * np.pi) lpdf_arr = lpdf_arr - loggamma(d._df / 2) - lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log(1 + x**2 / d._df) + lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log( + 1 + ((x - d._mu) / d._sigma) ** 2 / d._df + ) + lpdf_arr = lpdf_arr - np.log(d._sigma) return pd.DataFrame(lpdf_arr, index=x.index, columns=x.columns) def cdf(self, x): """Cumulative distribution function.""" d = self.loc[x.index, x.columns] - cdf_arr = x * gamma((d._df + 1) / 2) - cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x**2) / d._df) + x_ = (x - d._mu) / d._sigma + cdf_arr = x_ * gamma((d._df + 1) / 2) + cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x_**2) / d._df) cdf_arr = 0.5 + cdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2)) return pd.DataFrame(cdf_arr, index=x.index, columns=x.columns) @@ -145,6 +152,7 @@ ppf_arr[mask1 | mask2] = np.sqrt(ppf_arr[mask1 | mask2] - 1) ppf_arr[mask1 | mask2] = np.sqrt(d._df[mask1 | mask2]) * ppf_arr[mask1 | mask2] ppf_arr[mask1] = -ppf_arr[mask1] + ppf_arr = d._sigma * ppf_arr + d._mu return pd.DataFrame(ppf_arr, index=p.index, columns=p.columns) @classmethod
{"golden_diff": "diff --git a/sktime/proba/t.py b/sktime/proba/t.py\n--- a/sktime/proba/t.py\n+++ b/sktime/proba/t.py\n@@ -100,7 +100,7 @@\n df_arr = df_arr.astype(np.float32)\n df_arr[df_arr <= 2] = np.inf\n mask = (df_arr > 2) & (df_arr != np.inf)\n- df_arr[mask] = df_arr[mask] / (df_arr[mask] - 2)\n+ df_arr[mask] = self._sigma[mask] ** 2 * df_arr[mask] / (df_arr[mask] - 2)\n return pd.DataFrame(df_arr, index=self.index, columns=self.columns)\n \n def pdf(self, x):\n@@ -108,7 +108,10 @@\n d = self.loc[x.index, x.columns]\n pdf_arr = gamma((d._df + 1) / 2)\n pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2))\n- pdf_arr = pdf_arr * (1 + x**2 / d._df) ** (-(d._df + 1) / 2)\n+ pdf_arr = pdf_arr * (1 + ((x - d._mu) / d._sigma) ** 2 / d._df) ** (\n+ -(d._df + 1) / 2\n+ )\n+ pdf_arr = pdf_arr / d._sigma\n return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns)\n \n def log_pdf(self, x):\n@@ -117,14 +120,18 @@\n lpdf_arr = loggamma((d._df + 1) / 2)\n lpdf_arr = lpdf_arr - 0.5 * np.log(d._df * np.pi)\n lpdf_arr = lpdf_arr - loggamma(d._df / 2)\n- lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log(1 + x**2 / d._df)\n+ lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log(\n+ 1 + ((x - d._mu) / d._sigma) ** 2 / d._df\n+ )\n+ lpdf_arr = lpdf_arr - np.log(d._sigma)\n return pd.DataFrame(lpdf_arr, index=x.index, columns=x.columns)\n \n def cdf(self, x):\n \"\"\"Cumulative distribution function.\"\"\"\n d = self.loc[x.index, x.columns]\n- cdf_arr = x * gamma((d._df + 1) / 2)\n- cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x**2) / d._df)\n+ x_ = (x - d._mu) / d._sigma\n+ cdf_arr = x_ * gamma((d._df + 1) / 2)\n+ cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x_**2) / d._df)\n cdf_arr = 0.5 + cdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2))\n return pd.DataFrame(cdf_arr, index=x.index, columns=x.columns)\n \n@@ -145,6 +152,7 @@\n ppf_arr[mask1 | mask2] = np.sqrt(ppf_arr[mask1 | mask2] - 1)\n ppf_arr[mask1 | mask2] = np.sqrt(d._df[mask1 | mask2]) * ppf_arr[mask1 | mask2]\n ppf_arr[mask1] = -ppf_arr[mask1]\n+ ppf_arr = d._sigma * ppf_arr + d._mu\n return pd.DataFrame(ppf_arr, index=p.index, columns=p.columns)\n \n @classmethod\n", "issue": "[BUG] TDistribution pdf does not consider location/scale\n**Describe the bug**\r\nThe location/scale (mu/sigma) are not considered at all in methods: pdf, log_pdf, cdf or ppf\r\n\r\nFor example, the case pdf:\r\n\r\n``` \r\ndef pdf(self, x):\r\n \"\"\"Probability density function.\"\"\"\r\n d = self.loc[x.index, x.columns]\r\n pdf_arr = gamma((d._df + 1) / 2)\r\n pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2))\r\n pdf_arr = pdf_arr * (1 + x**2 / d._df) ** (-(d._df + 1) / 2)\r\n return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns)\r\n```\r\n\r\nHere, in line 6, instead of x**2 one should use ((x - d._mu)/d._sigma)**2\r\n\r\n\r\nsktime version 0.25.1\r\n\r\n\n[BUG] TDistribution computes incorrectly variance \n**Describe the bug**\r\n\r\nTDistribution does not use scale (sigma) for variance computation\r\n\r\n``` \r\ndef var(self):\r\n...\r\n df_arr = self._df.copy()\r\n df_arr = df_arr.astype(np.float32)\r\n df_arr[df_arr <= 2] = np.inf\r\n mask = (df_arr > 2) & (df_arr != np.inf)\r\n df_arr[mask] = df_arr[mask] / (df_arr[mask] - 2)\r\n return pd.DataFrame(df_arr, index=self.index, columns=self.columns)\r\n```\r\n\r\none should correct the previous to last line to:\r\n\r\n```\r\ndf_arr[mask] = self._sigma** 2 * df_arr[mask] / (df_arr[mask] - 2)\r\n```\r\n\r\nsktime version 0.25.1\n", "before_files": 
[{"content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Student's t-distribution.\"\"\"\n\n__author__ = [\"Alex-JG3\"]\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.special import betaincinv, gamma, hyp2f1, loggamma\n\nfrom sktime.proba.base import BaseDistribution\n\n\nclass TDistribution(BaseDistribution):\n \"\"\"Student's t-distribution (sktime native).\n\n Parameters\n ----------\n mean : float or array of float (1D or 2D)\n mean of the t-distribution distribution\n sd : float or array of float (1D or 2D), must be positive\n standard deviation of the t-distribution distribution\n df : float or array of float (1D or 2D), must be positive\n Degrees of freedom of the t-distribution\n index : pd.Index, optional, default = RangeIndex\n columns : pd.Index, optional, default = RangeIndex\n\n Example\n -------\n >>> from sktime.proba.t import TDistribution\n\n >>> n = TDistribution(mu=[[0, 1], [2, 3], [4, 5]], sigma=1, df=10)\n \"\"\"\n\n _tags = {\n \"authors\": [\"Alex-JG3\"],\n \"maintainers\": [\"Alex-JG3\"],\n \"capabilities:approx\": [\"pdfnorm\", \"energy\"],\n \"capabilities:exact\": [\"mean\", \"var\", \"pdf\", \"log_pdf\", \"cdf\", \"ppf\"],\n \"distr:measuretype\": \"continuous\",\n }\n\n def __init__(self, mu, sigma, df=1, index=None, columns=None):\n self.mu = mu\n self.sigma = sigma\n self.df = df\n self.index = index\n self.columns = columns\n\n self._mu, self._sigma, self._df = self._get_bc_params(\n self.mu, self.sigma, self.df\n )\n shape = self._mu.shape\n\n if index is None:\n index = pd.RangeIndex(shape[0])\n\n if columns is None:\n columns = pd.RangeIndex(shape[1])\n\n super().__init__(index=index, columns=columns)\n\n def mean(self):\n r\"\"\"Return expected value of the distribution.\n\n Let :math:`X` be a random variable with the distribution of `self`.\n Returns the expectation :math:`\\mathbb{E}[X]`. The expectation,\n :math:`\\mathbb{E}[X]`, as infinite if :math:`\\nu \\le 1`.\n\n Returns\n -------\n pd.DataFrame with same rows, columns as `self`\n expected value of distribution (entry-wise)\n \"\"\"\n mean_arr = self._mu.copy()\n if (self._df <= 1).any():\n mean_arr = mean_arr.astype(np.float32)\n mean_arr[self._df <= 1] = np.inf\n return pd.DataFrame(mean_arr, index=self.index, columns=self.columns)\n\n def var(self):\n r\"\"\"Return element/entry-wise variance of the distribution.\n\n Let :math:`X` be a random variable with the distribution of `self`.\n Returns,\n\n .. 
math::\n \\mathbb{V}[X] = \\begin{cases}\n \\frac{\\nu}{\\nu - 2} & \\text{if} \\nu > 2, \\\\\n \\infty & \\text{if} \\nu \\le 2, \\\\\n \\begin{cases}\n\n Where :math:`\\nu` is the degrees of freedom of the t-distribution.\n\n Returns\n -------\n pd.DataFrame with same rows, columns as `self`\n variance of distribution (entry-wise)\n \"\"\"\n df_arr = self._df.copy()\n df_arr = df_arr.astype(np.float32)\n df_arr[df_arr <= 2] = np.inf\n mask = (df_arr > 2) & (df_arr != np.inf)\n df_arr[mask] = df_arr[mask] / (df_arr[mask] - 2)\n return pd.DataFrame(df_arr, index=self.index, columns=self.columns)\n\n def pdf(self, x):\n \"\"\"Probability density function.\"\"\"\n d = self.loc[x.index, x.columns]\n pdf_arr = gamma((d._df + 1) / 2)\n pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2))\n pdf_arr = pdf_arr * (1 + x**2 / d._df) ** (-(d._df + 1) / 2)\n return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns)\n\n def log_pdf(self, x):\n \"\"\"Logarithmic probability density function.\"\"\"\n d = self.loc[x.index, x.columns]\n lpdf_arr = loggamma((d._df + 1) / 2)\n lpdf_arr = lpdf_arr - 0.5 * np.log(d._df * np.pi)\n lpdf_arr = lpdf_arr - loggamma(d._df / 2)\n lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log(1 + x**2 / d._df)\n return pd.DataFrame(lpdf_arr, index=x.index, columns=x.columns)\n\n def cdf(self, x):\n \"\"\"Cumulative distribution function.\"\"\"\n d = self.loc[x.index, x.columns]\n cdf_arr = x * gamma((d._df + 1) / 2)\n cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x**2) / d._df)\n cdf_arr = 0.5 + cdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2))\n return pd.DataFrame(cdf_arr, index=x.index, columns=x.columns)\n\n def ppf(self, p):\n \"\"\"Quantile function = percent point function = inverse cdf.\"\"\"\n d = self.loc[p.index, p.columns]\n ppf_arr = p.to_numpy(copy=True)\n ppf_arr[p.values == 0.5] = 0.0\n ppf_arr[p.values <= 0] = -np.inf\n ppf_arr[p.values >= 1] = np.inf\n\n mask1 = (p.values < 0.5) & (p.values > 0)\n mask2 = (p.values < 1) & (p.values > 0.5)\n ppf_arr[mask1] = 1 / betaincinv(0.5 * d._df[mask1], 0.5, 2 * ppf_arr[mask1])\n ppf_arr[mask2] = 1 / betaincinv(\n 0.5 * d._df[mask2], 0.5, 2 * (1 - ppf_arr[mask2])\n )\n ppf_arr[mask1 | mask2] = np.sqrt(ppf_arr[mask1 | mask2] - 1)\n ppf_arr[mask1 | mask2] = np.sqrt(d._df[mask1 | mask2]) * ppf_arr[mask1 | mask2]\n ppf_arr[mask1] = -ppf_arr[mask1]\n return pd.DataFrame(ppf_arr, index=p.index, columns=p.columns)\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\"\"\"\n params1 = {\"mu\": [[0, 1], [2, 3], [4, 5]], \"sigma\": 1}\n params2 = {\n \"mu\": 0,\n \"sigma\": 1,\n \"index\": pd.Index([1, 2, 5]),\n \"columns\": pd.Index([\"a\", \"b\"]),\n }\n return [params1, params2]\n", "path": "sktime/proba/t.py"}], "after_files": [{"content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Student's t-distribution.\"\"\"\n\n__author__ = [\"Alex-JG3\"]\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.special import betaincinv, gamma, hyp2f1, loggamma\n\nfrom sktime.proba.base import BaseDistribution\n\n\nclass TDistribution(BaseDistribution):\n \"\"\"Student's t-distribution (sktime native).\n\n Parameters\n ----------\n mean : float or array of float (1D or 2D)\n mean of the t-distribution distribution\n sd : float or array of float (1D or 2D), must be positive\n standard deviation of the t-distribution distribution\n df : float or array of float (1D or 2D), must be positive\n Degrees of 
freedom of the t-distribution\n index : pd.Index, optional, default = RangeIndex\n columns : pd.Index, optional, default = RangeIndex\n\n Example\n -------\n >>> from sktime.proba.t import TDistribution\n\n >>> n = TDistribution(mu=[[0, 1], [2, 3], [4, 5]], sigma=1, df=10)\n \"\"\"\n\n _tags = {\n \"authors\": [\"Alex-JG3\"],\n \"maintainers\": [\"Alex-JG3\"],\n \"capabilities:approx\": [\"pdfnorm\", \"energy\"],\n \"capabilities:exact\": [\"mean\", \"var\", \"pdf\", \"log_pdf\", \"cdf\", \"ppf\"],\n \"distr:measuretype\": \"continuous\",\n }\n\n def __init__(self, mu, sigma, df=1, index=None, columns=None):\n self.mu = mu\n self.sigma = sigma\n self.df = df\n self.index = index\n self.columns = columns\n\n self._mu, self._sigma, self._df = self._get_bc_params(\n self.mu, self.sigma, self.df\n )\n shape = self._mu.shape\n\n if index is None:\n index = pd.RangeIndex(shape[0])\n\n if columns is None:\n columns = pd.RangeIndex(shape[1])\n\n super().__init__(index=index, columns=columns)\n\n def mean(self):\n r\"\"\"Return expected value of the distribution.\n\n Let :math:`X` be a random variable with the distribution of `self`.\n Returns the expectation :math:`\\mathbb{E}[X]`. The expectation,\n :math:`\\mathbb{E}[X]`, as infinite if :math:`\\nu \\le 1`.\n\n Returns\n -------\n pd.DataFrame with same rows, columns as `self`\n expected value of distribution (entry-wise)\n \"\"\"\n mean_arr = self._mu.copy()\n if (self._df <= 1).any():\n mean_arr = mean_arr.astype(np.float32)\n mean_arr[self._df <= 1] = np.inf\n return pd.DataFrame(mean_arr, index=self.index, columns=self.columns)\n\n def var(self):\n r\"\"\"Return element/entry-wise variance of the distribution.\n\n Let :math:`X` be a random variable with the distribution of `self`.\n Returns,\n\n .. math::\n \\mathbb{V}[X] = \\begin{cases}\n \\frac{\\nu}{\\nu - 2} & \\text{if} \\nu > 2, \\\\\n \\infty & \\text{if} \\nu \\le 2, \\\\\n \\begin{cases}\n\n Where :math:`\\nu` is the degrees of freedom of the t-distribution.\n\n Returns\n -------\n pd.DataFrame with same rows, columns as `self`\n variance of distribution (entry-wise)\n \"\"\"\n df_arr = self._df.copy()\n df_arr = df_arr.astype(np.float32)\n df_arr[df_arr <= 2] = np.inf\n mask = (df_arr > 2) & (df_arr != np.inf)\n df_arr[mask] = self._sigma[mask] ** 2 * df_arr[mask] / (df_arr[mask] - 2)\n return pd.DataFrame(df_arr, index=self.index, columns=self.columns)\n\n def pdf(self, x):\n \"\"\"Probability density function.\"\"\"\n d = self.loc[x.index, x.columns]\n pdf_arr = gamma((d._df + 1) / 2)\n pdf_arr = pdf_arr / (np.sqrt(np.pi * d._df) * gamma(d._df / 2))\n pdf_arr = pdf_arr * (1 + ((x - d._mu) / d._sigma) ** 2 / d._df) ** (\n -(d._df + 1) / 2\n )\n pdf_arr = pdf_arr / d._sigma\n return pd.DataFrame(pdf_arr, index=x.index, columns=x.columns)\n\n def log_pdf(self, x):\n \"\"\"Logarithmic probability density function.\"\"\"\n d = self.loc[x.index, x.columns]\n lpdf_arr = loggamma((d._df + 1) / 2)\n lpdf_arr = lpdf_arr - 0.5 * np.log(d._df * np.pi)\n lpdf_arr = lpdf_arr - loggamma(d._df / 2)\n lpdf_arr = lpdf_arr - ((d._df + 1) / 2) * np.log(\n 1 + ((x - d._mu) / d._sigma) ** 2 / d._df\n )\n lpdf_arr = lpdf_arr - np.log(d._sigma)\n return pd.DataFrame(lpdf_arr, index=x.index, columns=x.columns)\n\n def cdf(self, x):\n \"\"\"Cumulative distribution function.\"\"\"\n d = self.loc[x.index, x.columns]\n x_ = (x - d._mu) / d._sigma\n cdf_arr = x_ * gamma((d._df + 1) / 2)\n cdf_arr = cdf_arr * hyp2f1(0.5, (d._df + 1) / 2, 3 / 2, -(x_**2) / d._df)\n cdf_arr = 0.5 + cdf_arr / (np.sqrt(np.pi * 
d._df) * gamma(d._df / 2))\n return pd.DataFrame(cdf_arr, index=x.index, columns=x.columns)\n\n def ppf(self, p):\n \"\"\"Quantile function = percent point function = inverse cdf.\"\"\"\n d = self.loc[p.index, p.columns]\n ppf_arr = p.to_numpy(copy=True)\n ppf_arr[p.values == 0.5] = 0.0\n ppf_arr[p.values <= 0] = -np.inf\n ppf_arr[p.values >= 1] = np.inf\n\n mask1 = (p.values < 0.5) & (p.values > 0)\n mask2 = (p.values < 1) & (p.values > 0.5)\n ppf_arr[mask1] = 1 / betaincinv(0.5 * d._df[mask1], 0.5, 2 * ppf_arr[mask1])\n ppf_arr[mask2] = 1 / betaincinv(\n 0.5 * d._df[mask2], 0.5, 2 * (1 - ppf_arr[mask2])\n )\n ppf_arr[mask1 | mask2] = np.sqrt(ppf_arr[mask1 | mask2] - 1)\n ppf_arr[mask1 | mask2] = np.sqrt(d._df[mask1 | mask2]) * ppf_arr[mask1 | mask2]\n ppf_arr[mask1] = -ppf_arr[mask1]\n ppf_arr = d._sigma * ppf_arr + d._mu\n return pd.DataFrame(ppf_arr, index=p.index, columns=p.columns)\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\"\"\"\n params1 = {\"mu\": [[0, 1], [2, 3], [4, 5]], \"sigma\": 1}\n params2 = {\n \"mu\": 0,\n \"sigma\": 1,\n \"index\": pd.Index([1, 2, 5]),\n \"columns\": pd.Index([\"a\", \"b\"]),\n }\n return [params1, params2]\n", "path": "sktime/proba/t.py"}]}
2747
915
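The sktime fix is easiest to sanity-check numerically against `scipy.stats.t`, which already implements the loc/scale parameterization the issue says the native distribution ignored. A minimal sketch with arbitrary parameter values (assumes numpy and scipy are installed):

```python
import numpy as np
from scipy import stats
from scipy.special import gamma

mu, sigma, df, x = 1.5, 2.0, 10.0, 0.7
z = (x - mu) / sigma  # standardize, as the patched pdf/log_pdf/cdf now do

# Corrected pdf: standard-t density of z, divided by sigma (change of variables).
pdf = gamma((df + 1) / 2) / (np.sqrt(np.pi * df) * gamma(df / 2))
pdf = pdf * (1 + z**2 / df) ** (-(df + 1) / 2) / sigma
assert np.isclose(pdf, stats.t(df, loc=mu, scale=sigma).pdf(x))

# Corrected variance: sigma**2 * df / (df - 2), finite only for df > 2.
assert np.isclose(sigma**2 * df / (df - 2), stats.t(df, loc=mu, scale=sigma).var())
```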
gh_patches_debug_27773
rasdani/github-patches
git_diff
pypa__pip-12569
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Show 'home-page' project URL when Home-Page metadata value is not set ### What's the problem this feature will solve? When installing a pyproject.toml-only project, the `Home-Page` metadata won't be set (as there's no way to specify it: PEP 621 only maps to `Project-Url`). This means `pip show` won't provide any URL to the user. ### Describe the solution you'd like If `Home-Page` is not set, instead select one of the project URLs (if present), with the following order of preference: * Home-page * Homepage * homepage * Source * fallback: the first project-URL in the metadata ### Alternative Solutions `pip show -v` shows all project URLs, but that still shows an empty `Home-page:` field ### Additional context Project URLs were included in `pip show -v` in #10827 ### Code of Conduct - [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pip/_internal/commands/show.py` Content: ``` 1 import logging 2 from optparse import Values 3 from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional 4 5 from pip._vendor.packaging.utils import canonicalize_name 6 7 from pip._internal.cli.base_command import Command 8 from pip._internal.cli.status_codes import ERROR, SUCCESS 9 from pip._internal.metadata import BaseDistribution, get_default_environment 10 from pip._internal.utils.misc import write_output 11 12 logger = logging.getLogger(__name__) 13 14 15 class ShowCommand(Command): 16 """ 17 Show information about one or more installed packages. 18 19 The output is in RFC-compliant mail header format. 20 """ 21 22 usage = """ 23 %prog [options] <package> ...""" 24 ignore_require_venv = True 25 26 def add_options(self) -> None: 27 self.cmd_opts.add_option( 28 "-f", 29 "--files", 30 dest="files", 31 action="store_true", 32 default=False, 33 help="Show the full list of installed files for each package.", 34 ) 35 36 self.parser.insert_option_group(0, self.cmd_opts) 37 38 def run(self, options: Values, args: List[str]) -> int: 39 if not args: 40 logger.warning("ERROR: Please provide a package name or names.") 41 return ERROR 42 query = args 43 44 results = search_packages_info(query) 45 if not print_results( 46 results, list_files=options.files, verbose=options.verbose 47 ): 48 return ERROR 49 return SUCCESS 50 51 52 class _PackageInfo(NamedTuple): 53 name: str 54 version: str 55 location: str 56 editable_project_location: Optional[str] 57 requires: List[str] 58 required_by: List[str] 59 installer: str 60 metadata_version: str 61 classifiers: List[str] 62 summary: str 63 homepage: str 64 project_urls: List[str] 65 author: str 66 author_email: str 67 license: str 68 entry_points: List[str] 69 files: Optional[List[str]] 70 71 72 def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]: 73 """ 74 Gather details from installed distributions. Print distribution name, 75 version, location, and installed files. Installed files requires a 76 pip generated 'installed-files.txt' in the distributions '.egg-info' 77 directory. 
78 """ 79 env = get_default_environment() 80 81 installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()} 82 query_names = [canonicalize_name(name) for name in query] 83 missing = sorted( 84 [name for name, pkg in zip(query, query_names) if pkg not in installed] 85 ) 86 if missing: 87 logger.warning("Package(s) not found: %s", ", ".join(missing)) 88 89 def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: 90 return ( 91 dist.metadata["Name"] or "UNKNOWN" 92 for dist in installed.values() 93 if current_dist.canonical_name 94 in {canonicalize_name(d.name) for d in dist.iter_dependencies()} 95 ) 96 97 for query_name in query_names: 98 try: 99 dist = installed[query_name] 100 except KeyError: 101 continue 102 103 requires = sorted( 104 # Avoid duplicates in requirements (e.g. due to environment markers). 105 {req.name for req in dist.iter_dependencies()}, 106 key=str.lower, 107 ) 108 required_by = sorted(_get_requiring_packages(dist), key=str.lower) 109 110 try: 111 entry_points_text = dist.read_text("entry_points.txt") 112 entry_points = entry_points_text.splitlines(keepends=False) 113 except FileNotFoundError: 114 entry_points = [] 115 116 files_iter = dist.iter_declared_entries() 117 if files_iter is None: 118 files: Optional[List[str]] = None 119 else: 120 files = sorted(files_iter) 121 122 metadata = dist.metadata 123 124 yield _PackageInfo( 125 name=dist.raw_name, 126 version=str(dist.version), 127 location=dist.location or "", 128 editable_project_location=dist.editable_project_location, 129 requires=requires, 130 required_by=required_by, 131 installer=dist.installer, 132 metadata_version=dist.metadata_version or "", 133 classifiers=metadata.get_all("Classifier", []), 134 summary=metadata.get("Summary", ""), 135 homepage=metadata.get("Home-page", ""), 136 project_urls=metadata.get_all("Project-URL", []), 137 author=metadata.get("Author", ""), 138 author_email=metadata.get("Author-email", ""), 139 license=metadata.get("License", ""), 140 entry_points=entry_points, 141 files=files, 142 ) 143 144 145 def print_results( 146 distributions: Iterable[_PackageInfo], 147 list_files: bool, 148 verbose: bool, 149 ) -> bool: 150 """ 151 Print the information from installed distributions found. 
152 """ 153 results_printed = False 154 for i, dist in enumerate(distributions): 155 results_printed = True 156 if i > 0: 157 write_output("---") 158 159 write_output("Name: %s", dist.name) 160 write_output("Version: %s", dist.version) 161 write_output("Summary: %s", dist.summary) 162 write_output("Home-page: %s", dist.homepage) 163 write_output("Author: %s", dist.author) 164 write_output("Author-email: %s", dist.author_email) 165 write_output("License: %s", dist.license) 166 write_output("Location: %s", dist.location) 167 if dist.editable_project_location is not None: 168 write_output( 169 "Editable project location: %s", dist.editable_project_location 170 ) 171 write_output("Requires: %s", ", ".join(dist.requires)) 172 write_output("Required-by: %s", ", ".join(dist.required_by)) 173 174 if verbose: 175 write_output("Metadata-Version: %s", dist.metadata_version) 176 write_output("Installer: %s", dist.installer) 177 write_output("Classifiers:") 178 for classifier in dist.classifiers: 179 write_output(" %s", classifier) 180 write_output("Entry-points:") 181 for entry in dist.entry_points: 182 write_output(" %s", entry.strip()) 183 write_output("Project-URLs:") 184 for project_url in dist.project_urls: 185 write_output(" %s", project_url) 186 if list_files: 187 write_output("Files:") 188 if dist.files is None: 189 write_output("Cannot locate RECORD or installed-files.txt") 190 else: 191 for line in dist.files: 192 write_output(" %s", line.strip()) 193 return results_printed 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pip/_internal/commands/show.py b/src/pip/_internal/commands/show.py --- a/src/pip/_internal/commands/show.py +++ b/src/pip/_internal/commands/show.py @@ -121,6 +121,22 @@ metadata = dist.metadata + project_urls = metadata.get_all("Project-URL", []) + homepage = metadata.get("Home-page", "") + if not homepage: + # It's common that there is a "homepage" Project-URL, but Home-page + # remains unset (especially as PEP 621 doesn't surface the field). + # + # This logic was taken from PyPI's codebase. + for url in project_urls: + url_label, url = url.split(",", maxsplit=1) + normalized_label = ( + url_label.casefold().replace("-", "").replace("_", "").strip() + ) + if normalized_label == "homepage": + homepage = url.strip() + break + yield _PackageInfo( name=dist.raw_name, version=str(dist.version), @@ -132,8 +148,8 @@ metadata_version=dist.metadata_version or "", classifiers=metadata.get_all("Classifier", []), summary=metadata.get("Summary", ""), - homepage=metadata.get("Home-page", ""), - project_urls=metadata.get_all("Project-URL", []), + homepage=homepage, + project_urls=project_urls, author=metadata.get("Author", ""), author_email=metadata.get("Author-email", ""), license=metadata.get("License", ""),
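Reduced to a pure function, the fallback this diff introduces looks roughly like the sketch below. The helper name and the sample metadata values are illustrative, not pip's actual API; the normalization line mirrors the logic the diff borrows from PyPI.

```python
from typing import List


def resolve_homepage(home_page: str, project_urls: List[str]) -> str:
    """Prefer the Home-page field; else the 'homepage'-labelled Project-URL."""
    if home_page:
        return home_page
    for entry in project_urls:
        # Project-URL values look like "Label, https://example.org".
        label, _, url = entry.partition(",")
        # Casefold and drop dashes/underscores so "Home-Page", "home_page",
        # and "Homepage" all match the same label.
        if label.casefold().replace("-", "").replace("_", "").strip() == "homepage":
            return url.strip()
    return ""


print(resolve_homepage("", [
    "Source, https://github.com/pypa/pip",
    "Home-Page, https://pip.pypa.io",
]))  # -> https://pip.pypa.io
```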
{"golden_diff": "diff --git a/src/pip/_internal/commands/show.py b/src/pip/_internal/commands/show.py\n--- a/src/pip/_internal/commands/show.py\n+++ b/src/pip/_internal/commands/show.py\n@@ -121,6 +121,22 @@\n \n metadata = dist.metadata\n \n+ project_urls = metadata.get_all(\"Project-URL\", [])\n+ homepage = metadata.get(\"Home-page\", \"\")\n+ if not homepage:\n+ # It's common that there is a \"homepage\" Project-URL, but Home-page\n+ # remains unset (especially as PEP 621 doesn't surface the field).\n+ #\n+ # This logic was taken from PyPI's codebase.\n+ for url in project_urls:\n+ url_label, url = url.split(\",\", maxsplit=1)\n+ normalized_label = (\n+ url_label.casefold().replace(\"-\", \"\").replace(\"_\", \"\").strip()\n+ )\n+ if normalized_label == \"homepage\":\n+ homepage = url.strip()\n+ break\n+\n yield _PackageInfo(\n name=dist.raw_name,\n version=str(dist.version),\n@@ -132,8 +148,8 @@\n metadata_version=dist.metadata_version or \"\",\n classifiers=metadata.get_all(\"Classifier\", []),\n summary=metadata.get(\"Summary\", \"\"),\n- homepage=metadata.get(\"Home-page\", \"\"),\n- project_urls=metadata.get_all(\"Project-URL\", []),\n+ homepage=homepage,\n+ project_urls=project_urls,\n author=metadata.get(\"Author\", \"\"),\n author_email=metadata.get(\"Author-email\", \"\"),\n license=metadata.get(\"License\", \"\"),\n", "issue": "Show 'home-page' project URL when Home-Page metadata value is not set\n### What's the problem this feature will solve?\n\nWhen installing a pyproject.toml-only project, the `Home-Page` metadata won't be set (as there's no way to specify it: PEP 621 only maps to `Project-Url`). This means `pip show` won't provide any URL to the user.\n\n### Describe the solution you'd like\n\nIf `Home-Page` is not set, instead select one of the project URLs (if present), with the following order of preference:\r\n* Home-page\r\n* Homepage\r\n* homepage\r\n* Source\r\n* fallback: the first project-URL in the metadata\n\n### Alternative Solutions\n\n`pip show -v` shows all project URLs, but that still shows an empty `Home-page:` field\n\n### Additional context\n\nProject URLs were included in `pip show -v` in #10827\n\n### Code of Conduct\n\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "import logging\nfrom optparse import Values\nfrom typing import Generator, Iterable, Iterator, List, NamedTuple, Optional\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.metadata import BaseDistribution, get_default_environment\nfrom pip._internal.utils.misc import write_output\n\nlogger = logging.getLogger(__name__)\n\n\nclass ShowCommand(Command):\n \"\"\"\n Show information about one or more installed packages.\n\n The output is in RFC-compliant mail header format.\n \"\"\"\n\n usage = \"\"\"\n %prog [options] <package> ...\"\"\"\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"-f\",\n \"--files\",\n dest=\"files\",\n action=\"store_true\",\n default=False,\n help=\"Show the full list of installed files for each package.\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n if not args:\n logger.warning(\"ERROR: Please provide a package name or names.\")\n return ERROR\n query = args\n\n results = search_packages_info(query)\n if not print_results(\n 
results, list_files=options.files, verbose=options.verbose\n ):\n return ERROR\n return SUCCESS\n\n\nclass _PackageInfo(NamedTuple):\n name: str\n version: str\n location: str\n editable_project_location: Optional[str]\n requires: List[str]\n required_by: List[str]\n installer: str\n metadata_version: str\n classifiers: List[str]\n summary: str\n homepage: str\n project_urls: List[str]\n author: str\n author_email: str\n license: str\n entry_points: List[str]\n files: Optional[List[str]]\n\n\ndef search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:\n \"\"\"\n Gather details from installed distributions. Print distribution name,\n version, location, and installed files. Installed files requires a\n pip generated 'installed-files.txt' in the distributions '.egg-info'\n directory.\n \"\"\"\n env = get_default_environment()\n\n installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}\n query_names = [canonicalize_name(name) for name in query]\n missing = sorted(\n [name for name, pkg in zip(query, query_names) if pkg not in installed]\n )\n if missing:\n logger.warning(\"Package(s) not found: %s\", \", \".join(missing))\n\n def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:\n return (\n dist.metadata[\"Name\"] or \"UNKNOWN\"\n for dist in installed.values()\n if current_dist.canonical_name\n in {canonicalize_name(d.name) for d in dist.iter_dependencies()}\n )\n\n for query_name in query_names:\n try:\n dist = installed[query_name]\n except KeyError:\n continue\n\n requires = sorted(\n # Avoid duplicates in requirements (e.g. due to environment markers).\n {req.name for req in dist.iter_dependencies()},\n key=str.lower,\n )\n required_by = sorted(_get_requiring_packages(dist), key=str.lower)\n\n try:\n entry_points_text = dist.read_text(\"entry_points.txt\")\n entry_points = entry_points_text.splitlines(keepends=False)\n except FileNotFoundError:\n entry_points = []\n\n files_iter = dist.iter_declared_entries()\n if files_iter is None:\n files: Optional[List[str]] = None\n else:\n files = sorted(files_iter)\n\n metadata = dist.metadata\n\n yield _PackageInfo(\n name=dist.raw_name,\n version=str(dist.version),\n location=dist.location or \"\",\n editable_project_location=dist.editable_project_location,\n requires=requires,\n required_by=required_by,\n installer=dist.installer,\n metadata_version=dist.metadata_version or \"\",\n classifiers=metadata.get_all(\"Classifier\", []),\n summary=metadata.get(\"Summary\", \"\"),\n homepage=metadata.get(\"Home-page\", \"\"),\n project_urls=metadata.get_all(\"Project-URL\", []),\n author=metadata.get(\"Author\", \"\"),\n author_email=metadata.get(\"Author-email\", \"\"),\n license=metadata.get(\"License\", \"\"),\n entry_points=entry_points,\n files=files,\n )\n\n\ndef print_results(\n distributions: Iterable[_PackageInfo],\n list_files: bool,\n verbose: bool,\n) -> bool:\n \"\"\"\n Print the information from installed distributions found.\n \"\"\"\n results_printed = False\n for i, dist in enumerate(distributions):\n results_printed = True\n if i > 0:\n write_output(\"---\")\n\n write_output(\"Name: %s\", dist.name)\n write_output(\"Version: %s\", dist.version)\n write_output(\"Summary: %s\", dist.summary)\n write_output(\"Home-page: %s\", dist.homepage)\n write_output(\"Author: %s\", dist.author)\n write_output(\"Author-email: %s\", dist.author_email)\n write_output(\"License: %s\", dist.license)\n write_output(\"Location: %s\", dist.location)\n if dist.editable_project_location 
is not None:\n write_output(\n \"Editable project location: %s\", dist.editable_project_location\n )\n write_output(\"Requires: %s\", \", \".join(dist.requires))\n write_output(\"Required-by: %s\", \", \".join(dist.required_by))\n\n if verbose:\n write_output(\"Metadata-Version: %s\", dist.metadata_version)\n write_output(\"Installer: %s\", dist.installer)\n write_output(\"Classifiers:\")\n for classifier in dist.classifiers:\n write_output(\" %s\", classifier)\n write_output(\"Entry-points:\")\n for entry in dist.entry_points:\n write_output(\" %s\", entry.strip())\n write_output(\"Project-URLs:\")\n for project_url in dist.project_urls:\n write_output(\" %s\", project_url)\n if list_files:\n write_output(\"Files:\")\n if dist.files is None:\n write_output(\"Cannot locate RECORD or installed-files.txt\")\n else:\n for line in dist.files:\n write_output(\" %s\", line.strip())\n return results_printed\n", "path": "src/pip/_internal/commands/show.py"}], "after_files": [{"content": "import logging\nfrom optparse import Values\nfrom typing import Generator, Iterable, Iterator, List, NamedTuple, Optional\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.metadata import BaseDistribution, get_default_environment\nfrom pip._internal.utils.misc import write_output\n\nlogger = logging.getLogger(__name__)\n\n\nclass ShowCommand(Command):\n \"\"\"\n Show information about one or more installed packages.\n\n The output is in RFC-compliant mail header format.\n \"\"\"\n\n usage = \"\"\"\n %prog [options] <package> ...\"\"\"\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"-f\",\n \"--files\",\n dest=\"files\",\n action=\"store_true\",\n default=False,\n help=\"Show the full list of installed files for each package.\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n if not args:\n logger.warning(\"ERROR: Please provide a package name or names.\")\n return ERROR\n query = args\n\n results = search_packages_info(query)\n if not print_results(\n results, list_files=options.files, verbose=options.verbose\n ):\n return ERROR\n return SUCCESS\n\n\nclass _PackageInfo(NamedTuple):\n name: str\n version: str\n location: str\n editable_project_location: Optional[str]\n requires: List[str]\n required_by: List[str]\n installer: str\n metadata_version: str\n classifiers: List[str]\n summary: str\n homepage: str\n project_urls: List[str]\n author: str\n author_email: str\n license: str\n entry_points: List[str]\n files: Optional[List[str]]\n\n\ndef search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:\n \"\"\"\n Gather details from installed distributions. Print distribution name,\n version, location, and installed files. 
Installed files requires a\n pip generated 'installed-files.txt' in the distributions '.egg-info'\n directory.\n \"\"\"\n env = get_default_environment()\n\n installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}\n query_names = [canonicalize_name(name) for name in query]\n missing = sorted(\n [name for name, pkg in zip(query, query_names) if pkg not in installed]\n )\n if missing:\n logger.warning(\"Package(s) not found: %s\", \", \".join(missing))\n\n def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:\n return (\n dist.metadata[\"Name\"] or \"UNKNOWN\"\n for dist in installed.values()\n if current_dist.canonical_name\n in {canonicalize_name(d.name) for d in dist.iter_dependencies()}\n )\n\n for query_name in query_names:\n try:\n dist = installed[query_name]\n except KeyError:\n continue\n\n requires = sorted(\n # Avoid duplicates in requirements (e.g. due to environment markers).\n {req.name for req in dist.iter_dependencies()},\n key=str.lower,\n )\n required_by = sorted(_get_requiring_packages(dist), key=str.lower)\n\n try:\n entry_points_text = dist.read_text(\"entry_points.txt\")\n entry_points = entry_points_text.splitlines(keepends=False)\n except FileNotFoundError:\n entry_points = []\n\n files_iter = dist.iter_declared_entries()\n if files_iter is None:\n files: Optional[List[str]] = None\n else:\n files = sorted(files_iter)\n\n metadata = dist.metadata\n\n project_urls = metadata.get_all(\"Project-URL\", [])\n homepage = metadata.get(\"Home-page\", \"\")\n if not homepage:\n # It's common that there is a \"homepage\" Project-URL, but Home-page\n # remains unset (especially as PEP 621 doesn't surface the field).\n #\n # This logic was taken from PyPI's codebase.\n for url in project_urls:\n url_label, url = url.split(\",\", maxsplit=1)\n normalized_label = (\n url_label.casefold().replace(\"-\", \"\").replace(\"_\", \"\").strip()\n )\n if normalized_label == \"homepage\":\n homepage = url.strip()\n break\n\n yield _PackageInfo(\n name=dist.raw_name,\n version=str(dist.version),\n location=dist.location or \"\",\n editable_project_location=dist.editable_project_location,\n requires=requires,\n required_by=required_by,\n installer=dist.installer,\n metadata_version=dist.metadata_version or \"\",\n classifiers=metadata.get_all(\"Classifier\", []),\n summary=metadata.get(\"Summary\", \"\"),\n homepage=homepage,\n project_urls=project_urls,\n author=metadata.get(\"Author\", \"\"),\n author_email=metadata.get(\"Author-email\", \"\"),\n license=metadata.get(\"License\", \"\"),\n entry_points=entry_points,\n files=files,\n )\n\n\ndef print_results(\n distributions: Iterable[_PackageInfo],\n list_files: bool,\n verbose: bool,\n) -> bool:\n \"\"\"\n Print the information from installed distributions found.\n \"\"\"\n results_printed = False\n for i, dist in enumerate(distributions):\n results_printed = True\n if i > 0:\n write_output(\"---\")\n\n write_output(\"Name: %s\", dist.name)\n write_output(\"Version: %s\", dist.version)\n write_output(\"Summary: %s\", dist.summary)\n write_output(\"Home-page: %s\", dist.homepage)\n write_output(\"Author: %s\", dist.author)\n write_output(\"Author-email: %s\", dist.author_email)\n write_output(\"License: %s\", dist.license)\n write_output(\"Location: %s\", dist.location)\n if dist.editable_project_location is not None:\n write_output(\n \"Editable project location: %s\", dist.editable_project_location\n )\n write_output(\"Requires: %s\", \", \".join(dist.requires))\n write_output(\"Required-by: 
%s\", \", \".join(dist.required_by))\n\n if verbose:\n write_output(\"Metadata-Version: %s\", dist.metadata_version)\n write_output(\"Installer: %s\", dist.installer)\n write_output(\"Classifiers:\")\n for classifier in dist.classifiers:\n write_output(\" %s\", classifier)\n write_output(\"Entry-points:\")\n for entry in dist.entry_points:\n write_output(\" %s\", entry.strip())\n write_output(\"Project-URLs:\")\n for project_url in dist.project_urls:\n write_output(\" %s\", project_url)\n if list_files:\n write_output(\"Files:\")\n if dist.files is None:\n write_output(\"Cannot locate RECORD or installed-files.txt\")\n else:\n for line in dist.files:\n write_output(\" %s\", line.strip())\n return results_printed\n", "path": "src/pip/_internal/commands/show.py"}]}
2,337
357
gh_patches_debug_42271
rasdani/github-patches
git_diff
google__flax-2204
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
codediff section not respecting html container 
In the new `sphinx_book_theme` our custom `codediff` directive is rendering its content outside the container, overflowing until the end of the page. 
As shown in this screenshot, it appears to be rendered underneath the `contents` section because it's not respecting its section boundaries:

![image](https://user-images.githubusercontent.com/5862228/173973531-5e548ee8-53f6-4978-9744-f9a3a54b5470.png)

--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.

--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 
15 """Configuration file for the Sphinx documentation builder."""
16 
17 
18 # This file only contains a selection of the most common options. For a full
19 # list see the documentation:
20 # https://www.sphinx-doc.org/en/master/usage/configuration.html
21 
22 # -- Path setup --------------------------------------------------------------
23 
24 # If extensions (or modules to document with autodoc) are in another directory,
25 # add these directories to sys.path here. If the directory is relative to the
26 # documentation root, use os.path.abspath to make it absolute, like shown here.
27 #
28 # import os
29 # import sys
30 # sys.path.insert(0, os.path.abspath('.'))
31 
32 import os
33 import sys
34 sys.path.insert(0, os.path.abspath('..'))
35 # Include local extension.
36 sys.path.append(os.path.abspath('./_ext'))
37 
38 # patch sphinx
39 import docs.conf_sphinx_patch
40 # -- Project information -----------------------------------------------------
41 
42 project = 'Flax'
43 copyright = '2020, The Flax authors' # pylint: disable=redefined-builtin
44 author = 'The Flax authors'
45 
46 
47 # -- General configuration ---------------------------------------------------
48 
49 # Add any Sphinx extension module names here, as strings. They can be
50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
51 # ones.
52 extensions = [
53     'sphinx.ext.autodoc',
54     'sphinx.ext.autosummary',
55     'sphinx.ext.autosectionlabel',
56     'sphinx.ext.doctest',
57     'sphinx.ext.intersphinx',
58     'sphinx.ext.mathjax',
59     'sphinx.ext.napoleon',
60     'sphinx.ext.viewcode',
61     'myst_nb',
62     'codediff',
63     'sphinx_markdown_tables'
64 ]
65 
66 # Add any paths that contain templates here, relative to this directory.
67 templates_path = ['_templates']
68 
69 # List of patterns, relative to source directory, that match files and
70 # directories to ignore when looking for source files.
71 # This pattern also affects html_static_path and html_extra_path.
72 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
73 
74 # The suffix(es) of source filenames. 
75 # Note: important to list ipynb before md here: we have both md and ipynb 76 # copies of each notebook, and myst will choose which to convert based on 77 # the order in the source_suffix list. Notebooks which are not executed have 78 # outputs stored in ipynb but not in md, so we must convert the ipynb. 79 source_suffix = ['.rst', '.ipynb', '.md'] 80 81 autosummary_generate = True 82 83 master_doc = 'index' 84 85 autodoc_typehints = 'none' 86 87 88 # -- Options for HTML output ------------------------------------------------- 89 90 # The theme to use for HTML and HTML Help pages. See the documentation for 91 # a list of builtin themes. 92 # 93 html_theme = 'sphinx_book_theme' 94 # html_style = 'css/flax_theme.css' 95 96 # The name of an image file (relative to this directory) to place at the top 97 # of the sidebar. 98 html_logo = './flax.png' 99 100 # Add any paths that contain custom static files (such as style sheets) here, 101 # relative to this directory. They are copied after the builtin static files, 102 # so a file named "default.css" will overwrite the builtin "default.css". 103 html_static_path = ['_static'] 104 105 html_theme_options = { 106 "repository_url": "https://github.com/google/flax", 107 "use_repository_button": True, # add a "link to repository" button 108 "use_issues_button": False, # add an "Open an Issue" button 109 "path_to_docs": "docs", # used to compute the path to launch notebooks in colab 110 "launch_buttons": { 111 "colab_url": "https://colab.research.google.com/", 112 }, 113 } 114 115 # -- Options for myst ---------------------------------------------- 116 # Notebook cell execution timeout; defaults to 30. 117 nb_execution_timeout = 100 118 # List of patterns, relative to source directory, that match notebook 119 # files that will not be executed. 120 nb_execution_excludepatterns = [ 121 'notebooks/annotated_mnist.ipynb', # <-- times out 122 ] 123 # raise exceptions on execution so CI can catch errors 124 nb_execution_allow_errors = False 125 nb_execution_raise_on_error = True 126 127 # -- Extension configuration ------------------------------------------------- 128 129 # Tell sphinx-autodoc-typehints to generate stub parameter annotations including 130 # types, even if the parameters aren't explicitly documented. 131 always_document_param_types = True 132 ``` Path: `docs/_ext/codediff.py` Content: ``` 1 # Copyright 2022 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Sphinx directive for creating code diff tables. 16 17 Use directive as follows: 18 19 .. codediff:: 20 :title_left: <LEFT_CODE_BLOCK_TITLE> 21 :title_right: <RIGHT_CODE_BLOCK_TITLE> 22 23 <CODE_BLOCK_LEFT> 24 --- 25 <CODE_BLOCK_RIGHT> 26 27 In order to highlight a line of code, prepend it with "#!". 
28 """ 29 import itertools 30 31 from docutils import nodes 32 from docutils.parsers.rst import directives 33 from docutils.statemachine import ViewList 34 35 import sphinx 36 from sphinx.util.docutils import SphinxDirective 37 38 39 class CodeDiffParser: 40 41 def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'): 42 if code_sep not in lines: 43 raise ValueError('Code separator not found! Code snippets should be ' 44 f'separated by {code_sep}.') 45 idx = lines.index(code_sep) 46 code_left = self._code_block(lines[0: idx]) 47 test_code = lines[idx+1:] 48 code_right = self._code_block(test_code) 49 50 self.max_left = max(len(x) for x in code_left + [title_left]) 51 self.max_right = max(len(x) for x in code_right + [title_right]) 52 53 output = [ 54 self._hline(), 55 self._table_row(title_left, title_right), 56 self._hline(), 57 ] 58 59 for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''): 60 output += [self._table_row(l, r)] 61 62 return output + [self._hline()], test_code 63 64 def _code_block(self, lines): 65 """Creates a codeblock.""" 66 # Remove right trailing whitespace so we can detect the comments. 67 lines = [x.rstrip() for x in lines] 68 highlight = lambda x: x.endswith('#!') 69 code = map(lambda x: x[:-2].rstrip() if highlight(x) else x, lines) 70 highlights = [i + 1 for i in range(len(lines)) if highlight(lines[i])] 71 highlights = ','.join(str(i) for i in highlights) 72 73 directive = ['.. code-block:: python'] 74 if highlights: 75 directive += [f' :emphasize-lines: {highlights}'] 76 77 # Indent code and add empty line so the code is picked up by the directive. 78 return directive + [''] + list(map(lambda x: ' ' + x, code)) 79 80 def _hline(self): 81 return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+' 82 83 def _rfill(self, text, max_len): 84 return text + ' ' * (max_len-len(text)) 85 86 def _table_row(self, left, right): 87 text_left = self._rfill(left, self.max_left) 88 text_right = self._rfill(right, self.max_right) 89 return '| ' + text_left + ' | ' + text_right + ' |' 90 91 92 class CodeDiffDirective(SphinxDirective): 93 has_content = True 94 option_spec = { 95 'title_left': directives.unchanged, 96 'title_right': directives.unchanged, 97 'code_sep': directives.unchanged, 98 } 99 100 def run(self): 101 table_code, test_code = CodeDiffParser().parse( 102 list(self.content), **self.options) 103 104 # Create a test node as a comment node so it won't show up in the docs. 105 # We add attribute "testnodetype" so it is be picked up by the doctest 106 # builder. This functionality is not officially documented but can be found 107 # in the source code: 108 # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/doctest.py 109 # (search for 'testnodetype'). 110 test_code = '\n'.join(test_code) 111 test_node = nodes.comment(test_code, test_code, testnodetype='testcode') 112 # Set the source info so the error message is correct when testing. 113 self.set_source_info(test_node) 114 test_node['options'] = {} 115 test_node['language'] = 'python3' 116 117 # The table node is the side-by-side diff view that will be shown on RTD. 
118 table_node = nodes.paragraph() 119 self.content = ViewList(table_code, self.content.parent) 120 self.state.nested_parse(self.content, self.content_offset, table_node) 121 122 return [table_node, test_node] 123 124 125 def setup(app): 126 app.add_directive('codediff', CodeDiffDirective) 127 128 return { 129 'version': sphinx.__display_version__, 130 'parallel_read_safe': True, 131 'parallel_write_safe': True, 132 } 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/_ext/codediff.py b/docs/_ext/codediff.py --- a/docs/_ext/codediff.py +++ b/docs/_ext/codediff.py @@ -24,9 +24,10 @@ --- <CODE_BLOCK_RIGHT> -In order to highlight a line of code, prepend it with "#!". +In order to highlight a line of code, append "#!" to it. """ import itertools +from typing import List, Tuple from docutils import nodes from docutils.parsers.rst import directives @@ -35,10 +36,14 @@ import sphinx from sphinx.util.docutils import SphinxDirective +MISSING = object() class CodeDiffParser: - def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'): + def parse( + self, lines, title_left='Base', title_right='Diff', code_sep='---', sync=MISSING): + sync = sync is not MISSING + if code_sep not in lines: raise ValueError('Code separator not found! Code snippets should be ' f'separated by {code_sep}.') @@ -47,19 +52,10 @@ test_code = lines[idx+1:] code_right = self._code_block(test_code) - self.max_left = max(len(x) for x in code_left + [title_left]) - self.max_right = max(len(x) for x in code_right + [title_right]) - - output = [ - self._hline(), - self._table_row(title_left, title_right), - self._hline(), - ] + output = self._tabs( + (title_left, code_left), (title_right, code_right), sync=sync) - for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''): - output += [self._table_row(l, r)] - - return output + [self._hline()], test_code + return output, test_code def _code_block(self, lines): """Creates a codeblock.""" @@ -77,17 +73,20 @@ # Indent code and add empty line so the code is picked up by the directive. return directive + [''] + list(map(lambda x: ' ' + x, code)) - def _hline(self): - return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+' - - def _rfill(self, text, max_len): - return text + ' ' * (max_len-len(text)) + def _tabs(self, *contents: Tuple[str, List[str]], sync): + output = ['.. tab-set::'] + [' '] + + for title, content in contents: + output += [f' .. tab-item:: {title}'] + + if sync: + key = title.strip() + output += [f' :sync: {key}'] - def _table_row(self, left, right): - text_left = self._rfill(left, self.max_left) - text_right = self._rfill(right, self.max_right) - return '| ' + text_left + ' | ' + text_right + ' |' + output += [' '] + output += [' ' + line for line in content] + return output class CodeDiffDirective(SphinxDirective): has_content = True @@ -95,6 +94,7 @@ 'title_left': directives.unchanged, 'title_right': directives.unchanged, 'code_sep': directives.unchanged, + 'sync': directives.flag, } def run(self): diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -60,7 +60,7 @@ 'sphinx.ext.viewcode', 'myst_nb', 'codediff', - 'sphinx_markdown_tables' + 'sphinx_design', ] # Add any paths that contain templates here, relative to this directory. @@ -91,7 +91,7 @@ # a list of builtin themes. # html_theme = 'sphinx_book_theme' -# html_style = 'css/flax_theme.css' +html_css_files = ["css/flax_theme.css"] # The name of an image file (relative to this directory) to place at the top # of the sidebar.
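The heart of this fix is swapping the hand-drawn reST grid table for sphinx-design tab markup, which the theme can lay out inside its own container. Below is a minimal standalone version of the new `_tabs` helper; the function name and sample inputs are illustrative, and the indentation scheme follows the diff.

```python
from typing import List, Tuple


def make_tabs(*contents: Tuple[str, List[str]], sync: bool = False) -> List[str]:
    """Emit reST lines for a sphinx-design tab-set wrapping each code variant."""
    output = [".. tab-set::", "  "]
    for title, body in contents:
        output.append(f"  .. tab-item:: {title}")
        if sync:
            # A shared :sync: key makes same-titled tabs switch together.
            output.append(f"    :sync: {title.strip()}")
        output.append("    ")
        output.extend("    " + line for line in body)
    return output


left = [".. code-block:: python", "", "  x = 1"]
right = [".. code-block:: python", "", "  x = 2"]
print("\n".join(make_tabs(("Base", left), ("Diff", right), sync=True)))
```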
{"golden_diff": "diff --git a/docs/_ext/codediff.py b/docs/_ext/codediff.py\n--- a/docs/_ext/codediff.py\n+++ b/docs/_ext/codediff.py\n@@ -24,9 +24,10 @@\n ---\n <CODE_BLOCK_RIGHT>\n \n-In order to highlight a line of code, prepend it with \"#!\".\n+In order to highlight a line of code, append \"#!\" to it.\n \"\"\"\n import itertools\n+from typing import List, Tuple\n \n from docutils import nodes\n from docutils.parsers.rst import directives\n@@ -35,10 +36,14 @@\n import sphinx\n from sphinx.util.docutils import SphinxDirective\n \n+MISSING = object()\n \n class CodeDiffParser:\n \n- def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'):\n+ def parse(\n+ self, lines, title_left='Base', title_right='Diff', code_sep='---', sync=MISSING):\n+ sync = sync is not MISSING\n+\n if code_sep not in lines:\n raise ValueError('Code separator not found! Code snippets should be '\n f'separated by {code_sep}.')\n@@ -47,19 +52,10 @@\n test_code = lines[idx+1:]\n code_right = self._code_block(test_code)\n \n- self.max_left = max(len(x) for x in code_left + [title_left])\n- self.max_right = max(len(x) for x in code_right + [title_right])\n-\n- output = [\n- self._hline(),\n- self._table_row(title_left, title_right),\n- self._hline(),\n- ]\n+ output = self._tabs(\n+ (title_left, code_left), (title_right, code_right), sync=sync)\n \n- for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''):\n- output += [self._table_row(l, r)]\n-\n- return output + [self._hline()], test_code\n+ return output, test_code\n \n def _code_block(self, lines):\n \"\"\"Creates a codeblock.\"\"\"\n@@ -77,17 +73,20 @@\n # Indent code and add empty line so the code is picked up by the directive.\n return directive + [''] + list(map(lambda x: ' ' + x, code))\n \n- def _hline(self):\n- return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+'\n-\n- def _rfill(self, text, max_len):\n- return text + ' ' * (max_len-len(text))\n+ def _tabs(self, *contents: Tuple[str, List[str]], sync):\n+ output = ['.. tab-set::'] + [' ']\n+ \n+ for title, content in contents:\n+ output += [f' .. tab-item:: {title}']\n+ \n+ if sync:\n+ key = title.strip()\n+ output += [f' :sync: {key}']\n \n- def _table_row(self, left, right):\n- text_left = self._rfill(left, self.max_left)\n- text_right = self._rfill(right, self.max_right)\n- return '| ' + text_left + ' | ' + text_right + ' |'\n+ output += [' ']\n+ output += [' ' + line for line in content]\n \n+ return output\n \n class CodeDiffDirective(SphinxDirective):\n has_content = True\n@@ -95,6 +94,7 @@\n 'title_left': directives.unchanged,\n 'title_right': directives.unchanged,\n 'code_sep': directives.unchanged,\n+ 'sync': directives.flag,\n }\n \n def run(self):\ndiff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -60,7 +60,7 @@\n 'sphinx.ext.viewcode',\n 'myst_nb',\n 'codediff',\n- 'sphinx_markdown_tables'\n+ 'sphinx_design',\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n@@ -91,7 +91,7 @@\n # a list of builtin themes.\n #\n html_theme = 'sphinx_book_theme'\n-# html_style = 'css/flax_theme.css'\n+html_css_files = [\"css/flax_theme.css\"]\n \n # The name of an image file (relative to this directory) to place at the top\n # of the sidebar.\n", "issue": "codediff section not respecting html container \nIn the new `sphinx_book_theme` our custom `codediff` directive is rendering its content outside the container, overflowing until the end of the page. 
As shown in this screenshot, it appear to be rendered underneath the `contents` section because its not respecting its section boundaries:\r\n\r\n![image](https://user-images.githubusercontent.com/5862228/173973531-5e548ee8-53f6-4978-9744-f9a3a54b5470.png)\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\n\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n# Include local extension.\nsys.path.append(os.path.abspath('./_ext'))\n\n# patch sphinx\nimport docs.conf_sphinx_patch\n# -- Project information -----------------------------------------------------\n\nproject = 'Flax'\ncopyright = '2020, The Flax authors' # pylint: disable=redefined-builtin\nauthor = 'The Flax authors'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'myst_nb',\n 'codediff',\n 'sphinx_markdown_tables'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = ['.rst', '.ipynb', '.md']\n\nautosummary_generate = True\n\nmaster_doc = 'index'\n\nautodoc_typehints = 'none'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_book_theme'\n# html_style = 'css/flax_theme.css'\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = './flax.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_theme_options = {\n \"repository_url\": \"https://github.com/google/flax\",\n \"use_repository_button\": True, # add a \"link to repository\" button\n \"use_issues_button\": False, # add an \"Open an Issue\" button\n \"path_to_docs\": \"docs\", # used to compute the path to launch notebooks in colab\n \"launch_buttons\": {\n \"colab_url\": \"https://colab.research.google.com/\",\n },\n}\n\n# -- Options for myst ----------------------------------------------\n# Notebook cell execution timeout; defaults to 30.\nnb_execution_timeout = 100\n# List of patterns, relative to source directory, that match notebook\n# files that will not be executed.\nnb_execution_excludepatterns = [\n 'notebooks/annotated_mnist.ipynb', # <-- times out \n]\n# raise exceptions on execution so CI can catch errors\nnb_execution_allow_errors = False\nnb_execution_raise_on_error = True\n\n# -- Extension configuration -------------------------------------------------\n\n# Tell sphinx-autodoc-typehints to generate stub parameter annotations including\n# types, even if the parameters aren't explicitly documented.\nalways_document_param_types = True\n", "path": "docs/conf.py"}, {"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sphinx directive for creating code diff tables.\n\nUse directive as follows:\n\n.. codediff::\n :title_left: <LEFT_CODE_BLOCK_TITLE>\n :title_right: <RIGHT_CODE_BLOCK_TITLE>\n\n <CODE_BLOCK_LEFT>\n ---\n <CODE_BLOCK_RIGHT>\n\nIn order to highlight a line of code, prepend it with \"#!\".\n\"\"\"\nimport itertools\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import ViewList\n\nimport sphinx\nfrom sphinx.util.docutils import SphinxDirective\n\n\nclass CodeDiffParser:\n\n def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'):\n if code_sep not in lines:\n raise ValueError('Code separator not found! 
Code snippets should be '\n f'separated by {code_sep}.')\n idx = lines.index(code_sep)\n code_left = self._code_block(lines[0: idx])\n test_code = lines[idx+1:]\n code_right = self._code_block(test_code)\n\n self.max_left = max(len(x) for x in code_left + [title_left])\n self.max_right = max(len(x) for x in code_right + [title_right])\n\n output = [\n self._hline(),\n self._table_row(title_left, title_right),\n self._hline(),\n ]\n\n for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''):\n output += [self._table_row(l, r)]\n\n return output + [self._hline()], test_code\n\n def _code_block(self, lines):\n \"\"\"Creates a codeblock.\"\"\"\n # Remove right trailing whitespace so we can detect the comments.\n lines = [x.rstrip() for x in lines]\n highlight = lambda x: x.endswith('#!')\n code = map(lambda x: x[:-2].rstrip() if highlight(x) else x, lines)\n highlights = [i + 1 for i in range(len(lines)) if highlight(lines[i])]\n highlights = ','.join(str(i) for i in highlights)\n\n directive = ['.. code-block:: python']\n if highlights:\n directive += [f' :emphasize-lines: {highlights}']\n\n # Indent code and add empty line so the code is picked up by the directive.\n return directive + [''] + list(map(lambda x: ' ' + x, code))\n\n def _hline(self):\n return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+'\n\n def _rfill(self, text, max_len):\n return text + ' ' * (max_len-len(text))\n\n def _table_row(self, left, right):\n text_left = self._rfill(left, self.max_left)\n text_right = self._rfill(right, self.max_right)\n return '| ' + text_left + ' | ' + text_right + ' |'\n\n\nclass CodeDiffDirective(SphinxDirective):\n has_content = True\n option_spec = {\n 'title_left': directives.unchanged,\n 'title_right': directives.unchanged,\n 'code_sep': directives.unchanged,\n }\n\n def run(self):\n table_code, test_code = CodeDiffParser().parse(\n list(self.content), **self.options)\n\n # Create a test node as a comment node so it won't show up in the docs.\n # We add attribute \"testnodetype\" so it is be picked up by the doctest\n # builder. 
This functionality is not officially documented but can be found\n # in the source code:\n # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/doctest.py\n # (search for 'testnodetype').\n test_code = '\\n'.join(test_code)\n test_node = nodes.comment(test_code, test_code, testnodetype='testcode')\n # Set the source info so the error message is correct when testing.\n self.set_source_info(test_node)\n test_node['options'] = {}\n test_node['language'] = 'python3'\n\n # The table node is the side-by-side diff view that will be shown on RTD.\n table_node = nodes.paragraph()\n self.content = ViewList(table_code, self.content.parent)\n self.state.nested_parse(self.content, self.content_offset, table_node)\n\n return [table_node, test_node]\n\n\ndef setup(app):\n app.add_directive('codediff', CodeDiffDirective)\n\n return {\n 'version': sphinx.__display_version__,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/_ext/codediff.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\n\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n# Include local extension.\nsys.path.append(os.path.abspath('./_ext'))\n\n# patch sphinx\nimport docs.conf_sphinx_patch\n# -- Project information -----------------------------------------------------\n\nproject = 'Flax'\ncopyright = '2020, The Flax authors' # pylint: disable=redefined-builtin\nauthor = 'The Flax authors'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'myst_nb',\n 'codediff',\n 'sphinx_design',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = ['.rst', '.ipynb', '.md']\n\nautosummary_generate = True\n\nmaster_doc = 'index'\n\nautodoc_typehints = 'none'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_book_theme'\nhtml_css_files = [\"css/flax_theme.css\"]\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = './flax.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_theme_options = {\n \"repository_url\": \"https://github.com/google/flax\",\n \"use_repository_button\": True, # add a \"link to repository\" button\n \"use_issues_button\": False, # add an \"Open an Issue\" button\n \"path_to_docs\": \"docs\", # used to compute the path to launch notebooks in colab\n \"launch_buttons\": {\n \"colab_url\": \"https://colab.research.google.com/\",\n },\n}\n\n# -- Options for myst ----------------------------------------------\n# Notebook cell execution timeout; defaults to 30.\nnb_execution_timeout = 100\n# List of patterns, relative to source directory, that match notebook\n# files that will not be executed.\nnb_execution_excludepatterns = [\n 'notebooks/annotated_mnist.ipynb', # <-- times out \n]\n# raise exceptions on execution so CI can catch errors\nnb_execution_allow_errors = False\nnb_execution_raise_on_error = True\n\n# -- Extension configuration -------------------------------------------------\n\n# Tell sphinx-autodoc-typehints to generate stub parameter annotations including\n# types, even if the parameters aren't explicitly documented.\nalways_document_param_types = True\n", "path": "docs/conf.py"}, {"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sphinx directive for creating code diff tables.\n\nUse directive as follows:\n\n.. codediff::\n :title_left: <LEFT_CODE_BLOCK_TITLE>\n :title_right: <RIGHT_CODE_BLOCK_TITLE>\n\n <CODE_BLOCK_LEFT>\n ---\n <CODE_BLOCK_RIGHT>\n\nIn order to highlight a line of code, append \"#!\" to it.\n\"\"\"\nimport itertools\nfrom typing import List, Tuple\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import ViewList\n\nimport sphinx\nfrom sphinx.util.docutils import SphinxDirective\n\nMISSING = object()\n\nclass CodeDiffParser:\n\n def parse(\n self, lines, title_left='Base', title_right='Diff', code_sep='---', sync=MISSING):\n sync = sync is not MISSING\n\n if code_sep not in lines:\n raise ValueError('Code separator not found! Code snippets should be '\n f'separated by {code_sep}.')\n idx = lines.index(code_sep)\n code_left = self._code_block(lines[0: idx])\n test_code = lines[idx+1:]\n code_right = self._code_block(test_code)\n\n output = self._tabs(\n (title_left, code_left), (title_right, code_right), sync=sync)\n\n return output, test_code\n\n def _code_block(self, lines):\n \"\"\"Creates a codeblock.\"\"\"\n # Remove right trailing whitespace so we can detect the comments.\n lines = [x.rstrip() for x in lines]\n highlight = lambda x: x.endswith('#!')\n code = map(lambda x: x[:-2].rstrip() if highlight(x) else x, lines)\n highlights = [i + 1 for i in range(len(lines)) if highlight(lines[i])]\n highlights = ','.join(str(i) for i in highlights)\n\n directive = ['.. code-block:: python']\n if highlights:\n directive += [f' :emphasize-lines: {highlights}']\n\n # Indent code and add empty line so the code is picked up by the directive.\n return directive + [''] + list(map(lambda x: ' ' + x, code))\n\n def _tabs(self, *contents: Tuple[str, List[str]], sync):\n output = ['.. tab-set::'] + [' ']\n \n for title, content in contents:\n output += [f' .. tab-item:: {title}']\n \n if sync:\n key = title.strip()\n output += [f' :sync: {key}']\n\n output += [' ']\n output += [' ' + line for line in content]\n\n return output\n\nclass CodeDiffDirective(SphinxDirective):\n has_content = True\n option_spec = {\n 'title_left': directives.unchanged,\n 'title_right': directives.unchanged,\n 'code_sep': directives.unchanged,\n 'sync': directives.flag,\n }\n\n def run(self):\n table_code, test_code = CodeDiffParser().parse(\n list(self.content), **self.options)\n\n # Create a test node as a comment node so it won't show up in the docs.\n # We add attribute \"testnodetype\" so it is be picked up by the doctest\n # builder. 
This functionality is not officially documented but can be found\n # in the source code:\n # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/doctest.py\n # (search for 'testnodetype').\n test_code = '\\n'.join(test_code)\n test_node = nodes.comment(test_code, test_code, testnodetype='testcode')\n # Set the source info so the error message is correct when testing.\n self.set_source_info(test_node)\n test_node['options'] = {}\n test_node['language'] = 'python3'\n\n # The table node is the side-by-side diff view that will be shown on RTD.\n table_node = nodes.paragraph()\n self.content = ViewList(table_code, self.content.parent)\n self.state.nested_parse(self.content, self.content_offset, table_node)\n\n return [table_node, test_node]\n\n\ndef setup(app):\n app.add_directive('codediff', CodeDiffDirective)\n\n return {\n 'version': sphinx.__display_version__,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/_ext/codediff.py"}]}
3,221
1,007
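For reference, the `#!` highlight convention described in the directive's docstring boils down to the transformation below; this sketch approximates `_code_block` with made-up input, not the directive's exact output.

```python
from typing import List


def code_block(lines: List[str]) -> List[str]:
    """Strip trailing '#!' markers and turn them into :emphasize-lines:."""
    lines = [ln.rstrip() for ln in lines]
    highlighted = lambda ln: ln.endswith("#!")
    code = [ln[:-2].rstrip() if highlighted(ln) else ln for ln in lines]
    # 1-based indices of the marked lines feed the Sphinx option.
    marks = ",".join(str(i + 1) for i, ln in enumerate(lines) if highlighted(ln))
    directive = [".. code-block:: python"]
    if marks:
        directive.append(f"  :emphasize-lines: {marks}")
    return directive + [""] + ["  " + ln for ln in code]


print("\n".join(code_block(["x = 1", "y = x + 1  #!"])))
```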
gh_patches_debug_20614
rasdani/github-patches
git_diff
pytorch__examples-1189
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `save_model` arg to `mnist_hogwild` example Currently the example doesn't support the `--save_model` argument like the other examples --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mnist_hogwild/main.py` Content: ``` 1 from __future__ import print_function 2 import argparse 3 import torch 4 import torch.nn as nn 5 import torch.nn.functional as F 6 import torch.multiprocessing as mp 7 from torch.utils.data.sampler import Sampler 8 from torchvision import datasets, transforms 9 10 from train import train, test 11 12 # Training settings 13 parser = argparse.ArgumentParser(description='PyTorch MNIST Example') 14 parser.add_argument('--batch-size', type=int, default=64, metavar='N', 15 help='input batch size for training (default: 64)') 16 parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', 17 help='input batch size for testing (default: 1000)') 18 parser.add_argument('--epochs', type=int, default=10, metavar='N', 19 help='number of epochs to train (default: 10)') 20 parser.add_argument('--lr', type=float, default=0.01, metavar='LR', 21 help='learning rate (default: 0.01)') 22 parser.add_argument('--momentum', type=float, default=0.5, metavar='M', 23 help='SGD momentum (default: 0.5)') 24 parser.add_argument('--seed', type=int, default=1, metavar='S', 25 help='random seed (default: 1)') 26 parser.add_argument('--log-interval', type=int, default=10, metavar='N', 27 help='how many batches to wait before logging training status') 28 parser.add_argument('--num-processes', type=int, default=2, metavar='N', 29 help='how many training processes to use (default: 2)') 30 parser.add_argument('--cuda', action='store_true', default=False, 31 help='enables CUDA training') 32 parser.add_argument('--mps', action='store_true', default=False, 33 help='enables macOS GPU training') 34 parser.add_argument('--dry-run', action='store_true', default=False, 35 help='quickly check a single pass') 36 37 class Net(nn.Module): 38 def __init__(self): 39 super(Net, self).__init__() 40 self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 41 self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 42 self.conv2_drop = nn.Dropout2d() 43 self.fc1 = nn.Linear(320, 50) 44 self.fc2 = nn.Linear(50, 10) 45 46 def forward(self, x): 47 x = F.relu(F.max_pool2d(self.conv1(x), 2)) 48 x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 49 x = x.view(-1, 320) 50 x = F.relu(self.fc1(x)) 51 x = F.dropout(x, training=self.training) 52 x = self.fc2(x) 53 return F.log_softmax(x, dim=1) 54 55 56 if __name__ == '__main__': 57 args = parser.parse_args() 58 59 use_cuda = args.cuda and torch.cuda.is_available() 60 use_mps = args.mps and torch.backends.mps.is_available() 61 if use_cuda: 62 device = torch.device("cuda") 63 elif use_mps: 64 device = torch.device("mps") 65 else: 66 device = torch.device("cpu") 67 68 transform=transforms.Compose([ 69 transforms.ToTensor(), 70 transforms.Normalize((0.1307,), (0.3081,)) 71 ]) 72 dataset1 = datasets.MNIST('../data', train=True, download=True, 73 transform=transform) 74 dataset2 = datasets.MNIST('../data', train=False, 75 transform=transform) 76 kwargs = {'batch_size': args.batch_size, 77 'shuffle': True} 78 if use_cuda: 79 kwargs.update({'num_workers': 1, 80 'pin_memory': True, 81 }) 82 83 torch.manual_seed(args.seed) 84 mp.set_start_method('spawn', force=True) 85 86 model = 
Net().to(device) 87 model.share_memory() # gradients are allocated lazily, so they are not shared here 88 89 processes = [] 90 for rank in range(args.num_processes): 91 p = mp.Process(target=train, args=(rank, args, model, device, 92 dataset1, kwargs)) 93 # We first train the model across `num_processes` processes 94 p.start() 95 processes.append(p) 96 for p in processes: 97 p.join() 98 99 # Once training is complete, we can test the model 100 test(args, model, device, dataset2, kwargs) 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mnist_hogwild/main.py b/mnist_hogwild/main.py --- a/mnist_hogwild/main.py +++ b/mnist_hogwild/main.py @@ -30,7 +30,9 @@ parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training') parser.add_argument('--mps', action='store_true', default=False, - help='enables macOS GPU training') + help='enables macOS GPU training') +parser.add_argument('--save_model', action='store_true', default=False, + help='save the trained model to state_dict') parser.add_argument('--dry-run', action='store_true', default=False, help='quickly check a single pass') @@ -96,5 +98,8 @@ for p in processes: p.join() + if args.save_model: + torch.save(model.state_dict(), "MNIST_hogwild.pt") + # Once training is complete, we can test the model test(args, model, device, dataset2, kwargs)
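Outside the Hogwild multiprocessing setup, the change amounts to an argparse flag guarding `torch.save`. Here is a runnable reduction, with a placeholder linear layer standing in for the example's `Net` and the training loop elided:

```python
import argparse

import torch
import torch.nn as nn

parser = argparse.ArgumentParser()
parser.add_argument("--save_model", action="store_true", default=False,
                    help="save the trained model to state_dict")

if __name__ == "__main__":
    args = parser.parse_args()
    model = nn.Linear(4, 2)  # stands in for the example's Net()
    # ... multiprocessing training would run here ...
    if args.save_model:
        torch.save(model.state_dict(), "MNIST_hogwild.pt")
        print("saved MNIST_hogwild.pt")
```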
{"golden_diff": "diff --git a/mnist_hogwild/main.py b/mnist_hogwild/main.py\n--- a/mnist_hogwild/main.py\n+++ b/mnist_hogwild/main.py\n@@ -30,7 +30,9 @@\n parser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\n parser.add_argument('--mps', action='store_true', default=False,\n- help='enables macOS GPU training')\n+ help='enables macOS GPU training')\n+parser.add_argument('--save_model', action='store_true', default=False,\n+ help='save the trained model to state_dict')\n parser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n \n@@ -96,5 +98,8 @@\n for p in processes:\n p.join()\n \n+ if args.save_model:\n+ torch.save(model.state_dict(), \"MNIST_hogwild.pt\")\n+\n # Once training is complete, we can test the model\n test(args, model, device, dataset2, kwargs)\n", "issue": "Add `save_model` arg to `mnist_hogwild` example\nCurrently the example doesn't support the `--save_model` argument like the other examples\r\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom torch.utils.data.sampler import Sampler\nfrom torchvision import datasets, transforms\n\nfrom train import train, test\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--num-processes', type=int, default=2, metavar='N',\n help='how many training processes to use (default: 2)')\nparser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--mps', action='store_true', default=False,\n help='enables macOS GPU training')\nparser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n use_cuda = args.cuda and torch.cuda.is_available()\n use_mps = args.mps and torch.backends.mps.is_available()\n if use_cuda:\n device = torch.device(\"cuda\")\n elif use_mps:\n device = torch.device(\"mps\")\n else:\n device = 
torch.device(\"cpu\")\n\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n dataset1 = datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = datasets.MNIST('../data', train=False,\n transform=transform)\n kwargs = {'batch_size': args.batch_size,\n 'shuffle': True}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n })\n\n torch.manual_seed(args.seed)\n mp.set_start_method('spawn', force=True)\n\n model = Net().to(device)\n model.share_memory() # gradients are allocated lazily, so they are not shared here\n\n processes = []\n for rank in range(args.num_processes):\n p = mp.Process(target=train, args=(rank, args, model, device,\n dataset1, kwargs))\n # We first train the model across `num_processes` processes\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n # Once training is complete, we can test the model\n test(args, model, device, dataset2, kwargs)\n", "path": "mnist_hogwild/main.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom torch.utils.data.sampler import Sampler\nfrom torchvision import datasets, transforms\n\nfrom train import train, test\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--num-processes', type=int, default=2, metavar='N',\n help='how many training processes to use (default: 2)')\nparser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--mps', action='store_true', default=False,\n help='enables macOS GPU training')\nparser.add_argument('--save_model', action='store_true', default=False,\n help='save the trained model to state_dict')\nparser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n use_cuda = args.cuda and torch.cuda.is_available()\n use_mps = args.mps and 
torch.backends.mps.is_available()\n if use_cuda:\n device = torch.device(\"cuda\")\n elif use_mps:\n device = torch.device(\"mps\")\n else:\n device = torch.device(\"cpu\")\n\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n dataset1 = datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = datasets.MNIST('../data', train=False,\n transform=transform)\n kwargs = {'batch_size': args.batch_size,\n 'shuffle': True}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n })\n\n torch.manual_seed(args.seed)\n mp.set_start_method('spawn', force=True)\n\n model = Net().to(device)\n model.share_memory() # gradients are allocated lazily, so they are not shared here\n\n processes = []\n for rank in range(args.num_processes):\n p = mp.Process(target=train, args=(rank, args, model, device,\n dataset1, kwargs))\n # We first train the model across `num_processes` processes\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n if args.save_model:\n torch.save(model.state_dict(), \"MNIST_hogwild.pt\")\n\n # Once training is complete, we can test the model\n test(args, model, device, dataset2, kwargs)\n", "path": "mnist_hogwild/main.py"}]}
1,460
235
gh_patches_debug_33732
rasdani/github-patches
git_diff
qtile__qtile-1883
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- widget.Pacman needs a non existing file:'checkupdates' ``` 2020-03-30 07:09:12,582 ERROR libqtile base.py:worker():L478 problem polling to update widget pacman Traceback (most recent call last): File "/home/parsrch/.local/lib/python3.8/site-packages/libqtile/widget/base.py", line 474, in worker text = self.poll() File "/home/parsrch/.local/lib/python3.8/site-packages/libqtile/widget/pacman.py", line 49, in poll pacman = self.call_process(['checkupdates']) File "/home/parsrch/.local/lib/python3.8/site-packages/libqtile/widget/base.py", line 266, in call_process output = subprocess.check_output(command, **kwargs) File "/usr/lib/python3.8/subprocess.py", line 411, in check_output return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, File "/usr/lib/python3.8/subprocess.py", line 489, in run with Popen(*popenargs, **kwargs) as process: File "/usr/lib/python3.8/subprocess.py", line 854, in __init__ self._execute_child(args, executable, preexec_fn, close_fds, File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child raise child_exception_type(errno_num, err_msg, err_filename) FileNotFoundError: [Errno 2] No such file or directory: 'checkupdates' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/widget/pacman.py` Content: ``` 1 # -*- coding:utf-8 -*- 2 # 3 # Copyright (C) 2012, Maximilian Köhl <[email protected]> 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program. If not, see <http://www.gnu.org/licenses/>. 17 18 import subprocess 19 20 from libqtile.log_utils import logger 21 from libqtile.widget import base 22 23 24 class Pacman(base.ThreadedPollText): 25 """Shows number of available updates 26 27 Needs the pacman package manager installed. So will only work in Arch Linux 28 installation. 
29 """ 30 orientations = base.ORIENTATION_HORIZONTAL 31 defaults = [ 32 ('unavailable', 'ffffff', 'Unavailable Color - no updates.'), 33 ('execute', None, 'Command to execute on click'), 34 ('update_interval', 60, "The update interval."), 35 ] 36 37 def __init__(self, **config): 38 logger.warning("Pacman is deprecated, please use CheckUpdates") 39 base.ThreadedPollText.__init__(self, **config) 40 self.add_defaults(Pacman.defaults) 41 42 def draw(self): 43 if self.text == '0': 44 self.layout.colour = self.unavailable 45 else: 46 self.layout.colour = self.foreground 47 base.ThreadedPollText.draw(self) 48 49 def poll(self): 50 pacman = self.call_process(['checkupdates']) 51 return str(len(pacman.splitlines())) 52 53 def button_press(self, x, y, button): 54 base.ThreadedPollText.button_press(self, x, y, button) 55 if button == 1 and self.execute is not None: 56 subprocess.Popen([self.execute], shell=True) 57 ``` Path: `libqtile/widget/__init__.py` Content: ``` 1 # Copyright (c) 2014 Rock Neurotiko 2 # Copyright (c) 2014 roger 3 # Copyright (c) 2015 David R. Andersen 4 # 5 # Permission is hereby granted, free of charge, to any person obtaining a copy 6 # of this software and associated documentation files (the "Software"), to deal 7 # in the Software without restriction, including without limitation the rights 8 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 # copies of the Software, and to permit persons to whom the Software is 10 # furnished to do so, subject to the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be included in 13 # all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 # SOFTWARE. 
22 23 from libqtile.utils import safe_import as safe_import_ 24 # only directly import widgets that do not have any third party dependencies 25 # other than those required by qtile, otherwise use the same import function 26 from libqtile.widget.base import Mirror # noqa: F401 27 from libqtile.widget.chord import Chord # noqa: F401 28 from libqtile.widget.clock import Clock # noqa: F401 29 from libqtile.widget.currentlayout import ( # noqa: F401 30 CurrentLayout, 31 CurrentLayoutIcon, 32 ) 33 from libqtile.widget.groupbox import AGroupBox, GroupBox # noqa: F401 34 from libqtile.widget.import_error import make_error 35 from libqtile.widget.prompt import Prompt # noqa: F401 36 from libqtile.widget.quick_exit import QuickExit # noqa: F401 37 from libqtile.widget.systray import Systray # noqa: F401 38 from libqtile.widget.textbox import TextBox # noqa: F401 39 from libqtile.widget.windowname import WindowName # noqa: F401 40 41 42 def safe_import(module_name, class_name): 43 safe_import_( 44 (".widget", module_name), class_name, globals(), fallback=make_error 45 ) 46 47 48 safe_import("backlight", "Backlight") 49 safe_import("battery", ["Battery", "BatteryIcon"]) 50 safe_import("currentscreen", "CurrentScreen") 51 safe_import("debuginfo", "DebugInfo") 52 safe_import( 53 "graph", 54 [ 55 "CPUGraph", 56 "MemoryGraph", 57 "SwapGraph", 58 "NetGraph", 59 "HDDGraph", 60 "HDDBusyGraph", 61 ], 62 ) 63 safe_import("maildir", "Maildir") 64 safe_import("notify", "Notify") 65 safe_import("sensors", "ThermalSensor") 66 safe_import("sep", "Sep") 67 safe_import("she", "She") 68 safe_import("spacer", "Spacer") 69 safe_import("generic_poll_text", ["GenPollText", "GenPollUrl"]) 70 safe_import("volume", "Volume") 71 safe_import("windowtabs", "WindowTabs") 72 safe_import("keyboardlayout", "KeyboardLayout") 73 safe_import("df", "DF") 74 safe_import("image", "Image") 75 safe_import("gmail_checker", "GmailChecker") 76 safe_import("clipboard", "Clipboard") 77 safe_import("countdown", "Countdown") 78 safe_import("tasklist", "TaskList") 79 safe_import("pacman", "Pacman") 80 safe_import("launchbar", "LaunchBar") 81 safe_import("canto", "Canto") 82 safe_import("mpriswidget", "Mpris") 83 safe_import("mpris2widget", "Mpris2") 84 safe_import("mpd2widget", "Mpd2") 85 safe_import("yahoo_weather", "YahooWeather") 86 safe_import("bitcoin_ticker", "BitcoinTicker") 87 safe_import("wlan", "Wlan") 88 safe_import("khal_calendar", "KhalCalendar") 89 safe_import("imapwidget", "ImapWidget") 90 safe_import("net", "Net") 91 safe_import("keyboardkbdd", "KeyboardKbdd") 92 safe_import("cmus", "Cmus") 93 safe_import("wallpaper", "Wallpaper") 94 safe_import("check_updates", "CheckUpdates") 95 safe_import("moc", "Moc") 96 safe_import("memory", "Memory") 97 safe_import("cpu", "CPU") 98 safe_import("idlerpg", "IdleRPG") 99 safe_import("pomodoro", "Pomodoro") 100 safe_import("stock_ticker", "StockTicker") 101 safe_import("caps_num_lock_indicator", "CapsNumLockIndicator") 102 safe_import("quick_exit", "QuickExit") 103 safe_import("pulse_volume", "PulseVolume") 104 safe_import("chord", "Chord") 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/widget/__init__.py b/libqtile/widget/__init__.py --- a/libqtile/widget/__init__.py +++ b/libqtile/widget/__init__.py @@ -76,7 +76,6 @@ safe_import("clipboard", "Clipboard") safe_import("countdown", "Countdown") safe_import("tasklist", "TaskList") -safe_import("pacman", "Pacman") safe_import("launchbar", "LaunchBar") safe_import("canto", "Canto") safe_import("mpriswidget", "Mpris") diff --git a/libqtile/widget/pacman.py b/libqtile/widget/pacman.py deleted file mode 100644 --- a/libqtile/widget/pacman.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding:utf-8 -*- -# -# Copyright (C) 2012, Maximilian Köhl <[email protected]> -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import subprocess - -from libqtile.log_utils import logger -from libqtile.widget import base - - -class Pacman(base.ThreadedPollText): - """Shows number of available updates - - Needs the pacman package manager installed. So will only work in Arch Linux - installation. - """ - orientations = base.ORIENTATION_HORIZONTAL - defaults = [ - ('unavailable', 'ffffff', 'Unavailable Color - no updates.'), - ('execute', None, 'Command to execute on click'), - ('update_interval', 60, "The update interval."), - ] - - def __init__(self, **config): - logger.warning("Pacman is deprecated, please use CheckUpdates") - base.ThreadedPollText.__init__(self, **config) - self.add_defaults(Pacman.defaults) - - def draw(self): - if self.text == '0': - self.layout.colour = self.unavailable - else: - self.layout.colour = self.foreground - base.ThreadedPollText.draw(self) - - def poll(self): - pacman = self.call_process(['checkupdates']) - return str(len(pacman.splitlines())) - - def button_press(self, x, y, button): - base.ThreadedPollText.button_press(self, x, y, button) - if button == 1 and self.execute is not None: - subprocess.Popen([self.execute], shell=True)
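A note on the golden diff above: rather than guarding the missing `checkupdates` binary, the fix deletes the long-deprecated `Pacman` widget and its `safe_import` registration, steering users to `CheckUpdates` as the widget's own deprecation warning already did. For completeness, the stdlib-only sketch below shows the kind of guard a poller could use instead; the function name and fallback string are illustrative, not qtile API.

```python
# Minimal defensive sketch, stdlib only. `checkupdates` ships with Arch's
# pacman-contrib package; when it is absent, subprocess raises the
# FileNotFoundError seen in the issue's traceback.
import shutil
import subprocess

def count_pacman_updates():
    if shutil.which("checkupdates") is None:
        return "N/A"  # binary not installed: report no data instead of crashing
    # checkupdates may exit non-zero when there are simply no updates,
    # so count stdout lines instead of using check=True.
    result = subprocess.run(["checkupdates"], capture_output=True, text=True)
    return str(len(result.stdout.splitlines()))

print(count_pacman_updates())
```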
{"golden_diff": "diff --git a/libqtile/widget/__init__.py b/libqtile/widget/__init__.py\n--- a/libqtile/widget/__init__.py\n+++ b/libqtile/widget/__init__.py\n@@ -76,7 +76,6 @@\n safe_import(\"clipboard\", \"Clipboard\")\n safe_import(\"countdown\", \"Countdown\")\n safe_import(\"tasklist\", \"TaskList\")\n-safe_import(\"pacman\", \"Pacman\")\n safe_import(\"launchbar\", \"LaunchBar\")\n safe_import(\"canto\", \"Canto\")\n safe_import(\"mpriswidget\", \"Mpris\")\ndiff --git a/libqtile/widget/pacman.py b/libqtile/widget/pacman.py\ndeleted file mode 100644\n--- a/libqtile/widget/pacman.py\n+++ /dev/null\n@@ -1,56 +0,0 @@\n-# -*- coding:utf-8 -*-\n-#\n-# Copyright (C) 2012, Maximilian K\u00f6hl <[email protected]>\n-#\n-# This program is free software: you can redistribute it and/or modify\n-# it under the terms of the GNU General Public License as published by\n-# the Free Software Foundation, either version 3 of the License, or\n-# (at your option) any later version.\n-#\n-# This program is distributed in the hope that it will be useful,\n-# but WITHOUT ANY WARRANTY; without even the implied warranty of\n-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n-# GNU General Public License for more details.\n-#\n-# You should have received a copy of the GNU General Public License\n-# along with this program. If not, see <http://www.gnu.org/licenses/>.\n-\n-import subprocess\n-\n-from libqtile.log_utils import logger\n-from libqtile.widget import base\n-\n-\n-class Pacman(base.ThreadedPollText):\n- \"\"\"Shows number of available updates\n-\n- Needs the pacman package manager installed. So will only work in Arch Linux\n- installation.\n- \"\"\"\n- orientations = base.ORIENTATION_HORIZONTAL\n- defaults = [\n- ('unavailable', 'ffffff', 'Unavailable Color - no updates.'),\n- ('execute', None, 'Command to execute on click'),\n- ('update_interval', 60, \"The update interval.\"),\n- ]\n-\n- def __init__(self, **config):\n- logger.warning(\"Pacman is deprecated, please use CheckUpdates\")\n- base.ThreadedPollText.__init__(self, **config)\n- self.add_defaults(Pacman.defaults)\n-\n- def draw(self):\n- if self.text == '0':\n- self.layout.colour = self.unavailable\n- else:\n- self.layout.colour = self.foreground\n- base.ThreadedPollText.draw(self)\n-\n- def poll(self):\n- pacman = self.call_process(['checkupdates'])\n- return str(len(pacman.splitlines()))\n-\n- def button_press(self, x, y, button):\n- base.ThreadedPollText.button_press(self, x, y, button)\n- if button == 1 and self.execute is not None:\n- subprocess.Popen([self.execute], shell=True)\n", "issue": "widget.Pacman needs a non existing file:'checkupdates'\n```\r\n2020-03-30 07:09:12,582 ERROR libqtile base.py:worker():L478 problem polling to update widget pacman\r\nTraceback (most recent call last):\r\n File \"/home/parsrch/.local/lib/python3.8/site-packages/libqtile/widget/base.py\", line 474, in worker\r\n text = self.poll()\r\n File \"/home/parsrch/.local/lib/python3.8/site-packages/libqtile/widget/pacman.py\", line 49, in poll\r\n pacman = self.call_process(['checkupdates'])\r\n File \"/home/parsrch/.local/lib/python3.8/site-packages/libqtile/widget/base.py\", line 266, in call_process\r\n output = subprocess.check_output(command, **kwargs)\r\n File \"/usr/lib/python3.8/subprocess.py\", line 411, in check_output\r\n return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,\r\n File \"/usr/lib/python3.8/subprocess.py\", line 489, in run\r\n with Popen(*popenargs, **kwargs) as process:\r\n File 
\"/usr/lib/python3.8/subprocess.py\", line 854, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/usr/lib/python3.8/subprocess.py\", line 1702, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'checkupdates'\r\n```\n", "before_files": [{"content": "# -*- coding:utf-8 -*-\n#\n# Copyright (C) 2012, Maximilian K\u00f6hl <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport subprocess\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass Pacman(base.ThreadedPollText):\n \"\"\"Shows number of available updates\n\n Needs the pacman package manager installed. So will only work in Arch Linux\n installation.\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('unavailable', 'ffffff', 'Unavailable Color - no updates.'),\n ('execute', None, 'Command to execute on click'),\n ('update_interval', 60, \"The update interval.\"),\n ]\n\n def __init__(self, **config):\n logger.warning(\"Pacman is deprecated, please use CheckUpdates\")\n base.ThreadedPollText.__init__(self, **config)\n self.add_defaults(Pacman.defaults)\n\n def draw(self):\n if self.text == '0':\n self.layout.colour = self.unavailable\n else:\n self.layout.colour = self.foreground\n base.ThreadedPollText.draw(self)\n\n def poll(self):\n pacman = self.call_process(['checkupdates'])\n return str(len(pacman.splitlines()))\n\n def button_press(self, x, y, button):\n base.ThreadedPollText.button_press(self, x, y, button)\n if button == 1 and self.execute is not None:\n subprocess.Popen([self.execute], shell=True)\n", "path": "libqtile/widget/pacman.py"}, {"content": "# Copyright (c) 2014 Rock Neurotiko\n# Copyright (c) 2014 roger\n# Copyright (c) 2015 David R. Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile.utils import safe_import as safe_import_\n# only directly import widgets that do not have any third party dependencies\n# other than those required by qtile, otherwise use the same import function\nfrom libqtile.widget.base import Mirror # noqa: F401\nfrom libqtile.widget.chord import Chord # noqa: F401\nfrom libqtile.widget.clock import Clock # noqa: F401\nfrom libqtile.widget.currentlayout import ( # noqa: F401\n CurrentLayout,\n CurrentLayoutIcon,\n)\nfrom libqtile.widget.groupbox import AGroupBox, GroupBox # noqa: F401\nfrom libqtile.widget.import_error import make_error\nfrom libqtile.widget.prompt import Prompt # noqa: F401\nfrom libqtile.widget.quick_exit import QuickExit # noqa: F401\nfrom libqtile.widget.systray import Systray # noqa: F401\nfrom libqtile.widget.textbox import TextBox # noqa: F401\nfrom libqtile.widget.windowname import WindowName # noqa: F401\n\n\ndef safe_import(module_name, class_name):\n safe_import_(\n (\".widget\", module_name), class_name, globals(), fallback=make_error\n )\n\n\nsafe_import(\"backlight\", \"Backlight\")\nsafe_import(\"battery\", [\"Battery\", \"BatteryIcon\"])\nsafe_import(\"currentscreen\", \"CurrentScreen\")\nsafe_import(\"debuginfo\", \"DebugInfo\")\nsafe_import(\n \"graph\",\n [\n \"CPUGraph\",\n \"MemoryGraph\",\n \"SwapGraph\",\n \"NetGraph\",\n \"HDDGraph\",\n \"HDDBusyGraph\",\n ],\n)\nsafe_import(\"maildir\", \"Maildir\")\nsafe_import(\"notify\", \"Notify\")\nsafe_import(\"sensors\", \"ThermalSensor\")\nsafe_import(\"sep\", \"Sep\")\nsafe_import(\"she\", \"She\")\nsafe_import(\"spacer\", \"Spacer\")\nsafe_import(\"generic_poll_text\", [\"GenPollText\", \"GenPollUrl\"])\nsafe_import(\"volume\", \"Volume\")\nsafe_import(\"windowtabs\", \"WindowTabs\")\nsafe_import(\"keyboardlayout\", \"KeyboardLayout\")\nsafe_import(\"df\", \"DF\")\nsafe_import(\"image\", \"Image\")\nsafe_import(\"gmail_checker\", \"GmailChecker\")\nsafe_import(\"clipboard\", \"Clipboard\")\nsafe_import(\"countdown\", \"Countdown\")\nsafe_import(\"tasklist\", \"TaskList\")\nsafe_import(\"pacman\", \"Pacman\")\nsafe_import(\"launchbar\", \"LaunchBar\")\nsafe_import(\"canto\", \"Canto\")\nsafe_import(\"mpriswidget\", \"Mpris\")\nsafe_import(\"mpris2widget\", \"Mpris2\")\nsafe_import(\"mpd2widget\", \"Mpd2\")\nsafe_import(\"yahoo_weather\", \"YahooWeather\")\nsafe_import(\"bitcoin_ticker\", \"BitcoinTicker\")\nsafe_import(\"wlan\", \"Wlan\")\nsafe_import(\"khal_calendar\", \"KhalCalendar\")\nsafe_import(\"imapwidget\", \"ImapWidget\")\nsafe_import(\"net\", \"Net\")\nsafe_import(\"keyboardkbdd\", \"KeyboardKbdd\")\nsafe_import(\"cmus\", \"Cmus\")\nsafe_import(\"wallpaper\", \"Wallpaper\")\nsafe_import(\"check_updates\", \"CheckUpdates\")\nsafe_import(\"moc\", \"Moc\")\nsafe_import(\"memory\", \"Memory\")\nsafe_import(\"cpu\", \"CPU\")\nsafe_import(\"idlerpg\", \"IdleRPG\")\nsafe_import(\"pomodoro\", \"Pomodoro\")\nsafe_import(\"stock_ticker\", \"StockTicker\")\nsafe_import(\"caps_num_lock_indicator\", \"CapsNumLockIndicator\")\nsafe_import(\"quick_exit\", \"QuickExit\")\nsafe_import(\"pulse_volume\", \"PulseVolume\")\nsafe_import(\"chord\", \"Chord\")\n", "path": "libqtile/widget/__init__.py"}], "after_files": [{"content": null, "path": "libqtile/widget/pacman.py"}, {"content": "# 
Copyright (c) 2014 Rock Neurotiko\n# Copyright (c) 2014 roger\n# Copyright (c) 2015 David R. Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile.utils import safe_import as safe_import_\n# only directly import widgets that do not have any third party dependencies\n# other than those required by qtile, otherwise use the same import function\nfrom libqtile.widget.base import Mirror # noqa: F401\nfrom libqtile.widget.chord import Chord # noqa: F401\nfrom libqtile.widget.clock import Clock # noqa: F401\nfrom libqtile.widget.currentlayout import ( # noqa: F401\n CurrentLayout,\n CurrentLayoutIcon,\n)\nfrom libqtile.widget.groupbox import AGroupBox, GroupBox # noqa: F401\nfrom libqtile.widget.import_error import make_error\nfrom libqtile.widget.prompt import Prompt # noqa: F401\nfrom libqtile.widget.quick_exit import QuickExit # noqa: F401\nfrom libqtile.widget.systray import Systray # noqa: F401\nfrom libqtile.widget.textbox import TextBox # noqa: F401\nfrom libqtile.widget.windowname import WindowName # noqa: F401\n\n\ndef safe_import(module_name, class_name):\n safe_import_(\n (\".widget\", module_name), class_name, globals(), fallback=make_error\n )\n\n\nsafe_import(\"backlight\", \"Backlight\")\nsafe_import(\"battery\", [\"Battery\", \"BatteryIcon\"])\nsafe_import(\"currentscreen\", \"CurrentScreen\")\nsafe_import(\"debuginfo\", \"DebugInfo\")\nsafe_import(\n \"graph\",\n [\n \"CPUGraph\",\n \"MemoryGraph\",\n \"SwapGraph\",\n \"NetGraph\",\n \"HDDGraph\",\n \"HDDBusyGraph\",\n ],\n)\nsafe_import(\"maildir\", \"Maildir\")\nsafe_import(\"notify\", \"Notify\")\nsafe_import(\"sensors\", \"ThermalSensor\")\nsafe_import(\"sep\", \"Sep\")\nsafe_import(\"she\", \"She\")\nsafe_import(\"spacer\", \"Spacer\")\nsafe_import(\"generic_poll_text\", [\"GenPollText\", \"GenPollUrl\"])\nsafe_import(\"volume\", \"Volume\")\nsafe_import(\"windowtabs\", \"WindowTabs\")\nsafe_import(\"keyboardlayout\", \"KeyboardLayout\")\nsafe_import(\"df\", \"DF\")\nsafe_import(\"image\", \"Image\")\nsafe_import(\"gmail_checker\", \"GmailChecker\")\nsafe_import(\"clipboard\", \"Clipboard\")\nsafe_import(\"countdown\", \"Countdown\")\nsafe_import(\"tasklist\", \"TaskList\")\nsafe_import(\"launchbar\", \"LaunchBar\")\nsafe_import(\"canto\", \"Canto\")\nsafe_import(\"mpriswidget\", \"Mpris\")\nsafe_import(\"mpris2widget\", \"Mpris2\")\nsafe_import(\"mpd2widget\", \"Mpd2\")\nsafe_import(\"yahoo_weather\", \"YahooWeather\")\nsafe_import(\"bitcoin_ticker\", \"BitcoinTicker\")\nsafe_import(\"wlan\", 
\"Wlan\")\nsafe_import(\"khal_calendar\", \"KhalCalendar\")\nsafe_import(\"imapwidget\", \"ImapWidget\")\nsafe_import(\"net\", \"Net\")\nsafe_import(\"keyboardkbdd\", \"KeyboardKbdd\")\nsafe_import(\"cmus\", \"Cmus\")\nsafe_import(\"wallpaper\", \"Wallpaper\")\nsafe_import(\"check_updates\", \"CheckUpdates\")\nsafe_import(\"moc\", \"Moc\")\nsafe_import(\"memory\", \"Memory\")\nsafe_import(\"cpu\", \"CPU\")\nsafe_import(\"idlerpg\", \"IdleRPG\")\nsafe_import(\"pomodoro\", \"Pomodoro\")\nsafe_import(\"stock_ticker\", \"StockTicker\")\nsafe_import(\"caps_num_lock_indicator\", \"CapsNumLockIndicator\")\nsafe_import(\"quick_exit\", \"QuickExit\")\nsafe_import(\"pulse_volume\", \"PulseVolume\")\nsafe_import(\"chord\", \"Chord\")\n", "path": "libqtile/widget/__init__.py"}]}
2,493
686
gh_patches_debug_5067
rasdani/github-patches
git_diff
jupyterhub__jupyterhub-1111
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Passwords beginning or ending with a whitespace are not supported Due to POST argument stripping, passwords with a beginning or ending whitespace are not allowed. **How to reproduce the issue** Set up a user password with an ending or beginning whitespace. **What you expected to happen** The user should be allowed to login with the password, given that the password should be any complicated sequence of characters the user can reproduce. **What actually happens** The user is denied access, because the LoginHandler will strip all posted values before considering the password for authentication (line 81, get_argument has a default "strip=True") **Share what version of JupyterHub you are using** HEAD (006488fc749923851df97d47d8850bdf5fd157cf) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jupyterhub/handlers/login.py` Content: ``` 1 """HTTP Handlers for the hub server""" 2 3 # Copyright (c) Jupyter Development Team. 4 # Distributed under the terms of the Modified BSD License. 5 6 from urllib.parse import urlparse 7 8 from tornado.escape import url_escape 9 from tornado import gen 10 from tornado.httputil import url_concat 11 12 from .base import BaseHandler 13 14 15 class LogoutHandler(BaseHandler): 16 """Log a user out by clearing their login cookie.""" 17 def get(self): 18 user = self.get_current_user() 19 if user: 20 self.log.info("User logged out: %s", user.name) 21 self.clear_login_cookie() 22 self.statsd.incr('logout') 23 if self.authenticator.auto_login: 24 self.render('logout.html') 25 else: 26 self.redirect(self.settings['login_url'], permanent=False) 27 28 29 class LoginHandler(BaseHandler): 30 """Render the login page.""" 31 32 def _render(self, login_error=None, username=None): 33 return self.render_template('login.html', 34 next=url_escape(self.get_argument('next', default='')), 35 username=username, 36 login_error=login_error, 37 custom_html=self.authenticator.custom_html, 38 login_url=self.settings['login_url'], 39 authenticator_login_url=self.authenticator.login_url(self.hub.server.base_url), 40 ) 41 42 def get(self): 43 self.statsd.incr('login.request') 44 next_url = self.get_argument('next', '') 45 if (next_url + '/').startswith('%s://%s/' % (self.request.protocol, self.request.host)): 46 # treat absolute URLs for our host as absolute paths: 47 next_url = urlparse(next_url).path 48 elif not next_url.startswith('/'): 49 # disallow non-absolute next URLs (e.g. 
full URLs to other hosts) 50 next_url = '' 51 user = self.get_current_user() 52 if user: 53 if not next_url: 54 if user.running: 55 next_url = user.url 56 else: 57 next_url = self.hub.server.base_url 58 # set new login cookie 59 # because single-user cookie may have been cleared or incorrect 60 self.set_login_cookie(self.get_current_user()) 61 self.redirect(next_url, permanent=False) 62 else: 63 if self.authenticator.auto_login: 64 auto_login_url = self.authenticator.login_url(self.hub.server.base_url) 65 if auto_login_url == self.settings['login_url']: 66 self.authenticator.auto_login = False 67 self.log.warning("Authenticator.auto_login cannot be used without a custom login_url") 68 else: 69 if next_url: 70 auto_login_url = url_concat(auto_login_url, {'next': next_url}) 71 self.redirect(auto_login_url) 72 return 73 username = self.get_argument('username', default='') 74 self.finish(self._render(username=username)) 75 76 @gen.coroutine 77 def post(self): 78 # parse the arguments dict 79 data = {} 80 for arg in self.request.arguments: 81 data[arg] = self.get_argument(arg) 82 83 auth_timer = self.statsd.timer('login.authenticate').start() 84 username = yield self.authenticate(data) 85 auth_timer.stop(send=False) 86 87 if username: 88 self.statsd.incr('login.success') 89 self.statsd.timing('login.authenticate.success', auth_timer.ms) 90 user = self.user_from_username(username) 91 already_running = False 92 if user.spawner: 93 status = yield user.spawner.poll() 94 already_running = (status == None) 95 if not already_running and not user.spawner.options_form: 96 yield self.spawn_single_user(user) 97 self.set_login_cookie(user) 98 next_url = self.get_argument('next', default='') 99 if not next_url.startswith('/'): 100 next_url = '' 101 next_url = next_url or self.hub.server.base_url 102 self.redirect(next_url) 103 self.log.info("User logged in: %s", username) 104 else: 105 self.statsd.incr('login.failure') 106 self.statsd.timing('login.authenticate.failure', auth_timer.ms) 107 self.log.debug("Failed login for %s", data.get('username', 'unknown user')) 108 html = self._render( 109 login_error='Invalid username or password', 110 username=username, 111 ) 112 self.finish(html) 113 114 115 # /login renders the login page or the "Login with..." link, 116 # so it should always be registered. 117 # /logout clears cookies. 118 default_handlers = [ 119 (r"/login", LoginHandler), 120 (r"/logout", LogoutHandler), 121 ] 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jupyterhub/handlers/login.py b/jupyterhub/handlers/login.py --- a/jupyterhub/handlers/login.py +++ b/jupyterhub/handlers/login.py @@ -78,7 +78,7 @@ # parse the arguments dict data = {} for arg in self.request.arguments: - data[arg] = self.get_argument(arg) + data[arg] = self.get_argument(arg, strip=False) auth_timer = self.statsd.timer('login.authenticate').start() username = yield self.authenticate(data)
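A note on the golden diff above: Tornado's `RequestHandler.get_argument` strips leading and trailing whitespace by default (`strip=True`), so every POSTed field, including the password, was trimmed before reaching the authenticator. Passing `strip=False` preserves exactly what the user typed. The self-contained sketch below mimics that behavior without Tornado; the form data is hypothetical.

```python
# Illustration of why strip=True breaks whitespace-sensitive passwords.
# Mimics tornado.web.RequestHandler.get_argument; not the real class.
def get_argument(arguments, name, strip=True):
    value = arguments[name][-1]  # Tornado returns the last value for a field
    return value.strip() if strip else value

posted = {"username": ["alice"], "password": [" hunter2 "]}
stored_password = " hunter2 "  # the password the user actually registered

print(get_argument(posted, "password") == stored_password)               # False
print(get_argument(posted, "password", strip=False) == stored_password)  # True
```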
{"golden_diff": "diff --git a/jupyterhub/handlers/login.py b/jupyterhub/handlers/login.py\n--- a/jupyterhub/handlers/login.py\n+++ b/jupyterhub/handlers/login.py\n@@ -78,7 +78,7 @@\n # parse the arguments dict\n data = {}\n for arg in self.request.arguments:\n- data[arg] = self.get_argument(arg)\n+ data[arg] = self.get_argument(arg, strip=False)\n \n auth_timer = self.statsd.timer('login.authenticate').start()\n username = yield self.authenticate(data)\n", "issue": "Passwords beginning or ending with a whitespace are not supported\nDue to POST argument stripping, passwords with a beginning or ending whitespace are not allowed.\r\n\r\n**How to reproduce the issue**\r\nSet up a user password with an ending or beginning whitespace.\r\n\r\n**What you expected to happen**\r\nThe user should be allowed to login with the password, given that the password should be any complicated sequence of characters the user can reproduce.\r\n\r\n**What actually happens**\r\nThe user is denied access, because the LoginHandler will strip all posted values before considering the password for authentication (line 81, get_argument has a default \"strip=True\")\r\n\r\n**Share what version of JupyterHub you are using**\r\nHEAD (006488fc749923851df97d47d8850bdf5fd157cf)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"HTTP Handlers for the hub server\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom urllib.parse import urlparse\n\nfrom tornado.escape import url_escape\nfrom tornado import gen\nfrom tornado.httputil import url_concat\n\nfrom .base import BaseHandler\n\n\nclass LogoutHandler(BaseHandler):\n \"\"\"Log a user out by clearing their login cookie.\"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n self.log.info(\"User logged out: %s\", user.name)\n self.clear_login_cookie()\n self.statsd.incr('logout')\n if self.authenticator.auto_login:\n self.render('logout.html')\n else:\n self.redirect(self.settings['login_url'], permanent=False)\n\n\nclass LoginHandler(BaseHandler):\n \"\"\"Render the login page.\"\"\"\n\n def _render(self, login_error=None, username=None):\n return self.render_template('login.html',\n next=url_escape(self.get_argument('next', default='')),\n username=username,\n login_error=login_error,\n custom_html=self.authenticator.custom_html,\n login_url=self.settings['login_url'],\n authenticator_login_url=self.authenticator.login_url(self.hub.server.base_url),\n )\n\n def get(self):\n self.statsd.incr('login.request')\n next_url = self.get_argument('next', '')\n if (next_url + '/').startswith('%s://%s/' % (self.request.protocol, self.request.host)):\n # treat absolute URLs for our host as absolute paths:\n next_url = urlparse(next_url).path\n elif not next_url.startswith('/'):\n # disallow non-absolute next URLs (e.g. 
full URLs to other hosts)\n next_url = ''\n user = self.get_current_user()\n if user:\n if not next_url:\n if user.running:\n next_url = user.url\n else:\n next_url = self.hub.server.base_url\n # set new login cookie\n # because single-user cookie may have been cleared or incorrect\n self.set_login_cookie(self.get_current_user())\n self.redirect(next_url, permanent=False)\n else:\n if self.authenticator.auto_login:\n auto_login_url = self.authenticator.login_url(self.hub.server.base_url)\n if auto_login_url == self.settings['login_url']:\n self.authenticator.auto_login = False\n self.log.warning(\"Authenticator.auto_login cannot be used without a custom login_url\")\n else:\n if next_url:\n auto_login_url = url_concat(auto_login_url, {'next': next_url})\n self.redirect(auto_login_url)\n return\n username = self.get_argument('username', default='')\n self.finish(self._render(username=username))\n\n @gen.coroutine\n def post(self):\n # parse the arguments dict\n data = {}\n for arg in self.request.arguments:\n data[arg] = self.get_argument(arg)\n\n auth_timer = self.statsd.timer('login.authenticate').start()\n username = yield self.authenticate(data)\n auth_timer.stop(send=False)\n\n if username:\n self.statsd.incr('login.success')\n self.statsd.timing('login.authenticate.success', auth_timer.ms)\n user = self.user_from_username(username)\n already_running = False\n if user.spawner:\n status = yield user.spawner.poll()\n already_running = (status == None)\n if not already_running and not user.spawner.options_form:\n yield self.spawn_single_user(user)\n self.set_login_cookie(user)\n next_url = self.get_argument('next', default='')\n if not next_url.startswith('/'):\n next_url = ''\n next_url = next_url or self.hub.server.base_url\n self.redirect(next_url)\n self.log.info(\"User logged in: %s\", username)\n else:\n self.statsd.incr('login.failure')\n self.statsd.timing('login.authenticate.failure', auth_timer.ms)\n self.log.debug(\"Failed login for %s\", data.get('username', 'unknown user'))\n html = self._render(\n login_error='Invalid username or password',\n username=username,\n )\n self.finish(html)\n\n\n# /login renders the login page or the \"Login with...\" link,\n# so it should always be registered.\n# /logout clears cookies.\ndefault_handlers = [\n (r\"/login\", LoginHandler),\n (r\"/logout\", LogoutHandler),\n]\n", "path": "jupyterhub/handlers/login.py"}], "after_files": [{"content": "\"\"\"HTTP Handlers for the hub server\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom urllib.parse import urlparse\n\nfrom tornado.escape import url_escape\nfrom tornado import gen\nfrom tornado.httputil import url_concat\n\nfrom .base import BaseHandler\n\n\nclass LogoutHandler(BaseHandler):\n \"\"\"Log a user out by clearing their login cookie.\"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n self.log.info(\"User logged out: %s\", user.name)\n self.clear_login_cookie()\n self.statsd.incr('logout')\n if self.authenticator.auto_login:\n self.render('logout.html')\n else:\n self.redirect(self.settings['login_url'], permanent=False)\n\n\nclass LoginHandler(BaseHandler):\n \"\"\"Render the login page.\"\"\"\n\n def _render(self, login_error=None, username=None):\n return self.render_template('login.html',\n next=url_escape(self.get_argument('next', default='')),\n username=username,\n login_error=login_error,\n custom_html=self.authenticator.custom_html,\n login_url=self.settings['login_url'],\n 
authenticator_login_url=self.authenticator.login_url(self.hub.server.base_url),\n )\n\n def get(self):\n self.statsd.incr('login.request')\n next_url = self.get_argument('next', '')\n if (next_url + '/').startswith('%s://%s/' % (self.request.protocol, self.request.host)):\n # treat absolute URLs for our host as absolute paths:\n next_url = urlparse(next_url).path\n elif not next_url.startswith('/'):\n # disallow non-absolute next URLs (e.g. full URLs to other hosts)\n next_url = ''\n user = self.get_current_user()\n if user:\n if not next_url:\n if user.running:\n next_url = user.url\n else:\n next_url = self.hub.server.base_url\n # set new login cookie\n # because single-user cookie may have been cleared or incorrect\n self.set_login_cookie(self.get_current_user())\n self.redirect(next_url, permanent=False)\n else:\n if self.authenticator.auto_login:\n auto_login_url = self.authenticator.login_url(self.hub.server.base_url)\n if auto_login_url == self.settings['login_url']:\n self.authenticator.auto_login = False\n self.log.warning(\"Authenticator.auto_login cannot be used without a custom login_url\")\n else:\n if next_url:\n auto_login_url = url_concat(auto_login_url, {'next': next_url})\n self.redirect(auto_login_url)\n return\n username = self.get_argument('username', default='')\n self.finish(self._render(username=username))\n\n @gen.coroutine\n def post(self):\n # parse the arguments dict\n data = {}\n for arg in self.request.arguments:\n data[arg] = self.get_argument(arg, strip=False)\n\n auth_timer = self.statsd.timer('login.authenticate').start()\n username = yield self.authenticate(data)\n auth_timer.stop(send=False)\n\n if username:\n self.statsd.incr('login.success')\n self.statsd.timing('login.authenticate.success', auth_timer.ms)\n user = self.user_from_username(username)\n already_running = False\n if user.spawner:\n status = yield user.spawner.poll()\n already_running = (status == None)\n if not already_running and not user.spawner.options_form:\n yield self.spawn_single_user(user)\n self.set_login_cookie(user)\n next_url = self.get_argument('next', default='')\n if not next_url.startswith('/'):\n next_url = ''\n next_url = next_url or self.hub.server.base_url\n self.redirect(next_url)\n self.log.info(\"User logged in: %s\", username)\n else:\n self.statsd.incr('login.failure')\n self.statsd.timing('login.authenticate.failure', auth_timer.ms)\n self.log.debug(\"Failed login for %s\", data.get('username', 'unknown user'))\n html = self._render(\n login_error='Invalid username or password',\n username=username,\n )\n self.finish(html)\n\n\n# /login renders the login page or the \"Login with...\" link,\n# so it should always be registered.\n# /logout clears cookies.\ndefault_handlers = [\n (r\"/login\", LoginHandler),\n (r\"/logout\", LogoutHandler),\n]\n", "path": "jupyterhub/handlers/login.py"}]}
1,622
123
gh_patches_debug_40750
rasdani/github-patches
git_diff
streamlit__streamlit-188
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Blacklist common virtualenv folders by default We should blacklist the folders below from being watched by Streamlit. This would fix the issue where some people hit the inotify watch limit when running Streamlit from a weird working directory. * */.virtualenv * */.venv * */anaconda3 * */anaconda2 * */miniconda3 * */miniconda2 * Actually let's blacklist every hidden folder: `.*` See also the config option `server.folderWatchBlacklist`. For this fix, you can probably use the same mechanism this config option uses. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/streamlit/watcher/LocalSourcesWatcher.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2018-2019 Streamlit Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import os 17 import sys 18 import collections 19 20 try: 21 # Python 2 22 import imp as importlib 23 except ImportError: 24 # Python 3 25 import importlib 26 27 from streamlit import config 28 from streamlit import util 29 30 from streamlit.logger import get_logger 31 LOGGER = get_logger(__name__) 32 33 try: 34 # If the watchdog module is installed. 35 from streamlit.watcher.EventBasedFileWatcher import ( 36 EventBasedFileWatcher as FileWatcher, 37 ) 38 except ImportError: 39 # Fallback that doesn't use watchdog. 40 from streamlit.watcher.PollingFileWatcher import PollingFileWatcher as FileWatcher 41 42 if not config.get_option('global.disableWatchdogWarning'): 43 msg = '\n $ xcode-select --install' if util.is_darwin() else '' 44 45 LOGGER.warning(""" 46 For better performance, install the Watchdog module: 47 %s 48 $ pip install watchdog 49 50 """ % msg) 51 52 53 WatchedModule = collections.namedtuple("WatchedModule", ["watcher", "module_name"]) 54 55 56 class LocalSourcesWatcher(object): 57 def __init__(self, report, on_file_changed): 58 self._report = report 59 self._on_file_changed = on_file_changed 60 self._is_closed = False 61 62 self._folder_blacklist = config.get_option("server.folderWatchBlacklist") 63 64 # A dict of filepath -> WatchedModule. 65 self._watched_modules = {} 66 67 self._register_watcher( 68 self._report.script_path, 69 module_name=None, # Only the root script has None here. 
70 ) 71 72 def on_file_changed(self, filepath): 73 if filepath not in self._watched_modules: 74 LOGGER.error("Received event for non-watched file", filepath) 75 return 76 77 wm = self._watched_modules[filepath] 78 79 if wm.module_name is not None and wm.module_name in sys.modules: 80 del sys.modules[wm.module_name] 81 82 self._on_file_changed() 83 84 def close(self): 85 for wm in self._watched_modules.values(): 86 wm.watcher.close() 87 self._watched_modules = {} 88 self._is_closed = True 89 90 def _register_watcher(self, filepath, module_name): 91 wm = WatchedModule( 92 watcher=FileWatcher(filepath, self.on_file_changed), module_name=module_name 93 ) 94 self._watched_modules[filepath] = wm 95 96 def _deregister_watcher(self, filepath): 97 if filepath not in self._watched_modules: 98 return 99 100 if filepath == self._report.script_path: 101 return 102 103 wm = self._watched_modules[filepath] 104 wm.watcher.close() 105 del self._watched_modules[filepath] 106 107 def update_watched_modules(self): 108 if self._is_closed: 109 return 110 111 local_filepaths = [] 112 113 # Clone modules dict here because we may alter the original dict inside 114 # the loop. 115 modules = dict(sys.modules) 116 117 for name, module in modules.items(): 118 try: 119 spec = getattr(module, "__spec__", None) 120 121 if spec is None: 122 filepath = getattr(module, "__file__", None) 123 if filepath is None: 124 # Some modules have neither a spec nor a file. But we 125 # can ignore those since they're not the user-created 126 # modules we want to watch anyway. 127 continue 128 else: 129 filepath = spec.origin 130 131 if filepath is None: 132 # Built-in modules (and other stuff) don't have origins. 133 continue 134 135 filepath = os.path.abspath(filepath) 136 137 if not os.path.isfile(filepath): 138 # There are some modules that have a .origin, but don't 139 # point to real files. For example, there's a module where 140 # .origin is 'built-in'. 141 continue 142 143 folder_is_blacklisted = any( 144 _file_is_in_folder(filepath, blacklisted_folder) 145 for blacklisted_folder in self._folder_blacklist 146 ) 147 148 if folder_is_blacklisted: 149 continue 150 151 file_is_new = filepath not in self._watched_modules 152 file_is_local = _file_is_in_folder(filepath, self._report.script_folder) 153 154 local_filepaths.append(filepath) 155 156 if file_is_local and file_is_new: 157 self._register_watcher(filepath, name) 158 159 except Exception: 160 # In case there's a problem introspecting some specific module, 161 # let's not stop the entire loop from running. For example, 162 # the __spec__ field in some modules (like IPython) is actually 163 # a dynamic property, which can crash if the underlying 164 # module's code has a bug (as discovered by one of our users). 165 continue 166 167 # Clone dict here because we may alter the original dict inside the 168 # loop. 169 watched_modules = dict(self._watched_modules) 170 171 # Remove no-longer-depended-on files from self._watched_modules 172 # Will this ever happen? 173 for filepath in watched_modules: 174 if filepath not in local_filepaths: 175 self._deregister_watcher(filepath) 176 177 178 def _file_is_in_folder(filepath, folderpath): 179 # Assumes filepath is an absolute path, as a teeny tiny optimization. 180 folderpath = os.path.abspath(folderpath) + "/" 181 return filepath.startswith(folderpath) 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/streamlit/watcher/LocalSourcesWatcher.py b/lib/streamlit/watcher/LocalSourcesWatcher.py --- a/lib/streamlit/watcher/LocalSourcesWatcher.py +++ b/lib/streamlit/watcher/LocalSourcesWatcher.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import fnmatch import os import sys import collections @@ -28,6 +29,7 @@ from streamlit import util from streamlit.logger import get_logger + LOGGER = get_logger(__name__) try: @@ -39,15 +41,24 @@ # Fallback that doesn't use watchdog. from streamlit.watcher.PollingFileWatcher import PollingFileWatcher as FileWatcher - if not config.get_option('global.disableWatchdogWarning'): - msg = '\n $ xcode-select --install' if util.is_darwin() else '' + if not config.get_option("global.disableWatchdogWarning"): + msg = "\n $ xcode-select --install" if util.is_darwin() else "" - LOGGER.warning(""" + LOGGER.warning( + """ For better performance, install the Watchdog module: %s $ pip install watchdog - """ % msg) + """ + % msg + ) + + +# Streamlit never watches files in the folders below. +DEFAULT_FOLDER_BLACKLIST = [ + "**/.*", "**/anaconda2", "**/anaconda3", "**/miniconda2", "**/miniconda3" +] WatchedModule = collections.namedtuple("WatchedModule", ["watcher", "module_name"]) @@ -61,6 +72,9 @@ self._folder_blacklist = config.get_option("server.folderWatchBlacklist") + # Blacklist some additional folders, using glob syntax. + self._folder_blacklist.extend(DEFAULT_FOLDER_BLACKLIST) + # A dict of filepath -> WatchedModule. self._watched_modules = {} @@ -140,12 +154,12 @@ # .origin is 'built-in'. continue - folder_is_blacklisted = any( + is_in_blacklisted_folder = any( _file_is_in_folder(filepath, blacklisted_folder) for blacklisted_folder in self._folder_blacklist ) - if folder_is_blacklisted: + if is_in_blacklisted_folder: continue file_is_new = filepath not in self._watched_modules @@ -175,7 +189,10 @@ self._deregister_watcher(filepath) -def _file_is_in_folder(filepath, folderpath): - # Assumes filepath is an absolute path, as a teeny tiny optimization. - folderpath = os.path.abspath(folderpath) + "/" - return filepath.startswith(folderpath) +def _file_is_in_folder(filepath, folderpath_glob): + # Strip trailing slash if it exists + if folderpath_glob.endswith("/"): + folderpath_glob = folderpath_glob[:-1] + + file_dir = os.path.dirname(filepath) + return fnmatch.fnmatch(file_dir, folderpath_glob)
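A note on the golden diff above: `_file_is_in_folder` changes from an absolute-path prefix check to `fnmatch` glob matching against the file's directory, which lets the new `DEFAULT_FOLDER_BLACKLIST` entries such as `**/.*` and `**/miniconda3` match wherever those folders sit. The sketch below reproduces the patched matching logic with illustrative paths; none of them come from a real machine.

```python
# Stand-alone sketch of the glob-based blacklist check from the diff.
import fnmatch
import os

DEFAULT_FOLDER_BLACKLIST = [
    "**/.*", "**/anaconda2", "**/anaconda3", "**/miniconda2", "**/miniconda3"
]

def file_is_in_folder(filepath, folderpath_glob):
    if folderpath_glob.endswith("/"):
        folderpath_glob = folderpath_glob[:-1]  # strip trailing slash, as in the patch
    return fnmatch.fnmatch(os.path.dirname(filepath), folderpath_glob)

for path in [
    "/home/user/.venv/lib/site.py",      # hidden folder in the path -> blacklisted
    "/opt/miniconda3/sitecustomize.py",  # direct child of miniconda3 -> blacklisted
    "/home/user/project/app.py",         # ordinary project file -> still watched
]:
    hits = [g for g in DEFAULT_FOLDER_BLACKLIST if file_is_in_folder(path, g)]
    print(path, "->", "blacklisted" if hits else "watched")
```

Note that because `fnmatch` compares the whole dirname, `**/miniconda3` only matches files whose parent directory ends in `miniconda3`; files nested deeper inside such an environment are caught by the `**/.*` rule only when a hidden folder appears somewhere above them.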
{"golden_diff": "diff --git a/lib/streamlit/watcher/LocalSourcesWatcher.py b/lib/streamlit/watcher/LocalSourcesWatcher.py\n--- a/lib/streamlit/watcher/LocalSourcesWatcher.py\n+++ b/lib/streamlit/watcher/LocalSourcesWatcher.py\n@@ -13,6 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import fnmatch\n import os\n import sys\n import collections\n@@ -28,6 +29,7 @@\n from streamlit import util\n \n from streamlit.logger import get_logger\n+\n LOGGER = get_logger(__name__)\n \n try:\n@@ -39,15 +41,24 @@\n # Fallback that doesn't use watchdog.\n from streamlit.watcher.PollingFileWatcher import PollingFileWatcher as FileWatcher\n \n- if not config.get_option('global.disableWatchdogWarning'):\n- msg = '\\n $ xcode-select --install' if util.is_darwin() else ''\n+ if not config.get_option(\"global.disableWatchdogWarning\"):\n+ msg = \"\\n $ xcode-select --install\" if util.is_darwin() else \"\"\n \n- LOGGER.warning(\"\"\"\n+ LOGGER.warning(\n+ \"\"\"\n For better performance, install the Watchdog module:\n %s\n $ pip install watchdog\n \n- \"\"\" % msg)\n+ \"\"\"\n+ % msg\n+ )\n+\n+\n+# Streamlit never watches files in the folders below.\n+DEFAULT_FOLDER_BLACKLIST = [\n+ \"**/.*\", \"**/anaconda2\", \"**/anaconda3\", \"**/miniconda2\", \"**/miniconda3\"\n+]\n \n \n WatchedModule = collections.namedtuple(\"WatchedModule\", [\"watcher\", \"module_name\"])\n@@ -61,6 +72,9 @@\n \n self._folder_blacklist = config.get_option(\"server.folderWatchBlacklist\")\n \n+ # Blacklist some additional folders, using glob syntax.\n+ self._folder_blacklist.extend(DEFAULT_FOLDER_BLACKLIST)\n+\n # A dict of filepath -> WatchedModule.\n self._watched_modules = {}\n \n@@ -140,12 +154,12 @@\n # .origin is 'built-in'.\n continue\n \n- folder_is_blacklisted = any(\n+ is_in_blacklisted_folder = any(\n _file_is_in_folder(filepath, blacklisted_folder)\n for blacklisted_folder in self._folder_blacklist\n )\n \n- if folder_is_blacklisted:\n+ if is_in_blacklisted_folder:\n continue\n \n file_is_new = filepath not in self._watched_modules\n@@ -175,7 +189,10 @@\n self._deregister_watcher(filepath)\n \n \n-def _file_is_in_folder(filepath, folderpath):\n- # Assumes filepath is an absolute path, as a teeny tiny optimization.\n- folderpath = os.path.abspath(folderpath) + \"/\"\n- return filepath.startswith(folderpath)\n+def _file_is_in_folder(filepath, folderpath_glob):\n+ # Strip trailing slash if it exists\n+ if folderpath_glob.endswith(\"/\"):\n+ folderpath_glob = folderpath_glob[:-1]\n+\n+ file_dir = os.path.dirname(filepath)\n+ return fnmatch.fnmatch(file_dir, folderpath_glob)\n", "issue": "Blacklist common virtualenv folders by default\nWe should blacklist the folders below from being watched by Streamlit. This would fix the issue where some people hit the inotify watch limit when running Streamlit from a weird working directory.\r\n\r\n* */.virtualenv\r\n* */.venv\r\n* */anaconda3\r\n* */anaconda2\r\n* */miniconda3\r\n* */miniconda2\r\n* Actually let's blacklist every hidden folder: `.*`\r\n\r\n\r\nSee also the config option `server.folderWatchBlacklist`. 
For this fix, you can probably use the same mechanism this config option uses.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport collections\n\ntry:\n # Python 2\n import imp as importlib\nexcept ImportError:\n # Python 3\n import importlib\n\nfrom streamlit import config\nfrom streamlit import util\n\nfrom streamlit.logger import get_logger\nLOGGER = get_logger(__name__)\n\ntry:\n # If the watchdog module is installed.\n from streamlit.watcher.EventBasedFileWatcher import (\n EventBasedFileWatcher as FileWatcher,\n )\nexcept ImportError:\n # Fallback that doesn't use watchdog.\n from streamlit.watcher.PollingFileWatcher import PollingFileWatcher as FileWatcher\n\n if not config.get_option('global.disableWatchdogWarning'):\n msg = '\\n $ xcode-select --install' if util.is_darwin() else ''\n\n LOGGER.warning(\"\"\"\n For better performance, install the Watchdog module:\n %s\n $ pip install watchdog\n\n \"\"\" % msg)\n\n\nWatchedModule = collections.namedtuple(\"WatchedModule\", [\"watcher\", \"module_name\"])\n\n\nclass LocalSourcesWatcher(object):\n def __init__(self, report, on_file_changed):\n self._report = report\n self._on_file_changed = on_file_changed\n self._is_closed = False\n\n self._folder_blacklist = config.get_option(\"server.folderWatchBlacklist\")\n\n # A dict of filepath -> WatchedModule.\n self._watched_modules = {}\n\n self._register_watcher(\n self._report.script_path,\n module_name=None, # Only the root script has None here.\n )\n\n def on_file_changed(self, filepath):\n if filepath not in self._watched_modules:\n LOGGER.error(\"Received event for non-watched file\", filepath)\n return\n\n wm = self._watched_modules[filepath]\n\n if wm.module_name is not None and wm.module_name in sys.modules:\n del sys.modules[wm.module_name]\n\n self._on_file_changed()\n\n def close(self):\n for wm in self._watched_modules.values():\n wm.watcher.close()\n self._watched_modules = {}\n self._is_closed = True\n\n def _register_watcher(self, filepath, module_name):\n wm = WatchedModule(\n watcher=FileWatcher(filepath, self.on_file_changed), module_name=module_name\n )\n self._watched_modules[filepath] = wm\n\n def _deregister_watcher(self, filepath):\n if filepath not in self._watched_modules:\n return\n\n if filepath == self._report.script_path:\n return\n\n wm = self._watched_modules[filepath]\n wm.watcher.close()\n del self._watched_modules[filepath]\n\n def update_watched_modules(self):\n if self._is_closed:\n return\n\n local_filepaths = []\n\n # Clone modules dict here because we may alter the original dict inside\n # the loop.\n modules = dict(sys.modules)\n\n for name, module in modules.items():\n try:\n spec = getattr(module, \"__spec__\", None)\n\n if spec is None:\n filepath = getattr(module, \"__file__\", None)\n if filepath is None:\n # Some modules have neither a spec nor a file. 
But we\n # can ignore those since they're not the user-created\n # modules we want to watch anyway.\n continue\n else:\n filepath = spec.origin\n\n if filepath is None:\n # Built-in modules (and other stuff) don't have origins.\n continue\n\n filepath = os.path.abspath(filepath)\n\n if not os.path.isfile(filepath):\n # There are some modules that have a .origin, but don't\n # point to real files. For example, there's a module where\n # .origin is 'built-in'.\n continue\n\n folder_is_blacklisted = any(\n _file_is_in_folder(filepath, blacklisted_folder)\n for blacklisted_folder in self._folder_blacklist\n )\n\n if folder_is_blacklisted:\n continue\n\n file_is_new = filepath not in self._watched_modules\n file_is_local = _file_is_in_folder(filepath, self._report.script_folder)\n\n local_filepaths.append(filepath)\n\n if file_is_local and file_is_new:\n self._register_watcher(filepath, name)\n\n except Exception:\n # In case there's a problem introspecting some specific module,\n # let's not stop the entire loop from running. For example,\n # the __spec__ field in some modules (like IPython) is actually\n # a dynamic property, which can crash if the underlying\n # module's code has a bug (as discovered by one of our users).\n continue\n\n # Clone dict here because we may alter the original dict inside the\n # loop.\n watched_modules = dict(self._watched_modules)\n\n # Remove no-longer-depended-on files from self._watched_modules\n # Will this ever happen?\n for filepath in watched_modules:\n if filepath not in local_filepaths:\n self._deregister_watcher(filepath)\n\n\ndef _file_is_in_folder(filepath, folderpath):\n # Assumes filepath is an absolute path, as a teeny tiny optimization.\n folderpath = os.path.abspath(folderpath) + \"/\"\n return filepath.startswith(folderpath)\n", "path": "lib/streamlit/watcher/LocalSourcesWatcher.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fnmatch\nimport os\nimport sys\nimport collections\n\ntry:\n # Python 2\n import imp as importlib\nexcept ImportError:\n # Python 3\n import importlib\n\nfrom streamlit import config\nfrom streamlit import util\n\nfrom streamlit.logger import get_logger\n\nLOGGER = get_logger(__name__)\n\ntry:\n # If the watchdog module is installed.\n from streamlit.watcher.EventBasedFileWatcher import (\n EventBasedFileWatcher as FileWatcher,\n )\nexcept ImportError:\n # Fallback that doesn't use watchdog.\n from streamlit.watcher.PollingFileWatcher import PollingFileWatcher as FileWatcher\n\n if not config.get_option(\"global.disableWatchdogWarning\"):\n msg = \"\\n $ xcode-select --install\" if util.is_darwin() else \"\"\n\n LOGGER.warning(\n \"\"\"\n For better performance, install the Watchdog module:\n %s\n $ pip install watchdog\n\n \"\"\"\n % msg\n )\n\n\n# Streamlit never watches files in the folders below.\nDEFAULT_FOLDER_BLACKLIST = [\n \"**/.*\", \"**/anaconda2\", \"**/anaconda3\", \"**/miniconda2\", 
\"**/miniconda3\"\n]\n\n\nWatchedModule = collections.namedtuple(\"WatchedModule\", [\"watcher\", \"module_name\"])\n\n\nclass LocalSourcesWatcher(object):\n def __init__(self, report, on_file_changed):\n self._report = report\n self._on_file_changed = on_file_changed\n self._is_closed = False\n\n self._folder_blacklist = config.get_option(\"server.folderWatchBlacklist\")\n\n # Blacklist some additional folders, using glob syntax.\n self._folder_blacklist.extend(DEFAULT_FOLDER_BLACKLIST)\n\n # A dict of filepath -> WatchedModule.\n self._watched_modules = {}\n\n self._register_watcher(\n self._report.script_path,\n module_name=None, # Only the root script has None here.\n )\n\n def on_file_changed(self, filepath):\n if filepath not in self._watched_modules:\n LOGGER.error(\"Received event for non-watched file\", filepath)\n return\n\n wm = self._watched_modules[filepath]\n\n if wm.module_name is not None and wm.module_name in sys.modules:\n del sys.modules[wm.module_name]\n\n self._on_file_changed()\n\n def close(self):\n for wm in self._watched_modules.values():\n wm.watcher.close()\n self._watched_modules = {}\n self._is_closed = True\n\n def _register_watcher(self, filepath, module_name):\n wm = WatchedModule(\n watcher=FileWatcher(filepath, self.on_file_changed), module_name=module_name\n )\n self._watched_modules[filepath] = wm\n\n def _deregister_watcher(self, filepath):\n if filepath not in self._watched_modules:\n return\n\n if filepath == self._report.script_path:\n return\n\n wm = self._watched_modules[filepath]\n wm.watcher.close()\n del self._watched_modules[filepath]\n\n def update_watched_modules(self):\n if self._is_closed:\n return\n\n local_filepaths = []\n\n # Clone modules dict here because we may alter the original dict inside\n # the loop.\n modules = dict(sys.modules)\n\n for name, module in modules.items():\n try:\n spec = getattr(module, \"__spec__\", None)\n\n if spec is None:\n filepath = getattr(module, \"__file__\", None)\n if filepath is None:\n # Some modules have neither a spec nor a file. But we\n # can ignore those since they're not the user-created\n # modules we want to watch anyway.\n continue\n else:\n filepath = spec.origin\n\n if filepath is None:\n # Built-in modules (and other stuff) don't have origins.\n continue\n\n filepath = os.path.abspath(filepath)\n\n if not os.path.isfile(filepath):\n # There are some modules that have a .origin, but don't\n # point to real files. For example, there's a module where\n # .origin is 'built-in'.\n continue\n\n is_in_blacklisted_folder = any(\n _file_is_in_folder(filepath, blacklisted_folder)\n for blacklisted_folder in self._folder_blacklist\n )\n\n if is_in_blacklisted_folder:\n continue\n\n file_is_new = filepath not in self._watched_modules\n file_is_local = _file_is_in_folder(filepath, self._report.script_folder)\n\n local_filepaths.append(filepath)\n\n if file_is_local and file_is_new:\n self._register_watcher(filepath, name)\n\n except Exception:\n # In case there's a problem introspecting some specific module,\n # let's not stop the entire loop from running. 
For example,\n # the __spec__ field in some modules (like IPython) is actually\n # a dynamic property, which can crash if the underlying\n # module's code has a bug (as discovered by one of our users).\n continue\n\n # Clone dict here because we may alter the original dict inside the\n # loop.\n watched_modules = dict(self._watched_modules)\n\n # Remove no-longer-depended-on files from self._watched_modules\n # Will this ever happen?\n for filepath in watched_modules:\n if filepath not in local_filepaths:\n self._deregister_watcher(filepath)\n\n\ndef _file_is_in_folder(filepath, folderpath_glob):\n # Strip trailing slash if it exists\n if folderpath_glob.endswith(\"/\"):\n folderpath_glob = folderpath_glob[:-1]\n\n file_dir = os.path.dirname(filepath)\n return fnmatch.fnmatch(file_dir, folderpath_glob)\n", "path": "lib/streamlit/watcher/LocalSourcesWatcher.py"}]}
num_tokens: 2,125
num_tokens_diff: 709

problem_id: gh_patches_debug_9961
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyjanitor-devs__pyjanitor-436
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Copyright notice is hardcoded to 2018 # Brief Description of Fix <!-- Please describe the fix in terms of a "before" and "after". In other words, what's not so good about the current docs page, and what you would like to see it become. copyright notice is hard-coded to 2018. eg: **©2018, Eric J. Ma. | Powered by Sphinx 2.1.2 & Alabaster 0.7.12 | Page source** I propose modifying /pyjanitor/docs/conf.py to calculate and display current year. Example starter wording is provided. --> Currently, the docs... I would like to propose a change, such that now the docs... # Relevant Context <!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available to get you started. --> - [Link to documentation page](http://pyjanitor.readthedocs.io) - [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/AUTHORS.rst) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 """Sphinx configuration.""" 2 3 # -*- coding: utf-8 -*- 4 # 5 # Configuration file for the Sphinx documentation builder. 6 # 7 # This file does only contain a selection of the most common options. For a 8 # full list see the documentation: 9 # http://www.sphinx-doc.org/en/stable/config 10 11 # -- Path setup -------------------------------------------------------------- 12 13 # If extensions (or modules to document with autodoc) are in another directory, 14 # add these directories to sys.path here. If the directory is relative to the 15 # documentation root, use os.path.abspath to make it absolute, like shown here. 16 # 17 import os 18 import sys 19 from pathlib import Path 20 21 sys.path.insert(0, os.path.abspath(".")) 22 sys.path.insert(0, os.path.abspath("../examples")) 23 24 # Make a symlink in our sphinx source directory to the top-level 25 # examples/notebooks directory so we can include notebooks in the doc 26 notebooks = Path("./notebooks") 27 if not notebooks.exists(): 28 print("Making symlink to ../examples/notebooks") 29 notebooks.symlink_to("../examples/notebooks") 30 31 32 # -- Project information ----------------------------------------------------- 33 34 project = "pyjanitor" 35 copyright = "2018, Eric J. Ma" 36 author = "Eric J. Ma" 37 38 # The short X.Y version 39 version = "0.1.0" 40 # The full version, including alpha/beta/rc tags 41 release = "" 42 43 44 # -- General configuration --------------------------------------------------- 45 46 # If your documentation needs a minimal Sphinx version, state it here. 47 # 48 # needs_sphinx = '1.0' 49 50 # Add any Sphinx extension module names here, as strings. They can be 51 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 52 # ones. 53 extensions = [ 54 "sphinx.ext.autodoc", 55 "sphinx.ext.doctest", 56 "sphinx.ext.intersphinx", 57 "sphinx.ext.todo", 58 "sphinx.ext.coverage", 59 "sphinx.ext.viewcode", 60 "sphinx.ext.githubpages", 61 "sphinxcontrib.fulltoc", 62 "nbsphinx", 63 "sphinx.ext.autosummary", 64 ] 65 66 # Add any paths that contain templates here, relative to this directory. 67 templates_path = ["_templates"] 68 69 # The suffix(es) of source filenames. 70 # You can specify multiple suffix as a list of string: 71 # 72 # source_suffix = ['.rst', '.md'] 73 source_suffix = [".md", ".rst", ".ipynb"] 74 75 # The master toctree document. 
76 master_doc = "index" 77 78 # The language for content autogenerated by Sphinx. Refer to documentation 79 # for a list of supported languages. 80 # 81 # This is also used if you do content translation via gettext catalogs. 82 # Usually you set "language" from the command line for these cases. 83 language = None 84 85 # List of patterns, relative to source directory, that match files and 86 # directories to ignore when looking for source files. 87 # This pattern also affects html_static_path and html_extra_path . 88 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] 89 90 # The name of the Pygments (syntax highlighting) style to use. 91 pygments_style = "sphinx" 92 93 94 # -- Options for HTML output ------------------------------------------------- 95 96 # The theme to use for HTML and HTML Help pages. See the documentation for 97 # a list of builtin themes. 98 # 99 html_theme = "alabaster" 100 101 # Theme options are theme-specific and customize the look and feel of a theme 102 # further. For a list of options available for each theme, see the 103 # documentation. 104 # 105 html_theme_options = {"logo": "logo_title.svg"} 106 107 # Add any paths that contain custom static files (such as style sheets) here, 108 # relative to this directory. They are copied after the builtin static files, 109 # so a file named "default.css" will overwrite the builtin "default.css". 110 html_static_path = ["_static"] 111 112 # Custom sidebar templates, must be a dictionary that maps document names 113 # to template names. 114 # 115 # The default sidebars (for documents that don't match any pattern) are 116 # defined by theme itself. Builtin themes are using these templates by 117 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 118 # 'searchbox.html']``. 119 # 120 html_sidebars = { 121 "**": ["about.html", "navigation.html", "relations.html", "searchbox.html"] 122 } 123 124 125 # -- Options for HTMLHelp output --------------------------------------------- 126 127 # Output file base name for HTML help builder. 128 htmlhelp_basename = "pyjanitordoc" 129 130 131 # -- Options for LaTeX output ------------------------------------------------ 132 133 latex_elements = { 134 # The paper size ('letterpaper' or 'a4paper'). 135 # 136 # 'papersize': 'letterpaper', 137 # The font size ('10pt', '11pt' or '12pt'). 138 # 139 # 'pointsize': '10pt', 140 # Additional stuff for the LaTeX preamble. 141 # 142 # 'preamble': '', 143 # Latex figure (float) alignment 144 # 145 # 'figure_align': 'htbp', 146 } 147 148 # Grouping the document tree into LaTeX files. List of tuples 149 # (source start file, target name, title, 150 # author, documentclass [howto, manual, or own class]). 151 latex_documents = [ 152 ( 153 master_doc, 154 "pyjanitor.tex", 155 "pyjanitor Documentation", 156 "Eric J. Ma", 157 "manual", 158 ) 159 ] 160 161 162 # -- Options for manual page output ------------------------------------------ 163 164 # One entry per manual page. List of tuples 165 # (source start file, name, description, authors, manual section). 166 man_pages = [(master_doc, "pyjanitor", "pyjanitor Documentation", [author], 1)] 167 168 169 # -- Options for Texinfo output ---------------------------------------------- 170 171 # Grouping the document tree into Texinfo files. 
List of tuples 172 # (source start file, target name, title, author, 173 # dir menu entry, description, category) 174 texinfo_documents = [ 175 ( 176 master_doc, 177 "pyjanitor", 178 "pyjanitor Documentation", 179 author, 180 "pyjanitor", 181 "One line description of project.", 182 "Miscellaneous", 183 ) 184 ] 185 186 187 # -- Extension configuration ------------------------------------------------- 188 189 # -- Options for intersphinx extension --------------------------------------- 190 191 # Example configuration for intersphinx: refer to the Python standard library. 192 intersphinx_mapping = { 193 "https://docs.python.org/": None, 194 "https://pandas.pydata.org/pandas-docs/stable": None, 195 } 196 197 # -- Options for todo extension ---------------------------------------------- 198 199 # If true, `todo` and `todoList` produce output, else they produce nothing. 200 todo_include_todos = True 201 202 203 # -- Other options ----------------------------------------------------------- 204 205 autosummary_generate = True # Make _autosummary files and include them 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -16,6 +16,7 @@ # import os import sys +import datetime from pathlib import Path sys.path.insert(0, os.path.abspath(".")) @@ -32,7 +33,11 @@ # -- Project information ----------------------------------------------------- project = "pyjanitor" -copyright = "2018, Eric J. Ma" + + +now = datetime.datetime.now() +CurrentYear = str(now.year) +copyright = CurrentYear+", PyJanitor devs" author = "Eric J. Ma" # The short X.Y version
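A quick sketch, runnable on its own, of the dynamic copyright string the patch computes (the printed year is simply whatever year the docs are built in):

```python
# Sketch of the dynamic copyright computation from the patch above;
# the value depends on when it is run.
import datetime

now = datetime.datetime.now()
CurrentYear = str(now.year)
copyright = CurrentYear + ", PyJanitor devs"
print(copyright)  # e.g. "2019, PyJanitor devs" for a 2019 build
```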
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -16,6 +16,7 @@\n #\n import os\n import sys\n+import datetime\n from pathlib import Path\n \n sys.path.insert(0, os.path.abspath(\".\"))\n@@ -32,7 +33,11 @@\n # -- Project information -----------------------------------------------------\n \n project = \"pyjanitor\"\n-copyright = \"2018, Eric J. Ma\"\n+\n+\n+now = datetime.datetime.now()\n+CurrentYear = str(now.year)\n+copyright = CurrentYear+\", PyJanitor devs\"\n author = \"Eric J. Ma\"\n \n # The short X.Y version\n", "issue": "Copyright notice is hardcoded to 2018\n# Brief Description of Fix\r\n\r\n<!-- Please describe the fix in terms of a \"before\" and \"after\". In other words, what's not so good about the current docs\r\npage, and what you would like to see it become. \r\n\r\ncopyright notice is hard-coded to 2018. eg: \r\n **\u00a92018, Eric J. Ma. | Powered by Sphinx 2.1.2 & Alabaster 0.7.12 | Page source**\r\nI propose modifying /pyjanitor/docs/conf.py to calculate and display current year.\r\n\r\nExample starter wording is provided. -->\r\n\r\nCurrently, the docs...\r\n\r\nI would like to propose a change, such that now the docs...\r\n\r\n# Relevant Context\r\n\r\n<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available\r\nto get you started. -->\r\n\r\n- [Link to documentation page](http://pyjanitor.readthedocs.io)\r\n- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/AUTHORS.rst)\r\n\n", "before_files": [{"content": "\"\"\"Sphinx configuration.\"\"\"\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\ncopyright = \"2018, Eric J. Ma\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Other options -----------------------------------------------------------\n\nautosummary_generate = True # Make _autosummary files and include them\n", "path": "docs/conf.py"}], "after_files": [{"content": "\"\"\"Sphinx configuration.\"\"\"\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport datetime\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\n\n\nnow = datetime.datetime.now()\nCurrentYear = str(now.year)\ncopyright = CurrentYear+\", PyJanitor devs\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Other options -----------------------------------------------------------\n\nautosummary_generate = True # Make _autosummary files and include them\n", "path": "docs/conf.py"}]}
num_tokens: 2,466
num_tokens_diff: 148

problem_id: gh_patches_debug_66426
source: rasdani/github-patches
task_type: git_diff
in_source_id: Textualize__textual-3531
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing link styles in `Toast` notifcations This follows on from discussion #3526 about adding a clickable link in a notification. The `Toast` widget doesn't inherit the styling from the base class, so the "default" links styling defined in Widget is lost. You can obviously just add it back in like I've done here, but it might be nicer if you didn't have to do that. ```python # toast_with_link.py import webbrowser from textual.app import App class ToastLinkApp(App): # Uncomment the CSS_PATH to see the link! # CSS_PATH = "toast_with_link.css" def on_mount(self) -> None: self.notify( "Don't forget to " "[@click=link('https://github.com/Textualize/textual')]star[/] " "the repository!", timeout=10, ) def action_link(self, href: str) -> None: webbrowser.open(href) if __name__ == "__main__": app = ToastLinkApp() app.run() ``` ```css /* toast_with_link.css */ Toast { link-background: ; link-color: $text; link-style: underline; link-hover-background: $accent; link-hover-color: $text; link-hover-style: bold not underline; } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/textual/widgets/_toast.py` Content: ``` 1 """Widgets for showing notification messages in toasts.""" 2 3 from __future__ import annotations 4 5 from typing import ClassVar 6 7 from rich.console import RenderableType 8 from rich.text import Text 9 10 from .. import on 11 from ..containers import Container 12 from ..css.query import NoMatches 13 from ..events import Click, Mount 14 from ..notifications import Notification, Notifications 15 from ._static import Static 16 17 18 class ToastHolder(Container, inherit_css=False): 19 """Container that holds a single toast. 20 21 Used to control the alignment of each of the toasts in the main toast 22 container. 23 """ 24 25 DEFAULT_CSS = """ 26 ToastHolder { 27 align-horizontal: right; 28 width: 1fr; 29 height: auto; 30 visibility: hidden; 31 } 32 """ 33 34 35 class Toast(Static, inherit_css=False): 36 """A widget for displaying short-lived notifications.""" 37 38 DEFAULT_CSS = """ 39 Toast { 40 width: 60; 41 max-width: 50%; 42 height: auto; 43 visibility: visible; 44 margin-top: 1; 45 padding: 1 1; 46 background: $panel; 47 tint: white 5%; 48 } 49 50 .toast--title { 51 text-style: bold; 52 } 53 54 Toast { 55 border-right: wide $background; 56 } 57 58 Toast.-information { 59 border-left: wide $success; 60 } 61 62 Toast.-information .toast--title { 63 color: $success-darken-1; 64 } 65 66 Toast.-warning { 67 border-left: wide $warning; 68 } 69 70 Toast.-warning .toast--title { 71 color: $warning-darken-1; 72 } 73 74 Toast.-error { 75 border-left: wide $error; 76 } 77 78 Toast.-error .toast--title { 79 color: $error-darken-1; 80 } 81 """ 82 83 COMPONENT_CLASSES: ClassVar[set[str]] = {"toast--title"} 84 """ 85 | Class | Description | 86 | :- | :- | 87 | `toast--title` | Targets the title of the toast. | 88 """ 89 90 def __init__(self, notification: Notification) -> None: 91 """Initialise the toast. 92 93 Args: 94 notification: The notification to show in the toast. 95 """ 96 super().__init__(classes=f"-{notification.severity}") 97 self._notification = notification 98 self._timeout = notification.time_left 99 100 def render(self) -> RenderableType: 101 """Render the toast's content. 102 103 Returns: 104 A Rich renderable for the title and content of the Toast. 
105 """ 106 notification = self._notification 107 if notification.title: 108 header_style = self.get_component_rich_style("toast--title") 109 notification_text = Text.assemble( 110 (notification.title, header_style), 111 "\n", 112 Text.from_markup(notification.message), 113 ) 114 else: 115 notification_text = Text.assemble( 116 Text.from_markup(notification.message), 117 ) 118 return notification_text 119 120 def _on_mount(self, _: Mount) -> None: 121 """Set the time running once the toast is mounted.""" 122 self.set_timer(self._timeout, self._expire) 123 124 @on(Click) 125 def _expire(self) -> None: 126 """Remove the toast once the timer has expired.""" 127 # Before we removed ourself, we also call on the app to forget about 128 # the notification that caused us to exist. Note that we tell the 129 # app to not bother refreshing the display on our account, we're 130 # about to handle that anyway. 131 self.app._unnotify(self._notification, refresh=False) 132 # Note that we attempt to remove our parent, because we're wrapped 133 # inside an alignment container. The testing that we are is as much 134 # to keep type checkers happy as anything else. 135 (self.parent if isinstance(self.parent, ToastHolder) else self).remove() 136 137 138 class ToastRack(Container, inherit_css=False): 139 """A container for holding toasts.""" 140 141 DEFAULT_CSS = """ 142 ToastRack { 143 layer: _toastrack; 144 width: 1fr; 145 height: auto; 146 dock: top; 147 align: right bottom; 148 visibility: hidden; 149 layout: vertical; 150 overflow-y: scroll; 151 margin-bottom: 1; 152 margin-right: 1; 153 } 154 """ 155 156 @staticmethod 157 def _toast_id(notification: Notification) -> str: 158 """Create a Textual-DOM-internal ID for the given notification. 159 160 Args: 161 notification: The notification to create the ID for. 162 163 Returns: 164 An ID for the notification that can be used within the DOM. 165 """ 166 return f"--textual-toast-{notification.identity}" 167 168 def show(self, notifications: Notifications) -> None: 169 """Show the notifications as toasts. 170 171 Args: 172 notifications: The notifications to show. 173 """ 174 175 # Look for any stale toasts and remove them. 176 for toast in self.query(Toast): 177 if toast._notification not in notifications: 178 toast.remove() 179 180 # Gather up all the notifications that we don't have toasts for yet. 181 new_toasts: list[Notification] = [] 182 for notification in notifications: 183 try: 184 # See if there's already a toast for that notification. 185 _ = self.get_child_by_id(self._toast_id(notification)) 186 except NoMatches: 187 if not notification.has_expired: 188 new_toasts.append(notification) 189 190 # If we got any... 191 if new_toasts: 192 # ...mount them. 193 self.mount_all( 194 ToastHolder(Toast(toast), id=self._toast_id(toast)) 195 for toast in new_toasts 196 ) 197 self.call_later(self.scroll_end, animate=False, force=True) 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/textual/widgets/_toast.py b/src/textual/widgets/_toast.py --- a/src/textual/widgets/_toast.py +++ b/src/textual/widgets/_toast.py @@ -45,6 +45,12 @@ padding: 1 1; background: $panel; tint: white 5%; + link-background:; + link-color: $text; + link-style: underline; + link-hover-background: $accent; + link-hover-color: $text; + link-hover-style: bold not underline; } .toast--title {
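With the `link-*` rules folded into `Toast.DEFAULT_CSS`, the reproduction script from the issue should no longer need its extra stylesheet; a trimmed sketch with the `CSS_PATH` workaround removed:

```python
# Trimmed version of the issue's reproduction script; assuming the patch
# above is applied, no CSS_PATH override is needed for the link styling.
import webbrowser

from textual.app import App


class ToastLinkApp(App):
    def on_mount(self) -> None:
        self.notify(
            "Don't forget to "
            "[@click=link('https://github.com/Textualize/textual')]star[/] "
            "the repository!",
            timeout=10,
        )

    def action_link(self, href: str) -> None:
        webbrowser.open(href)


if __name__ == "__main__":
    ToastLinkApp().run()
```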
{"golden_diff": "diff --git a/src/textual/widgets/_toast.py b/src/textual/widgets/_toast.py\n--- a/src/textual/widgets/_toast.py\n+++ b/src/textual/widgets/_toast.py\n@@ -45,6 +45,12 @@\n padding: 1 1;\n background: $panel;\n tint: white 5%;\n+ link-background:;\n+ link-color: $text;\n+ link-style: underline;\n+ link-hover-background: $accent;\n+ link-hover-color: $text;\n+ link-hover-style: bold not underline;\n }\n \n .toast--title {\n", "issue": "Missing link styles in `Toast` notifcations\nThis follows on from discussion #3526 about adding a clickable link in a notification.\r\n\r\n The `Toast` widget doesn't inherit the styling from the base class, so the \"default\" links styling defined in Widget is lost. You can obviously just add it back in like I've done here, but it might be nicer if you didn't have to do that.\r\n\r\n```python\r\n# toast_with_link.py\r\n\r\nimport webbrowser\r\n\r\nfrom textual.app import App\r\n\r\n\r\nclass ToastLinkApp(App):\r\n # Uncomment the CSS_PATH to see the link!\r\n # CSS_PATH = \"toast_with_link.css\"\r\n\r\n def on_mount(self) -> None:\r\n self.notify(\r\n \"Don't forget to \"\r\n \"[@click=link('https://github.com/Textualize/textual')]star[/] \"\r\n \"the repository!\",\r\n timeout=10,\r\n )\r\n\r\n def action_link(self, href: str) -> None:\r\n webbrowser.open(href)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = ToastLinkApp()\r\n app.run()\r\n```\r\n\r\n```css\r\n/* toast_with_link.css */\r\n\r\nToast {\r\n link-background: ;\r\n link-color: $text;\r\n link-style: underline;\r\n link-hover-background: $accent;\r\n link-hover-color: $text;\r\n link-hover-style: bold not underline;\r\n}\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"Widgets for showing notification messages in toasts.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import on\nfrom ..containers import Container\nfrom ..css.query import NoMatches\nfrom ..events import Click, Mount\nfrom ..notifications import Notification, Notifications\nfrom ._static import Static\n\n\nclass ToastHolder(Container, inherit_css=False):\n \"\"\"Container that holds a single toast.\n\n Used to control the alignment of each of the toasts in the main toast\n container.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n ToastHolder {\n align-horizontal: right;\n width: 1fr;\n height: auto;\n visibility: hidden;\n }\n \"\"\"\n\n\nclass Toast(Static, inherit_css=False):\n \"\"\"A widget for displaying short-lived notifications.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Toast {\n width: 60;\n max-width: 50%;\n height: auto;\n visibility: visible;\n margin-top: 1;\n padding: 1 1;\n background: $panel;\n tint: white 5%;\n }\n\n .toast--title {\n text-style: bold;\n }\n\n Toast {\n border-right: wide $background;\n }\n\n Toast.-information {\n border-left: wide $success;\n }\n\n Toast.-information .toast--title {\n color: $success-darken-1;\n }\n\n Toast.-warning {\n border-left: wide $warning;\n }\n\n Toast.-warning .toast--title {\n color: $warning-darken-1;\n }\n\n Toast.-error {\n border-left: wide $error;\n }\n\n Toast.-error .toast--title {\n color: $error-darken-1;\n }\n \"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\"toast--title\"}\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `toast--title` | Targets the title of the toast. 
|\n \"\"\"\n\n def __init__(self, notification: Notification) -> None:\n \"\"\"Initialise the toast.\n\n Args:\n notification: The notification to show in the toast.\n \"\"\"\n super().__init__(classes=f\"-{notification.severity}\")\n self._notification = notification\n self._timeout = notification.time_left\n\n def render(self) -> RenderableType:\n \"\"\"Render the toast's content.\n\n Returns:\n A Rich renderable for the title and content of the Toast.\n \"\"\"\n notification = self._notification\n if notification.title:\n header_style = self.get_component_rich_style(\"toast--title\")\n notification_text = Text.assemble(\n (notification.title, header_style),\n \"\\n\",\n Text.from_markup(notification.message),\n )\n else:\n notification_text = Text.assemble(\n Text.from_markup(notification.message),\n )\n return notification_text\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Set the time running once the toast is mounted.\"\"\"\n self.set_timer(self._timeout, self._expire)\n\n @on(Click)\n def _expire(self) -> None:\n \"\"\"Remove the toast once the timer has expired.\"\"\"\n # Before we removed ourself, we also call on the app to forget about\n # the notification that caused us to exist. Note that we tell the\n # app to not bother refreshing the display on our account, we're\n # about to handle that anyway.\n self.app._unnotify(self._notification, refresh=False)\n # Note that we attempt to remove our parent, because we're wrapped\n # inside an alignment container. The testing that we are is as much\n # to keep type checkers happy as anything else.\n (self.parent if isinstance(self.parent, ToastHolder) else self).remove()\n\n\nclass ToastRack(Container, inherit_css=False):\n \"\"\"A container for holding toasts.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ToastRack {\n layer: _toastrack;\n width: 1fr;\n height: auto;\n dock: top;\n align: right bottom;\n visibility: hidden;\n layout: vertical;\n overflow-y: scroll;\n margin-bottom: 1;\n margin-right: 1;\n }\n \"\"\"\n\n @staticmethod\n def _toast_id(notification: Notification) -> str:\n \"\"\"Create a Textual-DOM-internal ID for the given notification.\n\n Args:\n notification: The notification to create the ID for.\n\n Returns:\n An ID for the notification that can be used within the DOM.\n \"\"\"\n return f\"--textual-toast-{notification.identity}\"\n\n def show(self, notifications: Notifications) -> None:\n \"\"\"Show the notifications as toasts.\n\n Args:\n notifications: The notifications to show.\n \"\"\"\n\n # Look for any stale toasts and remove them.\n for toast in self.query(Toast):\n if toast._notification not in notifications:\n toast.remove()\n\n # Gather up all the notifications that we don't have toasts for yet.\n new_toasts: list[Notification] = []\n for notification in notifications:\n try:\n # See if there's already a toast for that notification.\n _ = self.get_child_by_id(self._toast_id(notification))\n except NoMatches:\n if not notification.has_expired:\n new_toasts.append(notification)\n\n # If we got any...\n if new_toasts:\n # ...mount them.\n self.mount_all(\n ToastHolder(Toast(toast), id=self._toast_id(toast))\n for toast in new_toasts\n )\n self.call_later(self.scroll_end, animate=False, force=True)\n", "path": "src/textual/widgets/_toast.py"}], "after_files": [{"content": "\"\"\"Widgets for showing notification messages in toasts.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. 
import on\nfrom ..containers import Container\nfrom ..css.query import NoMatches\nfrom ..events import Click, Mount\nfrom ..notifications import Notification, Notifications\nfrom ._static import Static\n\n\nclass ToastHolder(Container, inherit_css=False):\n \"\"\"Container that holds a single toast.\n\n Used to control the alignment of each of the toasts in the main toast\n container.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n ToastHolder {\n align-horizontal: right;\n width: 1fr;\n height: auto;\n visibility: hidden;\n }\n \"\"\"\n\n\nclass Toast(Static, inherit_css=False):\n \"\"\"A widget for displaying short-lived notifications.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Toast {\n width: 60;\n max-width: 50%;\n height: auto;\n visibility: visible;\n margin-top: 1;\n padding: 1 1;\n background: $panel;\n tint: white 5%;\n link-background:;\n link-color: $text;\n link-style: underline;\n link-hover-background: $accent;\n link-hover-color: $text;\n link-hover-style: bold not underline;\n }\n\n .toast--title {\n text-style: bold;\n }\n\n Toast {\n border-right: wide $background;\n }\n\n Toast.-information {\n border-left: wide $success;\n }\n\n Toast.-information .toast--title {\n color: $success-darken-1;\n }\n\n Toast.-warning {\n border-left: wide $warning;\n }\n\n Toast.-warning .toast--title {\n color: $warning-darken-1;\n }\n\n Toast.-error {\n border-left: wide $error;\n }\n\n Toast.-error .toast--title {\n color: $error-darken-1;\n }\n \"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\"toast--title\"}\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `toast--title` | Targets the title of the toast. |\n \"\"\"\n\n def __init__(self, notification: Notification) -> None:\n \"\"\"Initialise the toast.\n\n Args:\n notification: The notification to show in the toast.\n \"\"\"\n super().__init__(classes=f\"-{notification.severity}\")\n self._notification = notification\n self._timeout = notification.time_left\n\n def render(self) -> RenderableType:\n \"\"\"Render the toast's content.\n\n Returns:\n A Rich renderable for the title and content of the Toast.\n \"\"\"\n notification = self._notification\n if notification.title:\n header_style = self.get_component_rich_style(\"toast--title\")\n notification_text = Text.assemble(\n (notification.title, header_style),\n \"\\n\",\n Text.from_markup(notification.message),\n )\n else:\n notification_text = Text.assemble(\n Text.from_markup(notification.message),\n )\n return notification_text\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Set the time running once the toast is mounted.\"\"\"\n self.set_timer(self._timeout, self._expire)\n\n @on(Click)\n def _expire(self) -> None:\n \"\"\"Remove the toast once the timer has expired.\"\"\"\n # Before we removed ourself, we also call on the app to forget about\n # the notification that caused us to exist. Note that we tell the\n # app to not bother refreshing the display on our account, we're\n # about to handle that anyway.\n self.app._unnotify(self._notification, refresh=False)\n # Note that we attempt to remove our parent, because we're wrapped\n # inside an alignment container. 
The testing that we are is as much\n # to keep type checkers happy as anything else.\n (self.parent if isinstance(self.parent, ToastHolder) else self).remove()\n\n\nclass ToastRack(Container, inherit_css=False):\n \"\"\"A container for holding toasts.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ToastRack {\n layer: _toastrack;\n width: 1fr;\n height: auto;\n dock: top;\n align: right bottom;\n visibility: hidden;\n layout: vertical;\n overflow-y: scroll;\n margin-bottom: 1;\n margin-right: 1;\n }\n \"\"\"\n\n @staticmethod\n def _toast_id(notification: Notification) -> str:\n \"\"\"Create a Textual-DOM-internal ID for the given notification.\n\n Args:\n notification: The notification to create the ID for.\n\n Returns:\n An ID for the notification that can be used within the DOM.\n \"\"\"\n return f\"--textual-toast-{notification.identity}\"\n\n def show(self, notifications: Notifications) -> None:\n \"\"\"Show the notifications as toasts.\n\n Args:\n notifications: The notifications to show.\n \"\"\"\n\n # Look for any stale toasts and remove them.\n for toast in self.query(Toast):\n if toast._notification not in notifications:\n toast.remove()\n\n # Gather up all the notifications that we don't have toasts for yet.\n new_toasts: list[Notification] = []\n for notification in notifications:\n try:\n # See if there's already a toast for that notification.\n _ = self.get_child_by_id(self._toast_id(notification))\n except NoMatches:\n if not notification.has_expired:\n new_toasts.append(notification)\n\n # If we got any...\n if new_toasts:\n # ...mount them.\n self.mount_all(\n ToastHolder(Toast(toast), id=self._toast_id(toast))\n for toast in new_toasts\n )\n self.call_later(self.scroll_end, animate=False, force=True)\n", "path": "src/textual/widgets/_toast.py"}]}
2,260
131
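Note on the `_toast.py` record above: the widget code reads `notification.time_left` and `notification.has_expired` without their definitions appearing in the excerpt. Below is a minimal, self-contained sketch of the timing bookkeeping such a notification object needs — the field names are illustrative stand-ins, not Textual's actual `Notification` implementation:

```python
import time
from dataclasses import dataclass, field


@dataclass
class Notification:
    """Minimal stand-in for the notification object the Toast code consumes."""
    message: str
    timeout: float = 5.0  # seconds the toast should stay visible
    raised_at: float = field(default_factory=time.monotonic)

    @property
    def time_left(self) -> float:
        """Seconds remaining before the toast should expire."""
        return max(0.0, self.timeout - (time.monotonic() - self.raised_at))

    @property
    def has_expired(self) -> bool:
        return self.time_left <= 0.0


n = Notification("Saved.", timeout=0.2)
assert not n.has_expired
time.sleep(0.25)
assert n.has_expired
```

In the widget itself, the same `_expire` method does double duty: it is scheduled once via `set_timer(self._timeout, self._expire)` on mount and also bound to mouse clicks via the `@on(Click)` decorator, so a toast is dismissed by whichever happens first.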
gh_patches_debug_29108
rasdani/github-patches
git_diff
rlworkgroup__garage-774
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove conda environment from CI/Dockerfiles Now that we use setup.py for tracking dependencies, there's no need be creating conda environments inside the Dockerfile and CI. We can replace them with a vanilla virtualenv for greatest compatibility. I suggest we add tests which ensure that the package can still be successfully solved and installed by conda and pipenv. If they don't take too long (<2min) then we can run them in the main CI, otherwise they will be cron_job tests (which are run only nightly). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import find_packages 2 from setuptools import setup 3 4 # Required dependencies 5 required = [ 6 # Please keep alphabetized 7 'akro==0.0.6', 8 'cached_property', 9 'click', 10 'cloudpickle', 11 'cma==1.1.06', 12 # dm_control throws an error during install about not being able to 13 # find a build dependency (absl-py). Later pip executes the `install` 14 # command again and the install succeeds because absl-py has been 15 # installed. This is stupid, but harmless. 16 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # noqa: E501 17 'dowel==0.0.2', 18 'gym[all]==0.12.4', 19 'joblib<0.13,>=0.12', 20 'matplotlib', 21 'mujoco-py<2.1,>=2.0', 22 'numpy==1.14.5', 23 'psutil', 24 # Pyglet 1.4.0 introduces some api change which breaks some 25 # gym environments 26 # See: https://github.com/openai/gym/issues/1588 27 'pyglet<1.4.0,>=1.3.0', 28 'pyprind', 29 'python-dateutil', 30 'torch==1.1.0', 31 'scikit-image', 32 'scipy', 33 'tensorflow<1.13,>=1.12.0', 34 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12 35 'torchvision==0.3.0' 36 ] 37 38 # Dependencies for optional features 39 extras = {} 40 extras['all'] = list(set(sum(extras.values(), []))) 41 42 # Intel dependencies not included in all 43 extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0'] 44 45 # Development dependencies (*not* included in "all") 46 extras['dev'] = [ 47 # Please keep alphabetized 48 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501 49 'flake8', 50 'flake8-docstrings==1.3.0', 51 'flake8-import-order', 52 'pandas', 53 'pep8-naming==0.7.0', 54 'pre-commit', 55 # pydocstyle 4.0.0 breaks flake8-docstrings 1.3.0 56 # See https://gitlab.com/pycqa/flake8-docstrings/issues/36 57 'pydocstyle<4.0.0', 58 'pylint==1.9.2', 59 'pytest>=3.6', # Required for pytest-cov on Python 3.6 60 'pytest-cov', 61 'sphinx', 62 'sphinx_rtd_theme', 63 'yapf', 64 ] 65 66 with open('README.md') as f: 67 readme = f.read() 68 69 # Get the package version dynamically 70 with open('VERSION') as v: 71 version = v.read().strip() 72 73 setup( 74 name='garage', 75 version=version, 76 author='Reinforcement Learning Working Group', 77 description='A framework for reproducible reinforcement learning research', 78 url='https://github.com/rlworkgroup/garage', 79 packages=find_packages(where='src'), 80 package_dir={'': 'src'}, 81 scripts=['scripts/garage'], 82 install_requires=required, 83 extras_require=extras, 84 license='MIT', 85 long_description=readme, 86 long_description_content_type='text/markdown', 87 classifiers=[ 88 'Development Status :: 4 - Beta', 89 'Intended Audience :: Developers', 90 'Intended Audience :: 
Education', 91 'Intended Audience :: Science/Research', 92 'License :: OSI Approved :: MIT License', 93 'Programming Language :: Python :: 3.5', 94 'Programming Language :: Python :: 3.6', 95 'Programming Language :: Python :: 3.7', 96 'Programming Language :: Python :: 3 :: Only', 97 'Topic :: Scientific/Engineering :: Artificial Intelligence', 98 'Topic :: Scientific/Engineering :: Mathematics', 99 'Topic :: Software Development :: Libraries', 100 ], 101 ) 102 ``` Path: `src/garage/np/algos/cem.py` Content: ``` 1 from dowel import logger, tabular 2 import numpy as np 3 4 from garage.np.algos import BatchPolopt 5 6 7 class CEM(BatchPolopt): 8 """Cross Entropy Method. 9 10 CEM works by iteratively optimizing a gaussian distribution of policy. 11 12 In each epoch, CEM does the following: 13 1. Sample n_samples policies from a gaussian distribution of 14 mean cur_mean and std cur_std. 15 2. Do rollouts for each policy. 16 3. Update cur_mean and cur_std by doing Maximum Likelihood Estimation 17 over the n_best top policies in terms of return. 18 19 Note: 20 When training CEM with LocalRunner, make sure that n_epoch_cycles for 21 runner equals to n_samples for CEM. 22 23 This implementation leverage n_epoch_cycles to do rollouts for a single 24 policy in an epoch cycle. 25 26 Args: 27 env_spec (garage.envs.EnvSpec): Environment specification. 28 policy (garage.np.policies.Policy): Action policy. 29 baseline(garage.np.baselines.Baseline): Baseline for GAE 30 (Generalized Advantage Estimation). 31 n_samples (int): Number of policies sampled in one epoch. 32 discount (float): Environment reward discount. 33 max_path_length (int): Maximum length of a single rollout. 34 best_frac (float): The best fraction. 35 init_std (float): Initial std for policy param distribution. 36 extra_std (float): Decaying std added to param distribution. 37 extra_decay_time (float): Epochs that it takes to decay extra std. 38 39 """ 40 41 def __init__(self, 42 env_spec, 43 policy, 44 baseline, 45 n_samples, 46 discount=0.99, 47 max_path_length=500, 48 init_std=1, 49 best_frac=0.05, 50 extra_std=1., 51 extra_decay_time=100): 52 super().__init__(policy, baseline, discount, max_path_length, 53 n_samples) 54 self.env_spec = env_spec 55 56 self.init_std = init_std 57 self.best_frac = best_frac 58 self.extra_std = extra_std 59 self.extra_decay_time = extra_decay_time 60 61 # epoch-wise 62 self.cur_std = self.init_std 63 self.cur_mean = self.policy.get_param_values() 64 # epoch-cycle-wise 65 self.cur_params = self.cur_mean 66 self.all_returns = [] 67 self.all_params = [self.cur_mean.copy()] 68 # fixed 69 self.n_best = int(n_samples * best_frac) 70 assert self.n_best >= 1, ( 71 f'n_samples is too low. Make sure that n_samples * best_frac >= 1') 72 self.n_params = len(self.cur_mean) 73 74 def sample_params(self, epoch): 75 extra_var_mult = max(1.0 - epoch / self.extra_decay_time, 0) 76 sample_std = np.sqrt( 77 np.square(self.cur_std) + 78 np.square(self.extra_std) * extra_var_mult) 79 return np.random.standard_normal( 80 self.n_params) * sample_std + self.cur_mean 81 82 def train_once(self, itr, paths): 83 paths = self.process_samples(itr, paths) 84 85 epoch = itr // self.n_samples 86 i_sample = itr - epoch * self.n_samples 87 tabular.record('Epoch', epoch) 88 tabular.record('# Sample', i_sample) 89 # -- Stage: Process path 90 rtn = paths['average_return'] 91 self.all_returns.append(paths['average_return']) 92 93 # -- Stage: Update policy distribution. 
94 if (itr + 1) % self.n_samples == 0: 95 avg_rtns = np.array(self.all_returns) 96 best_inds = np.argsort(-avg_rtns)[:self.n_best] 97 best_params = np.array(self.all_params)[best_inds] 98 99 # MLE of normal distribution 100 self.cur_mean = best_params.mean(axis=0) 101 self.cur_std = best_params.std(axis=0) 102 self.policy.set_param_values(self.cur_mean) 103 104 # Clear for next epoch 105 rtn = max(self.all_returns) 106 self.all_returns.clear() 107 self.all_params.clear() 108 109 # -- Stage: Generate a new policy for next path sampling 110 self.cur_params = self.sample_params(itr) 111 self.all_params.append(self.cur_params.copy()) 112 self.policy.set_param_values(self.cur_params) 113 114 logger.log(tabular) 115 return rtn 116 117 def get_itr_snapshot(self, itr): 118 return dict(itr=itr, policy=self.policy, baseline=self.baseline) 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,15 @@ from setuptools import find_packages from setuptools import setup +try: + # pylint: disable=unused-import + import numpy # noqa: F401 +except ImportError: + raise RuntimeError( + 'garage requires numpy in the environment to install. ' + 'Please install numpy==1.14.5 and try again. See ' + 'https://github.com/rlworkgroup/garage/issues/800 for more info.') + # Required dependencies required = [ # Please keep alphabetized @@ -58,6 +67,7 @@ 'pylint==1.9.2', 'pytest>=3.6', # Required for pytest-cov on Python 3.6 'pytest-cov', + 'pytest-xdist', 'sphinx', 'sphinx_rtd_theme', 'yapf', @@ -79,6 +89,7 @@ packages=find_packages(where='src'), package_dir={'': 'src'}, scripts=['scripts/garage'], + python_requires='>=3.5', install_requires=required, extras_require=extras, license='MIT', diff --git a/src/garage/np/algos/cem.py b/src/garage/np/algos/cem.py --- a/src/garage/np/algos/cem.py +++ b/src/garage/np/algos/cem.py @@ -68,7 +68,7 @@ # fixed self.n_best = int(n_samples * best_frac) assert self.n_best >= 1, ( - f'n_samples is too low. Make sure that n_samples * best_frac >= 1') + 'n_samples is too low. Make sure that n_samples * best_frac >= 1') self.n_params = len(self.cur_mean) def sample_params(self, epoch):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,15 @@\n from setuptools import find_packages\n from setuptools import setup\n \n+try:\n+ # pylint: disable=unused-import\n+ import numpy # noqa: F401\n+except ImportError:\n+ raise RuntimeError(\n+ 'garage requires numpy in the environment to install. '\n+ 'Please install numpy==1.14.5 and try again. See '\n+ 'https://github.com/rlworkgroup/garage/issues/800 for more info.')\n+\n # Required dependencies\n required = [\n # Please keep alphabetized\n@@ -58,6 +67,7 @@\n 'pylint==1.9.2',\n 'pytest>=3.6', # Required for pytest-cov on Python 3.6\n 'pytest-cov',\n+ 'pytest-xdist',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n@@ -79,6 +89,7 @@\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n+ python_requires='>=3.5',\n install_requires=required,\n extras_require=extras,\n license='MIT',\ndiff --git a/src/garage/np/algos/cem.py b/src/garage/np/algos/cem.py\n--- a/src/garage/np/algos/cem.py\n+++ b/src/garage/np/algos/cem.py\n@@ -68,7 +68,7 @@\n # fixed\n self.n_best = int(n_samples * best_frac)\n assert self.n_best >= 1, (\n- f'n_samples is too low. Make sure that n_samples * best_frac >= 1')\n+ 'n_samples is too low. Make sure that n_samples * best_frac >= 1')\n self.n_params = len(self.cur_mean)\n \n def sample_params(self, epoch):\n", "issue": "Remove conda environment from CI/Dockerfiles\nNow that we use setup.py for tracking dependencies, there's no need be creating conda environments inside the Dockerfile and CI. We can replace them with a vanilla virtualenv for greatest compatibility.\r\n\r\nI suggest we add tests which ensure that the package can still be successfully solved and installed by conda and pipenv. If they don't take too long (<2min) then we can run them in the main CI, otherwise they will be cron_job tests (which are run only nightly).\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro==0.0.6',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.12.4',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<2.1,>=2.0',\n 'numpy==1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'pyprind',\n 'python-dateutil',\n 'torch==1.1.0',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n 'torchvision==0.3.0'\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Intel dependencies not included in all\nextras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n # pydocstyle 4.0.0 breaks flake8-docstrings 1.3.0\n # See https://gitlab.com/pycqa/flake8-docstrings/issues/36\n 'pydocstyle<4.0.0',\n 'pylint==1.9.2',\n 'pytest>=3.6', # Required for pytest-cov on Python 3.6\n 'pytest-cov',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}, {"content": "from dowel import logger, tabular\nimport numpy as np\n\nfrom garage.np.algos import BatchPolopt\n\n\nclass CEM(BatchPolopt):\n \"\"\"Cross Entropy Method.\n\n CEM works by iteratively optimizing a gaussian distribution of policy.\n\n In each epoch, CEM does the following:\n 1. Sample n_samples policies from a gaussian distribution of\n mean cur_mean and std cur_std.\n 2. Do rollouts for each policy.\n 3. 
Update cur_mean and cur_std by doing Maximum Likelihood Estimation\n over the n_best top policies in terms of return.\n\n Note:\n When training CEM with LocalRunner, make sure that n_epoch_cycles for\n runner equals to n_samples for CEM.\n\n This implementation leverage n_epoch_cycles to do rollouts for a single\n policy in an epoch cycle.\n\n Args:\n env_spec (garage.envs.EnvSpec): Environment specification.\n policy (garage.np.policies.Policy): Action policy.\n baseline(garage.np.baselines.Baseline): Baseline for GAE\n (Generalized Advantage Estimation).\n n_samples (int): Number of policies sampled in one epoch.\n discount (float): Environment reward discount.\n max_path_length (int): Maximum length of a single rollout.\n best_frac (float): The best fraction.\n init_std (float): Initial std for policy param distribution.\n extra_std (float): Decaying std added to param distribution.\n extra_decay_time (float): Epochs that it takes to decay extra std.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n policy,\n baseline,\n n_samples,\n discount=0.99,\n max_path_length=500,\n init_std=1,\n best_frac=0.05,\n extra_std=1.,\n extra_decay_time=100):\n super().__init__(policy, baseline, discount, max_path_length,\n n_samples)\n self.env_spec = env_spec\n\n self.init_std = init_std\n self.best_frac = best_frac\n self.extra_std = extra_std\n self.extra_decay_time = extra_decay_time\n\n # epoch-wise\n self.cur_std = self.init_std\n self.cur_mean = self.policy.get_param_values()\n # epoch-cycle-wise\n self.cur_params = self.cur_mean\n self.all_returns = []\n self.all_params = [self.cur_mean.copy()]\n # fixed\n self.n_best = int(n_samples * best_frac)\n assert self.n_best >= 1, (\n f'n_samples is too low. Make sure that n_samples * best_frac >= 1')\n self.n_params = len(self.cur_mean)\n\n def sample_params(self, epoch):\n extra_var_mult = max(1.0 - epoch / self.extra_decay_time, 0)\n sample_std = np.sqrt(\n np.square(self.cur_std) +\n np.square(self.extra_std) * extra_var_mult)\n return np.random.standard_normal(\n self.n_params) * sample_std + self.cur_mean\n\n def train_once(self, itr, paths):\n paths = self.process_samples(itr, paths)\n\n epoch = itr // self.n_samples\n i_sample = itr - epoch * self.n_samples\n tabular.record('Epoch', epoch)\n tabular.record('# Sample', i_sample)\n # -- Stage: Process path\n rtn = paths['average_return']\n self.all_returns.append(paths['average_return'])\n\n # -- Stage: Update policy distribution.\n if (itr + 1) % self.n_samples == 0:\n avg_rtns = np.array(self.all_returns)\n best_inds = np.argsort(-avg_rtns)[:self.n_best]\n best_params = np.array(self.all_params)[best_inds]\n\n # MLE of normal distribution\n self.cur_mean = best_params.mean(axis=0)\n self.cur_std = best_params.std(axis=0)\n self.policy.set_param_values(self.cur_mean)\n\n # Clear for next epoch\n rtn = max(self.all_returns)\n self.all_returns.clear()\n self.all_params.clear()\n\n # -- Stage: Generate a new policy for next path sampling\n self.cur_params = self.sample_params(itr)\n self.all_params.append(self.cur_params.copy())\n self.policy.set_param_values(self.cur_params)\n\n logger.log(tabular)\n return rtn\n\n def get_itr_snapshot(self, itr):\n return dict(itr=itr, policy=self.policy, baseline=self.baseline)\n", "path": "src/garage/np/algos/cem.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\ntry:\n # pylint: disable=unused-import\n import numpy # noqa: F401\nexcept ImportError:\n raise RuntimeError(\n 'garage requires numpy in the 
environment to install. '\n 'Please install numpy==1.14.5 and try again. See '\n 'https://github.com/rlworkgroup/garage/issues/800 for more info.')\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro==0.0.6',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.12.4',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<2.1,>=2.0',\n 'numpy==1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'pyprind',\n 'python-dateutil',\n 'torch==1.1.0',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n 'torchvision==0.3.0'\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Intel dependencies not included in all\nextras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n # pydocstyle 4.0.0 breaks flake8-docstrings 1.3.0\n # See https://gitlab.com/pycqa/flake8-docstrings/issues/36\n 'pydocstyle<4.0.0',\n 'pylint==1.9.2',\n 'pytest>=3.6', # Required for pytest-cov on Python 3.6\n 'pytest-cov',\n 'pytest-xdist',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.5',\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}, {"content": "from dowel import logger, tabular\nimport numpy as np\n\nfrom garage.np.algos import BatchPolopt\n\n\nclass CEM(BatchPolopt):\n \"\"\"Cross Entropy Method.\n\n CEM works by iteratively optimizing a gaussian distribution of policy.\n\n In each epoch, CEM does the following:\n 1. 
Sample n_samples policies from a gaussian distribution of\n mean cur_mean and std cur_std.\n 2. Do rollouts for each policy.\n 3. Update cur_mean and cur_std by doing Maximum Likelihood Estimation\n over the n_best top policies in terms of return.\n\n Note:\n When training CEM with LocalRunner, make sure that n_epoch_cycles for\n runner equals to n_samples for CEM.\n\n This implementation leverage n_epoch_cycles to do rollouts for a single\n policy in an epoch cycle.\n\n Args:\n env_spec (garage.envs.EnvSpec): Environment specification.\n policy (garage.np.policies.Policy): Action policy.\n baseline(garage.np.baselines.Baseline): Baseline for GAE\n (Generalized Advantage Estimation).\n n_samples (int): Number of policies sampled in one epoch.\n discount (float): Environment reward discount.\n max_path_length (int): Maximum length of a single rollout.\n best_frac (float): The best fraction.\n init_std (float): Initial std for policy param distribution.\n extra_std (float): Decaying std added to param distribution.\n extra_decay_time (float): Epochs that it takes to decay extra std.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n policy,\n baseline,\n n_samples,\n discount=0.99,\n max_path_length=500,\n init_std=1,\n best_frac=0.05,\n extra_std=1.,\n extra_decay_time=100):\n super().__init__(policy, baseline, discount, max_path_length,\n n_samples)\n self.env_spec = env_spec\n\n self.init_std = init_std\n self.best_frac = best_frac\n self.extra_std = extra_std\n self.extra_decay_time = extra_decay_time\n\n # epoch-wise\n self.cur_std = self.init_std\n self.cur_mean = self.policy.get_param_values()\n # epoch-cycle-wise\n self.cur_params = self.cur_mean\n self.all_returns = []\n self.all_params = [self.cur_mean.copy()]\n # fixed\n self.n_best = int(n_samples * best_frac)\n assert self.n_best >= 1, (\n 'n_samples is too low. Make sure that n_samples * best_frac >= 1')\n self.n_params = len(self.cur_mean)\n\n def sample_params(self, epoch):\n extra_var_mult = max(1.0 - epoch / self.extra_decay_time, 0)\n sample_std = np.sqrt(\n np.square(self.cur_std) +\n np.square(self.extra_std) * extra_var_mult)\n return np.random.standard_normal(\n self.n_params) * sample_std + self.cur_mean\n\n def train_once(self, itr, paths):\n paths = self.process_samples(itr, paths)\n\n epoch = itr // self.n_samples\n i_sample = itr - epoch * self.n_samples\n tabular.record('Epoch', epoch)\n tabular.record('# Sample', i_sample)\n # -- Stage: Process path\n rtn = paths['average_return']\n self.all_returns.append(paths['average_return'])\n\n # -- Stage: Update policy distribution.\n if (itr + 1) % self.n_samples == 0:\n avg_rtns = np.array(self.all_returns)\n best_inds = np.argsort(-avg_rtns)[:self.n_best]\n best_params = np.array(self.all_params)[best_inds]\n\n # MLE of normal distribution\n self.cur_mean = best_params.mean(axis=0)\n self.cur_std = best_params.std(axis=0)\n self.policy.set_param_values(self.cur_mean)\n\n # Clear for next epoch\n rtn = max(self.all_returns)\n self.all_returns.clear()\n self.all_params.clear()\n\n # -- Stage: Generate a new policy for next path sampling\n self.cur_params = self.sample_params(itr)\n self.all_params.append(self.cur_params.copy())\n self.policy.set_param_values(self.cur_params)\n\n logger.log(tabular)\n return rtn\n\n def get_itr_snapshot(self, itr):\n return dict(itr=itr, policy=self.policy, baseline=self.baseline)\n", "path": "src/garage/np/algos/cem.py"}]}
2,820
447
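For the garage record above, the heart of the golden diff is a fail-fast guard at the top of `setup.py`: if the build dependency is missing, installation stops with actionable guidance instead of a confusing pip traceback. Condensed from the diff (the `from exc` exception chaining is an addition here for easier debugging, not part of the original patch):

```python
try:
    import numpy  # noqa: F401
except ImportError as exc:
    raise RuntimeError(
        "garage requires numpy in the environment to install. "
        "Please install numpy==1.14.5 and try again."
    ) from exc
```

The diff also drops a stray `f` prefix from a placeholder-free assertion message in `cem.py` (an f-string with no `{}` fields is dead weight and trips linters such as flake8's F541) and pins `python_requires='>=3.5'` so pip refuses unsupported interpreters up front.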
gh_patches_debug_34606
rasdani/github-patches
git_diff
ansible__awx-8016
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add insignts_credential paramter to tower_inventory <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use: - http://webchat.freenode.net/?channels=ansible-awx - https://groups.google.com/forum/#!forum/awx-project We have to limit this because of limited volunteer time to respond to issues! --> ##### ISSUE TYPE - Feature Idea ##### SUMMARY <!-- Briefly describe the problem or desired enhancement. --> Per PR #7963 tower_inventory is missing support for the insights_credential API parameter. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awx_collection/plugins/modules/tower_inventory.py` Content: ``` 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # (c) 2017, Wayne Witzel III <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 __metaclass__ = type 9 10 11 ANSIBLE_METADATA = {'metadata_version': '1.1', 12 'status': ['preview'], 13 'supported_by': 'community'} 14 15 16 DOCUMENTATION = ''' 17 --- 18 module: tower_inventory 19 author: "Wayne Witzel III (@wwitzel3)" 20 short_description: create, update, or destroy Ansible Tower inventory. 21 description: 22 - Create, update, or destroy Ansible Tower inventories. See 23 U(https://www.ansible.com/tower) for an overview. 24 options: 25 name: 26 description: 27 - The name to use for the inventory. 28 required: True 29 type: str 30 description: 31 description: 32 - The description to use for the inventory. 33 type: str 34 organization: 35 description: 36 - Organization the inventory belongs to. 37 required: True 38 type: str 39 variables: 40 description: 41 - Inventory variables. 42 type: dict 43 kind: 44 description: 45 - The kind field. Cannot be modified after created. 46 default: "" 47 choices: ["", "smart"] 48 type: str 49 host_filter: 50 description: 51 - The host_filter field. Only useful when C(kind=smart). 52 type: str 53 state: 54 description: 55 - Desired state of the resource. 
56 default: "present" 57 choices: ["present", "absent"] 58 type: str 59 extends_documentation_fragment: awx.awx.auth 60 ''' 61 62 63 EXAMPLES = ''' 64 - name: Add tower inventory 65 tower_inventory: 66 name: "Foo Inventory" 67 description: "Our Foo Cloud Servers" 68 organization: "Bar Org" 69 state: present 70 tower_config_file: "~/tower_cli.cfg" 71 ''' 72 73 74 from ..module_utils.tower_api import TowerAPIModule 75 import json 76 77 78 def main(): 79 # Any additional arguments that are not fields of the item can be added here 80 argument_spec = dict( 81 name=dict(required=True), 82 description=dict(), 83 organization=dict(required=True), 84 variables=dict(type='dict'), 85 kind=dict(choices=['', 'smart'], default=''), 86 host_filter=dict(), 87 state=dict(choices=['present', 'absent'], default='present'), 88 ) 89 90 # Create a module for ourselves 91 module = TowerAPIModule(argument_spec=argument_spec) 92 93 # Extract our parameters 94 name = module.params.get('name') 95 description = module.params.get('description') 96 organization = module.params.get('organization') 97 variables = module.params.get('variables') 98 state = module.params.get('state') 99 kind = module.params.get('kind') 100 host_filter = module.params.get('host_filter') 101 102 # Attempt to look up the related items the user specified (these will fail the module if not found) 103 org_id = module.resolve_name_to_id('organizations', organization) 104 105 # Attempt to look up inventory based on the provided name and org ID 106 inventory = module.get_one('inventories', **{ 107 'data': { 108 'name': name, 109 'organization': org_id 110 } 111 }) 112 113 if state == 'absent': 114 # If the state was absent we can let the module delete it if needed, the module will handle exiting from this 115 module.delete_if_needed(inventory) 116 117 # Create the data that gets sent for create and update 118 inventory_fields = { 119 'name': name, 120 'organization': org_id, 121 'kind': kind, 122 'host_filter': host_filter, 123 } 124 if description is not None: 125 inventory_fields['description'] = description 126 if variables is not None: 127 inventory_fields['variables'] = json.dumps(variables) 128 129 # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one. 130 if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart': 131 module.fail_json(msg='You cannot turn a regular inventory into a "smart" inventory.') 132 133 # If the state was present and we can let the module build or update the existing inventory, this will return on its own 134 module.create_or_update_if_needed(inventory, inventory_fields, endpoint='inventories', item_type='inventory') 135 136 137 if __name__ == '__main__': 138 main() 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awx_collection/plugins/modules/tower_inventory.py b/awx_collection/plugins/modules/tower_inventory.py --- a/awx_collection/plugins/modules/tower_inventory.py +++ b/awx_collection/plugins/modules/tower_inventory.py @@ -48,7 +48,11 @@ type: str host_filter: description: - - The host_filter field. Only useful when C(kind=smart). + - The host_filter field. Only useful when C(kind=smart). + type: str + insights_credential: + description: + - Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API. type: str state: description: @@ -84,6 +88,7 @@ variables=dict(type='dict'), kind=dict(choices=['', 'smart'], default=''), host_filter=dict(), + insights_credential=dict(), state=dict(choices=['present', 'absent'], default='present'), ) @@ -98,6 +103,7 @@ state = module.params.get('state') kind = module.params.get('kind') host_filter = module.params.get('host_filter') + insights_credential = module.params.get('insights_credential') # Attempt to look up the related items the user specified (these will fail the module if not found) org_id = module.resolve_name_to_id('organizations', organization) @@ -125,6 +131,8 @@ inventory_fields['description'] = description if variables is not None: inventory_fields['variables'] = json.dumps(variables) + if insights_credential is not None: + inventory_fields['insights_credential'] = module.resolve_name_to_id('credentials', insights_credential) # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one. if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':
{"golden_diff": "diff --git a/awx_collection/plugins/modules/tower_inventory.py b/awx_collection/plugins/modules/tower_inventory.py\n--- a/awx_collection/plugins/modules/tower_inventory.py\n+++ b/awx_collection/plugins/modules/tower_inventory.py\n@@ -48,7 +48,11 @@\n type: str\n host_filter:\n description:\n- - The host_filter field. Only useful when C(kind=smart).\n+ - The host_filter field. Only useful when C(kind=smart).\n+ type: str\n+ insights_credential:\n+ description:\n+ - Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.\n type: str\n state:\n description:\n@@ -84,6 +88,7 @@\n variables=dict(type='dict'),\n kind=dict(choices=['', 'smart'], default=''),\n host_filter=dict(),\n+ insights_credential=dict(),\n state=dict(choices=['present', 'absent'], default='present'),\n )\n \n@@ -98,6 +103,7 @@\n state = module.params.get('state')\n kind = module.params.get('kind')\n host_filter = module.params.get('host_filter')\n+ insights_credential = module.params.get('insights_credential')\n \n # Attempt to look up the related items the user specified (these will fail the module if not found)\n org_id = module.resolve_name_to_id('organizations', organization)\n@@ -125,6 +131,8 @@\n inventory_fields['description'] = description\n if variables is not None:\n inventory_fields['variables'] = json.dumps(variables)\n+ if insights_credential is not None:\n+ inventory_fields['insights_credential'] = module.resolve_name_to_id('credentials', insights_credential)\n \n # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.\n if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':\n", "issue": "Add insignts_credential paramter to tower_inventory\n<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:\r\n\r\n- http://webchat.freenode.net/?channels=ansible-awx\r\n- https://groups.google.com/forum/#!forum/awx-project\r\n\r\nWe have to limit this because of limited volunteer time to respond to issues! -->\r\n\r\n##### ISSUE TYPE\r\n - Feature Idea\r\n\r\n##### SUMMARY\r\n<!-- Briefly describe the problem or desired enhancement. -->\r\nPer PR #7963 tower_inventory is missing support for the insights_credential API parameter.\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, Wayne Witzel III <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_inventory\nauthor: \"Wayne Witzel III (@wwitzel3)\"\nshort_description: create, update, or destroy Ansible Tower inventory.\ndescription:\n - Create, update, or destroy Ansible Tower inventories. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name to use for the inventory.\n required: True\n type: str\n description:\n description:\n - The description to use for the inventory.\n type: str\n organization:\n description:\n - Organization the inventory belongs to.\n required: True\n type: str\n variables:\n description:\n - Inventory variables.\n type: dict\n kind:\n description:\n - The kind field. 
Cannot be modified after created.\n default: \"\"\n choices: [\"\", \"smart\"]\n type: str\n host_filter:\n description:\n - The host_filter field. Only useful when C(kind=smart).\n type: str\n state:\n description:\n - Desired state of the resource.\n default: \"present\"\n choices: [\"present\", \"absent\"]\n type: str\nextends_documentation_fragment: awx.awx.auth\n'''\n\n\nEXAMPLES = '''\n- name: Add tower inventory\n tower_inventory:\n name: \"Foo Inventory\"\n description: \"Our Foo Cloud Servers\"\n organization: \"Bar Org\"\n state: present\n tower_config_file: \"~/tower_cli.cfg\"\n'''\n\n\nfrom ..module_utils.tower_api import TowerAPIModule\nimport json\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True),\n description=dict(),\n organization=dict(required=True),\n variables=dict(type='dict'),\n kind=dict(choices=['', 'smart'], default=''),\n host_filter=dict(),\n state=dict(choices=['present', 'absent'], default='present'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n description = module.params.get('description')\n organization = module.params.get('organization')\n variables = module.params.get('variables')\n state = module.params.get('state')\n kind = module.params.get('kind')\n host_filter = module.params.get('host_filter')\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n org_id = module.resolve_name_to_id('organizations', organization)\n\n # Attempt to look up inventory based on the provided name and org ID\n inventory = module.get_one('inventories', **{\n 'data': {\n 'name': name,\n 'organization': org_id\n }\n })\n\n if state == 'absent':\n # If the state was absent we can let the module delete it if needed, the module will handle exiting from this\n module.delete_if_needed(inventory)\n\n # Create the data that gets sent for create and update\n inventory_fields = {\n 'name': name,\n 'organization': org_id,\n 'kind': kind,\n 'host_filter': host_filter,\n }\n if description is not None:\n inventory_fields['description'] = description\n if variables is not None:\n inventory_fields['variables'] = json.dumps(variables)\n\n # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.\n if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':\n module.fail_json(msg='You cannot turn a regular inventory into a \"smart\" inventory.')\n\n # If the state was present and we can let the module build or update the existing inventory, this will return on its own\n module.create_or_update_if_needed(inventory, inventory_fields, endpoint='inventories', item_type='inventory')\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_inventory.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, Wayne Witzel III <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_inventory\nauthor: \"Wayne Witzel III (@wwitzel3)\"\nshort_description: create, update, or destroy Ansible Tower 
inventory.\ndescription:\n - Create, update, or destroy Ansible Tower inventories. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name to use for the inventory.\n required: True\n type: str\n description:\n description:\n - The description to use for the inventory.\n type: str\n organization:\n description:\n - Organization the inventory belongs to.\n required: True\n type: str\n variables:\n description:\n - Inventory variables.\n type: dict\n kind:\n description:\n - The kind field. Cannot be modified after created.\n default: \"\"\n choices: [\"\", \"smart\"]\n type: str\n host_filter:\n description:\n - The host_filter field. Only useful when C(kind=smart).\n type: str\n insights_credential:\n description:\n - Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.\n type: str\n state:\n description:\n - Desired state of the resource.\n default: \"present\"\n choices: [\"present\", \"absent\"]\n type: str\nextends_documentation_fragment: awx.awx.auth\n'''\n\n\nEXAMPLES = '''\n- name: Add tower inventory\n tower_inventory:\n name: \"Foo Inventory\"\n description: \"Our Foo Cloud Servers\"\n organization: \"Bar Org\"\n state: present\n tower_config_file: \"~/tower_cli.cfg\"\n'''\n\n\nfrom ..module_utils.tower_api import TowerAPIModule\nimport json\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True),\n description=dict(),\n organization=dict(required=True),\n variables=dict(type='dict'),\n kind=dict(choices=['', 'smart'], default=''),\n host_filter=dict(),\n insights_credential=dict(),\n state=dict(choices=['present', 'absent'], default='present'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n description = module.params.get('description')\n organization = module.params.get('organization')\n variables = module.params.get('variables')\n state = module.params.get('state')\n kind = module.params.get('kind')\n host_filter = module.params.get('host_filter')\n insights_credential = module.params.get('insights_credential')\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n org_id = module.resolve_name_to_id('organizations', organization)\n\n # Attempt to look up inventory based on the provided name and org ID\n inventory = module.get_one('inventories', **{\n 'data': {\n 'name': name,\n 'organization': org_id\n }\n })\n\n if state == 'absent':\n # If the state was absent we can let the module delete it if needed, the module will handle exiting from this\n module.delete_if_needed(inventory)\n\n # Create the data that gets sent for create and update\n inventory_fields = {\n 'name': name,\n 'organization': org_id,\n 'kind': kind,\n 'host_filter': host_filter,\n }\n if description is not None:\n inventory_fields['description'] = description\n if variables is not None:\n inventory_fields['variables'] = json.dumps(variables)\n if insights_credential is not None:\n inventory_fields['insights_credential'] = module.resolve_name_to_id('credentials', insights_credential)\n\n # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.\n if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':\n module.fail_json(msg='You cannot turn a regular inventory into a \"smart\" inventory.')\n\n 
# If the state was present and we can let the module build or update the existing inventory, this will return on its own\n module.create_or_update_if_needed(inventory, inventory_fields, endpoint='inventories', item_type='inventory')\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_inventory.py"}]}
1,692
433
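The awx record above resolves the new `insights_credential` name to a primary key with `module.resolve_name_to_id('credentials', ...)` before sending it to the API. Here is a toy stand-in for that lookup pattern, operating on a plain dict rather than the real `TowerAPIModule` — the data and names are hypothetical, purely for illustration:

```python
def resolve_name_to_id(records: dict, name: str) -> int:
    """Map a human-readable name to its primary key; fail unless exactly one match."""
    matches = [pk for pk, item_name in records.items() if item_name == name]
    if len(matches) != 1:
        raise ValueError(f"expected exactly one match for {name!r}, found {len(matches)}")
    return matches[0]


credentials = {1: "Insights Cred", 2: "Machine Cred"}
inventory_fields = {"name": "Foo Inventory"}
insights_credential = "Insights Cred"
if insights_credential is not None:
    inventory_fields["insights_credential"] = resolve_name_to_id(
        credentials, insights_credential
    )
assert inventory_fields["insights_credential"] == 1
```

Mirroring the module's own style, the field is only added when the parameter was supplied, so omitting it leaves an existing inventory's credential untouched.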
gh_patches_debug_12719
rasdani/github-patches
git_diff
microsoft__playwright-python-593
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Async API - asyncio error After updating to the 1.9.2 version, for **Async API** I get the following error: Exception ignored in: <function BaseSubprocessTransport.__del__ at 0x0000000002E1E9D0> Traceback (most recent call last): File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_subprocess.py", line 126, in __del__ self.close() File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_subprocess.py", line 104, in close proto.pipe.close() File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 108, in close self._loop.call_soon(self._call_connection_lost, None) File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 746, in call_soon self._check_closed() File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 510, in _check_closed raise RuntimeError('Event loop is closed') RuntimeError: Event loop is closed Exception ignored in: <function _ProactorBasePipeTransport.__del__ at 0x0000000002E4A280> Traceback (most recent call last): File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 116, in __del__ self.close() File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 108, in close self._loop.call_soon(self._call_connection_lost, None) File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 746, in call_soon self._check_closed() File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 510, in _check_closed raise RuntimeError('Event loop is closed') RuntimeError: Event loop is closed Versions: playwright=1.9.2 asyncio=3.4.3 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `playwright/_impl/_transport.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import asyncio 16 import io 17 import json 18 import os 19 import sys 20 from pathlib import Path 21 from typing import Dict, Optional 22 23 24 # Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77 25 def _get_stderr_fileno() -> Optional[int]: 26 try: 27 return sys.stderr.fileno() 28 except (AttributeError, io.UnsupportedOperation): 29 # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. 30 # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors 31 # This is potentially dangerous, but the best we can do. 
32 if not hasattr(sys, "__stderr__") or not sys.__stderr__: 33 return None 34 return sys.__stderr__.fileno() 35 36 37 class Transport: 38 def __init__(self, driver_executable: Path) -> None: 39 super().__init__() 40 self.on_message = lambda _: None 41 self._stopped = False 42 self._driver_executable = driver_executable 43 self._loop: asyncio.AbstractEventLoop 44 45 def stop(self) -> None: 46 self._stopped = True 47 self._output.close() 48 49 async def wait_until_stopped(self) -> None: 50 await self._stopped_future 51 52 async def run(self) -> None: 53 self._loop = asyncio.get_running_loop() 54 self._stopped_future: asyncio.Future = asyncio.Future() 55 56 proc = await asyncio.create_subprocess_exec( 57 str(self._driver_executable), 58 "run-driver", 59 stdin=asyncio.subprocess.PIPE, 60 stdout=asyncio.subprocess.PIPE, 61 stderr=_get_stderr_fileno(), 62 limit=32768, 63 ) 64 assert proc.stdout 65 assert proc.stdin 66 self._output = proc.stdin 67 68 while not self._stopped: 69 try: 70 buffer = await proc.stdout.readexactly(4) 71 length = int.from_bytes(buffer, byteorder="little", signed=False) 72 buffer = bytes(0) 73 while length: 74 to_read = min(length, 32768) 75 data = await proc.stdout.readexactly(to_read) 76 length -= to_read 77 if len(buffer): 78 buffer = buffer + data 79 else: 80 buffer = data 81 obj = json.loads(buffer) 82 83 if "DEBUGP" in os.environ: # pragma: no cover 84 print("\x1b[33mRECV>\x1b[0m", json.dumps(obj, indent=2)) 85 self.on_message(obj) 86 except asyncio.IncompleteReadError: 87 break 88 await asyncio.sleep(0) 89 self._stopped_future.set_result(None) 90 91 def send(self, message: Dict) -> None: 92 msg = json.dumps(message) 93 if "DEBUGP" in os.environ: # pragma: no cover 94 print("\x1b[32mSEND>\x1b[0m", json.dumps(message, indent=2)) 95 data = msg.encode() 96 self._output.write( 97 len(data).to_bytes(4, byteorder="little", signed=False) + data 98 ) 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py --- a/playwright/_impl/_transport.py +++ b/playwright/_impl/_transport.py @@ -48,12 +48,13 @@ async def wait_until_stopped(self) -> None: await self._stopped_future + await self._proc.wait() async def run(self) -> None: self._loop = asyncio.get_running_loop() self._stopped_future: asyncio.Future = asyncio.Future() - proc = await asyncio.create_subprocess_exec( + self._proc = proc = await asyncio.create_subprocess_exec( str(self._driver_executable), "run-driver", stdin=asyncio.subprocess.PIPE,
{"golden_diff": "diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py\n--- a/playwright/_impl/_transport.py\n+++ b/playwright/_impl/_transport.py\n@@ -48,12 +48,13 @@\n \n async def wait_until_stopped(self) -> None:\n await self._stopped_future\n+ await self._proc.wait()\n \n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n self._stopped_future: asyncio.Future = asyncio.Future()\n \n- proc = await asyncio.create_subprocess_exec(\n+ self._proc = proc = await asyncio.create_subprocess_exec(\n str(self._driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n", "issue": "Async API - asyncio error\nAfter updating to the 1.9.2 version, for **Async API** I get the following error:\r\n\r\nException ignored in: <function BaseSubprocessTransport.__del__ at 0x0000000002E1E9D0>\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_subprocess.py\", line 126, in __del__\r\n self.close()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_subprocess.py\", line 104, in close \r\n proto.pipe.close()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\proactor_events.py\", line 108, in close \r\n self._loop.call_soon(self._call_connection_lost, None)\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 746, in call_soon\r\n self._check_closed()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 510, in _check_closed\r\n raise RuntimeError('Event loop is closed')\r\nRuntimeError: Event loop is closed\r\nException ignored in: <function _ProactorBasePipeTransport.__del__ at 0x0000000002E4A280>\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\proactor_events.py\", line 116, in __del__\r\n self.close()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\proactor_events.py\", line 108, in close\r\n self._loop.call_soon(self._call_connection_lost, None)\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 746, in call_soon\r\n self._check_closed()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 510, in _check_closed\r\n raise RuntimeError('Event loop is closed')\r\nRuntimeError: Event loop is closed\r\n\r\nVersions:\r\nplaywright=1.9.2\r\nasyncio=3.4.3\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\n\n# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\ndef 
_get_stderr_fileno() -> Optional[int]:\n try:\n return sys.stderr.fileno()\n except (AttributeError, io.UnsupportedOperation):\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n if not hasattr(sys, \"__stderr__\") or not sys.__stderr__:\n return None\n return sys.__stderr__.fileno()\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def wait_until_stopped(self) -> None:\n await self._stopped_future\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n self._stopped_future: asyncio.Future = asyncio.Future()\n\n proc = await asyncio.create_subprocess_exec(\n str(self._driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n self._stopped_future.set_result(None)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\n\n# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\ndef _get_stderr_fileno() -> Optional[int]:\n try:\n return sys.stderr.fileno()\n except (AttributeError, io.UnsupportedOperation):\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n if not hasattr(sys, \"__stderr__\") or not 
sys.__stderr__:\n return None\n return sys.__stderr__.fileno()\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def wait_until_stopped(self) -> None:\n await self._stopped_future\n await self._proc.wait()\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n self._stopped_future: asyncio.Future = asyncio.Future()\n\n self._proc = proc = await asyncio.create_subprocess_exec(\n str(self._driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n self._stopped_future.set_result(None)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}]}
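The fix above works by keeping a reference to the subprocess (`self._proc`) and awaiting `self._proc.wait()` inside `wait_until_stopped()`, so the child is reaped while the event loop is still running. Without that, the subprocess transport can be garbage-collected after `asyncio.run()` has already closed the loop, and its `__del__` raises `RuntimeError: Event loop is closed`. A minimal standalone sketch of the same pattern (not the playwright code itself):

```python
import asyncio
import sys

async def main():
    proc = await asyncio.create_subprocess_exec(
        sys.executable, "-c", "print('hi')",
        stdout=asyncio.subprocess.PIPE,
    )
    out, _ = await proc.communicate()
    # Reap the child while the loop is still open; skipping this can leave
    # the subprocess transport to be finalized after loop shutdown, which
    # is what triggers "Event loop is closed" in __del__.
    await proc.wait()

asyncio.run(main())
```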
num_tokens: 1873
num_tokens_diff: 163

problem_id: gh_patches_debug_47861
source: rasdani/github-patches
task_type: git_diff
in_source_id: saleor__saleor-4008
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Duplicate choices in shipping address ### What I'm trying to achieve I'm trying to set shipping user for Russian user and there are duplicate values in "Oblast" selector. ### Steps to reproduce the problem 1. Create new shipping address 2. Country --> Russia 3. There are duplicated values in "Oblast" selector ### What I expected to happen There are no duplicated values in "Oblast" selector ### Screenshots What happens now <img src="https://user-images.githubusercontent.com/13136992/53255369-8a239600-36d6-11e9-84a6-24a10b96a321.png" width="300"> What I expect to see <img src="https://user-images.githubusercontent.com/13136992/53255400-99a2df00-36d6-11e9-8913-ecaec174487a.png" width="300"> **System information** Operating system: Manjaro Linux Browser: Google Chrome 72.0.3626.81 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/account/forms.py` Content: ``` 1 from captcha.fields import ReCaptchaField 2 from django import forms 3 from django.conf import settings 4 from django.contrib.auth import forms as django_forms, update_session_auth_hash 5 from django.utils.translation import pgettext, pgettext_lazy 6 from phonenumbers.phonenumberutil import country_code_for_region 7 8 from ..account.models import User 9 from . import emails 10 from .i18n import AddressMetaForm, get_address_form_class 11 12 13 class FormWithReCaptcha(forms.BaseForm): 14 def __new__(cls, *args, **kwargs): 15 if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY: 16 # insert a Google reCaptcha field inside the form 17 # note: label is empty, the reCaptcha is self-explanatory making 18 # the form simpler for the user. 
19 cls.base_fields['_captcha'] = ReCaptchaField(label='') 20 return super(FormWithReCaptcha, cls).__new__(cls) 21 22 23 def get_address_form( 24 data, country_code, initial=None, instance=None, **kwargs): 25 country_form = AddressMetaForm(data, initial=initial) 26 preview = False 27 if country_form.is_valid(): 28 country_code = country_form.cleaned_data['country'] 29 preview = country_form.cleaned_data['preview'] 30 31 if initial is None and country_code: 32 initial = {} 33 if country_code: 34 initial['phone'] = '+{}'.format(country_code_for_region(country_code)) 35 36 address_form_class = get_address_form_class(country_code) 37 38 if not preview and instance is not None: 39 address_form_class = get_address_form_class(instance.country.code) 40 address_form = address_form_class(data, instance=instance, **kwargs) 41 else: 42 initial_address = ( 43 initial if not preview 44 else data.dict() if data is not None else data) 45 address_form = address_form_class( 46 not preview and data or None, 47 initial=initial_address, 48 **kwargs) 49 return address_form, preview 50 51 52 class ChangePasswordForm(django_forms.PasswordChangeForm): 53 def __init__(self, *args, **kwargs): 54 super().__init__(*args, **kwargs) 55 self.fields['new_password1'].user = self.user 56 self.fields['old_password'].widget.attrs['placeholder'] = '' 57 self.fields['new_password1'].widget.attrs['placeholder'] = '' 58 del self.fields['new_password2'] 59 60 61 def logout_on_password_change(request, user): 62 if (update_session_auth_hash is not None and 63 not settings.LOGOUT_ON_PASSWORD_CHANGE): 64 update_session_auth_hash(request, user) 65 66 67 class LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha): 68 username = forms.EmailField( 69 label=pgettext('Form field', 'Email'), max_length=75) 70 71 def __init__(self, request=None, *args, **kwargs): 72 super().__init__(request=request, *args, **kwargs) 73 if request: 74 email = request.GET.get('email') 75 if email: 76 self.fields['username'].initial = email 77 78 79 class SignupForm(forms.ModelForm, FormWithReCaptcha): 80 password = forms.CharField( 81 widget=forms.PasswordInput, 82 label=pgettext('Password', 'Password')) 83 email = forms.EmailField( 84 label=pgettext('Email', 'Email'), 85 error_messages={ 86 'unique': pgettext_lazy( 87 'Registration error', 88 'This email has already been registered.')}) 89 90 class Meta: 91 model = User 92 fields = ('email',) 93 94 def __init__(self, *args, **kwargs): 95 super().__init__(*args, **kwargs) 96 if self._meta.model.USERNAME_FIELD in self.fields: 97 self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update( 98 {'autofocus': ''}) 99 100 def save(self, request=None, commit=True): 101 user = super().save(commit=False) 102 password = self.cleaned_data['password'] 103 user.set_password(password) 104 if commit: 105 user.save() 106 return user 107 108 109 class PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha): 110 """Allow resetting passwords. 111 112 This subclass overrides sending emails to use templated email. 113 """ 114 115 def get_users(self, email): 116 active_users = User.objects.filter(email__iexact=email, is_active=True) 117 return active_users 118 119 def send_mail( 120 self, subject_template_name, email_template_name, context, 121 from_email, to_email, html_email_template_name=None): 122 # Passing the user object to the Celery task throws an 123 # error "'User' is not JSON serializable". Since it's not used in our 124 # template, we remove it from the context. 
125 del context['user'] 126 emails.send_password_reset_email.delay(context, to_email) 127 128 129 class NameForm(forms.ModelForm): 130 class Meta: 131 model = User 132 fields = ['first_name', 'last_name'] 133 labels = { 134 'first_name': pgettext_lazy( 135 'Customer form: Given name field', 'Given name'), 136 'last_name': pgettext_lazy( 137 'Customer form: Family name field', 'Family name')} 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/account/forms.py b/saleor/account/forms.py --- a/saleor/account/forms.py +++ b/saleor/account/forms.py @@ -46,6 +46,11 @@ not preview and data or None, initial=initial_address, **kwargs) + + if hasattr(address_form.fields['country_area'], 'choices'): + choices = address_form.fields['country_area'].choices + choices = [(choice[1], choice[1]) for choice in choices] + address_form.fields['country_area'].choices = choices return address_form, preview
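The key line of this patch maps each `(value, label)` pair in `country_area` onto `(label, label)`, so the form stores and validates the human-readable area name rather than the internal key. Whether that alone removes the visible duplicates depends on the underlying choice data; the sketch below uses made-up pairs, and the final `dict.fromkeys` de-duplication step is an illustration added here, not part of the patch:

```python
# Hypothetical choices for RU: the same oblast appearing under both a
# code key and a name key.
raw = [("MOW", "Moskva"), ("Moskva", "Moskva")]

normalized = [(label, label) for _value, label in raw]
deduped = list(dict.fromkeys(normalized))  # identical pairs collapse
print(deduped)  # [('Moskva', 'Moskva')]
```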
{"golden_diff": "diff --git a/saleor/account/forms.py b/saleor/account/forms.py\n--- a/saleor/account/forms.py\n+++ b/saleor/account/forms.py\n@@ -46,6 +46,11 @@\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n+\n+ if hasattr(address_form.fields['country_area'], 'choices'):\n+ choices = address_form.fields['country_area'].choices\n+ choices = [(choice[1], choice[1]) for choice in choices]\n+ address_form.fields['country_area'].choices = choices\n return address_form, preview\n", "issue": "Duplicate choices in shipping address\n### What I'm trying to achieve\r\nI'm trying to set shipping user for Russian user and there are duplicate values in \"Oblast\" selector.\r\n\r\n### Steps to reproduce the problem\r\n1. Create new shipping address\r\n2. Country --> Russia\r\n3. There are duplicated values in \"Oblast\" selector\r\n\r\n### What I expected to happen\r\nThere are no duplicated values in \"Oblast\" selector\r\n\r\n### Screenshots\r\nWhat happens now\r\n<img src=\"https://user-images.githubusercontent.com/13136992/53255369-8a239600-36d6-11e9-84a6-24a10b96a321.png\" width=\"300\">\r\n\r\n\r\nWhat I expect to see\r\n<img src=\"https://user-images.githubusercontent.com/13136992/53255400-99a2df00-36d6-11e9-8913-ecaec174487a.png\" width=\"300\">\r\n\r\n\r\n**System information**\r\nOperating system: Manjaro Linux\r\nBrowser: Google Chrome 72.0.3626.81\r\n\n", "before_files": [{"content": "from captcha.fields import ReCaptchaField\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom . 
import emails\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\nclass FormWithReCaptcha(forms.BaseForm):\n def __new__(cls, *args, **kwargs):\n if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:\n # insert a Google reCaptcha field inside the form\n # note: label is empty, the reCaptcha is self-explanatory making\n # the form simpler for the user.\n cls.base_fields['_captcha'] = ReCaptchaField(label='')\n return super(FormWithReCaptcha, cls).__new__(cls)\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm, FormWithReCaptcha):\n password = forms.CharField(\n widget=forms.PasswordInput,\n label=pgettext('Password', 'Password'))\n email = forms.EmailField(\n label=pgettext('Email', 'Email'),\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, 
to_email, html_email_template_name=None):\n # Passing the user object to the Celery task throws an\n # error \"'User' is not JSON serializable\". Since it's not used in our\n # template, we remove it from the context.\n del context['user']\n emails.send_password_reset_email.delay(context, to_email)\n\n\nclass NameForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': pgettext_lazy(\n 'Customer form: Given name field', 'Given name'),\n 'last_name': pgettext_lazy(\n 'Customer form: Family name field', 'Family name')}\n", "path": "saleor/account/forms.py"}], "after_files": [{"content": "from captcha.fields import ReCaptchaField\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom . import emails\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\nclass FormWithReCaptcha(forms.BaseForm):\n def __new__(cls, *args, **kwargs):\n if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:\n # insert a Google reCaptcha field inside the form\n # note: label is empty, the reCaptcha is self-explanatory making\n # the form simpler for the user.\n cls.base_fields['_captcha'] = ReCaptchaField(label='')\n return super(FormWithReCaptcha, cls).__new__(cls)\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n\n if hasattr(address_form.fields['country_area'], 'choices'):\n choices = address_form.fields['country_area'].choices\n choices = [(choice[1], choice[1]) for choice in choices]\n address_form.fields['country_area'].choices = choices\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n 
self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm, FormWithReCaptcha):\n password = forms.CharField(\n widget=forms.PasswordInput,\n label=pgettext('Password', 'Password'))\n email = forms.EmailField(\n label=pgettext('Email', 'Email'),\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, to_email, html_email_template_name=None):\n # Passing the user object to the Celery task throws an\n # error \"'User' is not JSON serializable\". Since it's not used in our\n # template, we remove it from the context.\n del context['user']\n emails.send_password_reset_email.delay(context, to_email)\n\n\nclass NameForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': pgettext_lazy(\n 'Customer form: Given name field', 'Given name'),\n 'last_name': pgettext_lazy(\n 'Customer form: Family name field', 'Family name')}\n", "path": "saleor/account/forms.py"}]}
num_tokens: 1929
num_tokens_diff: 133

problem_id: gh_patches_debug_25512
source: rasdani/github-patches
task_type: git_diff
in_source_id: openstates__openstates-scrapers-2059
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RI failing since at least 2018-01-02 RI has been failing since 2018-01-02 Based on automated runs it appears that RI has not run successfully in 2 days (2018-01-02). ``` no pupa_settings on path, using defaults ri (scrape, import) bills: {} people: {} committees: {} 23:00:36 CRITICAL pupa: Session(s) 2018 were reported by RhodeIsland.get_session_list() but were not found in RhodeIsland.legislative_sessions or RhodeIsland.ignored_scraped_sessions. ``` Visit http://bobsled.openstates.org for more info. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `billy_metadata/ri.py` Content: ``` 1 import datetime 2 3 metadata = { 4 'name': 'Rhode Island', 5 'abbreviation': 'ri', 6 'legislature_name': 'Rhode Island General Assembly', 7 'legislature_url': 'http://www.rilin.state.ri.us/', 8 'capitol_timezone': 'America/New_York', 9 'chambers': { 10 'upper': {'name': 'Senate', 'title': 'Senator'}, 11 'lower': {'name': 'House', 'title': 'Representative'}, 12 }, 13 'terms': [ 14 { 15 'name': '2012', 16 'start_year': 2012, 17 'end_year': 2012, 18 'sessions': ['2012'], 19 }, 20 { 21 'name': '2013', 22 'start_year': 2013, 23 'end_year': 2013, 24 'sessions': ['2013'], 25 }, 26 27 { 28 'name': '2014', 29 'start_year': 2014, 30 'end_year': 2014, 31 'sessions': ['2014'], 32 }, 33 { 34 'name': '2015', 35 'start_year': 2015, 36 'end_year': 2015, 37 'sessions': ['2015'], 38 }, 39 { 40 'name': '2016', 41 'start_year': 2016, 42 'end_year': 2017, 43 'sessions': ['2016'], 44 }, 45 { 46 'name': '2017', 47 'start_year': 2017, 48 'end_year': 2018, 49 'sessions': ['2017'], 50 }, 51 ], 52 'session_details': { 53 '2012': { 54 'type': 'primary', 55 'start_date': datetime.date(2012, 1, 3), 56 'end_date': datetime.date(2012, 6, 13), 57 'display_name': '2012 Regular Session', 58 '_scraped_name': '2012', 59 }, 60 '2013': { 61 'type': 'primary', 62 'start_date': datetime.date(2013, 1, 1), 63 'end_date': datetime.date(2013, 7, 3), 64 'display_name': '2013 Regular Session', 65 '_scraped_name': '2013', 66 }, 67 '2014': { 68 'type': 'primary', 69 'start_date': datetime.date(2014, 1, 7), 70 'end_date': datetime.date(2014, 6, 21), 71 'display_name': '2014 Regular Session', 72 '_scraped_name': '2014', 73 }, 74 '2015': { 75 'type': 'primary', 76 'start_date': datetime.date(2015, 1, 6), 77 'end_date': datetime.date(2015, 6, 25), 78 'display_name': '2015 Regular Session', 79 '_scraped_name': '2015', 80 }, 81 '2016': { 82 'type': 'primary', 83 'start_date': datetime.date(2016, 1, 5), 84 'display_name': '2016 Regular Session', 85 '_scraped_name': '2016', 86 }, 87 '2017': { 88 'type': 'primary', 89 'display_name': '2017 Regular Session', 90 '_scraped_name': '2017', 91 }, 92 }, 93 'feature_flags': ['subjects', 'events', 'influenceexplorer'], 94 '_ignored_scraped_sessions': [ 95 '2015', 96 '2014', 97 '2013', 98 '2012', 99 '2011', 100 '2010', 101 '2009', 102 '2008', 103 '2007' 104 ], 105 } 106 ``` Path: `openstates/ri/__init__.py` Content: ``` 1 from pupa.scrape import Jurisdiction, Organization 2 3 from openstates.utils import url_xpath 4 5 from .bills import RIBillScraper 6 from .events import RIEventScraper 7 from .people import RIPersonScraper 8 from .committees import RICommitteeScraper 9 10 11 class RhodeIsland(Jurisdiction): 12 division_id = "ocd-division/country:us/state:ri" 13 classification = "government" 14 name = "Rhode Island" 15 
url = "http://www.ri.gov/" 16 scrapers = { 17 'bills': RIBillScraper, 18 'events': RIEventScraper, 19 'people': RIPersonScraper, 20 'committees': RICommitteeScraper, 21 } 22 parties = [ 23 {'name': 'Republican'}, 24 {'name': 'Democratic'} 25 ] 26 legislative_sessions = [ 27 { 28 "_scraped_name": "2012", 29 "classification": "primary", 30 "end_date": "2012-06-13", 31 "identifier": "2012", 32 "name": "2012 Regular Session", 33 "start_date": "2012-01-03" 34 }, 35 { 36 "_scraped_name": "2013", 37 "classification": "primary", 38 "end_date": "2013-07-03", 39 "identifier": "2013", 40 "name": "2013 Regular Session", 41 "start_date": "2013-01-01" 42 }, 43 { 44 "_scraped_name": "2014", 45 "classification": "primary", 46 "end_date": "2014-06-21", 47 "identifier": "2014", 48 "name": "2014 Regular Session", 49 "start_date": "2014-01-07" 50 }, 51 { 52 "_scraped_name": "2015", 53 "classification": "primary", 54 "end_date": "2015-06-25", 55 "identifier": "2015", 56 "name": "2015 Regular Session", 57 "start_date": "2015-01-06" 58 }, 59 { 60 "_scraped_name": "2016", 61 "classification": "primary", 62 "identifier": "2016", 63 "name": "2016 Regular Session", 64 "start_date": "2016-01-05" 65 }, 66 { 67 "_scraped_name": "2017", 68 "classification": "primary", 69 "identifier": "2017", 70 "name": "2017 Regular Session", 71 "start_date": "2017-01-03", 72 "end_date": "2017-06-30", 73 } 74 ] 75 ignored_scraped_sessions = [ 76 "2015", 77 "2014", 78 "2013", 79 "2012", 80 "2011", 81 "2010", 82 "2009", 83 "2008", 84 "2007" 85 ] 86 87 def get_organizations(self): 88 legislature_name = "Rhode Island General Assembly" 89 lower_chamber_name = "House of Representatives" 90 lower_seats = 75 91 lower_title = "Representative" 92 upper_chamber_name = "Senate" 93 upper_seats = 38 94 upper_title = "Senator" 95 96 legislature = Organization(name=legislature_name, 97 classification="legislature") 98 executive = Organization(name='Office of the Governor', 99 classification="executive") 100 upper = Organization(upper_chamber_name, classification='upper', 101 parent_id=legislature._id) 102 lower = Organization(lower_chamber_name, classification='lower', 103 parent_id=legislature._id) 104 105 for n in range(1, upper_seats + 1): 106 upper.add_post( 107 label=str(n), role=upper_title, 108 division_id='{}/sldu:{}'.format(self.division_id, n)) 109 for n in range(1, lower_seats + 1): 110 lower.add_post( 111 label=str(n), role=lower_title, 112 division_id='{}/sldl:{}'.format(self.division_id, n)) 113 114 yield legislature 115 yield executive 116 yield upper 117 yield lower 118 119 def get_session_list(self): 120 return url_xpath( 121 'http://status.rilin.state.ri.us/bill_history.aspx?mode=previous', 122 '//select[@name="ctl00$rilinContent$cbYear"]/option/text()') 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/billy_metadata/ri.py b/billy_metadata/ri.py --- a/billy_metadata/ri.py +++ b/billy_metadata/ri.py @@ -46,7 +46,7 @@ 'name': '2017', 'start_year': 2017, 'end_year': 2018, - 'sessions': ['2017'], + 'sessions': ['2017', '2018'], }, ], 'session_details': { @@ -89,6 +89,11 @@ 'display_name': '2017 Regular Session', '_scraped_name': '2017', }, + '2018': { + 'type': 'primary', + 'display_name': '2018 Regular Session', + '_scraped_name': '2018', + }, }, 'feature_flags': ['subjects', 'events', 'influenceexplorer'], '_ignored_scraped_sessions': [ diff --git a/openstates/ri/__init__.py b/openstates/ri/__init__.py --- a/openstates/ri/__init__.py +++ b/openstates/ri/__init__.py @@ -70,7 +70,14 @@ "name": "2017 Regular Session", "start_date": "2017-01-03", "end_date": "2017-06-30", - } + }, + { + "_scraped_name": "2018", + "classification": "primary", + "identifier": "2018", + "name": "2018 Regular Session", + "start_date": "2018-01-02", + }, ] ignored_scraped_sessions = [ "2015",
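The CRITICAL message in the issue comes from pupa's session reconciliation: every session string the scraper reports must appear either in `legislative_sessions` (via `_scraped_name`) or in `ignored_scraped_sessions`, which is why the patch adds a 2018 entry to both metadata files. A rough sketch of that check (not pupa's exact code), with `juris` standing in for the `RhodeIsland` jurisdiction instance:

```python
scraped = set(juris.get_session_list())                      # e.g. {"2018", "2017", ...}
declared = {s["_scraped_name"] for s in juris.legislative_sessions}
declared |= set(juris.ignored_scraped_sessions)

missing = scraped - declared
if missing:
    raise SystemExit(
        "Session(s) %s were reported by get_session_list() but were not "
        "found in legislative_sessions or ignored_scraped_sessions"
        % ", ".join(sorted(missing))
    )
```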
{"golden_diff": "diff --git a/billy_metadata/ri.py b/billy_metadata/ri.py\n--- a/billy_metadata/ri.py\n+++ b/billy_metadata/ri.py\n@@ -46,7 +46,7 @@\n 'name': '2017',\n 'start_year': 2017,\n 'end_year': 2018,\n- 'sessions': ['2017'],\n+ 'sessions': ['2017', '2018'],\n },\n ],\n 'session_details': {\n@@ -89,6 +89,11 @@\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017',\n },\n+ '2018': {\n+ 'type': 'primary',\n+ 'display_name': '2018 Regular Session',\n+ '_scraped_name': '2018',\n+ },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [\ndiff --git a/openstates/ri/__init__.py b/openstates/ri/__init__.py\n--- a/openstates/ri/__init__.py\n+++ b/openstates/ri/__init__.py\n@@ -70,7 +70,14 @@\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-03\",\n \"end_date\": \"2017-06-30\",\n- }\n+ },\n+ {\n+ \"_scraped_name\": \"2018\",\n+ \"classification\": \"primary\",\n+ \"identifier\": \"2018\",\n+ \"name\": \"2018 Regular Session\",\n+ \"start_date\": \"2018-01-02\",\n+ },\n ]\n ignored_scraped_sessions = [\n \"2015\",\n", "issue": "RI failing since at least 2018-01-02\nRI has been failing since 2018-01-02\n\nBased on automated runs it appears that RI has not run successfully in 2 days (2018-01-02).\n\n\n```\n no pupa_settings on path, using defaults\nri (scrape, import)\n bills: {}\n people: {}\n committees: {}\n23:00:36 CRITICAL pupa: Session(s) 2018 were reported by RhodeIsland.get_session_list() but were not found in RhodeIsland.legislative_sessions or RhodeIsland.ignored_scraped_sessions.\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import datetime\n\nmetadata = {\n 'name': 'Rhode Island',\n 'abbreviation': 'ri',\n 'legislature_name': 'Rhode Island General Assembly',\n 'legislature_url': 'http://www.rilin.state.ri.us/',\n 'capitol_timezone': 'America/New_York',\n 'chambers': {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n 'terms': [\n {\n 'name': '2012',\n 'start_year': 2012,\n 'end_year': 2012,\n 'sessions': ['2012'],\n },\n {\n 'name': '2013',\n 'start_year': 2013,\n 'end_year': 2013,\n 'sessions': ['2013'],\n },\n\n {\n 'name': '2014',\n 'start_year': 2014,\n 'end_year': 2014,\n 'sessions': ['2014'],\n },\n {\n 'name': '2015',\n 'start_year': 2015,\n 'end_year': 2015,\n 'sessions': ['2015'],\n },\n {\n 'name': '2016',\n 'start_year': 2016,\n 'end_year': 2017,\n 'sessions': ['2016'],\n },\n {\n 'name': '2017',\n 'start_year': 2017,\n 'end_year': 2018,\n 'sessions': ['2017'],\n },\n ],\n 'session_details': {\n '2012': {\n 'type': 'primary',\n 'start_date': datetime.date(2012, 1, 3),\n 'end_date': datetime.date(2012, 6, 13),\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012',\n },\n '2013': {\n 'type': 'primary',\n 'start_date': datetime.date(2013, 1, 1),\n 'end_date': datetime.date(2013, 7, 3),\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013',\n },\n '2014': {\n 'type': 'primary',\n 'start_date': datetime.date(2014, 1, 7),\n 'end_date': datetime.date(2014, 6, 21),\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014',\n },\n '2015': {\n 'type': 'primary',\n 'start_date': datetime.date(2015, 1, 6),\n 'end_date': datetime.date(2015, 6, 25),\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015',\n },\n '2016': {\n 'type': 'primary',\n 'start_date': datetime.date(2016, 1, 5),\n 'display_name': '2016 Regular Session',\n '_scraped_name': 
'2016',\n },\n '2017': {\n 'type': 'primary',\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017',\n },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [\n '2015',\n '2014',\n '2013',\n '2012',\n '2011',\n '2010',\n '2009',\n '2008',\n '2007'\n ],\n}\n", "path": "billy_metadata/ri.py"}, {"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import RIBillScraper\nfrom .events import RIEventScraper\nfrom .people import RIPersonScraper\nfrom .committees import RICommitteeScraper\n\n\nclass RhodeIsland(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ri\"\n classification = \"government\"\n name = \"Rhode Island\"\n url = \"http://www.ri.gov/\"\n scrapers = {\n 'bills': RIBillScraper,\n 'events': RIEventScraper,\n 'people': RIPersonScraper,\n 'committees': RICommitteeScraper,\n }\n parties = [\n {'name': 'Republican'},\n {'name': 'Democratic'}\n ]\n legislative_sessions = [\n {\n \"_scraped_name\": \"2012\",\n \"classification\": \"primary\",\n \"end_date\": \"2012-06-13\",\n \"identifier\": \"2012\",\n \"name\": \"2012 Regular Session\",\n \"start_date\": \"2012-01-03\"\n },\n {\n \"_scraped_name\": \"2013\",\n \"classification\": \"primary\",\n \"end_date\": \"2013-07-03\",\n \"identifier\": \"2013\",\n \"name\": \"2013 Regular Session\",\n \"start_date\": \"2013-01-01\"\n },\n {\n \"_scraped_name\": \"2014\",\n \"classification\": \"primary\",\n \"end_date\": \"2014-06-21\",\n \"identifier\": \"2014\",\n \"name\": \"2014 Regular Session\",\n \"start_date\": \"2014-01-07\"\n },\n {\n \"_scraped_name\": \"2015\",\n \"classification\": \"primary\",\n \"end_date\": \"2015-06-25\",\n \"identifier\": \"2015\",\n \"name\": \"2015 Regular Session\",\n \"start_date\": \"2015-01-06\"\n },\n {\n \"_scraped_name\": \"2016\",\n \"classification\": \"primary\",\n \"identifier\": \"2016\",\n \"name\": \"2016 Regular Session\",\n \"start_date\": \"2016-01-05\"\n },\n {\n \"_scraped_name\": \"2017\",\n \"classification\": \"primary\",\n \"identifier\": \"2017\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-03\",\n \"end_date\": \"2017-06-30\",\n }\n ]\n ignored_scraped_sessions = [\n \"2015\",\n \"2014\",\n \"2013\",\n \"2012\",\n \"2011\",\n \"2010\",\n \"2009\",\n \"2008\",\n \"2007\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Rhode Island General Assembly\"\n lower_chamber_name = \"House of Representatives\"\n lower_seats = 75\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 38\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n executive = Organization(name='Office of the Governor',\n classification=\"executive\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield executive\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath(\n 'http://status.rilin.state.ri.us/bill_history.aspx?mode=previous',\n 
'//select[@name=\"ctl00$rilinContent$cbYear\"]/option/text()')\n", "path": "openstates/ri/__init__.py"}], "after_files": [{"content": "import datetime\n\nmetadata = {\n 'name': 'Rhode Island',\n 'abbreviation': 'ri',\n 'legislature_name': 'Rhode Island General Assembly',\n 'legislature_url': 'http://www.rilin.state.ri.us/',\n 'capitol_timezone': 'America/New_York',\n 'chambers': {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n 'terms': [\n {\n 'name': '2012',\n 'start_year': 2012,\n 'end_year': 2012,\n 'sessions': ['2012'],\n },\n {\n 'name': '2013',\n 'start_year': 2013,\n 'end_year': 2013,\n 'sessions': ['2013'],\n },\n\n {\n 'name': '2014',\n 'start_year': 2014,\n 'end_year': 2014,\n 'sessions': ['2014'],\n },\n {\n 'name': '2015',\n 'start_year': 2015,\n 'end_year': 2015,\n 'sessions': ['2015'],\n },\n {\n 'name': '2016',\n 'start_year': 2016,\n 'end_year': 2017,\n 'sessions': ['2016'],\n },\n {\n 'name': '2017',\n 'start_year': 2017,\n 'end_year': 2018,\n 'sessions': ['2017', '2018'],\n },\n ],\n 'session_details': {\n '2012': {\n 'type': 'primary',\n 'start_date': datetime.date(2012, 1, 3),\n 'end_date': datetime.date(2012, 6, 13),\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012',\n },\n '2013': {\n 'type': 'primary',\n 'start_date': datetime.date(2013, 1, 1),\n 'end_date': datetime.date(2013, 7, 3),\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013',\n },\n '2014': {\n 'type': 'primary',\n 'start_date': datetime.date(2014, 1, 7),\n 'end_date': datetime.date(2014, 6, 21),\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014',\n },\n '2015': {\n 'type': 'primary',\n 'start_date': datetime.date(2015, 1, 6),\n 'end_date': datetime.date(2015, 6, 25),\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015',\n },\n '2016': {\n 'type': 'primary',\n 'start_date': datetime.date(2016, 1, 5),\n 'display_name': '2016 Regular Session',\n '_scraped_name': '2016',\n },\n '2017': {\n 'type': 'primary',\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017',\n },\n '2018': {\n 'type': 'primary',\n 'display_name': '2018 Regular Session',\n '_scraped_name': '2018',\n },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [\n '2015',\n '2014',\n '2013',\n '2012',\n '2011',\n '2010',\n '2009',\n '2008',\n '2007'\n ],\n}\n", "path": "billy_metadata/ri.py"}, {"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import RIBillScraper\nfrom .events import RIEventScraper\nfrom .people import RIPersonScraper\nfrom .committees import RICommitteeScraper\n\n\nclass RhodeIsland(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ri\"\n classification = \"government\"\n name = \"Rhode Island\"\n url = \"http://www.ri.gov/\"\n scrapers = {\n 'bills': RIBillScraper,\n 'events': RIEventScraper,\n 'people': RIPersonScraper,\n 'committees': RICommitteeScraper,\n }\n parties = [\n {'name': 'Republican'},\n {'name': 'Democratic'}\n ]\n legislative_sessions = [\n {\n \"_scraped_name\": \"2012\",\n \"classification\": \"primary\",\n \"end_date\": \"2012-06-13\",\n \"identifier\": \"2012\",\n \"name\": \"2012 Regular Session\",\n \"start_date\": \"2012-01-03\"\n },\n {\n \"_scraped_name\": \"2013\",\n \"classification\": \"primary\",\n \"end_date\": \"2013-07-03\",\n \"identifier\": \"2013\",\n \"name\": \"2013 Regular Session\",\n \"start_date\": 
\"2013-01-01\"\n },\n {\n \"_scraped_name\": \"2014\",\n \"classification\": \"primary\",\n \"end_date\": \"2014-06-21\",\n \"identifier\": \"2014\",\n \"name\": \"2014 Regular Session\",\n \"start_date\": \"2014-01-07\"\n },\n {\n \"_scraped_name\": \"2015\",\n \"classification\": \"primary\",\n \"end_date\": \"2015-06-25\",\n \"identifier\": \"2015\",\n \"name\": \"2015 Regular Session\",\n \"start_date\": \"2015-01-06\"\n },\n {\n \"_scraped_name\": \"2016\",\n \"classification\": \"primary\",\n \"identifier\": \"2016\",\n \"name\": \"2016 Regular Session\",\n \"start_date\": \"2016-01-05\"\n },\n {\n \"_scraped_name\": \"2017\",\n \"classification\": \"primary\",\n \"identifier\": \"2017\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-03\",\n \"end_date\": \"2017-06-30\",\n },\n {\n \"_scraped_name\": \"2018\",\n \"classification\": \"primary\",\n \"identifier\": \"2018\",\n \"name\": \"2018 Regular Session\",\n \"start_date\": \"2018-01-02\",\n },\n ]\n ignored_scraped_sessions = [\n \"2015\",\n \"2014\",\n \"2013\",\n \"2012\",\n \"2011\",\n \"2010\",\n \"2009\",\n \"2008\",\n \"2007\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Rhode Island General Assembly\"\n lower_chamber_name = \"House of Representatives\"\n lower_seats = 75\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 38\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n executive = Organization(name='Office of the Governor',\n classification=\"executive\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield executive\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath(\n 'http://status.rilin.state.ri.us/bill_history.aspx?mode=previous',\n '//select[@name=\"ctl00$rilinContent$cbYear\"]/option/text()')\n", "path": "openstates/ri/__init__.py"}]}
num_tokens: 2887
num_tokens_diff: 431

problem_id: gh_patches_debug_15196
source: rasdani/github-patches
task_type: git_diff
in_source_id: RedHatInsights__insights-core-3344
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pinned jinja2 version is no longer supported and conflicts with the latest MarkupSafe **Current behavior** insights-core has pinned the jinja2 version to <=2.11.3 in #3057. This version is [no longer supported](https://github.com/pallets/jinja/issues/1585#issuecomment-1043892628). **Expected behavior** insights-core can be used with a maintained jinja2 version in applications that depend both on insights-core and jinja2. **Impact** jinja2-2.11.3 fails to import with the latest version of MarkupSafe, 2.1.0: ``` ... .tox/unit-py38/lib/python3.8/site-packages/jinja2/filters.py:13: in <module> from markupsafe import soft_unicode E ImportError: cannot import name 'soft_unicode' from 'markupsafe' (/builds/ccx/ccx-rules-ocp/.tox/unit-py38/lib64/python3.8/site-packages/markupsafe/__init__.py ``` All projects that depend on insights-core have to pin unsupported versions of MarkupSafe now. I would argue that it is important to keep up-to-date for a package that is used for secure handling of user input. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 from setuptools import setup, find_packages 3 4 __here__ = os.path.dirname(os.path.abspath(__file__)) 5 6 package_info = dict.fromkeys(["RELEASE", "COMMIT", "VERSION", "NAME"]) 7 8 for name in package_info: 9 with open(os.path.join(__here__, "insights", name)) as f: 10 package_info[name] = f.read().strip() 11 12 entry_points = { 13 'console_scripts': [ 14 'insights-collect = insights.collect:main', 15 'insights-run = insights:main', 16 'insights = insights.command_parser:main', 17 'insights-cat = insights.tools.cat:main', 18 'insights-dupkeycheck = insights.tools.dupkeycheck:main', 19 'insights-inspect = insights.tools.insights_inspect:main', 20 'insights-info = insights.tools.query:main', 21 'insights-ocpshell= insights.ocpshell:main', 22 'mangle = insights.util.mangle:main' 23 ] 24 } 25 26 runtime = set([ 27 'six', 28 'requests', 29 'redis', 30 'cachecontrol', 31 'cachecontrol[redis]', 32 'cachecontrol[filecache]', 33 'defusedxml', 34 'lockfile', 35 'jinja2<=2.11.3', 36 'pyyaml>=3.10,<=3.13; python_version < "2.7"', 37 'pyyaml; python_version >= "2.7"', 38 ]) 39 40 41 def maybe_require(pkg): 42 try: 43 __import__(pkg) 44 except ImportError: 45 runtime.add(pkg) 46 47 48 maybe_require("importlib") 49 maybe_require("argparse") 50 51 52 client = set([ 53 'requests', 54 'python-gnupg==0.4.6', 55 'oyaml' 56 ]) 57 58 develop = set([ 59 'wheel', 60 ]) 61 62 docs = set([ 63 'docutils', 64 'Sphinx', 65 'nbsphinx', 66 'sphinx_rtd_theme', 67 'ipython', 68 'colorama', 69 'jinja2<=2.11.3', 70 'Pygments', 71 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19 72 # https://github.com/davidhalter/jedi/issues/1714 73 ]) 74 75 # python 2.6 requires setuptools~=36.8.0 to support this syntax 76 testing = set([ 77 'coverage==4.3.4; python_version < "2.7"', 78 'coverage; python_version >= "2.7"', 79 'pytest==3.0.6; python_version < "2.7"', 80 'pytest~=4.6.0; python_version == "2.7"', 81 'pytest; python_version >= "3"', 82 'pytest-cov==2.4.0; python_version < "2.7"', 83 'pytest-cov; python_version >= "2.7"', 84 'mock==2.0.0', 85 ]) 86 87 cluster = set([ 88 'ansible', 89 'pandas', 90 'colorama', 91 ]) 92 93 openshift = set([ 94 'openshift' 95 ]) 96 97 linting = set([ 98 'flake8==2.6.2; python_version < "2.7"', 99 
'flake8; python_version >= "2.7"' 100 ]) 101 102 optional = set([ 103 'python-cjson', 104 'python-logstash', 105 'python-statsd', 106 'watchdog', 107 ]) 108 109 if __name__ == "__main__": 110 # allows for runtime modification of rpm name 111 name = os.environ.get("INSIGHTS_CORE_NAME", package_info["NAME"]) 112 113 setup( 114 name=name, 115 version=package_info["VERSION"], 116 description="Insights Core is a data collection and analysis framework", 117 long_description=open("README.rst").read(), 118 url="https://github.com/redhatinsights/insights-core", 119 author="Red Hat, Inc.", 120 author_email="[email protected]", 121 packages=find_packages(), 122 install_requires=list(runtime), 123 package_data={'': ['LICENSE']}, 124 license='Apache 2.0', 125 extras_require={ 126 'develop': list(runtime | develop | client | docs | linting | testing | cluster), 127 'develop26': list(runtime | develop | client | linting | testing | cluster), 128 'client': list(runtime | client), 129 'client-develop': list(runtime | develop | client | linting | testing), 130 'cluster': list(runtime | cluster), 131 'openshift': list(runtime | openshift), 132 'optional': list(optional), 133 'docs': list(docs), 134 'linting': list(linting | client), 135 'testing': list(testing | client) 136 }, 137 classifiers=[ 138 'Development Status :: 5 - Production/Stable', 139 'Intended Audience :: Developers', 140 'Natural Language :: English', 141 'License :: OSI Approved :: Apache Software License', 142 'Programming Language :: Python', 143 'Programming Language :: Python :: 2.6', 144 'Programming Language :: Python :: 2.7', 145 'Programming Language :: Python :: 3.3', 146 'Programming Language :: Python :: 3.4', 147 'Programming Language :: Python :: 3.5', 148 'Programming Language :: Python :: 3.6' 149 ], 150 entry_points=entry_points, 151 include_package_data=True 152 ) 153 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,7 +32,8 @@ 'cachecontrol[filecache]', 'defusedxml', 'lockfile', - 'jinja2<=2.11.3', + 'jinja2<=2.11.3; python_version <= "2.7"', + 'jinja2; python_version > "2.7"', 'pyyaml>=3.10,<=3.13; python_version < "2.7"', 'pyyaml; python_version >= "2.7"', ]) @@ -66,7 +67,6 @@ 'sphinx_rtd_theme', 'ipython', 'colorama', - 'jinja2<=2.11.3', 'Pygments', 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19 # https://github.com/davidhalter/jedi/issues/1714
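The replacement pin relies on PEP 508 environment markers: the `; python_version <= "2.7"` suffix makes the old pin apply only on legacy interpreters, while Python 3 installs resolve an unpinned, maintained jinja2. Markers can also be evaluated programmatically, assuming the `packaging` distribution is available in the environment:

```python
from packaging.markers import Marker

old_pin = Marker('python_version <= "2.7"')
print(old_pin.evaluate())  # False on any Python 3 interpreter, so the pin is skipped
```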
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,8 @@\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n- 'jinja2<=2.11.3',\n+ 'jinja2<=2.11.3; python_version <= \"2.7\"',\n+ 'jinja2; python_version > \"2.7\"',\n 'pyyaml>=3.10,<=3.13; python_version < \"2.7\"',\n 'pyyaml; python_version >= \"2.7\"',\n ])\n@@ -66,7 +67,6 @@\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n- 'jinja2<=2.11.3',\n 'Pygments',\n 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19\n # https://github.com/davidhalter/jedi/issues/1714\n", "issue": "Pinned jinja2 version is no longer supported and conflicts with the latest MarkupSafe\n**Current behavior**\r\n\r\ninsights-core has pinned the jinja2 version to <=2.11.3 in #3057. This version is [no longer supported](https://github.com/pallets/jinja/issues/1585#issuecomment-1043892628).\r\n\r\n**Expected behavior**\r\n\r\ninsights-core can be used with a maintained jinja2 version in applications that depend both on insights-core and jinja2.\r\n\r\n**Impact**\r\n\r\njinja2-2.11.3 fails to import with the latest version of MarkupSafe, 2.1.0:\r\n\r\n```\r\n...\r\n.tox/unit-py38/lib/python3.8/site-packages/jinja2/filters.py:13: in <module>\r\n from markupsafe import soft_unicode\r\nE ImportError: cannot import name 'soft_unicode' from 'markupsafe' (/builds/ccx/ccx-rules-ocp/.tox/unit-py38/lib64/python3.8/site-packages/markupsafe/__init__.py\r\n```\r\n\r\nAll projects that depend on insights-core have to pin unsupported versions of MarkupSafe now. I would argue that it is important to keep up-to-date for a package that is used for secure handling of user input.\n", "before_files": [{"content": "import os\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2<=2.11.3',\n 'pyyaml>=3.10,<=3.13; python_version < \"2.7\"',\n 'pyyaml; python_version >= \"2.7\"',\n])\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'python-gnupg==0.4.6',\n 'oyaml'\n])\n\ndevelop = set([\n 'wheel',\n])\n\ndocs = set([\n 'docutils',\n 'Sphinx',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'jinja2<=2.11.3',\n 'Pygments',\n 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19\n # https://github.com/davidhalter/jedi/issues/1714\n])\n\n# python 2.6 requires setuptools~=36.8.0 to support this syntax\ntesting = set([\n 'coverage==4.3.4; python_version < \"2.7\"',\n 'coverage; python_version >= \"2.7\"',\n 'pytest==3.0.6; python_version < \"2.7\"',\n 'pytest~=4.6.0; python_version == 
\"2.7\"',\n 'pytest; python_version >= \"3\"',\n 'pytest-cov==2.4.0; python_version < \"2.7\"',\n 'pytest-cov; python_version >= \"2.7\"',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2; python_version < \"2.7\"',\n 'flake8; python_version >= \"2.7\"'\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 'client-develop': list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-collect = insights.collect:main',\n 'insights-run = insights:main',\n 'insights = insights.command_parser:main',\n 'insights-cat = insights.tools.cat:main',\n 'insights-dupkeycheck = insights.tools.dupkeycheck:main',\n 'insights-inspect = insights.tools.insights_inspect:main',\n 'insights-info = insights.tools.query:main',\n 'insights-ocpshell= insights.ocpshell:main',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'six',\n 'requests',\n 'redis',\n 'cachecontrol',\n 'cachecontrol[redis]',\n 'cachecontrol[filecache]',\n 'defusedxml',\n 'lockfile',\n 'jinja2<=2.11.3; python_version <= \"2.7\"',\n 'jinja2; python_version > \"2.7\"',\n 'pyyaml>=3.10,<=3.13; python_version < \"2.7\"',\n 'pyyaml; python_version >= \"2.7\"',\n])\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'python-gnupg==0.4.6',\n 'oyaml'\n])\n\ndevelop = set([\n 
'wheel',\n])\n\ndocs = set([\n 'docutils',\n 'Sphinx',\n 'nbsphinx',\n 'sphinx_rtd_theme',\n 'ipython',\n 'colorama',\n 'Pygments',\n 'jedi<0.18.0', # Open issue with jedi 0.18.0 and iPython <= 7.19\n # https://github.com/davidhalter/jedi/issues/1714\n])\n\n# python 2.6 requires setuptools~=36.8.0 to support this syntax\ntesting = set([\n 'coverage==4.3.4; python_version < \"2.7\"',\n 'coverage; python_version >= \"2.7\"',\n 'pytest==3.0.6; python_version < \"2.7\"',\n 'pytest~=4.6.0; python_version == \"2.7\"',\n 'pytest; python_version >= \"3\"',\n 'pytest-cov==2.4.0; python_version < \"2.7\"',\n 'pytest-cov; python_version >= \"2.7\"',\n 'mock==2.0.0',\n])\n\ncluster = set([\n 'ansible',\n 'pandas',\n 'colorama',\n])\n\nopenshift = set([\n 'openshift'\n])\n\nlinting = set([\n 'flake8==2.6.2; python_version < \"2.7\"',\n 'flake8; python_version >= \"2.7\"'\n])\n\noptional = set([\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing | cluster),\n 'develop26': list(runtime | develop | client | linting | testing | cluster),\n 'client': list(runtime | client),\n 'client-develop': list(runtime | develop | client | linting | testing),\n 'cluster': list(runtime | cluster),\n 'openshift': list(runtime | openshift),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}]}
2,050
243
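Note on the setup.py record above: its extras_require entries are composed by set union (`runtime | develop | testing | ...`), with PEP 508 environment markers selecting pins per Python release. A minimal sketch of that composition pattern, reusing a few of the record's own package names (the groupings and version pins here are illustrative, not the record's exact lists):

```python
from setuptools import setup, find_packages

# Base dependencies; PEP 508 markers choose pins per interpreter version.
runtime = set([
    "six",
    'pyyaml>=3.10,<=3.13; python_version < "2.7"',
    'pyyaml; python_version >= "2.7"',
])

testing = set([
    "pytest",
    "pytest-cov",
])

docs = set([
    "Sphinx",
])

setup(
    name="example-package",  # illustrative name, not the record's
    version="0.0.1",
    packages=find_packages(),
    install_requires=list(runtime),
    extras_require={
        # Set union deduplicates packages shared between extras.
        "testing": list(runtime | testing),
        "develop": list(runtime | testing | docs),
    },
)
```

Composing extras from sets keeps overlapping groups (client, linting, cluster, and so on in the record) deduplicated without repeating pins in each list.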
gh_patches_debug_32097
rasdani/github-patches
git_diff
dotkom__onlineweb4-1208
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Posters issues - [ ] Does not send mail [src](https://github.com/dotKom/onlineweb4/blob/f4c45b9d07bd2c20533dec840a3bdcb3b8d50dd3/apps/posters/dashboard/views.py#L106) - [x] Group is set to "Komiteer" [src](https://github.com/dotKom/onlineweb4/blob/f4c45b9d07bd2c20533dec840a3bdcb3b8d50dd3/apps/posters/dashboard/views.py#L93) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/posters/dashboard/views.py` Content: ``` 1 # -*- encoding: utf-8 -*- 2 3 import json 4 5 from django.conf import settings 6 from django.contrib import messages 7 from django.contrib.auth.decorators import login_required 8 from django.contrib.auth.models import Group 9 from django.core.mail import EmailMessage 10 from django.core.exceptions import PermissionDenied, ObjectDoesNotExist 11 from django.core.urlresolvers import reverse 12 from django.forms.models import model_to_dict 13 from django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect, HttpResponse 14 from django.utils import timezone 15 from django.utils.translation import ugettext as _ 16 from django.views.decorators.csrf import ensure_csrf_cookie 17 18 from guardian.decorators import permission_required 19 from guardian.models import UserObjectPermission, GroupObjectPermission 20 # from guardian.core import ObjectPermissionChecker 21 import guardian 22 23 from datetime import datetime, timedelta 24 25 from apps.authentication.models import OnlineUser as User 26 from apps.dashboard.tools import has_access, get_base_context 27 from apps.posters.models import Poster 28 from apps.posters.forms import AddForm, AddPosterForm, AddBongForm, AddOtherForm, EditPosterForm 29 # from apps.dashboard.posters.models import PosterForm 30 from apps.companyprofile.models import Company 31 from apps.posters.models import Poster, OrderMixin 32 from apps.posters.permissions import has_view_perms, has_view_all_perms 33 34 35 @ensure_csrf_cookie 36 @login_required 37 @permission_required('posters.overview_poster_order', return_403=True) 38 def index(request): 39 if request.is_ajax(): 40 do_ajax_shit = True 41 42 # The group with members who should populate the dropdownlist 43 group = Group.objects.get(name='proKom') 44 users_to_populate = group.user_set.all() 45 46 context = get_base_context(request) 47 48 # View to show if user not in committee, but wanting to see own orders 49 if not has_view_all_perms(request.user): 50 context['your_orders'] = [x for x in Poster.objects.filter(ordered_by=request.user) if request.user.has_perm('view_poster_order', x)] 51 return render(request, 'posters/dashboard/index.html', context) 52 53 orders = Poster.objects.all() 54 55 context['new_orders'] = orders.filter(assigned_to=None) 56 context['active_orders'] = orders.filter(finished=False).exclude(assigned_to=None) 57 context['hanging_orders'] = orders.filter(finished=True, 58 display_to__lte=datetime.now()+timedelta(days=3)) 59 60 context['workers'] = User.objects.filter(groups=Group.objects.get(name='proKom')) 61 62 return render(request, 'posters/dashboard/index.html', context) 63 64 65 @ensure_csrf_cookie 66 @login_required 67 @permission_required('posters.add_poster_order', return_403=True) 68 def add(request, order_type=0): 69 order_type = int(order_type) 70 context = get_base_context(request) 71 type_names = ("Plakat", "Bong", "Generell ") 72 type_name = 
type_names[order_type-1] 73 74 poster = Poster() 75 form = None 76 77 if request.method == 'POST': 78 if order_type == 1: 79 form = AddPosterForm(data=request.POST, instance=poster) 80 elif order_type == 2: 81 form = AddBongForm(data=request.POST, instance=poster) 82 elif order_type == 3: 83 # poster = GeneralOrder() 84 form = AddOtherForm(data=request.POST, instance=poster) 85 86 if form.is_valid(): 87 poster = form.save(commit=False) 88 if request.POST.get('company'): 89 poster.company = Company.objects.get(pk=request.POST.get('company')) 90 poster.ordered_by = request.user 91 # Should look for a more kosher solution 92 poster.ordered_committee = request.user.groups.exclude(name='Komiteer').filter(name__contains="Kom")[0] 93 poster.order_type = order_type 94 95 poster.save() 96 97 # for b in poster.__class__.__bases__: 98 # poster_mixin = OrderMixin.objects.get(id=poster.id) 99 # Let this user have permissions to show this order 100 UserObjectPermission.objects.assign_perm('view_poster_order', obj=poster, user=request.user) 101 GroupObjectPermission.objects.assign_perm('view_poster_order', obj=poster, group=Group.objects.get(name='proKom')) 102 103 title = unicode(poster) 104 105 # The great sending of emails 106 subject = '[ProKom] Ny bestilling for %s' % title 107 email_message = '%(message)s%(signature)s' % { 108 'message': _(''' 109 Det har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. Dette er bestilling nummer %(id)s. 110 \n 111 Antall og type: %(num)s * %(order_type)s\n 112 Arrangement: %(event_name)s\n 113 Bestilt av: %(ordered_by)s i %(ordered_by_committee)s\n 114 Bestilt dato: %(ordered_date)s\n 115 \n 116 For mer informasjon, sjekk ut bestillingen her: %(absolute_url)s 117 ''' 118 % { 119 'site': '', 120 'order_type': type_name.lower(), 121 'num': poster.amount, 122 'ordered_by': poster.ordered_by, 123 'ordered_by_committee': poster.ordered_committee, 124 'id': poster.id, 125 'event_name': title, 126 'ordered_date': poster.ordered_date, 127 'absolute_url': request.build_absolute_uri(poster.get_dashboard_url()) 128 } 129 ), 130 'signature': _('\n\nVennlig hilsen Linjeforeningen Online') 131 } 132 from_email = settings.EMAIL_PROKOM 133 to_emails = [settings.EMAIL_PROKOM, request.user.get_email()] 134 135 try: 136 email_sent = EmailMessage(unicode(subject), unicode(message), from_email, to_emails, []).send() 137 except: 138 email_sent = False 139 140 if email_sent: 141 messages.success(request, 'Opprettet bestilling') 142 else: 143 messages.error(request, 'Klarte ikke å sende epost, men bestillingen din ble fortsatt opprettet') 144 145 return redirect(poster.get_absolute_url()) 146 else: 147 context['form'] = form 148 return render(request, 'posters/dashboard/add.html', context) 149 150 context["order_type_name"] = type_name 151 context['order_type'] = order_type 152 context['can_edit'] = True # request.user.has_perm('posters.view_poster') 153 154 if order_type == 1: 155 form = AddPosterForm() 156 elif order_type == 2: 157 form = AddBongForm() 158 elif order_type == 3: 159 form = AddOtherForm() 160 161 forms = (AddPosterForm(), AddBongForm(), AddOtherForm()) 162 163 context['form'] = forms[order_type-1] 164 165 return render(request, 'posters/dashboard/add.html', context) 166 167 168 @ensure_csrf_cookie 169 @login_required 170 def edit(request, order_id=None): 171 context = get_base_context(request) 172 context['add_poster_form'] = EditPosterForm() 173 174 if order_id: 175 poster = get_object_or_404(Poster, pk=order_id) 176 177 if request.user != 
poster.ordered_by and 'proKom' not in request.user.groups: 178 raise PermissionDenied 179 180 if request.POST: 181 form = AddForm(request.POST, instance=poster) 182 if form.is_valid(): 183 form.save() 184 # redirect_url = redirect(poster.get_absolute_url()) 185 return HttpResponseRedirect("../detail/"+str(poster.id)) 186 187 else: 188 context["form"] = AddForm(instance=poster) 189 190 return render(request, 'posters/dashboard/add.html', context) 191 192 193 @ensure_csrf_cookie 194 @login_required 195 @permission_required('view_poster_order', (Poster, 'pk', 'order_id'), return_403=True) 196 def detail(request, order_id=None): 197 if request.is_ajax(): 198 do_ajax_shit = True 199 200 if not order_id: 201 return HttpResponse(status=400) 202 203 context = get_base_context(request) 204 poster = get_object_or_404(Poster, pk=order_id) 205 context['poster'] = poster 206 207 if not has_view_perms(request.user, poster): 208 raise PermissionDenied 209 210 order_type = poster.order_type 211 type_names = ("Plakat", "Bong", "Generell ") 212 type_name = type_names[order_type-1] 213 context["order_type_name"] = type_name 214 215 if request.method == 'POST': 216 poster_status = request.POST.get('completed') 217 if poster_status == 'true' or poster_status == 'false': 218 poster.toggle_finished() 219 220 return render(request, 'posters/dashboard/details.html', context) 221 222 223 # Ajax 224 225 226 # @ensure_csrf_cookie 227 @login_required 228 def assign_person(request): 229 if request.is_ajax(): 230 if request.method == 'POST': 231 order_id = request.POST.get('order_id') 232 orders = Poster.objects.filter(pk=order_id) 233 assign_to_id = request.POST.get('assign_to_id') 234 assign_to = User.objects.get(pk=assign_to_id) 235 236 if orders.count() == 0: 237 response_text = json.dumps({'message': _( 238 u"""Kan ikke finne en ordre med denne IDen (%s). 239 Om feilen vedvarer etter en refresh, kontakt [email protected].""") % order_id}) 240 return HttpResponse(status=412, content=response_text) 241 242 order = orders[0] 243 244 if order.finished or order.assigned_to is not None: 245 response_text = json.dumps({'message': _(u"Denne ordren er allerede behandlet.")}) 246 return HttpResponse(status=412, content=response_text) 247 248 order.assigned_to = assign_to 249 order.save() 250 251 return HttpResponse(status=200) 252 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/posters/dashboard/views.py b/apps/posters/dashboard/views.py --- a/apps/posters/dashboard/views.py +++ b/apps/posters/dashboard/views.py @@ -106,14 +106,14 @@ subject = '[ProKom] Ny bestilling for %s' % title email_message = '%(message)s%(signature)s' % { 'message': _(''' - Det har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. Dette er bestilling nummer %(id)s. - \n - Antall og type: %(num)s * %(order_type)s\n - Arrangement: %(event_name)s\n - Bestilt av: %(ordered_by)s i %(ordered_by_committee)s\n - Bestilt dato: %(ordered_date)s\n - \n - For mer informasjon, sjekk ut bestillingen her: %(absolute_url)s +Det har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. Dette er bestilling nummer %(id)s. +\n +Antall og type: %(num)s * %(order_type)s\n +Arrangement: %(event_name)s\n +Bestilt av: %(ordered_by)s i %(ordered_by_committee)s\n +Bestilt dato: %(ordered_date)s\n +\n +For mer informasjon, sjekk ut bestillingen her: %(absolute_url)s ''' % { 'site': '', @@ -130,13 +130,9 @@ 'signature': _('\n\nVennlig hilsen Linjeforeningen Online') } from_email = settings.EMAIL_PROKOM - to_emails = [settings.EMAIL_PROKOM, request.user.get_email()] - - try: - email_sent = EmailMessage(unicode(subject), unicode(message), from_email, to_emails, []).send() - except: - email_sent = False + to_emails = [settings.EMAIL_PROKOM, request.user.get_email().email] + email_sent = EmailMessage(unicode(subject), unicode(email_message), from_email, to_emails, []).send() if email_sent: messages.success(request, 'Opprettet bestilling') else:
{"golden_diff": "diff --git a/apps/posters/dashboard/views.py b/apps/posters/dashboard/views.py\n--- a/apps/posters/dashboard/views.py\n+++ b/apps/posters/dashboard/views.py\n@@ -106,14 +106,14 @@\n subject = '[ProKom] Ny bestilling for %s' % title\n email_message = '%(message)s%(signature)s' % {\n 'message': _('''\n- Det har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. Dette er bestilling nummer %(id)s.\n- \\n\n- Antall og type: %(num)s * %(order_type)s\\n\n- Arrangement: %(event_name)s\\n\n- Bestilt av: %(ordered_by)s i %(ordered_by_committee)s\\n\n- Bestilt dato: %(ordered_date)s\\n\n- \\n\n- For mer informasjon, sjekk ut bestillingen her: %(absolute_url)s\n+Det har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. Dette er bestilling nummer %(id)s.\n+\\n\n+Antall og type: %(num)s * %(order_type)s\\n\n+Arrangement: %(event_name)s\\n\n+Bestilt av: %(ordered_by)s i %(ordered_by_committee)s\\n\n+Bestilt dato: %(ordered_date)s\\n\n+\\n\n+For mer informasjon, sjekk ut bestillingen her: %(absolute_url)s\n '''\n % {\n 'site': '',\n@@ -130,13 +130,9 @@\n 'signature': _('\\n\\nVennlig hilsen Linjeforeningen Online')\n }\n from_email = settings.EMAIL_PROKOM\n- to_emails = [settings.EMAIL_PROKOM, request.user.get_email()]\n-\n- try:\n- email_sent = EmailMessage(unicode(subject), unicode(message), from_email, to_emails, []).send()\n- except:\n- email_sent = False\n+ to_emails = [settings.EMAIL_PROKOM, request.user.get_email().email]\n \n+ email_sent = EmailMessage(unicode(subject), unicode(email_message), from_email, to_emails, []).send()\n if email_sent:\n messages.success(request, 'Opprettet bestilling')\n else:\n", "issue": "Posters issues\n- [ ] Does not send mail [src](https://github.com/dotKom/onlineweb4/blob/f4c45b9d07bd2c20533dec840a3bdcb3b8d50dd3/apps/posters/dashboard/views.py#L106)\n- [x] Group is set to \"Komiteer\" [src](https://github.com/dotKom/onlineweb4/blob/f4c45b9d07bd2c20533dec840a3bdcb3b8d50dd3/apps/posters/dashboard/views.py#L93)\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.core.mail import EmailMessage\nfrom django.core.exceptions import PermissionDenied, ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.forms.models import model_to_dict\nfrom django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect, HttpResponse\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom guardian.decorators import permission_required\nfrom guardian.models import UserObjectPermission, GroupObjectPermission\n# from guardian.core import ObjectPermissionChecker\nimport guardian\n\nfrom datetime import datetime, timedelta\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.dashboard.tools import has_access, get_base_context\nfrom apps.posters.models import Poster\nfrom apps.posters.forms import AddForm, AddPosterForm, AddBongForm, AddOtherForm, EditPosterForm\n# from apps.dashboard.posters.models import PosterForm\nfrom apps.companyprofile.models import Company\nfrom apps.posters.models import Poster, OrderMixin\nfrom apps.posters.permissions import has_view_perms, 
has_view_all_perms\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('posters.overview_poster_order', return_403=True)\ndef index(request):\n if request.is_ajax():\n do_ajax_shit = True\n\n # The group with members who should populate the dropdownlist\n group = Group.objects.get(name='proKom')\n users_to_populate = group.user_set.all()\n\n context = get_base_context(request)\n\n # View to show if user not in committee, but wanting to see own orders\n if not has_view_all_perms(request.user):\n context['your_orders'] = [x for x in Poster.objects.filter(ordered_by=request.user) if request.user.has_perm('view_poster_order', x)]\n return render(request, 'posters/dashboard/index.html', context)\n\n orders = Poster.objects.all()\n\n context['new_orders'] = orders.filter(assigned_to=None)\n context['active_orders'] = orders.filter(finished=False).exclude(assigned_to=None)\n context['hanging_orders'] = orders.filter(finished=True,\n display_to__lte=datetime.now()+timedelta(days=3))\n\n context['workers'] = User.objects.filter(groups=Group.objects.get(name='proKom'))\n\n return render(request, 'posters/dashboard/index.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('posters.add_poster_order', return_403=True)\ndef add(request, order_type=0):\n order_type = int(order_type)\n context = get_base_context(request)\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n\n poster = Poster()\n form = None\n\n if request.method == 'POST':\n if order_type == 1:\n form = AddPosterForm(data=request.POST, instance=poster)\n elif order_type == 2:\n form = AddBongForm(data=request.POST, instance=poster)\n elif order_type == 3:\n # poster = GeneralOrder()\n form = AddOtherForm(data=request.POST, instance=poster)\n\n if form.is_valid():\n poster = form.save(commit=False)\n if request.POST.get('company'):\n poster.company = Company.objects.get(pk=request.POST.get('company'))\n poster.ordered_by = request.user\n # Should look for a more kosher solution\n poster.ordered_committee = request.user.groups.exclude(name='Komiteer').filter(name__contains=\"Kom\")[0]\n poster.order_type = order_type\n\n poster.save()\n\n # for b in poster.__class__.__bases__:\n # poster_mixin = OrderMixin.objects.get(id=poster.id)\n # Let this user have permissions to show this order\n UserObjectPermission.objects.assign_perm('view_poster_order', obj=poster, user=request.user)\n GroupObjectPermission.objects.assign_perm('view_poster_order', obj=poster, group=Group.objects.get(name='proKom'))\n\n title = unicode(poster)\n\n # The great sending of emails\n subject = '[ProKom] Ny bestilling for %s' % title\n email_message = '%(message)s%(signature)s' % {\n 'message': _('''\n Det har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. 
Dette er bestilling nummer %(id)s.\n \\n\n Antall og type: %(num)s * %(order_type)s\\n\n Arrangement: %(event_name)s\\n\n Bestilt av: %(ordered_by)s i %(ordered_by_committee)s\\n\n Bestilt dato: %(ordered_date)s\\n\n \\n\n For mer informasjon, sjekk ut bestillingen her: %(absolute_url)s\n '''\n % {\n 'site': '',\n 'order_type': type_name.lower(),\n 'num': poster.amount,\n 'ordered_by': poster.ordered_by,\n 'ordered_by_committee': poster.ordered_committee,\n 'id': poster.id,\n 'event_name': title,\n 'ordered_date': poster.ordered_date,\n 'absolute_url': request.build_absolute_uri(poster.get_dashboard_url())\n }\n ),\n 'signature': _('\\n\\nVennlig hilsen Linjeforeningen Online')\n }\n from_email = settings.EMAIL_PROKOM\n to_emails = [settings.EMAIL_PROKOM, request.user.get_email()]\n\n try:\n email_sent = EmailMessage(unicode(subject), unicode(message), from_email, to_emails, []).send()\n except:\n email_sent = False\n\n if email_sent:\n messages.success(request, 'Opprettet bestilling')\n else:\n messages.error(request, 'Klarte ikke \u00e5 sende epost, men bestillingen din ble fortsatt opprettet')\n\n return redirect(poster.get_absolute_url())\n else:\n context['form'] = form\n return render(request, 'posters/dashboard/add.html', context)\n\n context[\"order_type_name\"] = type_name\n context['order_type'] = order_type\n context['can_edit'] = True # request.user.has_perm('posters.view_poster')\n\n if order_type == 1:\n form = AddPosterForm()\n elif order_type == 2:\n form = AddBongForm()\n elif order_type == 3:\n form = AddOtherForm()\n\n forms = (AddPosterForm(), AddBongForm(), AddOtherForm())\n\n context['form'] = forms[order_type-1]\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\ndef edit(request, order_id=None):\n context = get_base_context(request)\n context['add_poster_form'] = EditPosterForm()\n\n if order_id:\n poster = get_object_or_404(Poster, pk=order_id)\n\n if request.user != poster.ordered_by and 'proKom' not in request.user.groups:\n raise PermissionDenied\n\n if request.POST:\n form = AddForm(request.POST, instance=poster)\n if form.is_valid():\n form.save()\n # redirect_url = redirect(poster.get_absolute_url())\n return HttpResponseRedirect(\"../detail/\"+str(poster.id))\n\n else:\n context[\"form\"] = AddForm(instance=poster)\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('view_poster_order', (Poster, 'pk', 'order_id'), return_403=True)\ndef detail(request, order_id=None):\n if request.is_ajax():\n do_ajax_shit = True\n\n if not order_id:\n return HttpResponse(status=400)\n\n context = get_base_context(request)\n poster = get_object_or_404(Poster, pk=order_id)\n context['poster'] = poster\n\n if not has_view_perms(request.user, poster):\n raise PermissionDenied\n\n order_type = poster.order_type\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n context[\"order_type_name\"] = type_name\n\n if request.method == 'POST':\n poster_status = request.POST.get('completed')\n if poster_status == 'true' or poster_status == 'false':\n poster.toggle_finished()\n\n return render(request, 'posters/dashboard/details.html', context)\n\n\n# Ajax\n\n\n# @ensure_csrf_cookie\n@login_required\ndef assign_person(request):\n if request.is_ajax():\n if request.method == 'POST':\n order_id = request.POST.get('order_id')\n orders = Poster.objects.filter(pk=order_id)\n assign_to_id = 
request.POST.get('assign_to_id')\n assign_to = User.objects.get(pk=assign_to_id)\n\n if orders.count() == 0:\n response_text = json.dumps({'message': _(\n u\"\"\"Kan ikke finne en ordre med denne IDen (%s).\nOm feilen vedvarer etter en refresh, kontakt [email protected].\"\"\") % order_id})\n return HttpResponse(status=412, content=response_text)\n\n order = orders[0]\n\n if order.finished or order.assigned_to is not None:\n response_text = json.dumps({'message': _(u\"Denne ordren er allerede behandlet.\")})\n return HttpResponse(status=412, content=response_text)\n\n order.assigned_to = assign_to\n order.save()\n\n return HttpResponse(status=200)\n", "path": "apps/posters/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.core.mail import EmailMessage\nfrom django.core.exceptions import PermissionDenied, ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.forms.models import model_to_dict\nfrom django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect, HttpResponse\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom guardian.decorators import permission_required\nfrom guardian.models import UserObjectPermission, GroupObjectPermission\n# from guardian.core import ObjectPermissionChecker\nimport guardian\n\nfrom datetime import datetime, timedelta\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.dashboard.tools import has_access, get_base_context\nfrom apps.posters.models import Poster\nfrom apps.posters.forms import AddForm, AddPosterForm, AddBongForm, AddOtherForm, EditPosterForm\n# from apps.dashboard.posters.models import PosterForm\nfrom apps.companyprofile.models import Company\nfrom apps.posters.models import Poster, OrderMixin\nfrom apps.posters.permissions import has_view_perms, has_view_all_perms\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('posters.overview_poster_order', return_403=True)\ndef index(request):\n if request.is_ajax():\n do_ajax_shit = True\n\n # The group with members who should populate the dropdownlist\n group = Group.objects.get(name='proKom')\n users_to_populate = group.user_set.all()\n\n context = get_base_context(request)\n\n # View to show if user not in committee, but wanting to see own orders\n if not has_view_all_perms(request.user):\n context['your_orders'] = [x for x in Poster.objects.filter(ordered_by=request.user) if request.user.has_perm('view_poster_order', x)]\n return render(request, 'posters/dashboard/index.html', context)\n\n orders = Poster.objects.all()\n\n context['new_orders'] = orders.filter(assigned_to=None)\n context['active_orders'] = orders.filter(finished=False).exclude(assigned_to=None)\n context['hanging_orders'] = orders.filter(finished=True,\n display_to__lte=datetime.now()+timedelta(days=3))\n\n context['workers'] = User.objects.filter(groups=Group.objects.get(name='proKom'))\n\n return render(request, 'posters/dashboard/index.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('posters.add_poster_order', return_403=True)\ndef add(request, order_type=0):\n order_type = int(order_type)\n context = get_base_context(request)\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n 
type_name = type_names[order_type-1]\n\n poster = Poster()\n form = None\n\n if request.method == 'POST':\n if order_type == 1:\n form = AddPosterForm(data=request.POST, instance=poster)\n elif order_type == 2:\n form = AddBongForm(data=request.POST, instance=poster)\n elif order_type == 3:\n # poster = GeneralOrder()\n form = AddOtherForm(data=request.POST, instance=poster)\n\n if form.is_valid():\n poster = form.save(commit=False)\n if request.POST.get('company'):\n poster.company = Company.objects.get(pk=request.POST.get('company'))\n poster.ordered_by = request.user\n # Should look for a more kosher solution\n poster.ordered_committee = request.user.groups.exclude(name='Komiteer').filter(name__contains=\"Kom\")[0]\n poster.order_type = order_type\n\n poster.save()\n\n # for b in poster.__class__.__bases__:\n # poster_mixin = OrderMixin.objects.get(id=poster.id)\n # Let this user have permissions to show this order\n UserObjectPermission.objects.assign_perm('view_poster_order', obj=poster, user=request.user)\n GroupObjectPermission.objects.assign_perm('view_poster_order', obj=poster, group=Group.objects.get(name='proKom'))\n\n title = unicode(poster)\n\n # The great sending of emails\n subject = '[ProKom] Ny bestilling for %s' % title\n email_message = '%(message)s%(signature)s' % {\n 'message': _('''\nDet har blitt registrert en ny %(order_type)sbestilling pa Online sine nettsider. Dette er bestilling nummer %(id)s.\n\\n\nAntall og type: %(num)s * %(order_type)s\\n\nArrangement: %(event_name)s\\n\nBestilt av: %(ordered_by)s i %(ordered_by_committee)s\\n\nBestilt dato: %(ordered_date)s\\n\n\\n\nFor mer informasjon, sjekk ut bestillingen her: %(absolute_url)s\n '''\n % {\n 'site': '',\n 'order_type': type_name.lower(),\n 'num': poster.amount,\n 'ordered_by': poster.ordered_by,\n 'ordered_by_committee': poster.ordered_committee,\n 'id': poster.id,\n 'event_name': title,\n 'ordered_date': poster.ordered_date,\n 'absolute_url': request.build_absolute_uri(poster.get_dashboard_url())\n }\n ),\n 'signature': _('\\n\\nVennlig hilsen Linjeforeningen Online')\n }\n from_email = settings.EMAIL_PROKOM\n to_emails = [settings.EMAIL_PROKOM, request.user.get_email().email]\n\n email_sent = EmailMessage(unicode(subject), unicode(email_message), from_email, to_emails, []).send()\n if email_sent:\n messages.success(request, 'Opprettet bestilling')\n else:\n messages.error(request, 'Klarte ikke \u00e5 sende epost, men bestillingen din ble fortsatt opprettet')\n\n return redirect(poster.get_absolute_url())\n else:\n context['form'] = form\n return render(request, 'posters/dashboard/add.html', context)\n\n context[\"order_type_name\"] = type_name\n context['order_type'] = order_type\n context['can_edit'] = True # request.user.has_perm('posters.view_poster')\n\n if order_type == 1:\n form = AddPosterForm()\n elif order_type == 2:\n form = AddBongForm()\n elif order_type == 3:\n form = AddOtherForm()\n\n forms = (AddPosterForm(), AddBongForm(), AddOtherForm())\n\n context['form'] = forms[order_type-1]\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\ndef edit(request, order_id=None):\n context = get_base_context(request)\n context['add_poster_form'] = EditPosterForm()\n\n if order_id:\n poster = get_object_or_404(Poster, pk=order_id)\n\n if request.user != poster.ordered_by and 'proKom' not in request.user.groups:\n raise PermissionDenied\n\n if request.POST:\n form = AddForm(request.POST, instance=poster)\n if form.is_valid():\n form.save()\n # 
redirect_url = redirect(poster.get_absolute_url())\n return HttpResponseRedirect(\"../detail/\"+str(poster.id))\n\n else:\n context[\"form\"] = AddForm(instance=poster)\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('view_poster_order', (Poster, 'pk', 'order_id'), return_403=True)\ndef detail(request, order_id=None):\n if request.is_ajax():\n do_ajax_shit = True\n\n if not order_id:\n return HttpResponse(status=400)\n\n context = get_base_context(request)\n poster = get_object_or_404(Poster, pk=order_id)\n context['poster'] = poster\n\n if not has_view_perms(request.user, poster):\n raise PermissionDenied\n\n order_type = poster.order_type\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n context[\"order_type_name\"] = type_name\n\n if request.method == 'POST':\n poster_status = request.POST.get('completed')\n if poster_status == 'true' or poster_status == 'false':\n poster.toggle_finished()\n\n return render(request, 'posters/dashboard/details.html', context)\n\n\n# Ajax\n\n\n# @ensure_csrf_cookie\n@login_required\ndef assign_person(request):\n if request.is_ajax():\n if request.method == 'POST':\n order_id = request.POST.get('order_id')\n orders = Poster.objects.filter(pk=order_id)\n assign_to_id = request.POST.get('assign_to_id')\n assign_to = User.objects.get(pk=assign_to_id)\n\n if orders.count() == 0:\n response_text = json.dumps({'message': _(\n u\"\"\"Kan ikke finne en ordre med denne IDen (%s).\nOm feilen vedvarer etter en refresh, kontakt [email protected].\"\"\") % order_id})\n return HttpResponse(status=412, content=response_text)\n\n order = orders[0]\n\n if order.finished or order.assigned_to is not None:\n response_text = json.dumps({'message': _(u\"Denne ordren er allerede behandlet.\")})\n return HttpResponse(status=412, content=response_text)\n\n order.assigned_to = assign_to\n order.save()\n\n return HttpResponse(status=200)\n", "path": "apps/posters/dashboard/views.py"}]}
3,174
504
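Note on the posters record above: the mail bug its golden diff fixes is a NameError — `EmailMessage(unicode(subject), unicode(message), ...)` references `message`, while the rendered body lives in `email_message` — and the bare `try/except` around the send masked that error, so orders were created but mail silently never went out; the diff also switches `request.user.get_email()` to `.get_email().email` to pass a string address. A minimal Python sketch of the masking failure mode (the record's code is Python 2 Django; `send_mail` below is a stand-in, not the project's API):

```python
def send_mail(subject, body):
    # Stand-in for Django's EmailMessage(subject, body, ...).send(),
    # which returns the number of messages delivered.
    return 1

def notify_buggy(subject, email_message):
    try:
        # NameError: 'message' is undefined -- the body was rendered into
        # 'email_message', but 'message' is what gets passed.
        return send_mail(subject, message)
    except:  # noqa: E722 -- the bare except swallows the NameError
        return False

def notify_fixed(subject, email_message):
    # Pass the variable that actually holds the body, and let real
    # delivery errors propagate instead of being silenced.
    return send_mail(subject, email_message)

print(notify_buggy("[ProKom] order", "body"))  # False: mail "fails" every time
print(notify_fixed("[ProKom] order", "body"))  # 1
```

Dropping the blanket except is part of the fix: once the wrong name is corrected, genuine SMTP failures surface instead of being reported as a generic send failure.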
gh_patches_debug_49032
rasdani/github-patches
git_diff
pulp__pulpcore-5465
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Replication assumes every plugin supports Publications The ```Replicator``` class, which is being subclassed inside plugins to support the replication feature, assumes that every plugin supports publications as it tries to access ```upstream_distribution["publication"]``` inside one of its methods (for some plugins, the dictionary simply doesn't contain the "publication" key so an exception gets raised). This forces certain subclasses of ```Replicator``` to create workarounds or rewrite the given method. I propose making the method more general, removing such assumptions. Relevant code: https://github.com/pulp/pulpcore/blob/c31a39c186fd734e9e0409321735d7d6509a12ee/pulpcore/app/replica.py#L97C9-L97C96 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pulpcore/app/replica.py` Content: ``` 1 from django.db.models import Model 2 import logging 3 4 from pulp_glue.common.context import PulpContext 5 from pulpcore.tasking.tasks import dispatch 6 from pulpcore.app.tasks.base import ( 7 general_update, 8 general_create, 9 general_multi_delete, 10 ) 11 from pulpcore.plugin.util import get_url, get_domain 12 13 _logger = logging.getLogger(__name__) 14 15 16 class ReplicaContext(PulpContext): 17 def __init__(self, **kwargs): 18 super().__init__(**kwargs) 19 self.out_buf = "" 20 self.err_buf = "" 21 22 def echo(self, message: str, nl: bool = True, err: bool = False) -> None: 23 if err: 24 self.err_buf += message 25 if nl: 26 _logger.warn("{}", self.err_buf) 27 self.err_buf = "" 28 else: 29 self.out_buf += message 30 if nl: 31 _logger.info("{}", self.out_buf) 32 self.out_buf = "" 33 34 35 class Replicator: 36 distribution_ctx_cls = None 37 repository_ctx_cls = None 38 publication_ctx_cls = None 39 remote_model_cls = None 40 repository_model_cls = None 41 distribution_model_cls = None 42 distribution_serializer_name = None 43 repository_serializer_name = None 44 remote_serializer_name = None 45 app_label = None 46 sync_task = None 47 48 def __init__(self, pulp_ctx, task_group, tls_settings): 49 """ 50 :param pulp_ctx: PulpReplicaContext 51 :param task_group: TaskGroup 52 :param ca_cert: str 53 """ 54 self.pulp_ctx = pulp_ctx 55 self.task_group = task_group 56 self.tls_settings = tls_settings 57 self.domain = get_domain() 58 self.distros_uris = [f"pdrn:{self.domain.pulp_id}:distributions"] 59 60 @staticmethod 61 def needs_update(fields_dict, model_instance): 62 """ 63 Compares a Model instance's attributes against a dictionary where keys are attribute 64 names and values are expected values. 
65 """ 66 needs_update = False 67 for field_name, value in fields_dict.items(): 68 if isinstance(getattr(model_instance, field_name), Model): 69 if get_url(getattr(model_instance, field_name)) != value: 70 needs_update = True 71 elif getattr(model_instance, field_name) != value: 72 needs_update = True 73 return needs_update 74 75 def upstream_distributions(self, labels=None): 76 if labels: 77 params = {"pulp_label_select": labels} 78 else: 79 params = {} 80 offset = 0 81 list_size = 100 82 while True: 83 distributions = self.distribution_ctx_cls(self.pulp_ctx).list(list_size, offset, params) 84 for distro in distributions: 85 yield distro 86 if len(distributions) < list_size: 87 break 88 offset += list_size 89 90 def url(self, upstream_distribution): 91 return upstream_distribution["base_url"] 92 93 def remote_extra_fields(self, upstream_distribution): 94 return {} 95 96 def create_or_update_remote(self, upstream_distribution): 97 if not upstream_distribution["repository"] and not upstream_distribution["publication"]: 98 return None 99 url = self.url(upstream_distribution) 100 remote_fields_dict = {"url": url} 101 remote_fields_dict.update(self.tls_settings) 102 remote_fields_dict.update(self.remote_extra_fields(upstream_distribution)) 103 104 # Check if there is a remote pointing to this distribution 105 try: 106 remote = self.remote_model_cls.objects.get( 107 name=upstream_distribution["name"], pulp_domain=self.domain 108 ) 109 needs_update = self.needs_update(remote_fields_dict, remote) 110 if needs_update: 111 dispatch( 112 general_update, 113 task_group=self.task_group, 114 exclusive_resources=[remote], 115 args=(remote.pk, self.app_label, self.remote_serializer_name), 116 kwargs={"data": remote_fields_dict, "partial": True}, 117 ) 118 except self.remote_model_cls.DoesNotExist: 119 # Create the remote 120 remote = self.remote_model_cls(name=upstream_distribution["name"], **remote_fields_dict) 121 remote.save() 122 123 return remote 124 125 def repository_extra_fields(self, remote): 126 return {} 127 128 def create_or_update_repository(self, remote): 129 try: 130 repository = self.repository_model_cls.objects.get( 131 name=remote.name, pulp_domain=self.domain 132 ) 133 repo_fields_dict = self.repository_extra_fields(remote) 134 needs_update = self.needs_update(repo_fields_dict, repository) 135 if needs_update: 136 dispatch( 137 general_update, 138 task_group=self.task_group, 139 exclusive_resources=[repository], 140 args=(repository.pk, self.app_label, self.repository_serializer_name), 141 kwargs={"data": repo_fields_dict, "partial": True}, 142 ) 143 except self.repository_model_cls.DoesNotExist: 144 repository = self.repository_model_cls( 145 name=remote.name, **self.repository_extra_fields(remote) 146 ) 147 repository.save() 148 return repository 149 150 def distribution_data(self, repository, upstream_distribution): 151 """ 152 Return the fields that need to be updated/cleared on distributions for idempotence. 
153 """ 154 return { 155 "repository": get_url(repository), 156 "publication": None, 157 "base_path": upstream_distribution["base_path"], 158 } 159 160 def create_or_update_distribution(self, repository, upstream_distribution): 161 distribution_data = self.distribution_data(repository, upstream_distribution) 162 try: 163 distro = self.distribution_model_cls.objects.get( 164 name=upstream_distribution["name"], pulp_domain=self.domain 165 ) 166 # Check that the distribution has the right repository associated 167 needs_update = self.needs_update(distribution_data, distro) 168 if needs_update: 169 # Update the distribution 170 dispatch( 171 general_update, 172 task_group=self.task_group, 173 shared_resources=[repository], 174 exclusive_resources=self.distros_uris, 175 args=(distro.pk, self.app_label, self.distribution_serializer_name), 176 kwargs={ 177 "data": distribution_data, 178 "partial": True, 179 }, 180 ) 181 except self.distribution_model_cls.DoesNotExist: 182 # Dispatch a task to create the distribution 183 distribution_data["name"] = upstream_distribution["name"] 184 dispatch( 185 general_create, 186 task_group=self.task_group, 187 shared_resources=[repository], 188 exclusive_resources=self.distros_uris, 189 args=(self.app_label, self.distribution_serializer_name), 190 kwargs={"data": distribution_data}, 191 ) 192 193 def sync_params(self, repository, remote): 194 """This method returns a dict that will be passed as kwargs to the sync task.""" 195 raise NotImplementedError("Each replicator must supply its own sync params.") 196 197 def sync(self, repository, remote): 198 dispatch( 199 self.sync_task, 200 task_group=self.task_group, 201 shared_resources=[remote], 202 exclusive_resources=[repository], 203 kwargs=self.sync_params(repository, remote), 204 ) 205 206 def remove_missing(self, names): 207 # Remove all distributions with names not present in the list of names 208 # Perform this in an extra task, because we hold a big lock here. 209 distribution_ids = [ 210 (distribution.pk, self.app_label, self.distribution_serializer_name) 211 for distribution in self.distribution_model_cls.objects.filter( 212 pulp_domain=self.domain 213 ).exclude(name__in=names) 214 ] 215 if distribution_ids: 216 dispatch( 217 general_multi_delete, 218 task_group=self.task_group, 219 exclusive_resources=self.distros_uris, 220 args=(distribution_ids,), 221 ) 222 223 # Remove all the repositories and remotes of the missing distributions 224 repositories = list( 225 self.repository_model_cls.objects.filter( 226 pulp_domain=self.domain, user_hidden=False 227 ).exclude(name__in=names) 228 ) 229 repository_ids = [ 230 (repo.pk, self.app_label, self.repository_serializer_name) for repo in repositories 231 ] 232 233 remotes = list( 234 self.remote_model_cls.objects.filter(pulp_domain=self.domain).exclude(name__in=names) 235 ) 236 remote_ids = [ 237 (remote.pk, self.app_label, self.remote_serializer_name) for remote in remotes 238 ] 239 240 if repository_ids or remote_ids: 241 dispatch( 242 general_multi_delete, 243 task_group=self.task_group, 244 exclusive_resources=repositories + remotes, 245 args=(repository_ids + remote_ids,), 246 ) 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pulpcore/app/replica.py b/pulpcore/app/replica.py --- a/pulpcore/app/replica.py +++ b/pulpcore/app/replica.py @@ -94,7 +94,9 @@ return {} def create_or_update_remote(self, upstream_distribution): - if not upstream_distribution["repository"] and not upstream_distribution["publication"]: + if not upstream_distribution.get("repository") and not upstream_distribution.get( + "publication" + ): return None url = self.url(upstream_distribution) remote_fields_dict = {"url": url}
{"golden_diff": "diff --git a/pulpcore/app/replica.py b/pulpcore/app/replica.py\n--- a/pulpcore/app/replica.py\n+++ b/pulpcore/app/replica.py\n@@ -94,7 +94,9 @@\n return {}\n \n def create_or_update_remote(self, upstream_distribution):\n- if not upstream_distribution[\"repository\"] and not upstream_distribution[\"publication\"]:\n+ if not upstream_distribution.get(\"repository\") and not upstream_distribution.get(\n+ \"publication\"\n+ ):\n return None\n url = self.url(upstream_distribution)\n remote_fields_dict = {\"url\": url}\n", "issue": "Replication assumes every plugin supports Publications\nThe ```Replicator``` class, which is being subclassed inside plugins to support the replication feature, assumes that every plugin supports publications as it tries to access ```upstream_distribution[\"publication\"]``` inside one of its methods (for some plugins, the dictionary simply doesn't contain the \"publication\" key so an exception gets raised). This forces certain subclasses of ```Replicator``` to create workarounds or rewrite the given method.\r\n\r\nI propose making the method more general, removing such assumptions.\r\n\r\nRelevant code: https://github.com/pulp/pulpcore/blob/c31a39c186fd734e9e0409321735d7d6509a12ee/pulpcore/app/replica.py#L97C9-L97C96\r\n\n", "before_files": [{"content": "from django.db.models import Model\nimport logging\n\nfrom pulp_glue.common.context import PulpContext\nfrom pulpcore.tasking.tasks import dispatch\nfrom pulpcore.app.tasks.base import (\n general_update,\n general_create,\n general_multi_delete,\n)\nfrom pulpcore.plugin.util import get_url, get_domain\n\n_logger = logging.getLogger(__name__)\n\n\nclass ReplicaContext(PulpContext):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.out_buf = \"\"\n self.err_buf = \"\"\n\n def echo(self, message: str, nl: bool = True, err: bool = False) -> None:\n if err:\n self.err_buf += message\n if nl:\n _logger.warn(\"{}\", self.err_buf)\n self.err_buf = \"\"\n else:\n self.out_buf += message\n if nl:\n _logger.info(\"{}\", self.out_buf)\n self.out_buf = \"\"\n\n\nclass Replicator:\n distribution_ctx_cls = None\n repository_ctx_cls = None\n publication_ctx_cls = None\n remote_model_cls = None\n repository_model_cls = None\n distribution_model_cls = None\n distribution_serializer_name = None\n repository_serializer_name = None\n remote_serializer_name = None\n app_label = None\n sync_task = None\n\n def __init__(self, pulp_ctx, task_group, tls_settings):\n \"\"\"\n :param pulp_ctx: PulpReplicaContext\n :param task_group: TaskGroup\n :param ca_cert: str\n \"\"\"\n self.pulp_ctx = pulp_ctx\n self.task_group = task_group\n self.tls_settings = tls_settings\n self.domain = get_domain()\n self.distros_uris = [f\"pdrn:{self.domain.pulp_id}:distributions\"]\n\n @staticmethod\n def needs_update(fields_dict, model_instance):\n \"\"\"\n Compares a Model instance's attributes against a dictionary where keys are attribute\n names and values are expected values.\n \"\"\"\n needs_update = False\n for field_name, value in fields_dict.items():\n if isinstance(getattr(model_instance, field_name), Model):\n if get_url(getattr(model_instance, field_name)) != value:\n needs_update = True\n elif getattr(model_instance, field_name) != value:\n needs_update = True\n return needs_update\n\n def upstream_distributions(self, labels=None):\n if labels:\n params = {\"pulp_label_select\": labels}\n else:\n params = {}\n offset = 0\n list_size = 100\n while True:\n distributions = 
self.distribution_ctx_cls(self.pulp_ctx).list(list_size, offset, params)\n for distro in distributions:\n yield distro\n if len(distributions) < list_size:\n break\n offset += list_size\n\n def url(self, upstream_distribution):\n return upstream_distribution[\"base_url\"]\n\n def remote_extra_fields(self, upstream_distribution):\n return {}\n\n def create_or_update_remote(self, upstream_distribution):\n if not upstream_distribution[\"repository\"] and not upstream_distribution[\"publication\"]:\n return None\n url = self.url(upstream_distribution)\n remote_fields_dict = {\"url\": url}\n remote_fields_dict.update(self.tls_settings)\n remote_fields_dict.update(self.remote_extra_fields(upstream_distribution))\n\n # Check if there is a remote pointing to this distribution\n try:\n remote = self.remote_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n needs_update = self.needs_update(remote_fields_dict, remote)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[remote],\n args=(remote.pk, self.app_label, self.remote_serializer_name),\n kwargs={\"data\": remote_fields_dict, \"partial\": True},\n )\n except self.remote_model_cls.DoesNotExist:\n # Create the remote\n remote = self.remote_model_cls(name=upstream_distribution[\"name\"], **remote_fields_dict)\n remote.save()\n\n return remote\n\n def repository_extra_fields(self, remote):\n return {}\n\n def create_or_update_repository(self, remote):\n try:\n repository = self.repository_model_cls.objects.get(\n name=remote.name, pulp_domain=self.domain\n )\n repo_fields_dict = self.repository_extra_fields(remote)\n needs_update = self.needs_update(repo_fields_dict, repository)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[repository],\n args=(repository.pk, self.app_label, self.repository_serializer_name),\n kwargs={\"data\": repo_fields_dict, \"partial\": True},\n )\n except self.repository_model_cls.DoesNotExist:\n repository = self.repository_model_cls(\n name=remote.name, **self.repository_extra_fields(remote)\n )\n repository.save()\n return repository\n\n def distribution_data(self, repository, upstream_distribution):\n \"\"\"\n Return the fields that need to be updated/cleared on distributions for idempotence.\n \"\"\"\n return {\n \"repository\": get_url(repository),\n \"publication\": None,\n \"base_path\": upstream_distribution[\"base_path\"],\n }\n\n def create_or_update_distribution(self, repository, upstream_distribution):\n distribution_data = self.distribution_data(repository, upstream_distribution)\n try:\n distro = self.distribution_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n # Check that the distribution has the right repository associated\n needs_update = self.needs_update(distribution_data, distro)\n if needs_update:\n # Update the distribution\n dispatch(\n general_update,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(distro.pk, self.app_label, self.distribution_serializer_name),\n kwargs={\n \"data\": distribution_data,\n \"partial\": True,\n },\n )\n except self.distribution_model_cls.DoesNotExist:\n # Dispatch a task to create the distribution\n distribution_data[\"name\"] = upstream_distribution[\"name\"]\n dispatch(\n general_create,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(self.app_label, 
self.distribution_serializer_name),\n kwargs={\"data\": distribution_data},\n )\n\n def sync_params(self, repository, remote):\n \"\"\"This method returns a dict that will be passed as kwargs to the sync task.\"\"\"\n raise NotImplementedError(\"Each replicator must supply its own sync params.\")\n\n def sync(self, repository, remote):\n dispatch(\n self.sync_task,\n task_group=self.task_group,\n shared_resources=[remote],\n exclusive_resources=[repository],\n kwargs=self.sync_params(repository, remote),\n )\n\n def remove_missing(self, names):\n # Remove all distributions with names not present in the list of names\n # Perform this in an extra task, because we hold a big lock here.\n distribution_ids = [\n (distribution.pk, self.app_label, self.distribution_serializer_name)\n for distribution in self.distribution_model_cls.objects.filter(\n pulp_domain=self.domain\n ).exclude(name__in=names)\n ]\n if distribution_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=self.distros_uris,\n args=(distribution_ids,),\n )\n\n # Remove all the repositories and remotes of the missing distributions\n repositories = list(\n self.repository_model_cls.objects.filter(\n pulp_domain=self.domain, user_hidden=False\n ).exclude(name__in=names)\n )\n repository_ids = [\n (repo.pk, self.app_label, self.repository_serializer_name) for repo in repositories\n ]\n\n remotes = list(\n self.remote_model_cls.objects.filter(pulp_domain=self.domain).exclude(name__in=names)\n )\n remote_ids = [\n (remote.pk, self.app_label, self.remote_serializer_name) for remote in remotes\n ]\n\n if repository_ids or remote_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=repositories + remotes,\n args=(repository_ids + remote_ids,),\n )\n", "path": "pulpcore/app/replica.py"}], "after_files": [{"content": "from django.db.models import Model\nimport logging\n\nfrom pulp_glue.common.context import PulpContext\nfrom pulpcore.tasking.tasks import dispatch\nfrom pulpcore.app.tasks.base import (\n general_update,\n general_create,\n general_multi_delete,\n)\nfrom pulpcore.plugin.util import get_url, get_domain\n\n_logger = logging.getLogger(__name__)\n\n\nclass ReplicaContext(PulpContext):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.out_buf = \"\"\n self.err_buf = \"\"\n\n def echo(self, message: str, nl: bool = True, err: bool = False) -> None:\n if err:\n self.err_buf += message\n if nl:\n _logger.warn(\"{}\", self.err_buf)\n self.err_buf = \"\"\n else:\n self.out_buf += message\n if nl:\n _logger.info(\"{}\", self.out_buf)\n self.out_buf = \"\"\n\n\nclass Replicator:\n distribution_ctx_cls = None\n repository_ctx_cls = None\n publication_ctx_cls = None\n remote_model_cls = None\n repository_model_cls = None\n distribution_model_cls = None\n distribution_serializer_name = None\n repository_serializer_name = None\n remote_serializer_name = None\n app_label = None\n sync_task = None\n\n def __init__(self, pulp_ctx, task_group, tls_settings):\n \"\"\"\n :param pulp_ctx: PulpReplicaContext\n :param task_group: TaskGroup\n :param ca_cert: str\n \"\"\"\n self.pulp_ctx = pulp_ctx\n self.task_group = task_group\n self.tls_settings = tls_settings\n self.domain = get_domain()\n self.distros_uris = [f\"pdrn:{self.domain.pulp_id}:distributions\"]\n\n @staticmethod\n def needs_update(fields_dict, model_instance):\n \"\"\"\n Compares a Model instance's attributes against a dictionary where keys are attribute\n names and values are expected 
values.\n \"\"\"\n needs_update = False\n for field_name, value in fields_dict.items():\n if isinstance(getattr(model_instance, field_name), Model):\n if get_url(getattr(model_instance, field_name)) != value:\n needs_update = True\n elif getattr(model_instance, field_name) != value:\n needs_update = True\n return needs_update\n\n def upstream_distributions(self, labels=None):\n if labels:\n params = {\"pulp_label_select\": labels}\n else:\n params = {}\n offset = 0\n list_size = 100\n while True:\n distributions = self.distribution_ctx_cls(self.pulp_ctx).list(list_size, offset, params)\n for distro in distributions:\n yield distro\n if len(distributions) < list_size:\n break\n offset += list_size\n\n def url(self, upstream_distribution):\n return upstream_distribution[\"base_url\"]\n\n def remote_extra_fields(self, upstream_distribution):\n return {}\n\n def create_or_update_remote(self, upstream_distribution):\n if not upstream_distribution.get(\"repository\") and not upstream_distribution.get(\n \"publication\"\n ):\n return None\n url = self.url(upstream_distribution)\n remote_fields_dict = {\"url\": url}\n remote_fields_dict.update(self.tls_settings)\n remote_fields_dict.update(self.remote_extra_fields(upstream_distribution))\n\n # Check if there is a remote pointing to this distribution\n try:\n remote = self.remote_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n needs_update = self.needs_update(remote_fields_dict, remote)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[remote],\n args=(remote.pk, self.app_label, self.remote_serializer_name),\n kwargs={\"data\": remote_fields_dict, \"partial\": True},\n )\n except self.remote_model_cls.DoesNotExist:\n # Create the remote\n remote = self.remote_model_cls(name=upstream_distribution[\"name\"], **remote_fields_dict)\n remote.save()\n\n return remote\n\n def repository_extra_fields(self, remote):\n return {}\n\n def create_or_update_repository(self, remote):\n try:\n repository = self.repository_model_cls.objects.get(\n name=remote.name, pulp_domain=self.domain\n )\n repo_fields_dict = self.repository_extra_fields(remote)\n needs_update = self.needs_update(repo_fields_dict, repository)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[repository],\n args=(repository.pk, self.app_label, self.repository_serializer_name),\n kwargs={\"data\": repo_fields_dict, \"partial\": True},\n )\n except self.repository_model_cls.DoesNotExist:\n repository = self.repository_model_cls(\n name=remote.name, **self.repository_extra_fields(remote)\n )\n repository.save()\n return repository\n\n def distribution_data(self, repository, upstream_distribution):\n \"\"\"\n Return the fields that need to be updated/cleared on distributions for idempotence.\n \"\"\"\n return {\n \"repository\": get_url(repository),\n \"publication\": None,\n \"base_path\": upstream_distribution[\"base_path\"],\n }\n\n def create_or_update_distribution(self, repository, upstream_distribution):\n distribution_data = self.distribution_data(repository, upstream_distribution)\n try:\n distro = self.distribution_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n # Check that the distribution has the right repository associated\n needs_update = self.needs_update(distribution_data, distro)\n if needs_update:\n # Update the distribution\n dispatch(\n general_update,\n task_group=self.task_group,\n 
shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(distro.pk, self.app_label, self.distribution_serializer_name),\n kwargs={\n \"data\": distribution_data,\n \"partial\": True,\n },\n )\n except self.distribution_model_cls.DoesNotExist:\n # Dispatch a task to create the distribution\n distribution_data[\"name\"] = upstream_distribution[\"name\"]\n dispatch(\n general_create,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(self.app_label, self.distribution_serializer_name),\n kwargs={\"data\": distribution_data},\n )\n\n def sync_params(self, repository, remote):\n \"\"\"This method returns a dict that will be passed as kwargs to the sync task.\"\"\"\n raise NotImplementedError(\"Each replicator must supply its own sync params.\")\n\n def sync(self, repository, remote):\n dispatch(\n self.sync_task,\n task_group=self.task_group,\n shared_resources=[remote],\n exclusive_resources=[repository],\n kwargs=self.sync_params(repository, remote),\n )\n\n def remove_missing(self, names):\n # Remove all distributions with names not present in the list of names\n # Perform this in an extra task, because we hold a big lock here.\n distribution_ids = [\n (distribution.pk, self.app_label, self.distribution_serializer_name)\n for distribution in self.distribution_model_cls.objects.filter(\n pulp_domain=self.domain\n ).exclude(name__in=names)\n ]\n if distribution_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=self.distros_uris,\n args=(distribution_ids,),\n )\n\n # Remove all the repositories and remotes of the missing distributions\n repositories = list(\n self.repository_model_cls.objects.filter(\n pulp_domain=self.domain, user_hidden=False\n ).exclude(name__in=names)\n )\n repository_ids = [\n (repo.pk, self.app_label, self.repository_serializer_name) for repo in repositories\n ]\n\n remotes = list(\n self.remote_model_cls.objects.filter(pulp_domain=self.domain).exclude(name__in=names)\n )\n remote_ids = [\n (remote.pk, self.app_label, self.remote_serializer_name) for remote in remotes\n ]\n\n if repository_ids or remote_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=repositories + remotes,\n args=(repository_ids + remote_ids,),\n )\n", "path": "pulpcore/app/replica.py"}]}
2817
134
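The pulpcore record above turns on one idempotence pattern used for remotes, repositories, and distributions alike: build a dict of desired field values, compare it against the stored object, and dispatch an update task only when something actually differs. Below is a minimal standalone sketch of that comparison, with a plain dataclass standing in for the Django model and an in-place assignment standing in for pulpcore's task dispatch (all names here are illustrative, not pulpcore's API):

```python
from dataclasses import dataclass


@dataclass
class Remote:
    name: str
    url: str
    tls_validation: bool


def needs_update(fields: dict, instance) -> bool:
    # True as soon as any desired value differs from what is stored.
    return any(getattr(instance, name) != value for name, value in fields.items())


remote = Remote(name="upstream", url="https://old.example/pulp", tls_validation=True)
desired = {"url": "https://new.example/pulp", "tls_validation": True}

if needs_update(desired, remote):
    # pulpcore would dispatch a general_update task here instead.
    for name, value in desired.items():
        setattr(remote, name, value)
```

Skipping the dispatch when nothing changed is what lets the replication task be re-run safely against the same upstream.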
gh_patches_debug_20975
rasdani/github-patches
git_diff
holoviz__panel-1318
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DataFrame widget raises exception if two columns have the same names Panel: 0.7 #### Description of expected behavior and the observed behavior Normally I don't have the same column names for my dataframes. But I was trying to reproduce this dashboard https://getbootstrap.com/docs/4.3/examples/dashboard/ and they have the same column names in multiple columns. I can see that the panel.widgets.DataFrame does not support this. I would have expected it to support this situation. #### Complete, minimal, self-contained example code that reproduces the issue ```python import panel as pn import pandas as pd table_data = pd.DataFrame([(1001, "Lorem"),], columns=["Header", "Header"]) pn.widgets.DataFrame(table_data).servable() ``` #### Stack traceback and/or browser JavaScript console output ```bash $ python -m panel serve 'scripts\issue2.py' --dev 2019-11-29 09:00:56,834 Starting Bokeh server version 1.4.0 (running on Tornado 6.0.3) 2019-11-29 09:00:56,836 User authentication hooks NOT provided (default user enabled) 2019-11-29 09:00:56,840 Bokeh app running at: http://localhost:5006/issue2 2019-11-29 09:00:56,840 Starting Bokeh server with process id: 10992 2019-11-29 09:00:58,603 Error running application handler <bokeh.application.handlers.script.ScriptHandler object at 0x00000028FF59DD48>: 'DataFrame' object has no attribute 'dtype' File "generic.py", line 5179, in __getattr__: return object.__getattribute__(self, name) Traceback (most recent call last): File "C:\repos\private\awesome-panel\.venv\lib\site-packages\bokeh\application\handlers\code_runner.py", line 179, in run exec(self._code, module.__dict__) File "C:\repos\private\awesome-panel\scripts\issue2.py", line 5, in <module> pn.widgets.DataFrame(table_data).servable() File "C:\repos\private\awesome-panel\.venv\lib\site-packages\panel\viewable.py", line 525, in servable self.server_doc(title=title) File "C:\repos\private\awesome-panel\.venv\lib\site-packages\panel\viewable.py", line 498, in server_doc model = self.get_root(doc) File "C:\repos\private\awesome-panel\.venv\lib\site-packages\panel\viewable.py", line 434, in get_root root = self._get_model(doc, comm=comm) File "C:\repos\private\awesome-panel\.venv\lib\site-packages\panel\widgets\tables.py", line 112, in _get_model model = DataTable(**self._get_properties()) File "C:\repos\private\awesome-panel\.venv\lib\site-packages\panel\widgets\tables.py", line 99, in _get_properties props['columns'] = self._get_columns() File "C:\repos\private\awesome-panel\.venv\lib\site-packages\panel\widgets\tables.py", line 61, in _get_columns kind = data.dtype.kind File "C:\repos\private\awesome-panel\.venv\lib\site-packages\pandas\core\generic.py", line 5179, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'DataFrame' object has no attribute 'dtype' 2019-11-29 09:00:58,990 WebSocket connection opened 2019-11-29 09:00:58,991 ServerConnection created ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `panel/widgets/tables.py` Content: ``` 1 from __future__ import absolute_import, division, unicode_literals 2 3 import numpy as np 4 import param 5 6 from bokeh.models import ColumnDataSource 7 from bokeh.models.widgets import ( 8 DataTable, TableColumn, NumberEditor, NumberFormatter, 9 DateFormatter, DateEditor, StringFormatter, StringEditor, IntEditor 10 ) 11 12 from ..viewable import Layoutable 13 from ..util import isdatetime 14 from .base import Widget 15 16 17 class DataFrame(Widget): 18 19 editors = param.Dict(default={}, doc=""" 20 Bokeh CellEditor to use for a particular column 21 (overrides the default chosen based on the type).""") 22 23 formatters = param.Dict(default={}, doc=""" 24 Bokeh CellFormatter to use for a particular column 25 (overrides the default chosen based on the type).""") 26 27 fit_columns = param.Boolean(default=True, doc=""" 28 Whether columns should expand to the available width. This 29 results in no horizontal scrollbar showing up, but data can 30 get unreadable if there is no enough space available.""") 31 32 selection = param.List(default=[], doc=""" 33 The currently selected rows of the table.""") 34 35 row_height = param.Integer(default=25, doc=""" 36 The height of each table row.""") 37 38 widths = param.Dict(default={}, doc=""" 39 A mapping from column name to column width.""") 40 41 value = param.Parameter(default=None) 42 43 _rename = {'editors': None, 'formatters': None, 'widths': None, 44 'disabled': None} 45 46 _manual_params = ['value', 'editors', 'formatters', 'selection', 'widths'] 47 48 def __init__(self, value=None, **params): 49 super(DataFrame, self).__init__(value=value, **params) 50 self._renamed_cols = {} 51 52 def _get_columns(self): 53 if self.value is None: 54 return [] 55 56 index = [self.value.index.name or 'index'] 57 col_names = index + list(self.value.columns) 58 columns = [] 59 for col in col_names: 60 if col in self.value.columns: 61 data = self.value[col] 62 else: 63 data = self.value.index 64 kind = data.dtype.kind 65 if kind == 'i': 66 formatter = NumberFormatter() 67 editor = IntEditor() 68 elif kind == 'f': 69 formatter = NumberFormatter(format='0,0.0[00000]') 70 editor = NumberEditor() 71 elif isdatetime(data) or kind == 'M': 72 formatter = DateFormatter(format='%Y-%m-%d %H:%M:%S') 73 editor = DateEditor() 74 else: 75 formatter = StringFormatter() 76 editor = StringEditor() 77 78 if col in self.editors: 79 editor = self.editors[col] 80 if col in self.formatters: 81 formatter = self.formatters[col] 82 if str(col) != col: 83 self._renamed_cols[str(col)] = col 84 width = self.widths.get(str(col)) 85 column = TableColumn(field=str(col), title=str(col), 86 editor=editor, formatter=formatter, 87 width=width) 88 columns.append(column) 89 return columns 90 91 def _get_properties(self): 92 props = {p : getattr(self, p) for p in list(Layoutable.param) 93 if getattr(self, p) is not None} 94 if self.value is None: 95 data = {} 96 else: 97 data = {k if isinstance(k, str) else str(k): v 98 for k, v in ColumnDataSource.from_df(self.value).items()} 99 if props.get('height', None) is None: 100 length = max([len(v) for v in data.values()]) if data else 0 101 props['height'] = length * self.row_height + 30 102 props['source'] = ColumnDataSource(data=data) 103 props['columns'] = self._get_columns() 104 props['index_position'] = None 105 props['fit_columns'] = self.fit_columns 106 props['row_height'] = self.row_height 107 props['editable'] = not self.disabled 108 return props 109 110 def _process_param_change(self, 
msg): 111 if 'disabled' in msg: 112 msg['editable'] = not msg.pop('disabled') 113 return super(DataFrame, self)._process_param_change(msg) 114 115 def _get_model(self, doc, root=None, parent=None, comm=None): 116 model = DataTable(**self._get_properties()) 117 if root is None: 118 root = model 119 self._link_props(model.source, ['data', ('patching', 'data')], doc, root, comm) 120 self._link_props(model.source.selected, ['indices'], doc, root, comm) 121 self._models[root.ref['id']] = (model, parent) 122 return model 123 124 def _manual_update(self, events, model, doc, root, parent, comm): 125 for event in events: 126 if event.name == 'value': 127 cds = model.source 128 data = {k if isinstance(k, str) else str(k): v 129 for k, v in ColumnDataSource.from_df(self.value).items()} 130 cds.data = data 131 model.columns = self._get_columns() 132 elif event.name == 'selection': 133 model.source.selected.indices = self.selection 134 else: 135 for col in model.columns: 136 if col.name in self.editors: 137 col.editor = self.editors[col.name] 138 if col.name in self.formatters: 139 col.formatter = self.formatters[col.name] 140 if col.name in self.widths: 141 col.width = self.widths[col.name] 142 143 def _process_events(self, events): 144 if 'data' in events: 145 data = events.pop('data') 146 updated = False 147 for k, v in data.items(): 148 if k == 'index': 149 continue 150 k = self._renamed_cols.get(k, k) 151 if isinstance(v, dict): 152 v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))] 153 try: 154 isequal = (self.value[k].values == np.asarray(v)).all() 155 except Exception: 156 isequal = False 157 if not isequal: 158 self.value[k] = v 159 updated = True 160 if updated: 161 self.param.trigger('value') 162 if 'indices' in events: 163 self.selection = events.pop('indices') 164 super(DataFrame, self)._process_events(events) 165 166 @property 167 def selected_dataframe(self): 168 """ 169 Returns a DataFrame of the currently selected rows. 170 """ 171 if not self.selection: 172 return self.value 173 return self.value.iloc[self.selection] 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/panel/widgets/tables.py b/panel/widgets/tables.py --- a/panel/widgets/tables.py +++ b/panel/widgets/tables.py @@ -47,8 +47,18 @@ def __init__(self, value=None, **params): super(DataFrame, self).__init__(value=value, **params) + self.param.watch(self._validate, 'value') + self._validate(None) self._renamed_cols = {} + def _validate(self, event): + if self.value is None: + return + cols = self.value.columns + if len(cols) != len(cols.drop_duplicates()): + raise ValueError('Cannot display a pandas.DataFrame with ' + 'duplicate column names.') + def _get_columns(self): if self.value is None: return [] @@ -122,6 +132,7 @@ return model def _manual_update(self, events, model, doc, root, parent, comm): + self._validate(None) for event in events: if event.name == 'value': cds = model.source
{"golden_diff": "diff --git a/panel/widgets/tables.py b/panel/widgets/tables.py\n--- a/panel/widgets/tables.py\n+++ b/panel/widgets/tables.py\n@@ -47,8 +47,18 @@\n \n def __init__(self, value=None, **params):\n super(DataFrame, self).__init__(value=value, **params)\n+ self.param.watch(self._validate, 'value')\n+ self._validate(None)\n self._renamed_cols = {}\n \n+ def _validate(self, event):\n+ if self.value is None:\n+ return\n+ cols = self.value.columns\n+ if len(cols) != len(cols.drop_duplicates()):\n+ raise ValueError('Cannot display a pandas.DataFrame with '\n+ 'duplicate column names.')\n+\n def _get_columns(self):\n if self.value is None:\n return []\n@@ -122,6 +132,7 @@\n return model\n \n def _manual_update(self, events, model, doc, root, parent, comm):\n+ self._validate(None)\n for event in events:\n if event.name == 'value':\n cds = model.source\n", "issue": "DataFrame widget raises exception if two columns have the same names\nPanel: 0.7\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\nNormally I don't have the same column names for my dataframes. But I was trying to reproduce this dashboard https://getbootstrap.com/docs/4.3/examples/dashboard/ and they have the same column names in multiple columns.\r\n\r\nI can see that the panel.widgets.DataFrame does not support this. I would have expected it to support this situation.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```python\r\nimport panel as pn\r\nimport pandas as pd\r\n\r\ntable_data = pd.DataFrame([(1001, \"Lorem\"),], columns=[\"Header\", \"Header\"])\r\npn.widgets.DataFrame(table_data).servable()\r\n```\r\n#### Stack traceback and/or browser JavaScript console output\r\n\r\n```bash\r\n$ python -m panel serve 'scripts\\issue2.py' --dev\r\n2019-11-29 09:00:56,834 Starting Bokeh server version 1.4.0 (running on Tornado 6.0.3)\r\n2019-11-29 09:00:56,836 User authentication hooks NOT provided (default user enabled)\r\n2019-11-29 09:00:56,840 Bokeh app running at: http://localhost:5006/issue2\r\n2019-11-29 09:00:56,840 Starting Bokeh server with process id: 10992\r\n2019-11-29 09:00:58,603 Error running application handler <bokeh.application.handlers.script.ScriptHandler object at 0x00000028FF59DD48>: 'DataFrame' object has no attribute 'dtype'\r\nFile \"generic.py\", line 5179, in __getattr__:\r\nreturn object.__getattribute__(self, name) Traceback (most recent call last):\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\bokeh\\application\\handlers\\code_runner.py\", line 179, in run\r\n exec(self._code, module.__dict__)\r\n File \"C:\\repos\\private\\awesome-panel\\scripts\\issue2.py\", line 5, in <module>\r\n pn.widgets.DataFrame(table_data).servable()\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\panel\\viewable.py\", line 525, in servable\r\n self.server_doc(title=title)\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\panel\\viewable.py\", line 498, in server_doc\r\n model = self.get_root(doc)\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\panel\\viewable.py\", line 434, in get_root\r\n root = self._get_model(doc, comm=comm)\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\panel\\widgets\\tables.py\", line 112, in _get_model\r\n model = DataTable(**self._get_properties())\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\panel\\widgets\\tables.py\", line 99, in _get_properties\r\n props['columns'] = 
self._get_columns()\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\panel\\widgets\\tables.py\", line 61, in _get_columns\r\n kind = data.dtype.kind\r\n File \"C:\\repos\\private\\awesome-panel\\.venv\\lib\\site-packages\\pandas\\core\\generic.py\", line 5179, in __getattr__\r\n return object.__getattribute__(self, name)\r\nAttributeError: 'DataFrame' object has no attribute 'dtype'\r\n\r\n2019-11-29 09:00:58,990 WebSocket connection opened\r\n2019-11-29 09:00:58,991 ServerConnection created\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport numpy as np\nimport param\n\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import (\n DataTable, TableColumn, NumberEditor, NumberFormatter,\n DateFormatter, DateEditor, StringFormatter, StringEditor, IntEditor\n)\n\nfrom ..viewable import Layoutable\nfrom ..util import isdatetime\nfrom .base import Widget\n\n\nclass DataFrame(Widget):\n\n editors = param.Dict(default={}, doc=\"\"\"\n Bokeh CellEditor to use for a particular column\n (overrides the default chosen based on the type).\"\"\")\n\n formatters = param.Dict(default={}, doc=\"\"\"\n Bokeh CellFormatter to use for a particular column\n (overrides the default chosen based on the type).\"\"\")\n\n fit_columns = param.Boolean(default=True, doc=\"\"\"\n Whether columns should expand to the available width. This\n results in no horizontal scrollbar showing up, but data can\n get unreadable if there is no enough space available.\"\"\")\n\n selection = param.List(default=[], doc=\"\"\"\n The currently selected rows of the table.\"\"\")\n\n row_height = param.Integer(default=25, doc=\"\"\"\n The height of each table row.\"\"\")\n\n widths = param.Dict(default={}, doc=\"\"\"\n A mapping from column name to column width.\"\"\")\n\n value = param.Parameter(default=None)\n\n _rename = {'editors': None, 'formatters': None, 'widths': None,\n 'disabled': None}\n\n _manual_params = ['value', 'editors', 'formatters', 'selection', 'widths']\n\n def __init__(self, value=None, **params):\n super(DataFrame, self).__init__(value=value, **params)\n self._renamed_cols = {}\n\n def _get_columns(self):\n if self.value is None:\n return []\n\n index = [self.value.index.name or 'index']\n col_names = index + list(self.value.columns)\n columns = []\n for col in col_names:\n if col in self.value.columns:\n data = self.value[col]\n else:\n data = self.value.index\n kind = data.dtype.kind\n if kind == 'i':\n formatter = NumberFormatter()\n editor = IntEditor()\n elif kind == 'f':\n formatter = NumberFormatter(format='0,0.0[00000]')\n editor = NumberEditor()\n elif isdatetime(data) or kind == 'M':\n formatter = DateFormatter(format='%Y-%m-%d %H:%M:%S')\n editor = DateEditor()\n else:\n formatter = StringFormatter()\n editor = StringEditor()\n\n if col in self.editors:\n editor = self.editors[col]\n if col in self.formatters:\n formatter = self.formatters[col]\n if str(col) != col:\n self._renamed_cols[str(col)] = col\n width = self.widths.get(str(col))\n column = TableColumn(field=str(col), title=str(col),\n editor=editor, formatter=formatter,\n width=width)\n columns.append(column)\n return columns\n\n def _get_properties(self):\n props = {p : getattr(self, p) for p in list(Layoutable.param)\n if getattr(self, p) is not None}\n if self.value is None:\n data = {}\n else:\n data = {k if isinstance(k, str) else str(k): v\n for k, v in ColumnDataSource.from_df(self.value).items()}\n if props.get('height', None) is 
None:\n length = max([len(v) for v in data.values()]) if data else 0\n props['height'] = length * self.row_height + 30\n props['source'] = ColumnDataSource(data=data)\n props['columns'] = self._get_columns()\n props['index_position'] = None\n props['fit_columns'] = self.fit_columns\n props['row_height'] = self.row_height\n props['editable'] = not self.disabled\n return props\n\n def _process_param_change(self, msg):\n if 'disabled' in msg:\n msg['editable'] = not msg.pop('disabled')\n return super(DataFrame, self)._process_param_change(msg)\n \n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = DataTable(**self._get_properties())\n if root is None:\n root = model\n self._link_props(model.source, ['data', ('patching', 'data')], doc, root, comm)\n self._link_props(model.source.selected, ['indices'], doc, root, comm)\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n for event in events:\n if event.name == 'value':\n cds = model.source\n data = {k if isinstance(k, str) else str(k): v\n for k, v in ColumnDataSource.from_df(self.value).items()}\n cds.data = data\n model.columns = self._get_columns()\n elif event.name == 'selection':\n model.source.selected.indices = self.selection\n else:\n for col in model.columns:\n if col.name in self.editors:\n col.editor = self.editors[col.name]\n if col.name in self.formatters:\n col.formatter = self.formatters[col.name]\n if col.name in self.widths:\n col.width = self.widths[col.name]\n\n def _process_events(self, events):\n if 'data' in events:\n data = events.pop('data')\n updated = False\n for k, v in data.items():\n if k == 'index':\n continue\n k = self._renamed_cols.get(k, k)\n if isinstance(v, dict):\n v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))]\n try:\n isequal = (self.value[k].values == np.asarray(v)).all()\n except Exception:\n isequal = False\n if not isequal:\n self.value[k] = v\n updated = True\n if updated:\n self.param.trigger('value')\n if 'indices' in events:\n self.selection = events.pop('indices')\n super(DataFrame, self)._process_events(events)\n\n @property\n def selected_dataframe(self):\n \"\"\"\n Returns a DataFrame of the currently selected rows.\n \"\"\"\n if not self.selection:\n return self.value\n return self.value.iloc[self.selection]\n", "path": "panel/widgets/tables.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport numpy as np\nimport param\n\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import (\n DataTable, TableColumn, NumberEditor, NumberFormatter,\n DateFormatter, DateEditor, StringFormatter, StringEditor, IntEditor\n)\n\nfrom ..viewable import Layoutable\nfrom ..util import isdatetime\nfrom .base import Widget\n\n\nclass DataFrame(Widget):\n\n editors = param.Dict(default={}, doc=\"\"\"\n Bokeh CellEditor to use for a particular column\n (overrides the default chosen based on the type).\"\"\")\n\n formatters = param.Dict(default={}, doc=\"\"\"\n Bokeh CellFormatter to use for a particular column\n (overrides the default chosen based on the type).\"\"\")\n\n fit_columns = param.Boolean(default=True, doc=\"\"\"\n Whether columns should expand to the available width. 
This\n results in no horizontal scrollbar showing up, but data can\n get unreadable if there is no enough space available.\"\"\")\n\n selection = param.List(default=[], doc=\"\"\"\n The currently selected rows of the table.\"\"\")\n\n row_height = param.Integer(default=25, doc=\"\"\"\n The height of each table row.\"\"\")\n\n widths = param.Dict(default={}, doc=\"\"\"\n A mapping from column name to column width.\"\"\")\n\n value = param.Parameter(default=None)\n\n _rename = {'editors': None, 'formatters': None, 'widths': None,\n 'disabled': None}\n\n _manual_params = ['value', 'editors', 'formatters', 'selection', 'widths']\n\n def __init__(self, value=None, **params):\n super(DataFrame, self).__init__(value=value, **params)\n self.param.watch(self._validate, 'value')\n self._validate(None)\n self._renamed_cols = {}\n\n def _validate(self, event):\n if self.value is None:\n return\n cols = self.value.columns\n if len(cols) != len(cols.drop_duplicates()):\n raise ValueError('Cannot display a pandas.DataFrame with '\n 'duplicate column names.')\n\n def _get_columns(self):\n if self.value is None:\n return []\n\n index = [self.value.index.name or 'index']\n col_names = index + list(self.value.columns)\n columns = []\n for col in col_names:\n if col in self.value.columns:\n data = self.value[col]\n else:\n data = self.value.index\n kind = data.dtype.kind\n if kind == 'i':\n formatter = NumberFormatter()\n editor = IntEditor()\n elif kind == 'f':\n formatter = NumberFormatter(format='0,0.0[00000]')\n editor = NumberEditor()\n elif isdatetime(data) or kind == 'M':\n formatter = DateFormatter(format='%Y-%m-%d %H:%M:%S')\n editor = DateEditor()\n else:\n formatter = StringFormatter()\n editor = StringEditor()\n\n if col in self.editors:\n editor = self.editors[col]\n if col in self.formatters:\n formatter = self.formatters[col]\n if str(col) != col:\n self._renamed_cols[str(col)] = col\n width = self.widths.get(str(col))\n column = TableColumn(field=str(col), title=str(col),\n editor=editor, formatter=formatter,\n width=width)\n columns.append(column)\n return columns\n\n def _get_properties(self):\n props = {p : getattr(self, p) for p in list(Layoutable.param)\n if getattr(self, p) is not None}\n if self.value is None:\n data = {}\n else:\n data = {k if isinstance(k, str) else str(k): v\n for k, v in ColumnDataSource.from_df(self.value).items()}\n if props.get('height', None) is None:\n length = max([len(v) for v in data.values()]) if data else 0\n props['height'] = length * self.row_height + 30\n props['source'] = ColumnDataSource(data=data)\n props['columns'] = self._get_columns()\n props['index_position'] = None\n props['fit_columns'] = self.fit_columns\n props['row_height'] = self.row_height\n props['editable'] = not self.disabled\n return props\n\n def _process_param_change(self, msg):\n if 'disabled' in msg:\n msg['editable'] = not msg.pop('disabled')\n return super(DataFrame, self)._process_param_change(msg)\n \n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = DataTable(**self._get_properties())\n if root is None:\n root = model\n self._link_props(model.source, ['data', ('patching', 'data')], doc, root, comm)\n self._link_props(model.source.selected, ['indices'], doc, root, comm)\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n self._validate(None)\n for event in events:\n if event.name == 'value':\n cds = model.source\n data = {k if isinstance(k, str) else str(k): v\n for k, v 
in ColumnDataSource.from_df(self.value).items()}\n cds.data = data\n model.columns = self._get_columns()\n elif event.name == 'selection':\n model.source.selected.indices = self.selection\n else:\n for col in model.columns:\n if col.name in self.editors:\n col.editor = self.editors[col.name]\n if col.name in self.formatters:\n col.formatter = self.formatters[col.name]\n if col.name in self.widths:\n col.width = self.widths[col.name]\n\n def _process_events(self, events):\n if 'data' in events:\n data = events.pop('data')\n updated = False\n for k, v in data.items():\n if k == 'index':\n continue\n k = self._renamed_cols.get(k, k)\n if isinstance(v, dict):\n v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))]\n try:\n isequal = (self.value[k].values == np.asarray(v)).all()\n except Exception:\n isequal = False\n if not isequal:\n self.value[k] = v\n updated = True\n if updated:\n self.param.trigger('value')\n if 'indices' in events:\n self.selection = events.pop('indices')\n super(DataFrame, self)._process_events(events)\n\n @property\n def selected_dataframe(self):\n \"\"\"\n Returns a DataFrame of the currently selected rows.\n \"\"\"\n if not self.selection:\n return self.value\n return self.value.iloc[self.selection]\n", "path": "panel/widgets/tables.py"}]}
3006
248
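The panel fix above amounts to rejecting a DataFrame whose column index repeats a label before any Bokeh model is built: for a duplicated label, `df[label]` returns a sub-DataFrame instead of a Series, and the `.dtype` lookup in `_get_columns` is what fails in the traceback. A self-contained sketch of the same guard follows (`validate_columns` is an illustrative name, not panel's API):

```python
import pandas as pd


def validate_columns(df: pd.DataFrame) -> None:
    # With a repeated label, df[label] yields a DataFrame, which has no
    # .dtype attribute -- the AttributeError quoted in the issue.
    cols = df.columns
    if len(cols) != len(cols.drop_duplicates()):
        raise ValueError("Cannot display a pandas.DataFrame with "
                         "duplicate column names.")


table_data = pd.DataFrame([(1001, "Lorem")], columns=["Header", "Header"])
try:
    validate_columns(table_data)
except ValueError as err:
    print(err)  # raised up front instead of failing deep inside Bokeh
```

Wiring this through `param.watch(..., 'value')`, as the golden diff does, makes the check fire both at construction time and on every later assignment to `value`.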
gh_patches_debug_42532
rasdani/github-patches
git_diff
kserve__kserve-1343
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support cloud events for KFServing custom framework /kind feature **Describe the solution you'd like** As a user when doing feature transformation the payload can be protobuf/avro or some other binary format delivered via HTTP or KAFKA in cloudevent binary format(https://github.com/cloudevents/spec/blob/v1.0.1/kafka-protocol-binding.md#32-binary-content-mode) but currently KFServing custom framework [only supports JSON](https://github.com/kubeflow/kfserving/blob/master/python/kfserving/kfserving/handlers/http.py#L51). - I'd like to send cloud event to KFServer via KAFKA in following format and KFServer pass the decoded cloud event to `preprocess` or `predict` handler ``` ------------------ Message ------------------- Topic Name: mytopic ------------------- key ---------------------- Key: mykey ------------------ headers ------------------- ce_specversion: "1.0" ce_type: "com.example.someevent" ce_source: "/mycontext/subcontext" ce_id: "1234-1234-1234" ce_time: "2018-04-05T03:56:24Z" content-type: application/protobuf ------------------- value -------------------- ... application data encoded in protobuf ... ----------------------------------------------- consume http(cloudevent) KAFKA ----------> Kafka Event Source -----------------> InferenceService ``` ```python def has_ce_headers(headers: typing.Dict[str, str]) -> bool: return ( "ce-specversion" in headers and "ce-source" in headers and "ce-type" in headers and "ce-id" in headers ) if has_ce_headers(self.request.headers): event = from_http(self.request.headers, self.request.body) body = event['data'] else: try: body = json.loads(self.request.body) except json.decoder.JSONDecodeError as e: raise tornado.web.HTTPError( status_code=HTTPStatus.BAD_REQUEST, reason="Unrecognized request format: %s" % e) ``` - Currently HTTP headers are not exposed to the `preprocess` or `predict` handler, if a cloud event is sent to KFServer it is not possible for user to check the http headers which can be useful for tracing and examining the original event source(kafka or http). `async def predict(self, request: Dict, headers: Dict) -> Dict:` `async def preprocess(self, request: Dict, headers: Dict) -> Dict:` - Also it would be nice if KFServing can generate cloud event response so that it works with knative eventing channels seamlessly. **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/kfserving/kfserving/handlers/http.py` Content: ``` 1 # Copyright 2020 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import inspect 16 import tornado.web 17 import json 18 from http import HTTPStatus 19 from kfserving.kfmodel_repository import KFModelRepository 20 21 22 class HTTPHandler(tornado.web.RequestHandler): 23 def initialize(self, models: KFModelRepository): 24 self.models = models # pylint:disable=attribute-defined-outside-init 25 26 def get_model(self, name: str): 27 model = self.models.get_model(name) 28 if model is None: 29 raise tornado.web.HTTPError( 30 status_code=HTTPStatus.NOT_FOUND, 31 reason="Model with name %s does not exist." % name 32 ) 33 if not model.ready: 34 model.load() 35 return model 36 37 def validate(self, request): 38 if ("instances" in request and not isinstance(request["instances"], list)) or \ 39 ("inputs" in request and not isinstance(request["inputs"], list)): 40 raise tornado.web.HTTPError( 41 status_code=HTTPStatus.BAD_REQUEST, 42 reason="Expected \"instances\" or \"inputs\" to be a list" 43 ) 44 return request 45 46 47 class PredictHandler(HTTPHandler): 48 async def post(self, name: str): 49 model = self.get_model(name) 50 try: 51 body = json.loads(self.request.body) 52 except json.decoder.JSONDecodeError as e: 53 raise tornado.web.HTTPError( 54 status_code=HTTPStatus.BAD_REQUEST, 55 reason="Unrecognized request format: %s" % e 56 ) 57 request = model.preprocess(body) 58 request = self.validate(request) 59 response = (await model.predict(request)) if inspect.iscoroutinefunction(model.predict) else model.predict(request) 60 response = model.postprocess(response) 61 self.write(response) 62 63 64 class ExplainHandler(HTTPHandler): 65 async def post(self, name: str): 66 model = self.get_model(name) 67 try: 68 body = json.loads(self.request.body) 69 except json.decoder.JSONDecodeError as e: 70 raise tornado.web.HTTPError( 71 status_code=HTTPStatus.BAD_REQUEST, 72 reason="Unrecognized request format: %s" % e 73 ) 74 request = model.preprocess(body) 75 request = self.validate(request) 76 response = (await model.explain(request)) if inspect.iscoroutinefunction(model.explain) else model.explain(request) 77 response = model.postprocess(response) 78 self.write(response) 79 ``` Path: `python/kfserving/kfserving/kfmodel.py` Content: ``` 1 # Copyright 2020 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Dict 16 import sys 17 18 import json 19 import tornado.web 20 from tornado.httpclient import AsyncHTTPClient 21 22 PREDICTOR_URL_FORMAT = "http://{0}/v1/models/{1}:predict" 23 EXPLAINER_URL_FORMAT = "http://{0}/v1/models/{1}:explain" 24 PREDICTOR_V2_URL_FORMAT = "http://{0}/v2/models/{1}/infer" 25 EXPLAINER_V2_URL_FORMAT = "http://{0}/v2/models/{1}/explain" 26 27 28 # KFModel is intended to be subclassed by various components within KFServing. 29 class KFModel: 30 31 def __init__(self, name: str): 32 self.name = name 33 self.ready = False 34 self.protocol = "v1" 35 self.predictor_host = None 36 self.explainer_host = None 37 # The timeout matches what is set in generated Istio resources. 
38 # We generally don't want things to time out at the request level here, 39 # timeouts should be handled elsewhere in the system. 40 self.timeout = 600 41 self._http_client_instance = None 42 43 @property 44 def _http_client(self): 45 if self._http_client_instance is None: 46 self._http_client_instance = AsyncHTTPClient(max_clients=sys.maxsize) 47 return self._http_client_instance 48 49 def load(self) -> bool: 50 self.ready = True 51 return self.ready 52 53 def preprocess(self, request: Dict) -> Dict: 54 return request 55 56 def postprocess(self, request: Dict) -> Dict: 57 return request 58 59 async def predict(self, request: Dict) -> Dict: 60 if not self.predictor_host: 61 raise NotImplementedError 62 predict_url = PREDICTOR_URL_FORMAT.format(self.predictor_host, self.name) 63 if self.protocol == "v2": 64 predict_url = PREDICTOR_V2_URL_FORMAT.format(self.predictor_host, self.name) 65 response = await self._http_client.fetch( 66 predict_url, 67 method='POST', 68 request_timeout=self.timeout, 69 body=json.dumps(request) 70 ) 71 if response.code != 200: 72 raise tornado.web.HTTPError( 73 status_code=response.code, 74 reason=response.body) 75 return json.loads(response.body) 76 77 async def explain(self, request: Dict) -> Dict: 78 if self.explainer_host is None: 79 raise NotImplementedError 80 explain_url = EXPLAINER_URL_FORMAT.format(self.predictor_host, self.name) 81 if self.protocol == "v2": 82 explain_url = EXPLAINER_V2_URL_FORMAT.format(self.predictor_host, self.name) 83 response = await self._http_client.fetch( 84 url=explain_url, 85 method='POST', 86 request_timeout=self.timeout, 87 body=json.dumps(request) 88 ) 89 if response.code != 200: 90 raise tornado.web.HTTPError( 91 status_code=response.code, 92 reason=response.body) 93 return json.loads(response.body) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/kfserving/kfserving/handlers/http.py b/python/kfserving/kfserving/handlers/http.py --- a/python/kfserving/kfserving/handlers/http.py +++ b/python/kfserving/kfserving/handlers/http.py @@ -14,9 +14,15 @@ import inspect import tornado.web +import typing import json +import pytz +import cloudevents.exceptions as ce +from cloudevents.http import CloudEvent, from_http, is_binary, is_structured, to_binary, to_structured +from cloudevents.sdk.converters.util import has_binary_headers from http import HTTPStatus from kfserving.kfmodel_repository import KFModelRepository +from datetime import datetime class HTTPHandler(tornado.web.RequestHandler): @@ -43,21 +49,52 @@ ) return request - class PredictHandler(HTTPHandler): async def post(self, name: str): + if has_binary_headers(self.request.headers): + try: + #Use default unmarshaller if contenttype is set in header + if "ce-contenttype" in self.request.headers: + body = from_http(self.request.headers, self.request.body) + else: + body = from_http(self.request.headers, self.request.body, lambda x: x) + except (ce.MissingRequiredFields, ce.InvalidRequiredFields, ce.InvalidStructuredJSON, ce.InvalidHeadersFormat, ce.DataMarshallerError, ce.DataUnmarshallerError) as e: + raise tornado.web.HTTPError( + status_code=HTTPStatus.BAD_REQUEST, + reason="Cloud Event Exceptions: %s" % e + ) + else: + try: + body = json.loads(self.request.body) + except json.decoder.JSONDecodeError as e: + raise tornado.web.HTTPError( + status_code=HTTPStatus.BAD_REQUEST, + reason="Unrecognized request format: %s" % e + ) + model = self.get_model(name) - try: - body = json.loads(self.request.body) - except json.decoder.JSONDecodeError as e: - raise tornado.web.HTTPError( - status_code=HTTPStatus.BAD_REQUEST, - reason="Unrecognized request format: %s" % e - ) request = model.preprocess(body) request = self.validate(request) response = (await model.predict(request)) if inspect.iscoroutinefunction(model.predict) else model.predict(request) response = model.postprocess(response) + + if has_binary_headers(self.request.headers): + event = CloudEvent(body._attributes, response) + if is_binary(self.request.headers): + eventheader, eventbody = to_binary(event) + elif is_structured(self.request.headers): + eventheader, eventbody = to_structured(event) + for k, v in eventheader.items(): + if k != "ce-time": + self.set_header(k, v) + else: #utc now() timestamp + self.set_header('ce-time', datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f%z')) + + if isinstance(eventbody, (bytes, bytearray)): + response = eventbody + else: + response = eventbody.data + self.write(response) diff --git a/python/kfserving/kfserving/kfmodel.py b/python/kfserving/kfserving/kfmodel.py --- a/python/kfserving/kfserving/kfmodel.py +++ b/python/kfserving/kfserving/kfmodel.py @@ -51,7 +51,16 @@ return self.ready def preprocess(self, request: Dict) -> Dict: - return request + # If cloudevent dict, then parse 'data' field. Otherwise, pass through. + if "data" in request \ + and "time" in request \ + and "type" in request \ + and "source" in request \ + and "id" in request \ + and "specversion" in request: + return request["data"] + else: + return request def postprocess(self, request: Dict) -> Dict: return request @@ -91,3 +100,4 @@ status_code=response.code, reason=response.body) return json.loads(response.body) +
{"golden_diff": "diff --git a/python/kfserving/kfserving/handlers/http.py b/python/kfserving/kfserving/handlers/http.py\n--- a/python/kfserving/kfserving/handlers/http.py\n+++ b/python/kfserving/kfserving/handlers/http.py\n@@ -14,9 +14,15 @@\n \n import inspect\n import tornado.web\n+import typing\n import json\n+import pytz\n+import cloudevents.exceptions as ce\n+from cloudevents.http import CloudEvent, from_http, is_binary, is_structured, to_binary, to_structured\n+from cloudevents.sdk.converters.util import has_binary_headers\n from http import HTTPStatus\n from kfserving.kfmodel_repository import KFModelRepository\n+from datetime import datetime\n \n \n class HTTPHandler(tornado.web.RequestHandler):\n@@ -43,21 +49,52 @@\n )\n return request\n \n-\n class PredictHandler(HTTPHandler):\n async def post(self, name: str):\n+ if has_binary_headers(self.request.headers): \n+ try:\n+ #Use default unmarshaller if contenttype is set in header\n+ if \"ce-contenttype\" in self.request.headers:\n+ body = from_http(self.request.headers, self.request.body)\n+ else:\n+ body = from_http(self.request.headers, self.request.body, lambda x: x)\n+ except (ce.MissingRequiredFields, ce.InvalidRequiredFields, ce.InvalidStructuredJSON, ce.InvalidHeadersFormat, ce.DataMarshallerError, ce.DataUnmarshallerError) as e:\n+ raise tornado.web.HTTPError(\n+ status_code=HTTPStatus.BAD_REQUEST,\n+ reason=\"Cloud Event Exceptions: %s\" % e\n+ )\n+ else:\n+ try:\n+ body = json.loads(self.request.body)\n+ except json.decoder.JSONDecodeError as e:\n+ raise tornado.web.HTTPError(\n+ status_code=HTTPStatus.BAD_REQUEST,\n+ reason=\"Unrecognized request format: %s\" % e\n+ )\n+\n model = self.get_model(name)\n- try:\n- body = json.loads(self.request.body)\n- except json.decoder.JSONDecodeError as e:\n- raise tornado.web.HTTPError(\n- status_code=HTTPStatus.BAD_REQUEST,\n- reason=\"Unrecognized request format: %s\" % e\n- )\n request = model.preprocess(body)\n request = self.validate(request)\n response = (await model.predict(request)) if inspect.iscoroutinefunction(model.predict) else model.predict(request)\n response = model.postprocess(response)\n+\n+ if has_binary_headers(self.request.headers):\n+ event = CloudEvent(body._attributes, response)\n+ if is_binary(self.request.headers):\n+ eventheader, eventbody = to_binary(event)\n+ elif is_structured(self.request.headers):\n+ eventheader, eventbody = to_structured(event)\n+ for k, v in eventheader.items():\n+ if k != \"ce-time\":\n+ self.set_header(k, v)\n+ else: #utc now() timestamp\n+ self.set_header('ce-time', datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f%z'))\n+\n+ if isinstance(eventbody, (bytes, bytearray)):\n+ response = eventbody\n+ else:\n+ response = eventbody.data\n+\n self.write(response)\n \n \ndiff --git a/python/kfserving/kfserving/kfmodel.py b/python/kfserving/kfserving/kfmodel.py\n--- a/python/kfserving/kfserving/kfmodel.py\n+++ b/python/kfserving/kfserving/kfmodel.py\n@@ -51,7 +51,16 @@\n return self.ready\n \n def preprocess(self, request: Dict) -> Dict:\n- return request\n+ # If cloudevent dict, then parse 'data' field. 
Otherwise, pass through.\n+ if \"data\" in request \\\n+ and \"time\" in request \\\n+ and \"type\" in request \\\n+ and \"source\" in request \\\n+ and \"id\" in request \\\n+ and \"specversion\" in request:\n+ return request[\"data\"]\n+ else:\n+ return request\n \n def postprocess(self, request: Dict) -> Dict:\n return request\n@@ -91,3 +100,4 @@\n status_code=response.code,\n reason=response.body)\n return json.loads(response.body)\n+\n", "issue": "Support cloud events for KFServing custom framework \n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nAs a user when doing feature transformation the payload can be protobuf/avro or some other binary format delivered via HTTP or KAFKA in cloudevent binary format(https://github.com/cloudevents/spec/blob/v1.0.1/kafka-protocol-binding.md#32-binary-content-mode) but currently KFServing custom framework [only supports JSON](https://github.com/kubeflow/kfserving/blob/master/python/kfserving/kfserving/handlers/http.py#L51).\r\n\r\n- I'd like to send cloud event to KFServer via KAFKA in following format and KFServer pass the decoded cloud event to `preprocess` or `predict` handler\r\n \r\n```\r\n------------------ Message -------------------\r\nTopic Name: mytopic\r\n------------------- key ----------------------\r\nKey: mykey\r\n------------------ headers -------------------\r\nce_specversion: \"1.0\"\r\nce_type: \"com.example.someevent\"\r\nce_source: \"/mycontext/subcontext\"\r\nce_id: \"1234-1234-1234\"\r\nce_time: \"2018-04-05T03:56:24Z\"\r\ncontent-type: application/protobuf\r\n------------------- value --------------------\r\n ... application data encoded in protobuf ...\r\n-----------------------------------------------\r\n\r\n consume http(cloudevent)\r\nKAFKA ----------> Kafka Event Source -----------------> InferenceService\r\n\r\n```\r\n\r\n\r\n\r\n\r\n```python\r\ndef has_ce_headers(headers: typing.Dict[str, str]) -> bool:\r\n return (\r\n \"ce-specversion\" in headers\r\n and \"ce-source\" in headers\r\n and \"ce-type\" in headers\r\n and \"ce-id\" in headers\r\n )\r\n \r\n\r\n if has_ce_headers(self.request.headers):\r\n event = from_http(self.request.headers, self.request.body)\r\n body = event['data']\r\n else:\r\n try:\r\n body = json.loads(self.request.body)\r\n except json.decoder.JSONDecodeError as e:\r\n raise tornado.web.HTTPError(\r\n status_code=HTTPStatus.BAD_REQUEST,\r\n reason=\"Unrecognized request format: %s\" % e)\r\n ``` \r\n \r\n- Currently HTTP headers are not exposed to the `preprocess` or `predict` handler, if a cloud event is sent to KFServer it is not possible for user to check the http headers which can be useful for tracing and examining the original event source(kafka or http).\r\n`async def predict(self, request: Dict, headers: Dict) -> Dict:`\r\n`async def preprocess(self, request: Dict, headers: Dict) -> Dict:`\r\n\r\n- Also it would be nice if KFServing can generate cloud event response so that it works with knative eventing channels seamlessly.\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" 
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport tornado.web\nimport json\nfrom http import HTTPStatus\nfrom kfserving.kfmodel_repository import KFModelRepository\n\n\nclass HTTPHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get_model(self, name: str):\n model = self.models.get_model(name)\n if model is None:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.NOT_FOUND,\n reason=\"Model with name %s does not exist.\" % name\n )\n if not model.ready:\n model.load()\n return model\n\n def validate(self, request):\n if (\"instances\" in request and not isinstance(request[\"instances\"], list)) or \\\n (\"inputs\" in request and not isinstance(request[\"inputs\"], list)):\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" or \\\"inputs\\\" to be a list\"\n )\n return request\n\n\nclass PredictHandler(HTTPHandler):\n async def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = (await model.predict(request)) if inspect.iscoroutinefunction(model.predict) else model.predict(request)\n response = model.postprocess(response)\n self.write(response)\n\n\nclass ExplainHandler(HTTPHandler):\n async def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = (await model.explain(request)) if inspect.iscoroutinefunction(model.explain) else model.explain(request)\n response = model.postprocess(response)\n self.write(response)\n", "path": "python/kfserving/kfserving/handlers/http.py"}, {"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\nimport sys\n\nimport json\nimport tornado.web\nfrom tornado.httpclient import AsyncHTTPClient\n\nPREDICTOR_URL_FORMAT = \"http://{0}/v1/models/{1}:predict\"\nEXPLAINER_URL_FORMAT = \"http://{0}/v1/models/{1}:explain\"\nPREDICTOR_V2_URL_FORMAT = \"http://{0}/v2/models/{1}/infer\"\nEXPLAINER_V2_URL_FORMAT = \"http://{0}/v2/models/{1}/explain\"\n\n\n# KFModel is intended to be subclassed by various components within KFServing.\nclass KFModel:\n\n def __init__(self, name: str):\n self.name = name\n self.ready = False\n self.protocol = \"v1\"\n self.predictor_host = None\n self.explainer_host = 
None\n # The timeout matches what is set in generated Istio resources.\n # We generally don't want things to time out at the request level here,\n # timeouts should be handled elsewhere in the system.\n self.timeout = 600\n self._http_client_instance = None\n\n @property\n def _http_client(self):\n if self._http_client_instance is None:\n self._http_client_instance = AsyncHTTPClient(max_clients=sys.maxsize)\n return self._http_client_instance\n\n def load(self) -> bool:\n self.ready = True\n return self.ready\n\n def preprocess(self, request: Dict) -> Dict:\n return request\n\n def postprocess(self, request: Dict) -> Dict:\n return request\n\n async def predict(self, request: Dict) -> Dict:\n if not self.predictor_host:\n raise NotImplementedError\n predict_url = PREDICTOR_URL_FORMAT.format(self.predictor_host, self.name)\n if self.protocol == \"v2\":\n predict_url = PREDICTOR_V2_URL_FORMAT.format(self.predictor_host, self.name)\n response = await self._http_client.fetch(\n predict_url,\n method='POST',\n request_timeout=self.timeout,\n body=json.dumps(request)\n )\n if response.code != 200:\n raise tornado.web.HTTPError(\n status_code=response.code,\n reason=response.body)\n return json.loads(response.body)\n\n async def explain(self, request: Dict) -> Dict:\n if self.explainer_host is None:\n raise NotImplementedError\n explain_url = EXPLAINER_URL_FORMAT.format(self.predictor_host, self.name)\n if self.protocol == \"v2\":\n explain_url = EXPLAINER_V2_URL_FORMAT.format(self.predictor_host, self.name)\n response = await self._http_client.fetch(\n url=explain_url,\n method='POST',\n request_timeout=self.timeout,\n body=json.dumps(request)\n )\n if response.code != 200:\n raise tornado.web.HTTPError(\n status_code=response.code,\n reason=response.body)\n return json.loads(response.body)\n", "path": "python/kfserving/kfserving/kfmodel.py"}], "after_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport tornado.web\nimport typing\nimport json\nimport pytz\nimport cloudevents.exceptions as ce\nfrom cloudevents.http import CloudEvent, from_http, is_binary, is_structured, to_binary, to_structured\nfrom cloudevents.sdk.converters.util import has_binary_headers\nfrom http import HTTPStatus\nfrom kfserving.kfmodel_repository import KFModelRepository\nfrom datetime import datetime\n\n\nclass HTTPHandler(tornado.web.RequestHandler):\n def initialize(self, models: KFModelRepository):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get_model(self, name: str):\n model = self.models.get_model(name)\n if model is None:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.NOT_FOUND,\n reason=\"Model with name %s does not exist.\" % name\n )\n if not model.ready:\n model.load()\n return model\n\n def validate(self, request):\n if (\"instances\" in request and not isinstance(request[\"instances\"], list)) or \\\n (\"inputs\" in request and not isinstance(request[\"inputs\"], list)):\n raise 
tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" or \\\"inputs\\\" to be a list\"\n )\n return request\n\nclass PredictHandler(HTTPHandler):\n async def post(self, name: str):\n if has_binary_headers(self.request.headers): \n try:\n #Use default unmarshaller if contenttype is set in header\n if \"ce-contenttype\" in self.request.headers:\n body = from_http(self.request.headers, self.request.body)\n else:\n body = from_http(self.request.headers, self.request.body, lambda x: x)\n except (ce.MissingRequiredFields, ce.InvalidRequiredFields, ce.InvalidStructuredJSON, ce.InvalidHeadersFormat, ce.DataMarshallerError, ce.DataUnmarshallerError) as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Cloud Event Exceptions: %s\" % e\n )\n else:\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n\n model = self.get_model(name)\n request = model.preprocess(body)\n request = self.validate(request)\n response = (await model.predict(request)) if inspect.iscoroutinefunction(model.predict) else model.predict(request)\n response = model.postprocess(response)\n\n if has_binary_headers(self.request.headers):\n event = CloudEvent(body._attributes, response)\n if is_binary(self.request.headers):\n eventheader, eventbody = to_binary(event)\n elif is_structured(self.request.headers):\n eventheader, eventbody = to_structured(event)\n for k, v in eventheader.items():\n if k != \"ce-time\":\n self.set_header(k, v)\n else: #utc now() timestamp\n self.set_header('ce-time', datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%f%z'))\n\n if isinstance(eventbody, (bytes, bytearray)):\n response = eventbody\n else:\n response = eventbody.data\n\n self.write(response)\n\n\nclass ExplainHandler(HTTPHandler):\n async def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = (await model.explain(request)) if inspect.iscoroutinefunction(model.explain) else model.explain(request)\n response = model.postprocess(response)\n self.write(response)\n", "path": "python/kfserving/kfserving/handlers/http.py"}, {"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\nimport sys\n\nimport json\nimport tornado.web\nfrom tornado.httpclient import AsyncHTTPClient\n\nPREDICTOR_URL_FORMAT = \"http://{0}/v1/models/{1}:predict\"\nEXPLAINER_URL_FORMAT = \"http://{0}/v1/models/{1}:explain\"\nPREDICTOR_V2_URL_FORMAT = \"http://{0}/v2/models/{1}/infer\"\nEXPLAINER_V2_URL_FORMAT = \"http://{0}/v2/models/{1}/explain\"\n\n\n# KFModel is intended 
to be subclassed by various components within KFServing.\nclass KFModel:\n\n def __init__(self, name: str):\n self.name = name\n self.ready = False\n self.protocol = \"v1\"\n self.predictor_host = None\n self.explainer_host = None\n # The timeout matches what is set in generated Istio resources.\n # We generally don't want things to time out at the request level here,\n # timeouts should be handled elsewhere in the system.\n self.timeout = 600\n self._http_client_instance = None\n\n @property\n def _http_client(self):\n if self._http_client_instance is None:\n self._http_client_instance = AsyncHTTPClient(max_clients=sys.maxsize)\n return self._http_client_instance\n\n def load(self) -> bool:\n self.ready = True\n return self.ready\n\n def preprocess(self, request: Dict) -> Dict:\n # If cloudevent dict, then parse 'data' field. Otherwise, pass through.\n if \"data\" in request \\\n and \"time\" in request \\\n and \"type\" in request \\\n and \"source\" in request \\\n and \"id\" in request \\\n and \"specversion\" in request:\n return request[\"data\"]\n else:\n return request\n\n def postprocess(self, request: Dict) -> Dict:\n return request\n\n async def predict(self, request: Dict) -> Dict:\n if not self.predictor_host:\n raise NotImplementedError\n predict_url = PREDICTOR_URL_FORMAT.format(self.predictor_host, self.name)\n if self.protocol == \"v2\":\n predict_url = PREDICTOR_V2_URL_FORMAT.format(self.predictor_host, self.name)\n response = await self._http_client.fetch(\n predict_url,\n method='POST',\n request_timeout=self.timeout,\n body=json.dumps(request)\n )\n if response.code != 200:\n raise tornado.web.HTTPError(\n status_code=response.code,\n reason=response.body)\n return json.loads(response.body)\n\n async def explain(self, request: Dict) -> Dict:\n if self.explainer_host is None:\n raise NotImplementedError\n explain_url = EXPLAINER_URL_FORMAT.format(self.predictor_host, self.name)\n if self.protocol == \"v2\":\n explain_url = EXPLAINER_V2_URL_FORMAT.format(self.predictor_host, self.name)\n response = await self._http_client.fetch(\n url=explain_url,\n method='POST',\n request_timeout=self.timeout,\n body=json.dumps(request)\n )\n if response.code != 200:\n raise tornado.web.HTTPError(\n status_code=response.code,\n reason=response.body)\n return json.loads(response.body)\n\n", "path": "python/kfserving/kfserving/kfmodel.py"}]}
2,609
971
gh_patches_debug_8100
rasdani/github-patches
git_diff
WeblateOrg__weblate-11568
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Time to use `build` from `setuptools` instead of `distutils`? ### Describe the problem The following feature in setuptools has been deprecated for almost 2 years and is about to be removed: https://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27 It might be a good idea to import `build` directly from setuptools for the following code: https://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9 https://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58 (`build` is available directly from setuptools, starting on version v62.4.0) ### Describe the solution you would like Whenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable). ### Describe alternatives you have considered _No response_ ### Screenshots _No response_ ### Additional context _No response_ Time to use `build` from `setuptools` instead of `distutils`? ### Describe the problem The following feature in setuptools has been deprecated for almost 2 years and is about to be removed: https://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27 It might be a good idea to import `build` directly from setuptools for the following code: https://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9 https://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58 (`build` is available directly from setuptools, starting on version v62.4.0) ### Describe the solution you would like Whenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable). ### Describe alternatives you have considered _No response_ ### Screenshots _No response_ ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python3 2 3 # Copyright © Michal Čihař <[email protected]> 4 # 5 # SPDX-License-Identifier: GPL-3.0-or-later 6 7 import os 8 from distutils import log 9 from distutils.command.build import build 10 from distutils.core import Command 11 from glob import glob 12 from itertools import chain 13 14 from setuptools import setup 15 from setuptools.command.build_py import build_py 16 from setuptools.modified import newer 17 from translate.tools.pocompile import convertmo 18 19 LOCALE_MASKS = [ 20 "weblate/locale/*/LC_MESSAGES/*.po", 21 ] 22 23 24 class WeblateBuildPy(build_py): 25 def find_package_modules(self, package, package_dir): 26 """Filter settings.py from built module.""" 27 result = super().find_package_modules(package, package_dir) 28 return [item for item in result if item[2] != "weblate/settings.py"] 29 30 31 class BuildMo(Command): 32 description = "update MO files to match PO" 33 user_options = [] 34 35 def initialize_options(self) -> None: 36 self.build_base = None 37 38 def finalize_options(self) -> None: 39 self.set_undefined_options("build", ("build_base", "build_base")) 40 41 def run(self) -> None: 42 for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS): 43 output = os.path.splitext(name)[0] + ".mo" 44 if not newer(name, output): 45 continue 46 self.announce(f"compiling {name} -> {output}", level=log.INFO) 47 with open(name, "rb") as pofile, open(output, "wb") as mofile: 48 convertmo(pofile, mofile, None) 49 50 51 class WeblateBuild(build): 52 """Override the default build with new subcommands.""" 53 54 # The build_mo has to be before build_data 55 sub_commands = [ 56 ("build_mo", lambda self: True), # noqa: ARG005 57 *build.sub_commands, 58 ] 59 60 61 setup( 62 cmdclass={"build_py": WeblateBuildPy, "build_mo": BuildMo, "build": WeblateBuild}, 63 ) 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -6,12 +6,12 @@ import os from distutils import log -from distutils.command.build import build from distutils.core import Command from glob import glob from itertools import chain from setuptools import setup +from setuptools.command.build import build from setuptools.command.build_py import build_py from setuptools.modified import newer from translate.tools.pocompile import convertmo
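The diff above swaps the deprecated `distutils.command.build` import for the one setuptools ships (available since setuptools 62.4.0, per the issue). A hedged sketch of the same import pattern follows; the `ImportError` fallback for older setuptools is an assumption added for illustration and is not part of the Weblate patch itself.

```python
# Prefer the setuptools-provided build command; fall back to the deprecated
# distutils location only on setuptools < 62.4.0 (assumed fallback).
try:
    from setuptools.command.build import build
except ImportError:  # setuptools < 62.4.0
    from distutils.command.build import build


class BuildWithExtraStep(build):
    """Subcommand ordering works identically against either base class."""

    # Run a custom subcommand (hypothetical name) before the stock ones,
    # mirroring how Weblate schedules build_mo ahead of build_data.
    sub_commands = [("build_mo", lambda self: True), *build.sub_commands]
```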
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,12 +6,12 @@\n \n import os\n from distutils import log\n-from distutils.command.build import build\n from distutils.core import Command\n from glob import glob\n from itertools import chain\n \n from setuptools import setup\n+from setuptools.command.build import build\n from setuptools.command.build_py import build_py\n from setuptools.modified import newer\n from translate.tools.pocompile import convertmo\n", "issue": "Time to use `build` from `setuptools` instead of `distutils`?\n### Describe the problem\n\nThe following feature in setuptools has been deprecated for almost 2 years and is about to be removed:\r\n\r\nhttps://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27\r\n\r\nIt might be a good idea to import `build` directly from setuptools for the following code:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58\r\n\r\n(`build` is available directly from setuptools, starting on version v62.4.0)\n\n### Describe the solution you would like\n\nWhenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable).\n\n### Describe alternatives you have considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\nTime to use `build` from `setuptools` instead of `distutils`?\n### Describe the problem\n\nThe following feature in setuptools has been deprecated for almost 2 years and is about to be removed:\r\n\r\nhttps://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27\r\n\r\nIt might be a good idea to import `build` directly from setuptools for the following code:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58\r\n\r\n(`build` is available directly from setuptools, starting on version v62.4.0)\n\n### Describe the solution you would like\n\nWhenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable).\n\n### Describe alternatives you have considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport os\nfrom distutils import log\nfrom distutils.command.build import build\nfrom distutils.core import Command\nfrom glob import glob\nfrom itertools import chain\n\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.modified import newer\nfrom translate.tools.pocompile import convertmo\n\nLOCALE_MASKS = [\n \"weblate/locale/*/LC_MESSAGES/*.po\",\n]\n\n\nclass WeblateBuildPy(build_py):\n def find_package_modules(self, package, package_dir):\n \"\"\"Filter settings.py from built module.\"\"\"\n result = super().find_package_modules(package, package_dir)\n return [item for item in result if item[2] != \"weblate/settings.py\"]\n\n\nclass BuildMo(Command):\n description = \"update MO files to match PO\"\n user_options = []\n\n def 
initialize_options(self) -> None:\n self.build_base = None\n\n def finalize_options(self) -> None:\n self.set_undefined_options(\"build\", (\"build_base\", \"build_base\"))\n\n def run(self) -> None:\n for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):\n output = os.path.splitext(name)[0] + \".mo\"\n if not newer(name, output):\n continue\n self.announce(f\"compiling {name} -> {output}\", level=log.INFO)\n with open(name, \"rb\") as pofile, open(output, \"wb\") as mofile:\n convertmo(pofile, mofile, None)\n\n\nclass WeblateBuild(build):\n \"\"\"Override the default build with new subcommands.\"\"\"\n\n # The build_mo has to be before build_data\n sub_commands = [\n (\"build_mo\", lambda self: True), # noqa: ARG005\n *build.sub_commands,\n ]\n\n\nsetup(\n cmdclass={\"build_py\": WeblateBuildPy, \"build_mo\": BuildMo, \"build\": WeblateBuild},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport os\nfrom distutils import log\nfrom distutils.core import Command\nfrom glob import glob\nfrom itertools import chain\n\nfrom setuptools import setup\nfrom setuptools.command.build import build\nfrom setuptools.command.build_py import build_py\nfrom setuptools.modified import newer\nfrom translate.tools.pocompile import convertmo\n\nLOCALE_MASKS = [\n \"weblate/locale/*/LC_MESSAGES/*.po\",\n]\n\n\nclass WeblateBuildPy(build_py):\n def find_package_modules(self, package, package_dir):\n \"\"\"Filter settings.py from built module.\"\"\"\n result = super().find_package_modules(package, package_dir)\n return [item for item in result if item[2] != \"weblate/settings.py\"]\n\n\nclass BuildMo(Command):\n description = \"update MO files to match PO\"\n user_options = []\n\n def initialize_options(self) -> None:\n self.build_base = None\n\n def finalize_options(self) -> None:\n self.set_undefined_options(\"build\", (\"build_base\", \"build_base\"))\n\n def run(self) -> None:\n for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):\n output = os.path.splitext(name)[0] + \".mo\"\n if not newer(name, output):\n continue\n self.announce(f\"compiling {name} -> {output}\", level=log.INFO)\n with open(name, \"rb\") as pofile, open(output, \"wb\") as mofile:\n convertmo(pofile, mofile, None)\n\n\nclass WeblateBuild(build):\n \"\"\"Override the default build with new subcommands.\"\"\"\n\n # The build_mo has to be before build_data\n sub_commands = [\n (\"build_mo\", lambda self: True), # noqa: ARG005\n *build.sub_commands,\n ]\n\n\nsetup(\n cmdclass={\"build_py\": WeblateBuildPy, \"build_mo\": BuildMo, \"build\": WeblateBuild},\n)\n", "path": "setup.py"}]}
1,470
105
gh_patches_debug_8391
rasdani/github-patches
git_diff
scrapy__scrapy-3377
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- scrapy check exit code on exception When I run `scrapy check` and a contract raises unhandled exception, the command exits with successful code `0`. ``` $ scrapy check $SPIDER_NAME ... some stuff ... Ran 0 contracts in 0.000s OK $ echo $? 0 ``` Is this intended, or should I fix it? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/contracts/__init__.py` Content: ``` 1 import sys 2 import re 3 from functools import wraps 4 from inspect import getmembers 5 from unittest import TestCase 6 7 from scrapy.http import Request 8 from scrapy.utils.spider import iterate_spider_output 9 from scrapy.utils.python import get_spec 10 11 12 class ContractsManager(object): 13 contracts = {} 14 15 def __init__(self, contracts): 16 for contract in contracts: 17 self.contracts[contract.name] = contract 18 19 def tested_methods_from_spidercls(self, spidercls): 20 methods = [] 21 for key, value in getmembers(spidercls): 22 if (callable(value) and value.__doc__ and 23 re.search(r'^\s*@', value.__doc__, re.MULTILINE)): 24 methods.append(key) 25 26 return methods 27 28 def extract_contracts(self, method): 29 contracts = [] 30 for line in method.__doc__.split('\n'): 31 line = line.strip() 32 33 if line.startswith('@'): 34 name, args = re.match(r'@(\w+)\s*(.*)', line).groups() 35 args = re.split(r'\s+', args) 36 37 contracts.append(self.contracts[name](method, *args)) 38 39 return contracts 40 41 def from_spider(self, spider, results): 42 requests = [] 43 for method in self.tested_methods_from_spidercls(type(spider)): 44 bound_method = spider.__getattribute__(method) 45 requests.append(self.from_method(bound_method, results)) 46 47 return requests 48 49 def from_method(self, method, results): 50 contracts = self.extract_contracts(method) 51 if contracts: 52 request_cls = Request 53 for contract in contracts: 54 if contract.request_cls is not None: 55 request_cls = contract.request_cls 56 57 # calculate request args 58 args, kwargs = get_spec(request_cls.__init__) 59 kwargs['callback'] = method 60 for contract in contracts: 61 kwargs = contract.adjust_request_args(kwargs) 62 63 args.remove('self') 64 65 # check if all positional arguments are defined in kwargs 66 if set(args).issubset(set(kwargs)): 67 request = request_cls(**kwargs) 68 69 # execute pre and post hooks in order 70 for contract in reversed(contracts): 71 request = contract.add_pre_hook(request, results) 72 for contract in contracts: 73 request = contract.add_post_hook(request, results) 74 75 self._clean_req(request, method, results) 76 return request 77 78 def _clean_req(self, request, method, results): 79 """ stop the request from returning objects and records any errors """ 80 81 cb = request.callback 82 83 @wraps(cb) 84 def cb_wrapper(response): 85 try: 86 output = cb(response) 87 output = list(iterate_spider_output(output)) 88 except: 89 case = _create_testcase(method, 'callback') 90 results.addError(case, sys.exc_info()) 91 92 def eb_wrapper(failure): 93 case = _create_testcase(method, 'errback') 94 exc_info = failure.type, failure.value, failure.getTracebackObject() 95 results.addError(case, exc_info) 96 97 request.callback = cb_wrapper 98 request.errback = eb_wrapper 99 100 101 class Contract(object): 102 """ Abstract class for contracts """ 103 request_cls = None 104 105 def __init__(self, method, *args): 106 self.testcase_pre = 
_create_testcase(method, '@%s pre-hook' % self.name) 107 self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name) 108 self.args = args 109 110 def add_pre_hook(self, request, results): 111 if hasattr(self, 'pre_process'): 112 cb = request.callback 113 114 @wraps(cb) 115 def wrapper(response): 116 try: 117 results.startTest(self.testcase_pre) 118 self.pre_process(response) 119 results.stopTest(self.testcase_pre) 120 except AssertionError: 121 results.addFailure(self.testcase_pre, sys.exc_info()) 122 except Exception: 123 results.addError(self.testcase_pre, sys.exc_info()) 124 else: 125 results.addSuccess(self.testcase_pre) 126 finally: 127 return list(iterate_spider_output(cb(response))) 128 129 request.callback = wrapper 130 131 return request 132 133 def add_post_hook(self, request, results): 134 if hasattr(self, 'post_process'): 135 cb = request.callback 136 137 @wraps(cb) 138 def wrapper(response): 139 output = list(iterate_spider_output(cb(response))) 140 try: 141 results.startTest(self.testcase_post) 142 self.post_process(output) 143 results.stopTest(self.testcase_post) 144 except AssertionError: 145 results.addFailure(self.testcase_post, sys.exc_info()) 146 except Exception: 147 results.addError(self.testcase_post, sys.exc_info()) 148 else: 149 results.addSuccess(self.testcase_post) 150 finally: 151 return output 152 153 request.callback = wrapper 154 155 return request 156 157 def adjust_request_args(self, args): 158 return args 159 160 161 def _create_testcase(method, desc): 162 spider = method.__self__.name 163 164 class ContractTestCase(TestCase): 165 def __str__(_self): 166 return "[%s] %s (%s)" % (spider, method.__name__, desc) 167 168 name = '%s_%s' % (spider, method.__name__) 169 setattr(ContractTestCase, name, lambda x: x) 170 return ContractTestCase(name) 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py --- a/scrapy/contracts/__init__.py +++ b/scrapy/contracts/__init__.py @@ -42,7 +42,11 @@ requests = [] for method in self.tested_methods_from_spidercls(type(spider)): bound_method = spider.__getattribute__(method) - requests.append(self.from_method(bound_method, results)) + try: + requests.append(self.from_method(bound_method, results)) + except Exception: + case = _create_testcase(bound_method, 'contract') + results.addError(case, sys.exc_info()) return requests
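The patch above fixes the exit code by catching contract-extraction failures and recording them on the unittest result object, since `wasSuccessful()` is what the check command's exit status keys on. A minimal sketch of that recording pattern, with plain functions standing in for Scrapy's spider methods (not Scrapy's real classes):

```python
import sys
import unittest


def collect_requests(methods, results):
    # Same shape as the patched ContractsManager.from_spider: a failing
    # contract no longer aborts collection silently, it is recorded as an
    # error on the shared result object.
    requests = []
    for method in methods:
        try:
            requests.append(method())  # stands in for self.from_method(...)
        except Exception:
            case = unittest.FunctionTestCase(lambda: None, description="contract")
            results.addError(case, sys.exc_info())
    return requests


def broken_contract():
    raise KeyError("unknown @contract name")  # hypothetical failure


results = unittest.TestResult()
collect_requests([broken_contract], results)
print(results.wasSuccessful())  # False -> `scrapy check` can exit non-zero
```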
{"golden_diff": "diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py\n--- a/scrapy/contracts/__init__.py\n+++ b/scrapy/contracts/__init__.py\n@@ -42,7 +42,11 @@\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n- requests.append(self.from_method(bound_method, results))\n+ try:\n+ requests.append(self.from_method(bound_method, results))\n+ except Exception:\n+ case = _create_testcase(bound_method, 'contract')\n+ results.addError(case, sys.exc_info())\n \n return requests\n", "issue": "scrapy check exit code on exception\nWhen I run `scrapy check` and a contract raises unhandled exception, the command exits with successful code `0`.\r\n\r\n```\r\n$ scrapy check $SPIDER_NAME\r\n... some stuff ...\r\nRan 0 contracts in 0.000s\r\nOK\r\n$ echo $?\r\n0\r\n```\r\n\r\nIs this intended, or should I fix it?\n", "before_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom inspect import getmembers\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n request_cls = Request\n for contract in contracts:\n if contract.request_cls is not None:\n request_cls = contract.request_cls\n\n # calculate request args\n args, kwargs = get_spec(request_cls.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n args.remove('self')\n\n # check if all positional arguments are defined in kwargs\n if set(args).issubset(set(kwargs)):\n request = request_cls(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, 
failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n request_cls = None\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}], "after_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom inspect import getmembers\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n try:\n requests.append(self.from_method(bound_method, results))\n except Exception:\n case = _create_testcase(bound_method, 'contract')\n results.addError(case, sys.exc_info())\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n request_cls = Request\n for contract 
in contracts:\n if contract.request_cls is not None:\n request_cls = contract.request_cls\n\n # calculate request args\n args, kwargs = get_spec(request_cls.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n args.remove('self')\n\n # check if all positional arguments are defined in kwargs\n if set(args).issubset(set(kwargs)):\n request = request_cls(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n request_cls = None\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}]}
1,876
154
gh_patches_debug_7395
rasdani/github-patches
git_diff
svthalia__concrexit-1513
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- URL's from Events API are not always full URL's ### Describe the bug The `google_maps_url` from the Events API is not always a full URL. ### How to reproduce Steps to reproduce the behaviour: 1. On staging, go to `https://staging.thalia.nu/api/v1/events/147/` and see that the `google_maps_url` is not a full URL but a relative one. ### Expected behaviour The `google_maps_url` should always be a full URL. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/utils/snippets.py` Content: ``` 1 """Provides various utilities that are useful across the project.""" 2 import datetime 3 import hmac 4 from base64 import urlsafe_b64decode, urlsafe_b64encode 5 from collections import namedtuple 6 from _sha1 import sha1 7 8 from django.conf import settings 9 from django.template.defaultfilters import urlencode 10 from django.templatetags.static import static 11 from django.utils import timezone, dateparse 12 from rest_framework.exceptions import ParseError 13 14 15 def dict2obj(d, name="Object"): 16 return namedtuple(name, d.keys())(*d.values()) 17 18 19 def datetime_to_lectureyear(date): 20 """Convert a :class:`~datetime.date` to the start of the lectureyear. 21 22 >>> from datetime import date, datetime, timezone 23 >>> nov_23 = date(1990, 11, 7) 24 >>> datetime_to_lectureyear(nov_23) 25 1990 26 >>> mar_2 = date(1993, 3, 2) 27 >>> datetime_to_lectureyear(mar_2) 28 1992 29 30 Also works on :class:`~datetime.datetime`, but they need to be tz-aware: 31 32 >>> new_year = datetime(2000, 1, 1, tzinfo=timezone.utc) 33 >>> datetime_to_lectureyear(new_year) 34 1999 35 """ 36 if isinstance(date, timezone.datetime): 37 date = timezone.localtime(date).date() 38 sept_1 = timezone.make_aware(timezone.datetime(date.year, 9, 1)) 39 if date < sept_1.date(): 40 return date.year - 1 41 return date.year 42 43 44 def create_google_maps_url(location, zoom, size): 45 if location.lower().strip() == "online": 46 return static("img/locations/online.png") 47 if location.lower().strip() == "discord": 48 return static("img/locations/discord.png") 49 50 maps_url = ( 51 f"/maps/api/staticmap?" 
52 f"center={ urlencode(location) }&" 53 f"zoom={ zoom }&size={ size }&" 54 f"markers={ urlencode(location) }&" 55 f"key={ settings.GOOGLE_MAPS_API_KEY }" 56 ) 57 58 decoded_key = urlsafe_b64decode(settings.GOOGLE_MAPS_API_SECRET) 59 60 signature = hmac.new(decoded_key, maps_url.encode(), sha1) 61 62 encoded_signature = urlsafe_b64encode(signature.digest()) 63 64 maps_url += f"&signature={encoded_signature.decode('utf-8')}" 65 66 return "https://maps.googleapis.com" + maps_url 67 68 69 def _extract_date(param): 70 """Extract the date from an arbitrary string.""" 71 if param is None: 72 return None 73 try: 74 return dateparse.parse_datetime(param) 75 except ValueError: 76 return dateparse.parse_date(param) 77 78 79 def extract_date_range(request, allow_empty=False): 80 """Extract a date range from an arbitrary string.""" 81 default_value = None 82 83 start = request.query_params.get("start", default_value) 84 if start or not allow_empty: 85 try: 86 start = dateparse.parse_datetime(start) 87 if not timezone.is_aware(start): 88 start = timezone.make_aware(start) 89 except (ValueError, AttributeError, TypeError) as e: 90 raise ParseError(detail="start query parameter invalid") from e 91 92 end = request.query_params.get("end", default_value) 93 if end or not allow_empty: 94 try: 95 end = dateparse.parse_datetime(end) 96 if not timezone.is_aware(end): 97 end = timezone.make_aware(end) 98 except (ValueError, AttributeError, TypeError) as e: 99 raise ParseError(detail="end query parameter invalid") from e 100 101 return start, end 102 103 104 def overlaps(check, others, can_equal=True): 105 """Check for overlapping date ranges. 106 107 This works by checking the maximum of the two `since` times, and the minimum of 108 the two `until` times. Because there are no infinite dates, the value date_max 109 is created for when the `until` value is None; this signifies a timespan that 110 has not ended yet and is the maximum possible date in Python's datetime. 111 112 The ranges overlap when the maximum start time is smaller than the minimum 113 end time, as can be seen in this example of two integer ranges: 114 115 check: . . . .[4]. . . . 9 116 other: . . 2 . .[5]. . . . 117 118 check: . . . .[4]. . . . 9 119 other: . . 2 . . . . . . . [date_max] 120 121 And when non overlapping: 122 check: . . . . . .[6] . . 9 123 other: . . 2 . .[5]. . . . 124 125 4 < 5 == True so these intervals overlap, while 6 < 5 == False so these intervals 126 don't overlap 127 128 The can_equal argument is used for boards, where the end date can't be the same 129 as the start date. 
130 131 >>> overlaps( \ 132 dict2obj({ \ 133 'pk': 1 \ 134 , 'since': datetime.date(2018, 12, 1) \ 135 , 'until': datetime.date(2019, 1, 1) \ 136 }) \ 137 , [dict2obj({ \ 138 'pk': 2 \ 139 , 'since': datetime.date(2019, 1, 1) \ 140 , 'until': datetime.date(2019, 1, 31) \ 141 })]) 142 False 143 144 >>> overlaps( \ 145 dict2obj({ \ 146 'pk': 1 \ 147 , 'since': datetime.date(2018, 12, 1) \ 148 , 'until': datetime.date(2019, 1, 1) \ 149 }) \ 150 , [dict2obj({ \ 151 'pk': 2 \ 152 , 'since': datetime.date(2019, 1, 1) \ 153 , 'until': datetime.date(2019, 1, 31) \ 154 })], False) 155 True 156 157 >>> overlaps( \ 158 dict2obj({ \ 159 'pk': 1 \ 160 , 'since': datetime.date(2018, 12, 1) \ 161 , 'until': datetime.date(2019, 1, 2) \ 162 }) \ 163 , [dict2obj({ \ 164 'pk': 2 \ 165 , 'since': datetime.date(2019, 1, 1) \ 166 , 'until': datetime.date(2019, 1, 31) \ 167 })]) 168 True 169 """ 170 date_max = datetime.date(datetime.MAXYEAR, 12, 31) 171 for other in others: 172 if check.pk == other.pk: 173 # No checks for the object we're validating 174 continue 175 176 max_start = max(check.since, other.since) 177 min_end = min(check.until or date_max, other.until or date_max) 178 179 if max_start == min_end and not can_equal: 180 return True 181 if max_start < min_end: 182 return True 183 184 return False 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/utils/snippets.py b/website/utils/snippets.py --- a/website/utils/snippets.py +++ b/website/utils/snippets.py @@ -43,9 +43,9 @@ def create_google_maps_url(location, zoom, size): if location.lower().strip() == "online": - return static("img/locations/online.png") + return settings.BASE_URL + static("img/locations/online.png") if location.lower().strip() == "discord": - return static("img/locations/discord.png") + return settings.BASE_URL + static("img/locations/discord.png") maps_url = ( f"/maps/api/staticmap?"
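The fix above prepends `settings.BASE_URL` so the special-cased locations yield absolute URLs instead of relative static paths. A minimal sketch of the restored behaviour, with `BASE_URL` and the static paths as assumed stand-ins for Django's settings and staticfiles machinery:

```python
BASE_URL = "https://thalia.nu"  # assumed value standing in for settings.BASE_URL

# Relative paths the staticfiles helper would return for the special cases.
_SPECIAL_LOCATIONS = {
    "online": "/static/img/locations/online.png",
    "discord": "/static/img/locations/discord.png",
}


def location_image_url(location: str) -> str:
    path = _SPECIAL_LOCATIONS.get(location.lower().strip())
    if path is None:
        # Regular locations go through the signed Google Maps URL branch,
        # which is unchanged by the patch and omitted here.
        raise NotImplementedError("regular locations use the signed Maps URL")
    return BASE_URL + path  # absolute URL, as the Events API expects


assert location_image_url(" Online ").startswith("https://")
```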
{"golden_diff": "diff --git a/website/utils/snippets.py b/website/utils/snippets.py\n--- a/website/utils/snippets.py\n+++ b/website/utils/snippets.py\n@@ -43,9 +43,9 @@\n \n def create_google_maps_url(location, zoom, size):\n if location.lower().strip() == \"online\":\n- return static(\"img/locations/online.png\")\n+ return settings.BASE_URL + static(\"img/locations/online.png\")\n if location.lower().strip() == \"discord\":\n- return static(\"img/locations/discord.png\")\n+ return settings.BASE_URL + static(\"img/locations/discord.png\")\n \n maps_url = (\n f\"/maps/api/staticmap?\"\n", "issue": "URL's from Events API are not always full URL's\n### Describe the bug\r\nThe `google_maps_url` from the Events API is not always a full URL.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. On staging, go to `https://staging.thalia.nu/api/v1/events/147/` and see that the `google_maps_url` is not a full URL but a relative one.\r\n\r\n### Expected behaviour\r\nThe `google_maps_url` should always be a full URL.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Provides various utilities that are useful across the project.\"\"\"\nimport datetime\nimport hmac\nfrom base64 import urlsafe_b64decode, urlsafe_b64encode\nfrom collections import namedtuple\nfrom _sha1 import sha1\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import urlencode\nfrom django.templatetags.static import static\nfrom django.utils import timezone, dateparse\nfrom rest_framework.exceptions import ParseError\n\n\ndef dict2obj(d, name=\"Object\"):\n return namedtuple(name, d.keys())(*d.values())\n\n\ndef datetime_to_lectureyear(date):\n \"\"\"Convert a :class:`~datetime.date` to the start of the lectureyear.\n\n >>> from datetime import date, datetime, timezone\n >>> nov_23 = date(1990, 11, 7)\n >>> datetime_to_lectureyear(nov_23)\n 1990\n >>> mar_2 = date(1993, 3, 2)\n >>> datetime_to_lectureyear(mar_2)\n 1992\n\n Also works on :class:`~datetime.datetime`, but they need to be tz-aware:\n\n >>> new_year = datetime(2000, 1, 1, tzinfo=timezone.utc)\n >>> datetime_to_lectureyear(new_year)\n 1999\n \"\"\"\n if isinstance(date, timezone.datetime):\n date = timezone.localtime(date).date()\n sept_1 = timezone.make_aware(timezone.datetime(date.year, 9, 1))\n if date < sept_1.date():\n return date.year - 1\n return date.year\n\n\ndef create_google_maps_url(location, zoom, size):\n if location.lower().strip() == \"online\":\n return static(\"img/locations/online.png\")\n if location.lower().strip() == \"discord\":\n return static(\"img/locations/discord.png\")\n\n maps_url = (\n f\"/maps/api/staticmap?\"\n f\"center={ urlencode(location) }&\"\n f\"zoom={ zoom }&size={ size }&\"\n f\"markers={ urlencode(location) }&\"\n f\"key={ settings.GOOGLE_MAPS_API_KEY }\"\n )\n\n decoded_key = urlsafe_b64decode(settings.GOOGLE_MAPS_API_SECRET)\n\n signature = hmac.new(decoded_key, maps_url.encode(), sha1)\n\n encoded_signature = urlsafe_b64encode(signature.digest())\n\n maps_url += f\"&signature={encoded_signature.decode('utf-8')}\"\n\n return \"https://maps.googleapis.com\" + maps_url\n\n\ndef _extract_date(param):\n \"\"\"Extract the date from an arbitrary string.\"\"\"\n if param is None:\n return None\n try:\n return dateparse.parse_datetime(param)\n except ValueError:\n return dateparse.parse_date(param)\n\n\ndef extract_date_range(request, allow_empty=False):\n \"\"\"Extract a date range from an arbitrary string.\"\"\"\n default_value = None\n\n start = request.query_params.get(\"start\", 
default_value)\n if start or not allow_empty:\n try:\n start = dateparse.parse_datetime(start)\n if not timezone.is_aware(start):\n start = timezone.make_aware(start)\n except (ValueError, AttributeError, TypeError) as e:\n raise ParseError(detail=\"start query parameter invalid\") from e\n\n end = request.query_params.get(\"end\", default_value)\n if end or not allow_empty:\n try:\n end = dateparse.parse_datetime(end)\n if not timezone.is_aware(end):\n end = timezone.make_aware(end)\n except (ValueError, AttributeError, TypeError) as e:\n raise ParseError(detail=\"end query parameter invalid\") from e\n\n return start, end\n\n\ndef overlaps(check, others, can_equal=True):\n \"\"\"Check for overlapping date ranges.\n\n This works by checking the maximum of the two `since` times, and the minimum of\n the two `until` times. Because there are no infinite dates, the value date_max\n is created for when the `until` value is None; this signifies a timespan that\n has not ended yet and is the maximum possible date in Python's datetime.\n\n The ranges overlap when the maximum start time is smaller than the minimum\n end time, as can be seen in this example of two integer ranges:\n\n check: . . . .[4]. . . . 9\n other: . . 2 . .[5]. . . .\n\n check: . . . .[4]. . . . 9\n other: . . 2 . . . . . . . [date_max]\n\n And when non overlapping:\n check: . . . . . .[6] . . 9\n other: . . 2 . .[5]. . . .\n\n 4 < 5 == True so these intervals overlap, while 6 < 5 == False so these intervals\n don't overlap\n\n The can_equal argument is used for boards, where the end date can't be the same\n as the start date.\n\n >>> overlaps( \\\n dict2obj({ \\\n 'pk': 1 \\\n , 'since': datetime.date(2018, 12, 1) \\\n , 'until': datetime.date(2019, 1, 1) \\\n }) \\\n , [dict2obj({ \\\n 'pk': 2 \\\n , 'since': datetime.date(2019, 1, 1) \\\n , 'until': datetime.date(2019, 1, 31) \\\n })])\n False\n\n >>> overlaps( \\\n dict2obj({ \\\n 'pk': 1 \\\n , 'since': datetime.date(2018, 12, 1) \\\n , 'until': datetime.date(2019, 1, 1) \\\n }) \\\n , [dict2obj({ \\\n 'pk': 2 \\\n , 'since': datetime.date(2019, 1, 1) \\\n , 'until': datetime.date(2019, 1, 31) \\\n })], False)\n True\n\n >>> overlaps( \\\n dict2obj({ \\\n 'pk': 1 \\\n , 'since': datetime.date(2018, 12, 1) \\\n , 'until': datetime.date(2019, 1, 2) \\\n }) \\\n , [dict2obj({ \\\n 'pk': 2 \\\n , 'since': datetime.date(2019, 1, 1) \\\n , 'until': datetime.date(2019, 1, 31) \\\n })])\n True\n \"\"\"\n date_max = datetime.date(datetime.MAXYEAR, 12, 31)\n for other in others:\n if check.pk == other.pk:\n # No checks for the object we're validating\n continue\n\n max_start = max(check.since, other.since)\n min_end = min(check.until or date_max, other.until or date_max)\n\n if max_start == min_end and not can_equal:\n return True\n if max_start < min_end:\n return True\n\n return False\n", "path": "website/utils/snippets.py"}], "after_files": [{"content": "\"\"\"Provides various utilities that are useful across the project.\"\"\"\nimport datetime\nimport hmac\nfrom base64 import urlsafe_b64decode, urlsafe_b64encode\nfrom collections import namedtuple\nfrom _sha1 import sha1\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import urlencode\nfrom django.templatetags.static import static\nfrom django.utils import timezone, dateparse\nfrom rest_framework.exceptions import ParseError\n\n\ndef dict2obj(d, name=\"Object\"):\n return namedtuple(name, d.keys())(*d.values())\n\n\ndef datetime_to_lectureyear(date):\n \"\"\"Convert a :class:`~datetime.date` to the 
start of the lectureyear.\n\n >>> from datetime import date, datetime, timezone\n >>> nov_23 = date(1990, 11, 7)\n >>> datetime_to_lectureyear(nov_23)\n 1990\n >>> mar_2 = date(1993, 3, 2)\n >>> datetime_to_lectureyear(mar_2)\n 1992\n\n Also works on :class:`~datetime.datetime`, but they need to be tz-aware:\n\n >>> new_year = datetime(2000, 1, 1, tzinfo=timezone.utc)\n >>> datetime_to_lectureyear(new_year)\n 1999\n \"\"\"\n if isinstance(date, timezone.datetime):\n date = timezone.localtime(date).date()\n sept_1 = timezone.make_aware(timezone.datetime(date.year, 9, 1))\n if date < sept_1.date():\n return date.year - 1\n return date.year\n\n\ndef create_google_maps_url(location, zoom, size):\n if location.lower().strip() == \"online\":\n return settings.BASE_URL + static(\"img/locations/online.png\")\n if location.lower().strip() == \"discord\":\n return settings.BASE_URL + static(\"img/locations/discord.png\")\n\n maps_url = (\n f\"/maps/api/staticmap?\"\n f\"center={ urlencode(location) }&\"\n f\"zoom={ zoom }&size={ size }&\"\n f\"markers={ urlencode(location) }&\"\n f\"key={ settings.GOOGLE_MAPS_API_KEY }\"\n )\n\n decoded_key = urlsafe_b64decode(settings.GOOGLE_MAPS_API_SECRET)\n\n signature = hmac.new(decoded_key, maps_url.encode(), sha1)\n\n encoded_signature = urlsafe_b64encode(signature.digest())\n\n maps_url += f\"&signature={encoded_signature.decode('utf-8')}\"\n\n return \"https://maps.googleapis.com\" + maps_url\n\n\ndef _extract_date(param):\n \"\"\"Extract the date from an arbitrary string.\"\"\"\n if param is None:\n return None\n try:\n return dateparse.parse_datetime(param)\n except ValueError:\n return dateparse.parse_date(param)\n\n\ndef extract_date_range(request, allow_empty=False):\n \"\"\"Extract a date range from an arbitrary string.\"\"\"\n default_value = None\n\n start = request.query_params.get(\"start\", default_value)\n if start or not allow_empty:\n try:\n start = dateparse.parse_datetime(start)\n if not timezone.is_aware(start):\n start = timezone.make_aware(start)\n except (ValueError, AttributeError, TypeError) as e:\n raise ParseError(detail=\"start query parameter invalid\") from e\n\n end = request.query_params.get(\"end\", default_value)\n if end or not allow_empty:\n try:\n end = dateparse.parse_datetime(end)\n if not timezone.is_aware(end):\n end = timezone.make_aware(end)\n except (ValueError, AttributeError, TypeError) as e:\n raise ParseError(detail=\"end query parameter invalid\") from e\n\n return start, end\n\n\ndef overlaps(check, others, can_equal=True):\n \"\"\"Check for overlapping date ranges.\n\n This works by checking the maximum of the two `since` times, and the minimum of\n the two `until` times. Because there are no infinite dates, the value date_max\n is created for when the `until` value is None; this signifies a timespan that\n has not ended yet and is the maximum possible date in Python's datetime.\n\n The ranges overlap when the maximum start time is smaller than the minimum\n end time, as can be seen in this example of two integer ranges:\n\n check: . . . .[4]. . . . 9\n other: . . 2 . .[5]. . . .\n\n check: . . . .[4]. . . . 9\n other: . . 2 . . . . . . . [date_max]\n\n And when non overlapping:\n check: . . . . . .[6] . . 9\n other: . . 2 . .[5]. . . 
.\n\n 4 < 5 == True so these intervals overlap, while 6 < 5 == False so these intervals\n don't overlap\n\n The can_equal argument is used for boards, where the end date can't be the same\n as the start date.\n\n >>> overlaps( \\\n dict2obj({ \\\n 'pk': 1 \\\n , 'since': datetime.date(2018, 12, 1) \\\n , 'until': datetime.date(2019, 1, 1) \\\n }) \\\n , [dict2obj({ \\\n 'pk': 2 \\\n , 'since': datetime.date(2019, 1, 1) \\\n , 'until': datetime.date(2019, 1, 31) \\\n })])\n False\n\n >>> overlaps( \\\n dict2obj({ \\\n 'pk': 1 \\\n , 'since': datetime.date(2018, 12, 1) \\\n , 'until': datetime.date(2019, 1, 1) \\\n }) \\\n , [dict2obj({ \\\n 'pk': 2 \\\n , 'since': datetime.date(2019, 1, 1) \\\n , 'until': datetime.date(2019, 1, 31) \\\n })], False)\n True\n\n >>> overlaps( \\\n dict2obj({ \\\n 'pk': 1 \\\n , 'since': datetime.date(2018, 12, 1) \\\n , 'until': datetime.date(2019, 1, 2) \\\n }) \\\n , [dict2obj({ \\\n 'pk': 2 \\\n , 'since': datetime.date(2019, 1, 1) \\\n , 'until': datetime.date(2019, 1, 31) \\\n })])\n True\n \"\"\"\n date_max = datetime.date(datetime.MAXYEAR, 12, 31)\n for other in others:\n if check.pk == other.pk:\n # No checks for the object we're validating\n continue\n\n max_start = max(check.since, other.since)\n min_end = min(check.until or date_max, other.until or date_max)\n\n if max_start == min_end and not can_equal:\n return True\n if max_start < min_end:\n return True\n\n return False\n", "path": "website/utils/snippets.py"}]}
2,405
152
gh_patches_debug_16724
rasdani/github-patches
git_diff
jazzband__pip-tools-828
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- log which python version to use/was used eg add this to the header: ``` # This file is autogenerated by pip-compile on: # Python 2.7.15+ (default, Oct 2 2018, 22:12:08) # [GCC 8.2.0] on linux2 # To update, run: # # pip-compile --generate-hashes --output-file=requirements.txt requirements.in ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `piptools/writer.py` Content: ``` 1 import os 2 import re 3 from itertools import chain 4 from typing import BinaryIO, Dict, Iterable, Iterator, List, Optional, Set, Tuple 5 6 from click import unstyle 7 from click.core import Context 8 from pip._internal.models.format_control import FormatControl 9 from pip._internal.req.req_install import InstallRequirement 10 from pip._vendor.packaging.markers import Marker 11 12 from .logging import log 13 from .utils import ( 14 UNSAFE_PACKAGES, 15 comment, 16 dedup, 17 format_requirement, 18 get_compile_command, 19 key_from_ireq, 20 ) 21 22 MESSAGE_UNHASHED_PACKAGE = comment( 23 "# WARNING: pip install will require the following package to be hashed." 24 "\n# Consider using a hashable URL like " 25 "https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip" 26 ) 27 28 MESSAGE_UNSAFE_PACKAGES_UNPINNED = comment( 29 "# WARNING: The following packages were not pinned, but pip requires them to be" 30 "\n# pinned when the requirements file includes hashes. " 31 "Consider using the --allow-unsafe flag." 32 ) 33 34 MESSAGE_UNSAFE_PACKAGES = comment( 35 "# The following packages are considered to be unsafe in a requirements file:" 36 ) 37 38 MESSAGE_UNINSTALLABLE = ( 39 "The generated requirements file may be rejected by pip install. " 40 "See # WARNING lines for details." 
41 ) 42 43 44 strip_comes_from_line_re = re.compile(r" \(line \d+\)$") 45 46 47 def _comes_from_as_string(ireq: InstallRequirement) -> str: 48 if isinstance(ireq.comes_from, str): 49 return strip_comes_from_line_re.sub("", ireq.comes_from) 50 return key_from_ireq(ireq.comes_from) 51 52 53 class OutputWriter: 54 def __init__( 55 self, 56 dst_file: BinaryIO, 57 click_ctx: Context, 58 dry_run: bool, 59 emit_header: bool, 60 emit_index_url: bool, 61 emit_trusted_host: bool, 62 annotate: bool, 63 generate_hashes: bool, 64 default_index_url: str, 65 index_urls: Iterable[str], 66 trusted_hosts: Iterable[str], 67 format_control: FormatControl, 68 allow_unsafe: bool, 69 find_links: List[str], 70 emit_find_links: bool, 71 ) -> None: 72 self.dst_file = dst_file 73 self.click_ctx = click_ctx 74 self.dry_run = dry_run 75 self.emit_header = emit_header 76 self.emit_index_url = emit_index_url 77 self.emit_trusted_host = emit_trusted_host 78 self.annotate = annotate 79 self.generate_hashes = generate_hashes 80 self.default_index_url = default_index_url 81 self.index_urls = index_urls 82 self.trusted_hosts = trusted_hosts 83 self.format_control = format_control 84 self.allow_unsafe = allow_unsafe 85 self.find_links = find_links 86 self.emit_find_links = emit_find_links 87 88 def _sort_key(self, ireq: InstallRequirement) -> Tuple[bool, str]: 89 return (not ireq.editable, str(ireq.req).lower()) 90 91 def write_header(self) -> Iterator[str]: 92 if self.emit_header: 93 yield comment("#") 94 yield comment("# This file is autogenerated by pip-compile") 95 yield comment("# To update, run:") 96 yield comment("#") 97 compile_command = os.environ.get( 98 "CUSTOM_COMPILE_COMMAND" 99 ) or get_compile_command(self.click_ctx) 100 yield comment(f"# {compile_command}") 101 yield comment("#") 102 103 def write_index_options(self) -> Iterator[str]: 104 if self.emit_index_url: 105 for index, index_url in enumerate(dedup(self.index_urls)): 106 if index == 0 and index_url.rstrip("/") == self.default_index_url: 107 continue 108 flag = "--index-url" if index == 0 else "--extra-index-url" 109 yield f"{flag} {index_url}" 110 111 def write_trusted_hosts(self) -> Iterator[str]: 112 if self.emit_trusted_host: 113 for trusted_host in dedup(self.trusted_hosts): 114 yield f"--trusted-host {trusted_host}" 115 116 def write_format_controls(self) -> Iterator[str]: 117 for nb in dedup(sorted(self.format_control.no_binary)): 118 yield f"--no-binary {nb}" 119 for ob in dedup(sorted(self.format_control.only_binary)): 120 yield f"--only-binary {ob}" 121 122 def write_find_links(self) -> Iterator[str]: 123 if self.emit_find_links: 124 for find_link in dedup(self.find_links): 125 yield f"--find-links {find_link}" 126 127 def write_flags(self) -> Iterator[str]: 128 emitted = False 129 for line in chain( 130 self.write_index_options(), 131 self.write_find_links(), 132 self.write_trusted_hosts(), 133 self.write_format_controls(), 134 ): 135 emitted = True 136 yield line 137 if emitted: 138 yield "" 139 140 def _iter_lines( 141 self, 142 results: Set[InstallRequirement], 143 unsafe_requirements: Optional[Set[InstallRequirement]] = None, 144 markers: Optional[Dict[str, Marker]] = None, 145 hashes: Optional[Dict[InstallRequirement, Set[str]]] = None, 146 ) -> Iterator[str]: 147 # default values 148 unsafe_requirements = unsafe_requirements or set() 149 markers = markers or {} 150 hashes = hashes or {} 151 152 # Check for unhashed or unpinned packages if at least one package does have 153 # hashes, which will trigger pip install's --require-hashes mode. 
154 warn_uninstallable = False 155 has_hashes = hashes and any(hash for hash in hashes.values()) 156 157 yielded = False 158 159 for line in self.write_header(): 160 yield line 161 yielded = True 162 for line in self.write_flags(): 163 yield line 164 yielded = True 165 166 unsafe_requirements = ( 167 {r for r in results if r.name in UNSAFE_PACKAGES} 168 if not unsafe_requirements 169 else unsafe_requirements 170 ) 171 packages = {r for r in results if r.name not in UNSAFE_PACKAGES} 172 173 if packages: 174 for ireq in sorted(packages, key=self._sort_key): 175 if has_hashes and not hashes.get(ireq): 176 yield MESSAGE_UNHASHED_PACKAGE 177 warn_uninstallable = True 178 line = self._format_requirement( 179 ireq, markers.get(key_from_ireq(ireq)), hashes=hashes 180 ) 181 yield line 182 yielded = True 183 184 if unsafe_requirements: 185 yield "" 186 yielded = True 187 if has_hashes and not self.allow_unsafe: 188 yield MESSAGE_UNSAFE_PACKAGES_UNPINNED 189 warn_uninstallable = True 190 else: 191 yield MESSAGE_UNSAFE_PACKAGES 192 193 for ireq in sorted(unsafe_requirements, key=self._sort_key): 194 ireq_key = key_from_ireq(ireq) 195 if not self.allow_unsafe: 196 yield comment(f"# {ireq_key}") 197 else: 198 line = self._format_requirement( 199 ireq, marker=markers.get(ireq_key), hashes=hashes 200 ) 201 yield line 202 203 # Yield even when there's no real content, so that blank files are written 204 if not yielded: 205 yield "" 206 207 if warn_uninstallable: 208 log.warning(MESSAGE_UNINSTALLABLE) 209 210 def write( 211 self, 212 results: Set[InstallRequirement], 213 unsafe_requirements: Set[InstallRequirement], 214 markers: Dict[str, Marker], 215 hashes: Optional[Dict[InstallRequirement, Set[str]]], 216 ) -> None: 217 218 for line in self._iter_lines(results, unsafe_requirements, markers, hashes): 219 log.info(line) 220 if not self.dry_run: 221 self.dst_file.write(unstyle(line).encode()) 222 self.dst_file.write(os.linesep.encode()) 223 224 def _format_requirement( 225 self, 226 ireq: InstallRequirement, 227 marker: Optional[Marker] = None, 228 hashes: Optional[Dict[InstallRequirement, Set[str]]] = None, 229 ) -> str: 230 ireq_hashes = (hashes if hashes is not None else {}).get(ireq) 231 232 line = format_requirement(ireq, marker=marker, hashes=ireq_hashes) 233 234 if not self.annotate: 235 return line 236 237 # Annotate what packages or reqs-ins this package is required by 238 required_by = set() 239 if hasattr(ireq, "_source_ireqs"): 240 required_by |= { 241 _comes_from_as_string(src_ireq) 242 for src_ireq in ireq._source_ireqs 243 if src_ireq.comes_from 244 } 245 elif ireq.comes_from: 246 required_by.add(_comes_from_as_string(ireq)) 247 248 if required_by: 249 sorted_required_by = sorted(required_by) 250 if len(sorted_required_by) == 1: 251 source = sorted_required_by[0] 252 annotation = " # via " + source 253 else: 254 annotation_lines = [" # via"] 255 for source in sorted_required_by: 256 annotation_lines.append(" # " + source) 257 annotation = "\n".join(annotation_lines) 258 line = f"{line}\n{comment(annotation)}" 259 260 return line 261 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/piptools/writer.py b/piptools/writer.py --- a/piptools/writer.py +++ b/piptools/writer.py @@ -1,5 +1,6 @@ import os import re +import sys from itertools import chain from typing import BinaryIO, Dict, Iterable, Iterator, List, Optional, Set, Tuple @@ -91,7 +92,10 @@ def write_header(self) -> Iterator[str]: if self.emit_header: yield comment("#") - yield comment("# This file is autogenerated by pip-compile") + yield comment( + "# This file is autogenerated by pip-compile with python " + f"{sys.version_info.major}.{sys.version_info.minor}" + ) yield comment("# To update, run:") yield comment("#") compile_command = os.environ.get(
{"golden_diff": "diff --git a/piptools/writer.py b/piptools/writer.py\n--- a/piptools/writer.py\n+++ b/piptools/writer.py\n@@ -1,5 +1,6 @@\n import os\n import re\n+import sys\n from itertools import chain\n from typing import BinaryIO, Dict, Iterable, Iterator, List, Optional, Set, Tuple\n \n@@ -91,7 +92,10 @@\n def write_header(self) -> Iterator[str]:\n if self.emit_header:\n yield comment(\"#\")\n- yield comment(\"# This file is autogenerated by pip-compile\")\n+ yield comment(\n+ \"# This file is autogenerated by pip-compile with python \"\n+ f\"{sys.version_info.major}.{sys.version_info.minor}\"\n+ )\n yield comment(\"# To update, run:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n", "issue": "log which python version to use/was used\neg add this to the header:\r\n\r\n```\r\n # This file is autogenerated by pip-compile on:\r\n # Python 2.7.15+ (default, Oct 2 2018, 22:12:08) \r\n # [GCC 8.2.0] on linux2\r\n # To update, run:\r\n #\r\n # pip-compile --generate-hashes --output-file=requirements.txt requirements.in\r\n```\n", "before_files": [{"content": "import os\nimport re\nfrom itertools import chain\nfrom typing import BinaryIO, Dict, Iterable, Iterator, List, Optional, Set, Tuple\n\nfrom click import unstyle\nfrom click.core import Context\nfrom pip._internal.models.format_control import FormatControl\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._vendor.packaging.markers import Marker\n\nfrom .logging import log\nfrom .utils import (\n UNSAFE_PACKAGES,\n comment,\n dedup,\n format_requirement,\n get_compile_command,\n key_from_ireq,\n)\n\nMESSAGE_UNHASHED_PACKAGE = comment(\n \"# WARNING: pip install will require the following package to be hashed.\"\n \"\\n# Consider using a hashable URL like \"\n \"https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip\"\n)\n\nMESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(\n \"# WARNING: The following packages were not pinned, but pip requires them to be\"\n \"\\n# pinned when the requirements file includes hashes. \"\n \"Consider using the --allow-unsafe flag.\"\n)\n\nMESSAGE_UNSAFE_PACKAGES = comment(\n \"# The following packages are considered to be unsafe in a requirements file:\"\n)\n\nMESSAGE_UNINSTALLABLE = (\n \"The generated requirements file may be rejected by pip install. 
\"\n \"See # WARNING lines for details.\"\n)\n\n\nstrip_comes_from_line_re = re.compile(r\" \\(line \\d+\\)$\")\n\n\ndef _comes_from_as_string(ireq: InstallRequirement) -> str:\n if isinstance(ireq.comes_from, str):\n return strip_comes_from_line_re.sub(\"\", ireq.comes_from)\n return key_from_ireq(ireq.comes_from)\n\n\nclass OutputWriter:\n def __init__(\n self,\n dst_file: BinaryIO,\n click_ctx: Context,\n dry_run: bool,\n emit_header: bool,\n emit_index_url: bool,\n emit_trusted_host: bool,\n annotate: bool,\n generate_hashes: bool,\n default_index_url: str,\n index_urls: Iterable[str],\n trusted_hosts: Iterable[str],\n format_control: FormatControl,\n allow_unsafe: bool,\n find_links: List[str],\n emit_find_links: bool,\n ) -> None:\n self.dst_file = dst_file\n self.click_ctx = click_ctx\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index_url = emit_index_url\n self.emit_trusted_host = emit_trusted_host\n self.annotate = annotate\n self.generate_hashes = generate_hashes\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n self.trusted_hosts = trusted_hosts\n self.format_control = format_control\n self.allow_unsafe = allow_unsafe\n self.find_links = find_links\n self.emit_find_links = emit_find_links\n\n def _sort_key(self, ireq: InstallRequirement) -> Tuple[bool, str]:\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self) -> Iterator[str]:\n if self.emit_header:\n yield comment(\"#\")\n yield comment(\"# This file is autogenerated by pip-compile\")\n yield comment(\"# To update, run:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n \"CUSTOM_COMPILE_COMMAND\"\n ) or get_compile_command(self.click_ctx)\n yield comment(f\"# {compile_command}\")\n yield comment(\"#\")\n\n def write_index_options(self) -> Iterator[str]:\n if self.emit_index_url:\n for index, index_url in enumerate(dedup(self.index_urls)):\n if index == 0 and index_url.rstrip(\"/\") == self.default_index_url:\n continue\n flag = \"--index-url\" if index == 0 else \"--extra-index-url\"\n yield f\"{flag} {index_url}\"\n\n def write_trusted_hosts(self) -> Iterator[str]:\n if self.emit_trusted_host:\n for trusted_host in dedup(self.trusted_hosts):\n yield f\"--trusted-host {trusted_host}\"\n\n def write_format_controls(self) -> Iterator[str]:\n for nb in dedup(sorted(self.format_control.no_binary)):\n yield f\"--no-binary {nb}\"\n for ob in dedup(sorted(self.format_control.only_binary)):\n yield f\"--only-binary {ob}\"\n\n def write_find_links(self) -> Iterator[str]:\n if self.emit_find_links:\n for find_link in dedup(self.find_links):\n yield f\"--find-links {find_link}\"\n\n def write_flags(self) -> Iterator[str]:\n emitted = False\n for line in chain(\n self.write_index_options(),\n self.write_find_links(),\n self.write_trusted_hosts(),\n self.write_format_controls(),\n ):\n emitted = True\n yield line\n if emitted:\n yield \"\"\n\n def _iter_lines(\n self,\n results: Set[InstallRequirement],\n unsafe_requirements: Optional[Set[InstallRequirement]] = None,\n markers: Optional[Dict[str, Marker]] = None,\n hashes: Optional[Dict[InstallRequirement, Set[str]]] = None,\n ) -> Iterator[str]:\n # default values\n unsafe_requirements = unsafe_requirements or set()\n markers = markers or {}\n hashes = hashes or {}\n\n # Check for unhashed or unpinned packages if at least one package does have\n # hashes, which will trigger pip install's --require-hashes mode.\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in 
hashes.values())\n\n yielded = False\n\n for line in self.write_header():\n yield line\n yielded = True\n for line in self.write_flags():\n yield line\n yielded = True\n\n unsafe_requirements = (\n {r for r in results if r.name in UNSAFE_PACKAGES}\n if not unsafe_requirements\n else unsafe_requirements\n )\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n if packages:\n for ireq in sorted(packages, key=self._sort_key):\n if has_hashes and not hashes.get(ireq):\n yield MESSAGE_UNHASHED_PACKAGE\n warn_uninstallable = True\n line = self._format_requirement(\n ireq, markers.get(key_from_ireq(ireq)), hashes=hashes\n )\n yield line\n yielded = True\n\n if unsafe_requirements:\n yield \"\"\n yielded = True\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n else:\n yield MESSAGE_UNSAFE_PACKAGES\n\n for ireq in sorted(unsafe_requirements, key=self._sort_key):\n ireq_key = key_from_ireq(ireq)\n if not self.allow_unsafe:\n yield comment(f\"# {ireq_key}\")\n else:\n line = self._format_requirement(\n ireq, marker=markers.get(ireq_key), hashes=hashes\n )\n yield line\n\n # Yield even when there's no real content, so that blank files are written\n if not yielded:\n yield \"\"\n\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n\n def write(\n self,\n results: Set[InstallRequirement],\n unsafe_requirements: Set[InstallRequirement],\n markers: Dict[str, Marker],\n hashes: Optional[Dict[InstallRequirement, Set[str]]],\n ) -> None:\n\n for line in self._iter_lines(results, unsafe_requirements, markers, hashes):\n log.info(line)\n if not self.dry_run:\n self.dst_file.write(unstyle(line).encode())\n self.dst_file.write(os.linesep.encode())\n\n def _format_requirement(\n self,\n ireq: InstallRequirement,\n marker: Optional[Marker] = None,\n hashes: Optional[Dict[InstallRequirement, Set[str]]] = None,\n ) -> str:\n ireq_hashes = (hashes if hashes is not None else {}).get(ireq)\n\n line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)\n\n if not self.annotate:\n return line\n\n # Annotate what packages or reqs-ins this package is required by\n required_by = set()\n if hasattr(ireq, \"_source_ireqs\"):\n required_by |= {\n _comes_from_as_string(src_ireq)\n for src_ireq in ireq._source_ireqs\n if src_ireq.comes_from\n }\n elif ireq.comes_from:\n required_by.add(_comes_from_as_string(ireq))\n\n if required_by:\n sorted_required_by = sorted(required_by)\n if len(sorted_required_by) == 1:\n source = sorted_required_by[0]\n annotation = \" # via \" + source\n else:\n annotation_lines = [\" # via\"]\n for source in sorted_required_by:\n annotation_lines.append(\" # \" + source)\n annotation = \"\\n\".join(annotation_lines)\n line = f\"{line}\\n{comment(annotation)}\"\n\n return line\n", "path": "piptools/writer.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\nfrom itertools import chain\nfrom typing import BinaryIO, Dict, Iterable, Iterator, List, Optional, Set, Tuple\n\nfrom click import unstyle\nfrom click.core import Context\nfrom pip._internal.models.format_control import FormatControl\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._vendor.packaging.markers import Marker\n\nfrom .logging import log\nfrom .utils import (\n UNSAFE_PACKAGES,\n comment,\n dedup,\n format_requirement,\n get_compile_command,\n key_from_ireq,\n)\n\nMESSAGE_UNHASHED_PACKAGE = comment(\n \"# WARNING: pip install will require the following package to be hashed.\"\n \"\\n# Consider using a 
hashable URL like \"\n \"https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip\"\n)\n\nMESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(\n \"# WARNING: The following packages were not pinned, but pip requires them to be\"\n \"\\n# pinned when the requirements file includes hashes. \"\n \"Consider using the --allow-unsafe flag.\"\n)\n\nMESSAGE_UNSAFE_PACKAGES = comment(\n \"# The following packages are considered to be unsafe in a requirements file:\"\n)\n\nMESSAGE_UNINSTALLABLE = (\n \"The generated requirements file may be rejected by pip install. \"\n \"See # WARNING lines for details.\"\n)\n\n\nstrip_comes_from_line_re = re.compile(r\" \\(line \\d+\\)$\")\n\n\ndef _comes_from_as_string(ireq: InstallRequirement) -> str:\n if isinstance(ireq.comes_from, str):\n return strip_comes_from_line_re.sub(\"\", ireq.comes_from)\n return key_from_ireq(ireq.comes_from)\n\n\nclass OutputWriter:\n def __init__(\n self,\n dst_file: BinaryIO,\n click_ctx: Context,\n dry_run: bool,\n emit_header: bool,\n emit_index_url: bool,\n emit_trusted_host: bool,\n annotate: bool,\n generate_hashes: bool,\n default_index_url: str,\n index_urls: Iterable[str],\n trusted_hosts: Iterable[str],\n format_control: FormatControl,\n allow_unsafe: bool,\n find_links: List[str],\n emit_find_links: bool,\n ) -> None:\n self.dst_file = dst_file\n self.click_ctx = click_ctx\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index_url = emit_index_url\n self.emit_trusted_host = emit_trusted_host\n self.annotate = annotate\n self.generate_hashes = generate_hashes\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n self.trusted_hosts = trusted_hosts\n self.format_control = format_control\n self.allow_unsafe = allow_unsafe\n self.find_links = find_links\n self.emit_find_links = emit_find_links\n\n def _sort_key(self, ireq: InstallRequirement) -> Tuple[bool, str]:\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self) -> Iterator[str]:\n if self.emit_header:\n yield comment(\"#\")\n yield comment(\n \"# This file is autogenerated by pip-compile with python \"\n f\"{sys.version_info.major}.{sys.version_info.minor}\"\n )\n yield comment(\"# To update, run:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n \"CUSTOM_COMPILE_COMMAND\"\n ) or get_compile_command(self.click_ctx)\n yield comment(f\"# {compile_command}\")\n yield comment(\"#\")\n\n def write_index_options(self) -> Iterator[str]:\n if self.emit_index_url:\n for index, index_url in enumerate(dedup(self.index_urls)):\n if index == 0 and index_url.rstrip(\"/\") == self.default_index_url:\n continue\n flag = \"--index-url\" if index == 0 else \"--extra-index-url\"\n yield f\"{flag} {index_url}\"\n\n def write_trusted_hosts(self) -> Iterator[str]:\n if self.emit_trusted_host:\n for trusted_host in dedup(self.trusted_hosts):\n yield f\"--trusted-host {trusted_host}\"\n\n def write_format_controls(self) -> Iterator[str]:\n for nb in dedup(sorted(self.format_control.no_binary)):\n yield f\"--no-binary {nb}\"\n for ob in dedup(sorted(self.format_control.only_binary)):\n yield f\"--only-binary {ob}\"\n\n def write_find_links(self) -> Iterator[str]:\n if self.emit_find_links:\n for find_link in dedup(self.find_links):\n yield f\"--find-links {find_link}\"\n\n def write_flags(self) -> Iterator[str]:\n emitted = False\n for line in chain(\n self.write_index_options(),\n self.write_find_links(),\n self.write_trusted_hosts(),\n self.write_format_controls(),\n ):\n emitted = True\n yield line\n if emitted:\n 
yield \"\"\n\n def _iter_lines(\n self,\n results: Set[InstallRequirement],\n unsafe_requirements: Optional[Set[InstallRequirement]] = None,\n markers: Optional[Dict[str, Marker]] = None,\n hashes: Optional[Dict[InstallRequirement, Set[str]]] = None,\n ) -> Iterator[str]:\n # default values\n unsafe_requirements = unsafe_requirements or set()\n markers = markers or {}\n hashes = hashes or {}\n\n # Check for unhashed or unpinned packages if at least one package does have\n # hashes, which will trigger pip install's --require-hashes mode.\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in hashes.values())\n\n yielded = False\n\n for line in self.write_header():\n yield line\n yielded = True\n for line in self.write_flags():\n yield line\n yielded = True\n\n unsafe_requirements = (\n {r for r in results if r.name in UNSAFE_PACKAGES}\n if not unsafe_requirements\n else unsafe_requirements\n )\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n if packages:\n for ireq in sorted(packages, key=self._sort_key):\n if has_hashes and not hashes.get(ireq):\n yield MESSAGE_UNHASHED_PACKAGE\n warn_uninstallable = True\n line = self._format_requirement(\n ireq, markers.get(key_from_ireq(ireq)), hashes=hashes\n )\n yield line\n yielded = True\n\n if unsafe_requirements:\n yield \"\"\n yielded = True\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n else:\n yield MESSAGE_UNSAFE_PACKAGES\n\n for ireq in sorted(unsafe_requirements, key=self._sort_key):\n ireq_key = key_from_ireq(ireq)\n if not self.allow_unsafe:\n yield comment(f\"# {ireq_key}\")\n else:\n line = self._format_requirement(\n ireq, marker=markers.get(ireq_key), hashes=hashes\n )\n yield line\n\n # Yield even when there's no real content, so that blank files are written\n if not yielded:\n yield \"\"\n\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n\n def write(\n self,\n results: Set[InstallRequirement],\n unsafe_requirements: Set[InstallRequirement],\n markers: Dict[str, Marker],\n hashes: Optional[Dict[InstallRequirement, Set[str]]],\n ) -> None:\n\n for line in self._iter_lines(results, unsafe_requirements, markers, hashes):\n log.info(line)\n if not self.dry_run:\n self.dst_file.write(unstyle(line).encode())\n self.dst_file.write(os.linesep.encode())\n\n def _format_requirement(\n self,\n ireq: InstallRequirement,\n marker: Optional[Marker] = None,\n hashes: Optional[Dict[InstallRequirement, Set[str]]] = None,\n ) -> str:\n ireq_hashes = (hashes if hashes is not None else {}).get(ireq)\n\n line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)\n\n if not self.annotate:\n return line\n\n # Annotate what packages or reqs-ins this package is required by\n required_by = set()\n if hasattr(ireq, \"_source_ireqs\"):\n required_by |= {\n _comes_from_as_string(src_ireq)\n for src_ireq in ireq._source_ireqs\n if src_ireq.comes_from\n }\n elif ireq.comes_from:\n required_by.add(_comes_from_as_string(ireq))\n\n if required_by:\n sorted_required_by = sorted(required_by)\n if len(sorted_required_by) == 1:\n source = sorted_required_by[0]\n annotation = \" # via \" + source\n else:\n annotation_lines = [\" # via\"]\n for source in sorted_required_by:\n annotation_lines.append(\" # \" + source)\n annotation = \"\\n\".join(annotation_lines)\n line = f\"{line}\\n{comment(annotation)}\"\n\n return line\n", "path": "piptools/writer.py"}]}
num_tokens: 3,003
num_tokens_diff: 188
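The pip-tools fix above reduces to one reusable pattern: stamping generated files with the interpreter version that produced them. Below is a minimal sketch of that pattern, assuming nothing beyond the standard library; the `comment` helper is a stand-in for the piptools styling helper, not the real import.

```python
import sys


def comment(text: str) -> str:
    # Stand-in for piptools.utils.comment; the real helper also applies styling.
    return text


def write_header(emit_header: bool = True):
    """Yield autogeneration header lines stamped with the running Python version."""
    if emit_header:
        yield comment("#")
        yield comment(
            "# This file is autogenerated by pip-compile with python "
            f"{sys.version_info.major}.{sys.version_info.minor}"
        )
        yield comment("# To update, run:")
        yield comment("#")


print("\n".join(write_header()))
```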
problem_id: gh_patches_debug_5037
source: rasdani/github-patches
task_type: git_diff
in_source_id: facebookresearch__hydra-793
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug] MISSING for Object Conf cls argument # 🐛 Bug OmegaConf cls argument should not be a mandatory value if target is defined. Can we change this to be an optional value with None being the default? ** Stack trace/error message ** ``` omegaconf.errors.MissingMandatoryValue: Missing mandatory value: scheduler.cls full_key: scheduler.cls reference_type=ObjectConf object_type=ObjectConf ``` ## System information - **Hydra Version** : 1.0.0rc2 - **Python version** : 3.7.7 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hydra/types.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 from dataclasses import dataclass, field 3 from enum import Enum 4 from typing import Any, Callable, Dict 5 6 from omegaconf import MISSING 7 8 TaskFunction = Callable[[Any], Any] 9 10 11 @dataclass 12 # This extends Dict[str, Any] to allow for the deprecated "class" field. 13 # Once support for class field removed this can stop extending Dict. 14 class ObjectConf(Dict[str, Any]): 15 # class, class method or function name 16 target: str = MISSING 17 18 # parameters to pass to cls when calling it 19 params: Any = field(default_factory=dict) 20 21 # cls is deprecated, use target, cls will be removed in Hydra 1.1 22 cls: str = MISSING 23 24 # class is deprecated, use target, class will be removed in Hydra 1.1 25 # (class is Python keyword and is only supported through DictConfig) 26 # class: str = MISSING 27 28 29 class RunMode(Enum): 30 RUN = 1 31 MULTIRUN = 2 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hydra/types.py b/hydra/types.py --- a/hydra/types.py +++ b/hydra/types.py @@ -18,13 +18,6 @@ # parameters to pass to cls when calling it params: Any = field(default_factory=dict) - # cls is deprecated, use target, cls will be removed in Hydra 1.1 - cls: str = MISSING - - # class is deprecated, use target, class will be removed in Hydra 1.1 - # (class is Python keyword and is only supported through DictConfig) - # class: str = MISSING - class RunMode(Enum): RUN = 1
{"golden_diff": "diff --git a/hydra/types.py b/hydra/types.py\n--- a/hydra/types.py\n+++ b/hydra/types.py\n@@ -18,13 +18,6 @@\n # parameters to pass to cls when calling it\n params: Any = field(default_factory=dict)\n \n- # cls is deprecated, use target, cls will be removed in Hydra 1.1\n- cls: str = MISSING\n-\n- # class is deprecated, use target, class will be removed in Hydra 1.1\n- # (class is Python keyword and is only supported through DictConfig)\n- # class: str = MISSING\n-\n \n class RunMode(Enum):\n RUN = 1\n", "issue": "[Bug] MISSING for Object Conf cls argument\n# \ud83d\udc1b Bug\r\n\r\nOmegaConf cls argument should not be a mandatory value if target is defined. Can we change this to be an optional value with None being the default?\r\n \r\n** Stack trace/error message **\r\n```\r\nomegaconf.errors.MissingMandatoryValue: Missing mandatory value: scheduler.cls\r\n full_key: scheduler.cls\r\n reference_type=ObjectConf\r\n object_type=ObjectConf\r\n```\r\n\r\n\r\n## System information\r\n- **Hydra Version** : 1.0.0rc2\r\n- **Python version** : 3.7.7\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Callable, Dict\n\nfrom omegaconf import MISSING\n\nTaskFunction = Callable[[Any], Any]\n\n\n@dataclass\n# This extends Dict[str, Any] to allow for the deprecated \"class\" field.\n# Once support for class field removed this can stop extending Dict.\nclass ObjectConf(Dict[str, Any]):\n # class, class method or function name\n target: str = MISSING\n\n # parameters to pass to cls when calling it\n params: Any = field(default_factory=dict)\n\n # cls is deprecated, use target, cls will be removed in Hydra 1.1\n cls: str = MISSING\n\n # class is deprecated, use target, class will be removed in Hydra 1.1\n # (class is Python keyword and is only supported through DictConfig)\n # class: str = MISSING\n\n\nclass RunMode(Enum):\n RUN = 1\n MULTIRUN = 2\n", "path": "hydra/types.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Callable, Dict\n\nfrom omegaconf import MISSING\n\nTaskFunction = Callable[[Any], Any]\n\n\n@dataclass\n# This extends Dict[str, Any] to allow for the deprecated \"class\" field.\n# Once support for class field removed this can stop extending Dict.\nclass ObjectConf(Dict[str, Any]):\n # class, class method or function name\n target: str = MISSING\n\n # parameters to pass to cls when calling it\n params: Any = field(default_factory=dict)\n\n\nclass RunMode(Enum):\n RUN = 1\n MULTIRUN = 2\n", "path": "hydra/types.py"}]}
num_tokens: 678
num_tokens_diff: 156
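The Hydra fix above can be checked by hand: once the deprecated `cls` field and the commented-out `class` field are gone, an `ObjectConf` that sets only `target` validates cleanly. The sketch below mirrors the patched dataclass and adds an OmegaConf round trip for demonstration; the scheduler target string is illustrative, not taken from the record.

```python
from dataclasses import dataclass, field
from typing import Any, Dict

from omegaconf import MISSING, OmegaConf


@dataclass
class ObjectConf(Dict[str, Any]):
    # class, class method or function name
    target: str = MISSING

    # parameters to pass to the target when calling it
    params: Any = field(default_factory=dict)


# Setting only `target` now validates; no MissingMandatoryValue for `cls`.
cfg = OmegaConf.structured(ObjectConf(target="torch.optim.lr_scheduler.StepLR"))
print(OmegaConf.to_yaml(cfg))
```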
problem_id: gh_patches_debug_245
source: rasdani/github-patches
task_type: git_diff
in_source_id: mlcommons__GaNDLF-809
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `gdown` does not seem to be working **Describe the bug** Current CI seems to be broken. **To Reproduce** Steps to reproduce the behavior: 1. Run any CI test 2. See error: ```python-traceback [SNIP!] if gdrive_file_id and is_gdrive_download_link: content_disposition = six.moves.urllib_parse.unquote( res.headers["Content-Disposition"] ) m = re.search(r"filename\*=UTF-8''(.*)", content_disposition) > filename_from_url = m.groups()[0] E AttributeError: 'NoneType' object has no attribute 'groups' ``` Example: https://github.com/mlcommons/GaNDLF/actions/runs/7489779631/job/20387346791?pr=764#step:9:219 **Expected behavior** The sample data file download should work. **Screenshots** N.A. **GaNDLF Version** Current master **Desktop (please complete the following information):** N.A. **Additional context** Basically, it is this error: https://github.com/wkentaro/gdown/issues/291 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 """The setup script.""" 4 5 6 import sys, re, os 7 from setuptools import setup, find_packages 8 from setuptools.command.install import install 9 from setuptools.command.develop import develop 10 from setuptools.command.egg_info import egg_info 11 12 try: 13 with open("README.md") as readme_file: 14 readme = readme_file.read() 15 except Exception as error: 16 readme = "No README information found." 17 sys.stderr.write( 18 "Warning: Could not open '%s' due %s\n" % ("README.md", error) 19 ) 20 21 22 class CustomInstallCommand(install): 23 def run(self): 24 install.run(self) 25 26 27 class CustomDevelopCommand(develop): 28 def run(self): 29 develop.run(self) 30 31 32 class CustomEggInfoCommand(egg_info): 33 def run(self): 34 egg_info.run(self) 35 36 37 try: 38 filepath = "GANDLF/version.py" 39 version_file = open(filepath) 40 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read()) 41 42 except Exception as error: 43 __version__ = "0.0.1" 44 sys.stderr.write( 45 "Warning: Could not open '%s' due %s\n" % (filepath, error) 46 ) 47 48 # Handle cases where specific files need to be bundled into the final package as installed via PyPI 49 dockerfiles = [ 50 item 51 for item in os.listdir(os.path.dirname(os.path.abspath(__file__))) 52 if (os.path.isfile(item) and item.startswith("Dockerfile-")) 53 ] 54 entrypoint_files = [ 55 item 56 for item in os.listdir(os.path.dirname(os.path.abspath(__file__))) 57 if (os.path.isfile(item) and item.startswith("gandlf_")) 58 ] 59 setup_files = ["setup.py", ".dockerignore", "pyproject.toml", "MANIFEST.in"] 60 all_extra_files = dockerfiles + entrypoint_files + setup_files 61 all_extra_files_pathcorrected = [ 62 os.path.join("../", item) for item in all_extra_files 63 ] 64 # find_packages should only ever find these as subpackages of gandlf, not as top-level packages 65 # generate this dynamically? 
66 # GANDLF.GANDLF is needed to prevent recursion madness in deployments 67 toplevel_package_excludes = [ 68 "GANDLF.GANDLF", 69 "anonymize", 70 "cli", 71 "compute", 72 "data", 73 "grad_clipping", 74 "losses", 75 "metrics", 76 "models", 77 "optimizers", 78 "schedulers", 79 "utils", 80 ] 81 82 83 requirements = [ 84 "torch==2.1.2", 85 "black==23.11.0", 86 "numpy==1.25.0", 87 "scipy", 88 "SimpleITK!=2.0.*", 89 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536 90 "torchvision", 91 "tqdm", 92 "torchio==0.19.5", 93 "pandas>=2.0.0", 94 "scikit-learn>=0.23.2", 95 "scikit-image>=0.19.1", 96 "setuptools", 97 "seaborn", 98 "pyyaml", 99 "tiffslide", 100 "matplotlib", 101 "gdown==4.6.3", 102 "pytest", 103 "coverage", 104 "pytest-cov", 105 "psutil", 106 "medcam", 107 "opencv-python", 108 "torchmetrics==1.1.2", 109 "zarr==2.10.3", 110 "pydicom", 111 "onnx", 112 "torchinfo==1.7.0", 113 "segmentation-models-pytorch==0.3.3", 114 "ACSConv==0.1.1", 115 "docker", 116 "dicom-anonymizer", 117 "twine", 118 "zarr", 119 "keyring", 120 "monai==1.3.0", 121 ] 122 123 if __name__ == "__main__": 124 setup( 125 name="GANDLF", 126 version=__version__, 127 author="MLCommons", 128 author_email="[email protected]", 129 python_requires=">3.8, <3.12", 130 packages=find_packages( 131 where=os.path.dirname(os.path.abspath(__file__)), 132 exclude=toplevel_package_excludes, 133 ), 134 cmdclass={ 135 "install": CustomInstallCommand, 136 "develop": CustomDevelopCommand, 137 "egg_info": CustomEggInfoCommand, 138 }, 139 scripts=[ 140 "gandlf_run", 141 "gandlf_constructCSV", 142 "gandlf_collectStats", 143 "gandlf_patchMiner", 144 "gandlf_preprocess", 145 "gandlf_anonymizer", 146 "gandlf_verifyInstall", 147 "gandlf_configGenerator", 148 "gandlf_recoverConfig", 149 "gandlf_deploy", 150 "gandlf_optimizeModel", 151 "gandlf_generateMetrics", 152 ], 153 classifiers=[ 154 "Development Status :: 3 - Alpha", 155 "Intended Audience :: Science/Research", 156 "License :: OSI Approved :: Apache Software License", 157 "Natural Language :: English", 158 "Operating System :: OS Independent", 159 "Programming Language :: Python :: 3.9", 160 "Programming Language :: Python :: 3.10", 161 "Programming Language :: Python :: 3.11", 162 "Topic :: Scientific/Engineering :: Medical Science Apps.", 163 ], 164 description=( 165 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging." 166 ), 167 install_requires=requirements, 168 license="Apache-2.0", 169 long_description=readme, 170 long_description_content_type="text/markdown", 171 include_package_data=True, 172 package_data={"GANDLF": all_extra_files_pathcorrected}, 173 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch", 174 zip_safe=False, 175 ) 176 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ "pyyaml", "tiffslide", "matplotlib", - "gdown==4.6.3", + "gdown==5.1.0", "pytest", "coverage", "pytest-cov",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -98,7 +98,7 @@\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n- \"gdown==4.6.3\",\n+ \"gdown==5.1.0\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n", "issue": "`gdown` does not seem to be working\n**Describe the bug**\r\nCurrent CI seems to be broken.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Run any CI test\r\n2. See error:\r\n```python-traceback\r\n[SNIP!]\r\n if gdrive_file_id and is_gdrive_download_link:\r\n content_disposition = six.moves.urllib_parse.unquote(\r\n res.headers[\"Content-Disposition\"]\r\n )\r\n m = re.search(r\"filename\\*=UTF-8''(.*)\", content_disposition)\r\n> filename_from_url = m.groups()[0]\r\nE AttributeError: 'NoneType' object has no attribute 'groups'\r\n```\r\nExample: https://github.com/mlcommons/GaNDLF/actions/runs/7489779631/job/20387346791?pr=764#step:9:219\r\n\r\n**Expected behavior**\r\nThe sample data file download should work.\r\n\r\n**Screenshots**\r\nN.A.\r\n\r\n**GaNDLF Version**\r\nCurrent master\r\n\r\n**Desktop (please complete the following information):**\r\nN.A.\r\n\r\n**Additional context**\r\nBasically, it is this error: https://github.com/wkentaro/gdown/issues/291\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.2\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n 
\"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.5\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown==4.6.3\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n \"monai==1.3.0\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# 
Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.2\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.5\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown==5.1.0\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n \"monai==1.3.0\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, 
medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}]}
num_tokens: 2,241
num_tokens_diff: 85
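The GaNDLF fix above is a pure dependency pin, but the underlying `gdown` regression surfaced as an opaque `AttributeError` deep inside the library, which is a reason to wrap sample-data downloads in a loud failure path. The helper below is a hedged sketch, not GaNDLF code: the file id and output path are placeholders, and it guards both failure modes `gdown` has used across releases (returning `None` versus raising).

```python
import gdown


def fetch_sample_data(file_id: str, output: str) -> str:
    """Download a Google Drive file, failing with a readable error on regressions."""
    url = f"https://drive.google.com/uc?id={file_id}"
    try:
        path = gdown.download(url, output, quiet=False)
    except Exception as err:  # newer gdown releases raise on failure
        raise RuntimeError(f"gdown failed for {url}; check the pinned version") from err
    if path is None:  # older gdown releases signal failure by returning None
        raise RuntimeError(f"gdown returned no file for {url}")
    return path
```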
problem_id: gh_patches_debug_9165
source: rasdani/github-patches
task_type: git_diff
in_source_id: microsoft__DeepSpeed-1921
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] No module named 'fcntl' while importing the package Hey, Working on windows 11, Python 3.7 and tried importing the package. Got the error in the title, is there a way around it since it's exclusive to Linux? ![fcntl](https://user-images.githubusercontent.com/61480454/164317686-5303f264-5fc7-42c0-89b5-d16a125ae11f.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `deepspeed/utils/debug.py` Content: ``` 1 """ debug utils """ 2 3 import fcntl 4 5 # for debug purposes map module and param objects to their fully qualified names 6 module_names = {} 7 param_names = {} 8 9 10 def debug_extract_module_and_param_names(model): 11 # extract the fully qualified names as soon as the model is acquired 12 global module_names 13 global param_names 14 # XXX: can probably make a map of param2module and vice-versa 15 module_names = {module: name for name, module in model.named_modules()} 16 param_names = {param: name for name, param in model.named_parameters()} 17 18 19 def debug_module2name(module): 20 if module in module_names: 21 return module_names[module] 22 else: 23 return "unknown" 24 25 26 def debug_module2name_id(module): 27 return f"name={debug_module2name(module)} id={module.id}" 28 29 30 def debug_module2name_class(module): 31 return f"name={debug_module2name(module)} {module.__class__.__name__}" 32 33 34 def debug_param2name(param): 35 if param in param_names: 36 return param_names[param] 37 else: 38 return "unknown" 39 40 41 def debug_param2name_id(param): 42 return f"name={debug_param2name(param)} id={param.ds_id}" 43 44 45 def debug_param2name_id_shape(param): 46 return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}" 47 48 49 def debug_param2name_id_shape_device(param): 50 return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}" 51 52 53 def debug_param2name_id_numel(param): 54 return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}" 55 56 57 def debug_param2name_id_shape_status(param): 58 return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}" 59 60 61 def printflock(*msgs): 62 """ 63 64 For printing messages for all concurrent gpus w/o getting interleaved text. 65 66 This is useful when debugging issues where multi-gpus don't sync. 67 68 1. Enable the force debug in say partitioning and zero3 files 69 2. Override the usual versions with :: 70 71 def print_rank_0(message, debug=False, force=False): 72 rank = torch.distributed.get_rank() 73 printflock(f"[{rank}] {message}") 74 3. run the program and you get both logs non-interleaved 75 76 But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper 77 function might be more useful, as it's easier to send each log stream into a separate file and 78 then compare those. 79 80 """ 81 82 with open(__file__, "r") as fh: 83 fcntl.flock(fh, fcntl.LOCK_EX) 84 try: 85 print(*msgs) 86 finally: 87 fcntl.flock(fh, fcntl.LOCK_UN) 88 89 90 fh = None 91 92 93 def log_rank_file(rank, *msgs): 94 """ 95 Print to a log file of the given rank 96 97 This is useful for debugging hanging in sync processes. Here is a possible workflow: 98 99 1. Enable the force debug in say partitioning and zero3 files 100 2. 
Override the usual versions of print_rank_0 in those files with :: 101 102 def print_rank_0(message, debug=False, force=False): 103 rank = torch.distributed.get_rank() 104 log_rank_file(rank, message) 105 106 3. run the program 107 4. fix up the expected differences, e.g. different cuda numbers :: 108 109 perl -pi -e 's|cuda:1|cuda:0|' log_rank_* 110 111 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same 112 work (e.g. when some layers get conditionally skipped on one gpu but not all) 113 114 diff -u log_rank_0.txt log_rank_1.txt | less 115 116 """ 117 global fh 118 if fh is None: 119 fh = open(f"log_rank_{rank}.txt", "w") 120 for m in msgs: 121 fh.write(f"{m}\n") 122 fh.flush() 123 124 125 def print_backward_tensors(tensor): 126 def _print_bwd_tensors(grad_fn): 127 print(f"Backward tensors in {grad_fn}") 128 for funcs in grad_fn.next_functions: 129 if funcs[0]: 130 try: 131 tensor = getattr(funcs[0], 'variable') 132 print(funcs[0]) 133 print( 134 f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}" 135 ) 136 except AttributeError as e: 137 _print_bwd_tensors(funcs[0]) 138 139 if hasattr(tensor, 'grad_fn'): 140 _print_bwd_tensors(tensor.grad_fn) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/deepspeed/utils/debug.py b/deepspeed/utils/debug.py --- a/deepspeed/utils/debug.py +++ b/deepspeed/utils/debug.py @@ -1,6 +1,7 @@ """ debug utils """ -import fcntl +# For lazy import with printflock() +fcntl = None # for debug purposes map module and param objects to their fully qualified names module_names = {} @@ -78,6 +79,9 @@ then compare those. """ + global fcntl + if fcntl == None: + import fcntl with open(__file__, "r") as fh: fcntl.flock(fh, fcntl.LOCK_EX)
{"golden_diff": "diff --git a/deepspeed/utils/debug.py b/deepspeed/utils/debug.py\n--- a/deepspeed/utils/debug.py\n+++ b/deepspeed/utils/debug.py\n@@ -1,6 +1,7 @@\n \"\"\" debug utils \"\"\"\n \n-import fcntl\n+# For lazy import with printflock()\n+fcntl = None\n \n # for debug purposes map module and param objects to their fully qualified names\n module_names = {}\n@@ -78,6 +79,9 @@\n then compare those.\n \n \"\"\"\n+ global fcntl\n+ if fcntl == None:\n+ import fcntl\n \n with open(__file__, \"r\") as fh:\n fcntl.flock(fh, fcntl.LOCK_EX)\n", "issue": "[BUG] No module named 'fcntl' while importing the package\nHey,\r\nWorking on windows 11, Python 3.7 and tried importing the package.\r\nGot the error in the title, is there a way around it since it's exclusive to Linux?\r\n\r\n![fcntl](https://user-images.githubusercontent.com/61480454/164317686-5303f264-5fc7-42c0-89b5-d16a125ae11f.png)\r\n \n", "before_files": [{"content": "\"\"\" debug utils \"\"\"\n\nimport fcntl\n\n# for debug purposes map module and param objects to their fully qualified names\nmodule_names = {}\nparam_names = {}\n\n\ndef debug_extract_module_and_param_names(model):\n # extract the fully qualified names as soon as the model is acquired\n global module_names\n global param_names\n # XXX: can probably make a map of param2module and vice-versa\n module_names = {module: name for name, module in model.named_modules()}\n param_names = {param: name for name, param in model.named_parameters()}\n\n\ndef debug_module2name(module):\n if module in module_names:\n return module_names[module]\n else:\n return \"unknown\"\n\n\ndef debug_module2name_id(module):\n return f\"name={debug_module2name(module)} id={module.id}\"\n\n\ndef debug_module2name_class(module):\n return f\"name={debug_module2name(module)} {module.__class__.__name__}\"\n\n\ndef debug_param2name(param):\n if param in param_names:\n return param_names[param]\n else:\n return \"unknown\"\n\n\ndef debug_param2name_id(param):\n return f\"name={debug_param2name(param)} id={param.ds_id}\"\n\n\ndef debug_param2name_id_shape(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}\"\n\n\ndef debug_param2name_id_shape_device(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}\"\n\n\ndef debug_param2name_id_numel(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}\"\n\n\ndef debug_param2name_id_shape_status(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}\"\n\n\ndef printflock(*msgs):\n \"\"\"\n\n For printing messages for all concurrent gpus w/o getting interleaved text.\n\n This is useful when debugging issues where multi-gpus don't sync.\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n printflock(f\"[{rank}] {message}\")\n 3. 
run the program and you get both logs non-interleaved\n\n But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper\n function might be more useful, as it's easier to send each log stream into a separate file and\n then compare those.\n\n \"\"\"\n\n with open(__file__, \"r\") as fh:\n fcntl.flock(fh, fcntl.LOCK_EX)\n try:\n print(*msgs)\n finally:\n fcntl.flock(fh, fcntl.LOCK_UN)\n\n\nfh = None\n\n\ndef log_rank_file(rank, *msgs):\n \"\"\"\n Print to a log file of the given rank\n\n This is useful for debugging hanging in sync processes. Here is a possible workflow:\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions of print_rank_0 in those files with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n log_rank_file(rank, message)\n\n 3. run the program\n 4. fix up the expected differences, e.g. different cuda numbers ::\n\n perl -pi -e 's|cuda:1|cuda:0|' log_rank_*\n\n 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same\n work (e.g. when some layers get conditionally skipped on one gpu but not all)\n\n diff -u log_rank_0.txt log_rank_1.txt | less\n\n \"\"\"\n global fh\n if fh is None:\n fh = open(f\"log_rank_{rank}.txt\", \"w\")\n for m in msgs:\n fh.write(f\"{m}\\n\")\n fh.flush()\n\n\ndef print_backward_tensors(tensor):\n def _print_bwd_tensors(grad_fn):\n print(f\"Backward tensors in {grad_fn}\")\n for funcs in grad_fn.next_functions:\n if funcs[0]:\n try:\n tensor = getattr(funcs[0], 'variable')\n print(funcs[0])\n print(\n f\"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}\"\n )\n except AttributeError as e:\n _print_bwd_tensors(funcs[0])\n\n if hasattr(tensor, 'grad_fn'):\n _print_bwd_tensors(tensor.grad_fn)\n", "path": "deepspeed/utils/debug.py"}], "after_files": [{"content": "\"\"\" debug utils \"\"\"\n\n# For lazy import with printflock()\nfcntl = None\n\n# for debug purposes map module and param objects to their fully qualified names\nmodule_names = {}\nparam_names = {}\n\n\ndef debug_extract_module_and_param_names(model):\n # extract the fully qualified names as soon as the model is acquired\n global module_names\n global param_names\n # XXX: can probably make a map of param2module and vice-versa\n module_names = {module: name for name, module in model.named_modules()}\n param_names = {param: name for name, param in model.named_parameters()}\n\n\ndef debug_module2name(module):\n if module in module_names:\n return module_names[module]\n else:\n return \"unknown\"\n\n\ndef debug_module2name_id(module):\n return f\"name={debug_module2name(module)} id={module.id}\"\n\n\ndef debug_module2name_class(module):\n return f\"name={debug_module2name(module)} {module.__class__.__name__}\"\n\n\ndef debug_param2name(param):\n if param in param_names:\n return param_names[param]\n else:\n return \"unknown\"\n\n\ndef debug_param2name_id(param):\n return f\"name={debug_param2name(param)} id={param.ds_id}\"\n\n\ndef debug_param2name_id_shape(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}\"\n\n\ndef debug_param2name_id_shape_device(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}\"\n\n\ndef debug_param2name_id_numel(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}\"\n\n\ndef debug_param2name_id_shape_status(param):\n return 
f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}\"\n\n\ndef printflock(*msgs):\n \"\"\"\n\n For printing messages for all concurrent gpus w/o getting interleaved text.\n\n This is useful when debugging issues where multi-gpus don't sync.\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n printflock(f\"[{rank}] {message}\")\n 3. run the program and you get both logs non-interleaved\n\n But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper\n function might be more useful, as it's easier to send each log stream into a separate file and\n then compare those.\n\n \"\"\"\n global fcntl\n if fcntl == None:\n import fcntl\n\n with open(__file__, \"r\") as fh:\n fcntl.flock(fh, fcntl.LOCK_EX)\n try:\n print(*msgs)\n finally:\n fcntl.flock(fh, fcntl.LOCK_UN)\n\n\nfh = None\n\n\ndef log_rank_file(rank, *msgs):\n \"\"\"\n Print to a log file of the given rank\n\n This is useful for debugging hanging in sync processes. Here is a possible workflow:\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions of print_rank_0 in those files with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n log_rank_file(rank, message)\n\n 3. run the program\n 4. fix up the expected differences, e.g. different cuda numbers ::\n\n perl -pi -e 's|cuda:1|cuda:0|' log_rank_*\n\n 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same\n work (e.g. when some layers get conditionally skipped on one gpu but not all)\n\n diff -u log_rank_0.txt log_rank_1.txt | less\n\n \"\"\"\n global fh\n if fh is None:\n fh = open(f\"log_rank_{rank}.txt\", \"w\")\n for m in msgs:\n fh.write(f\"{m}\\n\")\n fh.flush()\n\n\ndef print_backward_tensors(tensor):\n def _print_bwd_tensors(grad_fn):\n print(f\"Backward tensors in {grad_fn}\")\n for funcs in grad_fn.next_functions:\n if funcs[0]:\n try:\n tensor = getattr(funcs[0], 'variable')\n print(funcs[0])\n print(\n f\"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}\"\n )\n except AttributeError as e:\n _print_bwd_tensors(funcs[0])\n\n if hasattr(tensor, 'grad_fn'):\n _print_bwd_tensors(tensor.grad_fn)\n", "path": "deepspeed/utils/debug.py"}]}
num_tokens: 1,753
num_tokens_diff: 152
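The DeepSpeed fix above is a textbook lazy import: the POSIX-only `fcntl` module is bound on first use instead of at import time, so the package stays importable on Windows. A minimal standalone sketch of the same pattern follows, substituting the idiomatic `is None` for the patch's `== None` comparison.

```python
fcntl = None  # POSIX-only module; bound lazily so importing this file works on Windows


def printflock(*msgs):
    """Print without interleaving across processes, taking a file lock on POSIX."""
    global fcntl
    if fcntl is None:
        # With the `global` declaration above, this import binds the module-level
        # name; on Windows it raises ImportError only if printflock is called.
        import fcntl
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
```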
problem_id: gh_patches_debug_31923
source: rasdani/github-patches
task_type: git_diff
in_source_id: alpa-projects__alpa-511
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Adding the `pjit` in the comparison Some people are more familiar with using model parallel via [`pjit`](https://github.com/google/jax/blob/main/jax/experimental/pjit.py). What about adding one more rows [here](https://github.com/alpa-projects/alpa/blob/main/docs/gallery/tutorials/alpa_vs_pmap.py#L46-L52)? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/gallery/tutorials/alpa_vs_pmap.py` Content: ``` 1 """ 2 Differences between alpa.parallelize and jax.pmap 3 ================================================= 4 5 The most common tool for parallelization or distributed computing in jax is 6 `pmap <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`_. 7 With several lines of code change, we can use ``pmap`` for data parallel 8 training. However, we cannot use ``pmap`` for model parallel training, 9 which is required for training large models with billions of parameters. 10 11 On the contrary, ``alpa.parallelize`` supports both data parallelism and 12 model parallelism in an automatic way. ``alpa.parallelize`` analyzes the 13 jax computational graph and picks the best strategy. 14 If data parallelism is more suitable, ``alpa.parallelize`` achieves the same 15 performance as ``pmap`` but with less code change. 16 If model parallelism is more suitable, ``alpa.parallelize`` achieves better performance 17 and uses less memory than ``pmap``. 18 19 In this tutorial, we are going to compare ``alpa.parallelize`` and ``pmap`` on two 20 workloads. A more detailed comparison among ``alpa.parallelize``, ``pmap``, and ``xmap`` 21 is also attached at the end of the article. 22 """ 23 24 ################################################################################ 25 # When data parallelism is prefered 26 # --------------------------------- 27 28 # TODO 29 30 ################################################################################ 31 # When model parallelism is prefered 32 # ---------------------------------- 33 34 # TODO 35 36 ################################################################################ 37 # Comparing ``alpa.parallelize``, ``pmap``, and ``xmap`` 38 # ------------------------------------------------------ 39 # Besides ``pmap``, jax also provides 40 # `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ 41 # for more advanced parallelization. 42 # The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``. 43 # In summary, ``alpa.parallelize`` supports more parallelism techniques in a 44 # more automatic way. 45 # 46 # ================ ================ ==================== ==================== ========= 47 # Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated 48 # ================ ================ ==================== ==================== ========= 49 # alpa.parallelize yes yes yes yes 50 # pmap yes no no no 51 # xmap yes yes no no 52 # ================ ================ ==================== ==================== ========= 53 # 54 # .. note:: 55 # Operator parallelism and pipeline parallelism are two forms of model parallelism. 56 # Operator parallelism partitions the work in a single operator and assigns them 57 # to different devices. Pipeline parallelism partitions the computational 58 # graphs and assigns different operators to different devices. 
59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/gallery/tutorials/alpa_vs_pmap.py b/docs/gallery/tutorials/alpa_vs_pmap.py --- a/docs/gallery/tutorials/alpa_vs_pmap.py +++ b/docs/gallery/tutorials/alpa_vs_pmap.py @@ -34,14 +34,15 @@ # TODO ################################################################################ -# Comparing ``alpa.parallelize``, ``pmap``, and ``xmap`` -# ------------------------------------------------------ +# Comparing ``alpa.parallelize``, ``pmap``, ``xmap``, and ``pjit`` +# ----------------------------------------------------------------- # Besides ``pmap``, jax also provides -# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ +# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ and +# `pjit <https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html>`_ # for more advanced parallelization. -# The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``. -# In summary, ``alpa.parallelize`` supports more parallelism techniques in a -# more automatic way. +# The table below compares the features of ``alpa.parallelize``, ``pmap``, ``xmap`` +# and ``pjit``. In summary, ``alpa.parallelize`` supports more parallelism +# techniques in a more automatic way. # # ================ ================ ==================== ==================== ========= # Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated @@ -49,6 +50,7 @@ # alpa.parallelize yes yes yes yes # pmap yes no no no # xmap yes yes no no +# pjit yes yes no no # ================ ================ ==================== ==================== ========= # # .. note::
{"golden_diff": "diff --git a/docs/gallery/tutorials/alpa_vs_pmap.py b/docs/gallery/tutorials/alpa_vs_pmap.py\n--- a/docs/gallery/tutorials/alpa_vs_pmap.py\n+++ b/docs/gallery/tutorials/alpa_vs_pmap.py\n@@ -34,14 +34,15 @@\n # TODO\n \n ################################################################################\n-# Comparing ``alpa.parallelize``, ``pmap``, and ``xmap``\n-# ------------------------------------------------------\n+# Comparing ``alpa.parallelize``, ``pmap``, ``xmap``, and ``pjit``\n+# -----------------------------------------------------------------\n # Besides ``pmap``, jax also provides\n-# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_\n+# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ and \n+# `pjit <https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html>`_\n # for more advanced parallelization.\n-# The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``.\n-# In summary, ``alpa.parallelize`` supports more parallelism techniques in a\n-# more automatic way.\n+# The table below compares the features of ``alpa.parallelize``, ``pmap``, ``xmap`` \n+# and ``pjit``. In summary, ``alpa.parallelize`` supports more parallelism \n+# techniques in a more automatic way.\n #\n # ================ ================ ==================== ==================== =========\n # Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated\n@@ -49,6 +50,7 @@\n # alpa.parallelize yes yes yes yes\n # pmap yes no no no\n # xmap yes yes no no\n+# pjit yes yes no no\n # ================ ================ ==================== ==================== =========\n #\n # .. note::\n", "issue": "Adding the `pjit` in the comparison\nSome people are more familiar with using model parallel via [`pjit`](https://github.com/google/jax/blob/main/jax/experimental/pjit.py). What about adding one more rows [here](https://github.com/alpa-projects/alpa/blob/main/docs/gallery/tutorials/alpa_vs_pmap.py#L46-L52)?\n", "before_files": [{"content": "\"\"\"\nDifferences between alpa.parallelize and jax.pmap\n=================================================\n\nThe most common tool for parallelization or distributed computing in jax is\n`pmap <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`_.\nWith several lines of code change, we can use ``pmap`` for data parallel\ntraining. However, we cannot use ``pmap`` for model parallel training,\nwhich is required for training large models with billions of parameters.\n\nOn the contrary, ``alpa.parallelize`` supports both data parallelism and\nmodel parallelism in an automatic way. ``alpa.parallelize`` analyzes the\njax computational graph and picks the best strategy.\nIf data parallelism is more suitable, ``alpa.parallelize`` achieves the same\nperformance as ``pmap`` but with less code change.\nIf model parallelism is more suitable, ``alpa.parallelize`` achieves better performance\nand uses less memory than ``pmap``.\n\nIn this tutorial, we are going to compare ``alpa.parallelize`` and ``pmap`` on two\nworkloads. 
A more detailed comparison among ``alpa.parallelize``, ``pmap``, and ``xmap``\nis also attached at the end of the article.\n\"\"\"\n\n################################################################################\n# When data parallelism is prefered\n# ---------------------------------\n\n# TODO\n\n################################################################################\n# When model parallelism is prefered\n# ----------------------------------\n\n# TODO\n\n################################################################################\n# Comparing ``alpa.parallelize``, ``pmap``, and ``xmap``\n# ------------------------------------------------------\n# Besides ``pmap``, jax also provides\n# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_\n# for more advanced parallelization.\n# The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``.\n# In summary, ``alpa.parallelize`` supports more parallelism techniques in a\n# more automatic way.\n#\n# ================ ================ ==================== ==================== =========\n# Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated\n# ================ ================ ==================== ==================== =========\n# alpa.parallelize yes yes yes yes\n# pmap yes no no no\n# xmap yes yes no no\n# ================ ================ ==================== ==================== =========\n#\n# .. note::\n# Operator parallelism and pipeline parallelism are two forms of model parallelism.\n# Operator parallelism partitions the work in a single operator and assigns them\n# to different devices. Pipeline parallelism partitions the computational\n# graphs and assigns different operators to different devices.\n", "path": "docs/gallery/tutorials/alpa_vs_pmap.py"}], "after_files": [{"content": "\"\"\"\nDifferences between alpa.parallelize and jax.pmap\n=================================================\n\nThe most common tool for parallelization or distributed computing in jax is\n`pmap <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`_.\nWith several lines of code change, we can use ``pmap`` for data parallel\ntraining. However, we cannot use ``pmap`` for model parallel training,\nwhich is required for training large models with billions of parameters.\n\nOn the contrary, ``alpa.parallelize`` supports both data parallelism and\nmodel parallelism in an automatic way. ``alpa.parallelize`` analyzes the\njax computational graph and picks the best strategy.\nIf data parallelism is more suitable, ``alpa.parallelize`` achieves the same\nperformance as ``pmap`` but with less code change.\nIf model parallelism is more suitable, ``alpa.parallelize`` achieves better performance\nand uses less memory than ``pmap``.\n\nIn this tutorial, we are going to compare ``alpa.parallelize`` and ``pmap`` on two\nworkloads. 
A more detailed comparison among ``alpa.parallelize``, ``pmap``, and ``xmap``\nis also attached at the end of the article.\n\"\"\"\n\n################################################################################\n# When data parallelism is prefered\n# ---------------------------------\n\n# TODO\n\n################################################################################\n# When model parallelism is prefered\n# ----------------------------------\n\n# TODO\n\n################################################################################\n# Comparing ``alpa.parallelize``, ``pmap``, ``xmap``, and ``pjit``\n# -----------------------------------------------------------------\n# Besides ``pmap``, jax also provides\n# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ and \n# `pjit <https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html>`_\n# for more advanced parallelization.\n# The table below compares the features of ``alpa.parallelize``, ``pmap``, ``xmap`` \n# and ``pjit``. In summary, ``alpa.parallelize`` supports more parallelism \n# techniques in a more automatic way.\n#\n# ================ ================ ==================== ==================== =========\n# Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated\n# ================ ================ ==================== ==================== =========\n# alpa.parallelize yes yes yes yes\n# pmap yes no no no\n# xmap yes yes no no\n# pjit yes yes no no\n# ================ ================ ==================== ==================== =========\n#\n# .. note::\n# Operator parallelism and pipeline parallelism are two forms of model parallelism.\n# Operator parallelism partitions the work in a single operator and assigns them\n# to different devices. Pipeline parallelism partitions the computational\n# graphs and assigns different operators to different devices.\n", "path": "docs/gallery/tutorials/alpa_vs_pmap.py"}]}
1017
424
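For context on the row above: the golden diff only extends a documentation table, so there is no code change to study. Since the issue centers on `pjit`, the sketch below shows what minimal data-parallel `pjit` usage looked like at the time. It is illustrative only; it assumes the 2022-era `jax.experimental.pjit` API (the `in_axis_resources`/`out_axis_resources` arguments and an explicit `Mesh` context), and none of it comes from the alpa codebase or the patch.

```python
# Illustrative pjit sketch (not part of the patch); assumes the 2022-era API.
import numpy as np
import jax
import jax.numpy as jnp
from jax.experimental import maps, PartitionSpec
from jax.experimental.pjit import pjit

# One-dimensional mesh over all available devices.
mesh = maps.Mesh(np.asarray(jax.devices()), ('devices',))

# Shard the leading axis of inputs and outputs across the mesh axis.
parallel_fn = pjit(
    lambda x: jnp.sin(x) * 2.0,
    in_axis_resources=PartitionSpec('devices'),
    out_axis_resources=PartitionSpec('devices'),
)

with mesh:
    # The leading dimension must divide evenly across the mesh's devices.
    out = parallel_fn(jnp.arange(8.0))
```

Unlike `pmap`, the same mechanism can also express operator parallelism by sharding non-batch axes, which is consistent with the patched table: `pjit` gets "yes" for operator parallelism but "no" for automation, since the `PartitionSpec`s are written by hand.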
gh_patches_debug_19670
rasdani/github-patches
git_diff
google__flax-2842
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- lm1b sampler update is out of bound? Hi, _Disclaimer: I'm not confident if this is some intended behavior._ I think in the lm1b example, the prediction sampler could write to indices out of bound of the result. The particular offending line is: https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L109. Since `i <= max_decode_len` according to https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L75, and since `prompt_inputs.shape[1] == max_decode_len`, the last iteration (or two iterations?) of while loop would overwrite to `prompt_inputs[..., max_decode_len - 1]` (due to XLA semantics). It's either a subtle bug (it won't raise cause error), or there's something I'm not understanding about the sampling procedure. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/lm1b/temperature_sampler.py` Content: ``` 1 # Copyright 2022 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Fast decoding routines for inference from a trained language model.""" 16 17 from jax import lax 18 from jax import random 19 import jax.numpy as jnp 20 21 22 # Constants 23 # The default End-of-Sentence token id is 2 (SentencePiece). 24 EOS_ID = 2 25 26 27 def temperature_sample(prompt_inputs, 28 init_cache, 29 tokens_to_logits, 30 prng_key, 31 temperature=1.0, 32 topk=20, 33 eos_token=EOS_ID): 34 """Temperature sampling for language model generation. 35 36 Args: 37 prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens. 38 init_cache: flax attention cache. 39 tokens_to_logits: fast autoregressive decoder function taking single token 40 slices and cache and returning next-token logits and updated cache. 41 prng_key: JAX PRNGKey. 42 temperature: float: sampling temperature factor. As it approaches 43 zero this becomes equivalent to greedy sampling. 44 topk: integer: if nonzero only use the top-k logits to sample next token, 45 if zero don't use any cutoff and sample from full logits over vocabulary. 46 eos_token: int: end-of-sentence token for target vocabulary. 47 48 Returns: 49 Array of sampled sequences: [batch_size, max_decode_len] 50 """ 51 batch_size = prompt_inputs.shape[0] 52 max_decode_len = prompt_inputs.shape[1] 53 end_marker = jnp.array(eos_token) 54 temperature = jnp.array(temperature) 55 56 # Initialize sampling loop state. 57 # initial loop PRNGKey 58 rng0 = prng_key 59 # loop position counter. 60 i0 = jnp.array(0) 61 # per batch-item holding current token in loop. 62 token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32) 63 # per batch-item state bit indicating if sentence has finished. 64 ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_) 65 # (batch, length) array containing prefix prompt tokens for sampling loop 66 # as well as the generated output of newly sampled tokens. 
67 sequences0 = prompt_inputs 68 # Sampling loop state is stored in a simple tuple. 69 sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0) 70 71 def sampling_loop_cond_fn(state): 72 """Sampling loop termination condition.""" 73 (i, _, _, _, ended, _) = state 74 # Have we reached max decoding length? 75 not_at_end = (i < max_decode_len) 76 # Have all sampled sequences reached an end marker? 77 all_sequences_ended = jnp.all(ended) 78 return not_at_end & (~all_sequences_ended) 79 80 def sampling_loop_body_fn(state): 81 """Sampling loop state update.""" 82 i, sequences, cache, cur_token, ended, rng = state 83 # Split RNG for sampling. 84 rng1, rng2 = random.split(rng) 85 # Call fast-decoder model on current tokens to get next-position logits. 86 logits, new_cache = tokens_to_logits(cur_token, cache) 87 # Sample next token from logits. 88 # TODO(levskaya): add top-p "nucleus" sampling option. 89 if topk: 90 # Get top-k logits and their indices, sample within these top-k tokens. 91 topk_logits, topk_idxs = lax.top_k(logits, topk) 92 topk_token = jnp.expand_dims(random.categorical( 93 rng1, topk_logits / temperature).astype(jnp.int32), axis=-1) 94 # Return the original indices corresponding to the sampled top-k tokens. 95 next_token = jnp.squeeze( 96 jnp.take_along_axis(topk_idxs, topk_token, axis=-1), axis=-1) 97 else: 98 next_token = random.categorical( 99 rng1, logits / temperature).astype(jnp.int32) 100 # Only use sampled tokens if we're past provided prefix tokens. 101 out_of_prompt = (sequences[:, i+1] == 0) 102 next_token = (next_token * out_of_prompt + 103 sequences[:, i+1] * ~out_of_prompt) 104 # If end-marker reached for batch item, only emit padding tokens. 105 next_token_or_endpad = (next_token[None] * ~ended) 106 ended |= (next_token_or_endpad == end_marker) 107 # Add current sampled tokens to recorded sequences. 108 new_sequences = lax.dynamic_update_slice( 109 sequences, next_token_or_endpad, (0, i+1)) 110 return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2) 111 112 # Run sampling loop and collect final state. 113 final_state = lax.while_loop(sampling_loop_cond_fn, 114 sampling_loop_body_fn, 115 sampling_loop_init_state) 116 117 # Pick part of the state corresponding to the sampled sequences. 118 final_sequences = final_state[1] 119 return final_sequences 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/lm1b/temperature_sampler.py b/examples/lm1b/temperature_sampler.py --- a/examples/lm1b/temperature_sampler.py +++ b/examples/lm1b/temperature_sampler.py @@ -57,7 +57,7 @@ # initial loop PRNGKey rng0 = prng_key # loop position counter. - i0 = jnp.array(0) + i0 = jnp.array(-1) # per batch-item holding current token in loop. token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32) # per batch-item state bit indicating if sentence has finished. @@ -72,7 +72,7 @@ """Sampling loop termination condition.""" (i, _, _, _, ended, _) = state # Have we reached max decoding length? - not_at_end = (i < max_decode_len) + not_at_end = (i < max_decode_len - 1) # Have all sampled sequences reached an end marker? all_sequences_ended = jnp.all(ended) return not_at_end & (~all_sequences_ended)
{"golden_diff": "diff --git a/examples/lm1b/temperature_sampler.py b/examples/lm1b/temperature_sampler.py\n--- a/examples/lm1b/temperature_sampler.py\n+++ b/examples/lm1b/temperature_sampler.py\n@@ -57,7 +57,7 @@\n # initial loop PRNGKey\n rng0 = prng_key\n # loop position counter.\n- i0 = jnp.array(0)\n+ i0 = jnp.array(-1)\n # per batch-item holding current token in loop.\n token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)\n # per batch-item state bit indicating if sentence has finished.\n@@ -72,7 +72,7 @@\n \"\"\"Sampling loop termination condition.\"\"\"\n (i, _, _, _, ended, _) = state\n # Have we reached max decoding length?\n- not_at_end = (i < max_decode_len)\n+ not_at_end = (i < max_decode_len - 1)\n # Have all sampled sequences reached an end marker?\n all_sequences_ended = jnp.all(ended)\n return not_at_end & (~all_sequences_ended)\n", "issue": "lm1b sampler update is out of bound?\nHi,\r\n\r\n_Disclaimer: I'm not confident if this is some intended behavior._\r\n\r\nI think in the lm1b example, the prediction sampler could write to indices out of bound of the result.\r\nThe particular offending line is: https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L109.\r\n\r\nSince `i <= max_decode_len` according to https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L75, and since `prompt_inputs.shape[1] == max_decode_len`, the last iteration (or two iterations?) of while loop would overwrite to `prompt_inputs[..., max_decode_len - 1]` (due to XLA semantics). \r\n\r\nIt's either a subtle bug (it won't raise cause error), or there's something I'm not understanding about the sampling procedure.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Fast decoding routines for inference from a trained language model.\"\"\"\n\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\n# Constants\n# The default End-of-Sentence token id is 2 (SentencePiece).\nEOS_ID = 2\n\n\ndef temperature_sample(prompt_inputs,\n init_cache,\n tokens_to_logits,\n prng_key,\n temperature=1.0,\n topk=20,\n eos_token=EOS_ID):\n \"\"\"Temperature sampling for language model generation.\n\n Args:\n prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens.\n init_cache: flax attention cache.\n tokens_to_logits: fast autoregressive decoder function taking single token\n slices and cache and returning next-token logits and updated cache.\n prng_key: JAX PRNGKey.\n temperature: float: sampling temperature factor. 
As it approaches\n zero this becomes equivalent to greedy sampling.\n topk: integer: if nonzero only use the top-k logits to sample next token,\n if zero don't use any cutoff and sample from full logits over vocabulary.\n eos_token: int: end-of-sentence token for target vocabulary.\n\n Returns:\n Array of sampled sequences: [batch_size, max_decode_len]\n \"\"\"\n batch_size = prompt_inputs.shape[0]\n max_decode_len = prompt_inputs.shape[1]\n end_marker = jnp.array(eos_token)\n temperature = jnp.array(temperature)\n\n # Initialize sampling loop state.\n # initial loop PRNGKey\n rng0 = prng_key\n # loop position counter.\n i0 = jnp.array(0)\n # per batch-item holding current token in loop.\n token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)\n # per batch-item state bit indicating if sentence has finished.\n ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_)\n # (batch, length) array containing prefix prompt tokens for sampling loop\n # as well as the generated output of newly sampled tokens.\n sequences0 = prompt_inputs\n # Sampling loop state is stored in a simple tuple.\n sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0)\n\n def sampling_loop_cond_fn(state):\n \"\"\"Sampling loop termination condition.\"\"\"\n (i, _, _, _, ended, _) = state\n # Have we reached max decoding length?\n not_at_end = (i < max_decode_len)\n # Have all sampled sequences reached an end marker?\n all_sequences_ended = jnp.all(ended)\n return not_at_end & (~all_sequences_ended)\n\n def sampling_loop_body_fn(state):\n \"\"\"Sampling loop state update.\"\"\"\n i, sequences, cache, cur_token, ended, rng = state\n # Split RNG for sampling.\n rng1, rng2 = random.split(rng)\n # Call fast-decoder model on current tokens to get next-position logits.\n logits, new_cache = tokens_to_logits(cur_token, cache)\n # Sample next token from logits.\n # TODO(levskaya): add top-p \"nucleus\" sampling option.\n if topk:\n # Get top-k logits and their indices, sample within these top-k tokens.\n topk_logits, topk_idxs = lax.top_k(logits, topk)\n topk_token = jnp.expand_dims(random.categorical(\n rng1, topk_logits / temperature).astype(jnp.int32), axis=-1)\n # Return the original indices corresponding to the sampled top-k tokens.\n next_token = jnp.squeeze(\n jnp.take_along_axis(topk_idxs, topk_token, axis=-1), axis=-1)\n else:\n next_token = random.categorical(\n rng1, logits / temperature).astype(jnp.int32)\n # Only use sampled tokens if we're past provided prefix tokens.\n out_of_prompt = (sequences[:, i+1] == 0)\n next_token = (next_token * out_of_prompt +\n sequences[:, i+1] * ~out_of_prompt)\n # If end-marker reached for batch item, only emit padding tokens.\n next_token_or_endpad = (next_token[None] * ~ended)\n ended |= (next_token_or_endpad == end_marker)\n # Add current sampled tokens to recorded sequences.\n new_sequences = lax.dynamic_update_slice(\n sequences, next_token_or_endpad, (0, i+1))\n return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2)\n\n # Run sampling loop and collect final state.\n final_state = lax.while_loop(sampling_loop_cond_fn,\n sampling_loop_body_fn,\n sampling_loop_init_state)\n\n # Pick part of the state corresponding to the sampled sequences.\n final_sequences = final_state[1]\n return final_sequences\n", "path": "examples/lm1b/temperature_sampler.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Fast decoding routines for inference from a trained language model.\"\"\"\n\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\n# Constants\n# The default End-of-Sentence token id is 2 (SentencePiece).\nEOS_ID = 2\n\n\ndef temperature_sample(prompt_inputs,\n init_cache,\n tokens_to_logits,\n prng_key,\n temperature=1.0,\n topk=20,\n eos_token=EOS_ID):\n \"\"\"Temperature sampling for language model generation.\n\n Args:\n prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens.\n init_cache: flax attention cache.\n tokens_to_logits: fast autoregressive decoder function taking single token\n slices and cache and returning next-token logits and updated cache.\n prng_key: JAX PRNGKey.\n temperature: float: sampling temperature factor. As it approaches\n zero this becomes equivalent to greedy sampling.\n topk: integer: if nonzero only use the top-k logits to sample next token,\n if zero don't use any cutoff and sample from full logits over vocabulary.\n eos_token: int: end-of-sentence token for target vocabulary.\n\n Returns:\n Array of sampled sequences: [batch_size, max_decode_len]\n \"\"\"\n batch_size = prompt_inputs.shape[0]\n max_decode_len = prompt_inputs.shape[1]\n end_marker = jnp.array(eos_token)\n temperature = jnp.array(temperature)\n\n # Initialize sampling loop state.\n # initial loop PRNGKey\n rng0 = prng_key\n # loop position counter.\n i0 = jnp.array(-1)\n # per batch-item holding current token in loop.\n token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)\n # per batch-item state bit indicating if sentence has finished.\n ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_)\n # (batch, length) array containing prefix prompt tokens for sampling loop\n # as well as the generated output of newly sampled tokens.\n sequences0 = prompt_inputs\n # Sampling loop state is stored in a simple tuple.\n sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0)\n\n def sampling_loop_cond_fn(state):\n \"\"\"Sampling loop termination condition.\"\"\"\n (i, _, _, _, ended, _) = state\n # Have we reached max decoding length?\n not_at_end = (i < max_decode_len - 1)\n # Have all sampled sequences reached an end marker?\n all_sequences_ended = jnp.all(ended)\n return not_at_end & (~all_sequences_ended)\n\n def sampling_loop_body_fn(state):\n \"\"\"Sampling loop state update.\"\"\"\n i, sequences, cache, cur_token, ended, rng = state\n # Split RNG for sampling.\n rng1, rng2 = random.split(rng)\n # Call fast-decoder model on current tokens to get next-position logits.\n logits, new_cache = tokens_to_logits(cur_token, cache)\n # Sample next token from logits.\n # TODO(levskaya): add top-p \"nucleus\" sampling option.\n if topk:\n # Get top-k logits and their indices, sample within these top-k tokens.\n topk_logits, topk_idxs = lax.top_k(logits, topk)\n topk_token = jnp.expand_dims(random.categorical(\n rng1, topk_logits / temperature).astype(jnp.int32), axis=-1)\n # Return the original indices corresponding to the sampled top-k tokens.\n next_token = jnp.squeeze(\n jnp.take_along_axis(topk_idxs, topk_token, 
axis=-1), axis=-1)\n else:\n next_token = random.categorical(\n rng1, logits / temperature).astype(jnp.int32)\n # Only use sampled tokens if we're past provided prefix tokens.\n out_of_prompt = (sequences[:, i+1] == 0)\n next_token = (next_token * out_of_prompt +\n sequences[:, i+1] * ~out_of_prompt)\n # If end-marker reached for batch item, only emit padding tokens.\n next_token_or_endpad = (next_token[None] * ~ended)\n ended |= (next_token_or_endpad == end_marker)\n # Add current sampled tokens to recorded sequences.\n new_sequences = lax.dynamic_update_slice(\n sequences, next_token_or_endpad, (0, i+1))\n return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2)\n\n # Run sampling loop and collect final state.\n final_state = lax.while_loop(sampling_loop_cond_fn,\n sampling_loop_body_fn,\n sampling_loop_init_state)\n\n # Pick part of the state corresponding to the sampled sequences.\n final_sequences = final_state[1]\n return final_sequences\n", "path": "examples/lm1b/temperature_sampler.py"}]}
1892
254
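For context on the row above: the fix works because of XLA's index-clamping semantics, which the issue statement alludes to ("due to XLA semantics"). The standalone snippet below is illustrative only, not project code; it shows why a write at position `max_decode_len` does not fail but silently lands on the last valid index, which is exactly how the sampler's final iteration could overwrite `sequences[:, max_decode_len - 1]` before the off-by-one was fixed.

```python
# Illustrative only: lax.dynamic_update_slice clamps out-of-range start
# indices so the update always fits, instead of raising an error.
import jax.numpy as jnp
from jax import lax

max_decode_len = 4
sequences = jnp.zeros((1, max_decode_len), dtype=jnp.int32)
next_token = jnp.array([[7]], dtype=jnp.int32)

# The requested start index 4 is out of bounds; XLA clamps it to 3, so
# the token overwrites the last slot rather than raising.
updated = lax.dynamic_update_slice(sequences, next_token, (0, max_decode_len))
print(updated)  # [[0 0 0 7]]
```

Starting the loop counter at -1 and stopping at `max_decode_len - 1`, as the patch does, keeps every `(0, i+1)` write inside the buffer.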
gh_patches_debug_12520
rasdani/github-patches
git_diff
rucio__rucio-1573
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problems with accessing the REST API and the WebUI in the development docker container Motivation ---------- It is not possible to access the REST API under 'https://localhost/proxy' and to access the Web UI under 'https://localhost/ui/' within the docker container build with the Dockerfile under /etc/docker/dev. Modification ------------ A few modifications can be copied from the demo environment. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/rucio/core/lifetime_exception.py` Content: ``` 1 # Copyright European Organization for Nuclear Research (CERN) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # You may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Authors: 9 # - Cedric Serfon, <[email protected]>, 2016-2018 10 11 from re import match 12 from datetime import datetime, timedelta 13 14 from sqlalchemy import or_ 15 from sqlalchemy.exc import IntegrityError 16 from sqlalchemy.orm.exc import NoResultFound 17 18 from rucio.common.exception import ConfigNotFound, RucioException, LifetimeExceptionDuplicate, LifetimeExceptionNotFound, UnsupportedOperation 19 from rucio.common.utils import generate_uuid, str_to_date 20 import rucio.common.policy 21 from rucio.core.config import get 22 from rucio.core.message import add_message 23 from rucio.core.rse import list_rse_attributes 24 25 from rucio.db.sqla import models 26 from rucio.db.sqla.constants import DIDType, LifetimeExceptionsState 27 from rucio.db.sqla.session import transactional_session, stream_session, read_session 28 29 30 @stream_session 31 def list_exceptions(exception_id, states, session=None): 32 """ 33 List exceptions to Lifetime Model. 34 35 :param exception_id: The id of the exception 36 :param states: The states to filter 37 :param session: The database session in use. 38 """ 39 40 state_clause = [] 41 if states: 42 state_clause = [models.LifetimeExceptions.state == state for state in states] 43 44 query = session.query(models.LifetimeExceptions.id, 45 models.LifetimeExceptions.scope, models.LifetimeExceptions.name, 46 models.LifetimeExceptions.did_type, 47 models.LifetimeExceptions.account, 48 models.LifetimeExceptions.pattern, 49 models.LifetimeExceptions.comments, 50 models.LifetimeExceptions.state, 51 models.LifetimeExceptions.expires_at, 52 models.LifetimeExceptions.created_at) 53 if state_clause != []: 54 query = query.filter(or_(*state_clause)) 55 if exception_id: 56 query = query.filter(id=exception_id) 57 58 for exception in query.yield_per(5): 59 yield {'id': exception.id, 'scope': exception.scope, 'name': exception.name, 60 'did_type': exception.did_type, 'account': exception.account, 61 'pattern': exception.pattern, 'comments': exception.comments, 62 'state': exception.state, 'created_at': exception.created_at, 63 'expires_at': exception.expires_at} 64 65 66 @transactional_session 67 def add_exception(dids, account, pattern, comments, expires_at, session=None): 68 """ 69 Add exceptions to Lifetime Model. 70 71 :param dids: The list of dids 72 :param account: The account of the requester. 73 :param pattern: The account. 74 :param comments: The comments associated to the exception. 75 :param expires_at: The expiration date of the exception. 
76 :param session: The database session in use. 77 78 returns: The id of the exception. 79 """ 80 exception_id = generate_uuid() 81 text = 'Account %s requested a lifetime extension for a list of DIDs that can be found below\n' % account 82 reason = comments 83 volume = None 84 lifetime = None 85 if comments.find('||||') > -1: 86 reason, volume = comments.split('||||') 87 text += 'The reason for the extension is "%s"\n' % reason 88 text += 'It represents %s datasets\n' % len(dids) 89 if volume: 90 text += 'The estimated physical volume is %s\n' % volume 91 if expires_at and (isinstance(expires_at, str) or isinstance(expires_at, unicode)): 92 lifetime = str_to_date(expires_at) 93 text += 'The lifetime exception should expires on %s\n' % str(expires_at) 94 elif isinstance(expires_at, datetime): 95 lifetime = expires_at 96 text += 'The lifetime exception should expires on %s\n' % str(expires_at) 97 text += 'Link to approve or reject this request can be found at the end of the mail\n' 98 text += '\n' 99 text += 'DIDTYPE SCOPE NAME\n' 100 text += '\n' 101 truncated_message = False 102 for did in dids: 103 did_type = None 104 if 'did_type' in did: 105 if isinstance(did['did_type'], str) or isinstance(did['did_type'], unicode): 106 did_type = DIDType.from_sym(did['did_type']) 107 else: 108 did_type = did['did_type'] 109 new_exception = models.LifetimeExceptions(id=exception_id, scope=did['scope'], name=did['name'], did_type=did_type, 110 account=account, pattern=pattern, comments=reason, state=LifetimeExceptionsState.WAITING, expires_at=lifetime) 111 if len(text) < 3000: 112 text += '%s %s %s\n' % (str(did_type), did['scope'], did['name']) 113 else: 114 truncated_message = True 115 try: 116 new_exception.save(session=session, flush=False) 117 except IntegrityError as error: 118 if match('.*ORA-00001.*', str(error.args[0]))\ 119 or match('.*IntegrityError.*UNIQUE constraint failed.*', str(error.args[0]))\ 120 or match('.*1062.*Duplicate entry.*for key.*', str(error.args[0]))\ 121 or match('.*sqlite3.IntegrityError.*are not unique.*', error.args[0]): 122 raise LifetimeExceptionDuplicate() 123 raise RucioException(error.args[0]) 124 if truncated_message: 125 text += '...\n' 126 text += 'List too long. Truncated\n' 127 text += '\n' 128 text += 'Approve: https://rucio-ui.cern.ch/lifetime_exception?id=%s&action=approve\n' % str(exception_id) 129 text += 'Deny: https://rucio-ui.cern.ch/lifetime_exception?id=%s&action=deny\n' % str(exception_id) 130 approvers_email = [] 131 try: 132 approvers_email = get('lifetime_model', 'approvers_email', session=session) 133 approvers_email = approvers_email.split(',') # pylint: disable=no-member 134 except ConfigNotFound: 135 approvers_email = [] 136 137 add_message(event_type='email', 138 payload={'body': text, 'to': approvers_email, 139 'subject': '[RUCIO] Request to approve lifetime exception %s' % str(exception_id)}, 140 session=session) 141 return exception_id 142 143 144 @transactional_session 145 def update_exception(exception_id, state, session=None): 146 """ 147 Update exceptions state to Lifetime Model. 148 149 :param exception_id: The id of the exception 150 :param state: The states to filter 151 :param session: The database session in use. 
152 """ 153 query = session.query(models.LifetimeExceptions).filter_by(id=exception_id) 154 try: 155 query.one() 156 except NoResultFound: 157 raise LifetimeExceptionNotFound 158 159 if state in [LifetimeExceptionsState.APPROVED, LifetimeExceptionsState.REJECTED]: 160 query.update({'state': state, 'updated_at': datetime.utcnow()}, synchronize_session=False) 161 else: 162 raise UnsupportedOperation 163 164 165 @read_session 166 def define_eol(scope, name, rses, session=None): 167 """ 168 ATLAS policy for rules on SCRATCHDISK 169 170 :param scope: Scope of the DID. 171 :param name: Name of the DID. 172 :param rses: List of RSEs. 173 :param session: The database session in use. 174 """ 175 policy = rucio.common.policy.get_policy() 176 if policy != 'atlas': 177 return None 178 179 # Check if on ATLAS managed space 180 if [rse for rse in rses if list_rse_attributes(rse=None, rse_id=rse['id'], session=session).get('type') in ['LOCALGROUPDISK', 'LOCALGROUPTAPE', 'GROUPDISK', 'GROUPTAPE']]: 181 return None 182 # Now check the lifetime policy 183 try: 184 did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope, 185 models.DataIdentifier.name == name).one() 186 except NoResultFound: 187 return None 188 policy_dict = rucio.common.policy.get_lifetime_policy() 189 did_type = 'other' 190 if scope.startswith('mc'): 191 did_type = 'mc' 192 elif scope.startswith('data'): 193 did_type = 'data' 194 elif scope.startswith('valid'): 195 did_type = 'valid' 196 else: 197 did_type = 'other' 198 for policy in policy_dict[did_type]: 199 if 'exclude' in policy: 200 to_exclude = False 201 for key in policy['exclude']: 202 meta_key = None 203 if key not in ['datatype', 'project', ]: 204 if key == 'stream': 205 meta_key = 'stream_name' 206 elif key == 'tags': 207 meta_key = 'version' 208 else: 209 meta_key = key 210 values = policy['exclude'][key] 211 for value in values: 212 value = value.replace('%', '.*') 213 if meta_key and did[meta_key] and value and match(value, did[meta_key]): 214 to_exclude = True 215 break 216 if to_exclude: 217 break 218 if to_exclude: 219 continue 220 if 'include' in policy: 221 match_policy = True 222 for key in policy['include']: 223 meta_key = None 224 if key not in ['datatype', 'project', ]: 225 if key == 'stream': 226 meta_key = 'stream_name' 227 elif key == 'tags': 228 meta_key = 'version' 229 else: 230 continue 231 else: 232 meta_key = key 233 values = policy['include'][key] 234 to_keep = False 235 for value in values: 236 value = value.replace('%', '.*') 237 if meta_key and did[meta_key] and value and match(value, did[meta_key]): 238 to_keep = True 239 break 240 match_policy = match_policy and to_keep 241 if not to_keep: 242 match_policy = False 243 break 244 if match_policy: 245 if int(policy['age']) >= 12: 246 years = int(int(policy['age']) / 12) 247 months = int(policy['age']) - years * 12 248 lifetime_value = 365 * years + 30 * months 249 else: 250 lifetime_value = int(policy['age']) * 30 251 if int(policy['extension']) >= 12: 252 years = int(int(policy['extension']) / 12) 253 months = int(policy['extension']) - years * 12 254 extension = 365 * years + 30 * months 255 else: 256 extension = int(policy['extension']) * 30 257 258 default_eol_at = did.created_at + timedelta(days=lifetime_value) 259 eol_at = default_eol_at 260 if did.accessed_at: 261 eol_at = did.accessed_at + timedelta(days=extension) 262 if eol_at < default_eol_at: 263 eol_at = default_eol_at 264 return eol_at 265 return None 266 ``` --- END FILES --- Please first localize the bug based 
on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/rucio/core/lifetime_exception.py b/lib/rucio/core/lifetime_exception.py --- a/lib/rucio/core/lifetime_exception.py +++ b/lib/rucio/core/lifetime_exception.py @@ -7,6 +7,7 @@ # # Authors: # - Cedric Serfon, <[email protected]>, 2016-2018 +# - Dimitrios Christidis, <[email protected]> 2018 from re import match from datetime import datetime, timedelta @@ -152,7 +153,7 @@ """ query = session.query(models.LifetimeExceptions).filter_by(id=exception_id) try: - query.one() + query.first() except NoResultFound: raise LifetimeExceptionNotFound
{"golden_diff": "diff --git a/lib/rucio/core/lifetime_exception.py b/lib/rucio/core/lifetime_exception.py\n--- a/lib/rucio/core/lifetime_exception.py\n+++ b/lib/rucio/core/lifetime_exception.py\n@@ -7,6 +7,7 @@\n #\n # Authors:\n # - Cedric Serfon, <[email protected]>, 2016-2018\n+# - Dimitrios Christidis, <[email protected]> 2018\n \n from re import match\n from datetime import datetime, timedelta\n@@ -152,7 +153,7 @@\n \"\"\"\n query = session.query(models.LifetimeExceptions).filter_by(id=exception_id)\n try:\n- query.one()\n+ query.first()\n except NoResultFound:\n raise LifetimeExceptionNotFound\n", "issue": "Problems with accessing the REST API and the WebUI in the development docker container\nMotivation\r\n----------\r\nIt is not possible to access the REST API under 'https://localhost/proxy' and to access the Web UI under 'https://localhost/ui/' within the docker container build with the Dockerfile under /etc/docker/dev.\r\n\r\nModification\r\n------------\r\nA few modifications can be copied from the demo environment.\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Cedric Serfon, <[email protected]>, 2016-2018\n\nfrom re import match\nfrom datetime import datetime, timedelta\n\nfrom sqlalchemy import or_\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom rucio.common.exception import ConfigNotFound, RucioException, LifetimeExceptionDuplicate, LifetimeExceptionNotFound, UnsupportedOperation\nfrom rucio.common.utils import generate_uuid, str_to_date\nimport rucio.common.policy\nfrom rucio.core.config import get\nfrom rucio.core.message import add_message\nfrom rucio.core.rse import list_rse_attributes\n\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.constants import DIDType, LifetimeExceptionsState\nfrom rucio.db.sqla.session import transactional_session, stream_session, read_session\n\n\n@stream_session\ndef list_exceptions(exception_id, states, session=None):\n \"\"\"\n List exceptions to Lifetime Model.\n\n :param exception_id: The id of the exception\n :param states: The states to filter\n :param session: The database session in use.\n \"\"\"\n\n state_clause = []\n if states:\n state_clause = [models.LifetimeExceptions.state == state for state in states]\n\n query = session.query(models.LifetimeExceptions.id,\n models.LifetimeExceptions.scope, models.LifetimeExceptions.name,\n models.LifetimeExceptions.did_type,\n models.LifetimeExceptions.account,\n models.LifetimeExceptions.pattern,\n models.LifetimeExceptions.comments,\n models.LifetimeExceptions.state,\n models.LifetimeExceptions.expires_at,\n models.LifetimeExceptions.created_at)\n if state_clause != []:\n query = query.filter(or_(*state_clause))\n if exception_id:\n query = query.filter(id=exception_id)\n\n for exception in query.yield_per(5):\n yield {'id': exception.id, 'scope': exception.scope, 'name': exception.name,\n 'did_type': exception.did_type, 'account': exception.account,\n 'pattern': exception.pattern, 'comments': exception.comments,\n 'state': exception.state, 'created_at': exception.created_at,\n 'expires_at': exception.expires_at}\n\n\n@transactional_session\ndef add_exception(dids, account, pattern, comments, expires_at, session=None):\n \"\"\"\n Add exceptions 
to Lifetime Model.\n\n :param dids: The list of dids\n :param account: The account of the requester.\n :param pattern: The account.\n :param comments: The comments associated to the exception.\n :param expires_at: The expiration date of the exception.\n :param session: The database session in use.\n\n returns: The id of the exception.\n \"\"\"\n exception_id = generate_uuid()\n text = 'Account %s requested a lifetime extension for a list of DIDs that can be found below\\n' % account\n reason = comments\n volume = None\n lifetime = None\n if comments.find('||||') > -1:\n reason, volume = comments.split('||||')\n text += 'The reason for the extension is \"%s\"\\n' % reason\n text += 'It represents %s datasets\\n' % len(dids)\n if volume:\n text += 'The estimated physical volume is %s\\n' % volume\n if expires_at and (isinstance(expires_at, str) or isinstance(expires_at, unicode)):\n lifetime = str_to_date(expires_at)\n text += 'The lifetime exception should expires on %s\\n' % str(expires_at)\n elif isinstance(expires_at, datetime):\n lifetime = expires_at\n text += 'The lifetime exception should expires on %s\\n' % str(expires_at)\n text += 'Link to approve or reject this request can be found at the end of the mail\\n'\n text += '\\n'\n text += 'DIDTYPE SCOPE NAME\\n'\n text += '\\n'\n truncated_message = False\n for did in dids:\n did_type = None\n if 'did_type' in did:\n if isinstance(did['did_type'], str) or isinstance(did['did_type'], unicode):\n did_type = DIDType.from_sym(did['did_type'])\n else:\n did_type = did['did_type']\n new_exception = models.LifetimeExceptions(id=exception_id, scope=did['scope'], name=did['name'], did_type=did_type,\n account=account, pattern=pattern, comments=reason, state=LifetimeExceptionsState.WAITING, expires_at=lifetime)\n if len(text) < 3000:\n text += '%s %s %s\\n' % (str(did_type), did['scope'], did['name'])\n else:\n truncated_message = True\n try:\n new_exception.save(session=session, flush=False)\n except IntegrityError as error:\n if match('.*ORA-00001.*', str(error.args[0]))\\\n or match('.*IntegrityError.*UNIQUE constraint failed.*', str(error.args[0]))\\\n or match('.*1062.*Duplicate entry.*for key.*', str(error.args[0]))\\\n or match('.*sqlite3.IntegrityError.*are not unique.*', error.args[0]):\n raise LifetimeExceptionDuplicate()\n raise RucioException(error.args[0])\n if truncated_message:\n text += '...\\n'\n text += 'List too long. 
Truncated\\n'\n text += '\\n'\n text += 'Approve: https://rucio-ui.cern.ch/lifetime_exception?id=%s&action=approve\\n' % str(exception_id)\n text += 'Deny: https://rucio-ui.cern.ch/lifetime_exception?id=%s&action=deny\\n' % str(exception_id)\n approvers_email = []\n try:\n approvers_email = get('lifetime_model', 'approvers_email', session=session)\n approvers_email = approvers_email.split(',') # pylint: disable=no-member\n except ConfigNotFound:\n approvers_email = []\n\n add_message(event_type='email',\n payload={'body': text, 'to': approvers_email,\n 'subject': '[RUCIO] Request to approve lifetime exception %s' % str(exception_id)},\n session=session)\n return exception_id\n\n\n@transactional_session\ndef update_exception(exception_id, state, session=None):\n \"\"\"\n Update exceptions state to Lifetime Model.\n\n :param exception_id: The id of the exception\n :param state: The states to filter\n :param session: The database session in use.\n \"\"\"\n query = session.query(models.LifetimeExceptions).filter_by(id=exception_id)\n try:\n query.one()\n except NoResultFound:\n raise LifetimeExceptionNotFound\n\n if state in [LifetimeExceptionsState.APPROVED, LifetimeExceptionsState.REJECTED]:\n query.update({'state': state, 'updated_at': datetime.utcnow()}, synchronize_session=False)\n else:\n raise UnsupportedOperation\n\n\n@read_session\ndef define_eol(scope, name, rses, session=None):\n \"\"\"\n ATLAS policy for rules on SCRATCHDISK\n\n :param scope: Scope of the DID.\n :param name: Name of the DID.\n :param rses: List of RSEs.\n :param session: The database session in use.\n \"\"\"\n policy = rucio.common.policy.get_policy()\n if policy != 'atlas':\n return None\n\n # Check if on ATLAS managed space\n if [rse for rse in rses if list_rse_attributes(rse=None, rse_id=rse['id'], session=session).get('type') in ['LOCALGROUPDISK', 'LOCALGROUPTAPE', 'GROUPDISK', 'GROUPTAPE']]:\n return None\n # Now check the lifetime policy\n try:\n did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope,\n models.DataIdentifier.name == name).one()\n except NoResultFound:\n return None\n policy_dict = rucio.common.policy.get_lifetime_policy()\n did_type = 'other'\n if scope.startswith('mc'):\n did_type = 'mc'\n elif scope.startswith('data'):\n did_type = 'data'\n elif scope.startswith('valid'):\n did_type = 'valid'\n else:\n did_type = 'other'\n for policy in policy_dict[did_type]:\n if 'exclude' in policy:\n to_exclude = False\n for key in policy['exclude']:\n meta_key = None\n if key not in ['datatype', 'project', ]:\n if key == 'stream':\n meta_key = 'stream_name'\n elif key == 'tags':\n meta_key = 'version'\n else:\n meta_key = key\n values = policy['exclude'][key]\n for value in values:\n value = value.replace('%', '.*')\n if meta_key and did[meta_key] and value and match(value, did[meta_key]):\n to_exclude = True\n break\n if to_exclude:\n break\n if to_exclude:\n continue\n if 'include' in policy:\n match_policy = True\n for key in policy['include']:\n meta_key = None\n if key not in ['datatype', 'project', ]:\n if key == 'stream':\n meta_key = 'stream_name'\n elif key == 'tags':\n meta_key = 'version'\n else:\n continue\n else:\n meta_key = key\n values = policy['include'][key]\n to_keep = False\n for value in values:\n value = value.replace('%', '.*')\n if meta_key and did[meta_key] and value and match(value, did[meta_key]):\n to_keep = True\n break\n match_policy = match_policy and to_keep\n if not to_keep:\n match_policy = False\n break\n if match_policy:\n if 
int(policy['age']) >= 12:\n years = int(int(policy['age']) / 12)\n months = int(policy['age']) - years * 12\n lifetime_value = 365 * years + 30 * months\n else:\n lifetime_value = int(policy['age']) * 30\n if int(policy['extension']) >= 12:\n years = int(int(policy['extension']) / 12)\n months = int(policy['extension']) - years * 12\n extension = 365 * years + 30 * months\n else:\n extension = int(policy['extension']) * 30\n\n default_eol_at = did.created_at + timedelta(days=lifetime_value)\n eol_at = default_eol_at\n if did.accessed_at:\n eol_at = did.accessed_at + timedelta(days=extension)\n if eol_at < default_eol_at:\n eol_at = default_eol_at\n return eol_at\n return None\n", "path": "lib/rucio/core/lifetime_exception.py"}], "after_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Cedric Serfon, <[email protected]>, 2016-2018\n# - Dimitrios Christidis, <[email protected]> 2018\n\nfrom re import match\nfrom datetime import datetime, timedelta\n\nfrom sqlalchemy import or_\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom rucio.common.exception import ConfigNotFound, RucioException, LifetimeExceptionDuplicate, LifetimeExceptionNotFound, UnsupportedOperation\nfrom rucio.common.utils import generate_uuid, str_to_date\nimport rucio.common.policy\nfrom rucio.core.config import get\nfrom rucio.core.message import add_message\nfrom rucio.core.rse import list_rse_attributes\n\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.constants import DIDType, LifetimeExceptionsState\nfrom rucio.db.sqla.session import transactional_session, stream_session, read_session\n\n\n@stream_session\ndef list_exceptions(exception_id, states, session=None):\n \"\"\"\n List exceptions to Lifetime Model.\n\n :param exception_id: The id of the exception\n :param states: The states to filter\n :param session: The database session in use.\n \"\"\"\n\n state_clause = []\n if states:\n state_clause = [models.LifetimeExceptions.state == state for state in states]\n\n query = session.query(models.LifetimeExceptions.id,\n models.LifetimeExceptions.scope, models.LifetimeExceptions.name,\n models.LifetimeExceptions.did_type,\n models.LifetimeExceptions.account,\n models.LifetimeExceptions.pattern,\n models.LifetimeExceptions.comments,\n models.LifetimeExceptions.state,\n models.LifetimeExceptions.expires_at,\n models.LifetimeExceptions.created_at)\n if state_clause != []:\n query = query.filter(or_(*state_clause))\n if exception_id:\n query = query.filter(id=exception_id)\n\n for exception in query.yield_per(5):\n yield {'id': exception.id, 'scope': exception.scope, 'name': exception.name,\n 'did_type': exception.did_type, 'account': exception.account,\n 'pattern': exception.pattern, 'comments': exception.comments,\n 'state': exception.state, 'created_at': exception.created_at,\n 'expires_at': exception.expires_at}\n\n\n@transactional_session\ndef add_exception(dids, account, pattern, comments, expires_at, session=None):\n \"\"\"\n Add exceptions to Lifetime Model.\n\n :param dids: The list of dids\n :param account: The account of the requester.\n :param pattern: The account.\n :param comments: The comments associated to the exception.\n :param expires_at: The expiration date of the exception.\n 
:param session: The database session in use.\n\n returns: The id of the exception.\n \"\"\"\n exception_id = generate_uuid()\n text = 'Account %s requested a lifetime extension for a list of DIDs that can be found below\\n' % account\n reason = comments\n volume = None\n lifetime = None\n if comments.find('||||') > -1:\n reason, volume = comments.split('||||')\n text += 'The reason for the extension is \"%s\"\\n' % reason\n text += 'It represents %s datasets\\n' % len(dids)\n if volume:\n text += 'The estimated physical volume is %s\\n' % volume\n if expires_at and (isinstance(expires_at, str) or isinstance(expires_at, unicode)):\n lifetime = str_to_date(expires_at)\n text += 'The lifetime exception should expires on %s\\n' % str(expires_at)\n elif isinstance(expires_at, datetime):\n lifetime = expires_at\n text += 'The lifetime exception should expires on %s\\n' % str(expires_at)\n text += 'Link to approve or reject this request can be found at the end of the mail\\n'\n text += '\\n'\n text += 'DIDTYPE SCOPE NAME\\n'\n text += '\\n'\n truncated_message = False\n for did in dids:\n did_type = None\n if 'did_type' in did:\n if isinstance(did['did_type'], str) or isinstance(did['did_type'], unicode):\n did_type = DIDType.from_sym(did['did_type'])\n else:\n did_type = did['did_type']\n new_exception = models.LifetimeExceptions(id=exception_id, scope=did['scope'], name=did['name'], did_type=did_type,\n account=account, pattern=pattern, comments=reason, state=LifetimeExceptionsState.WAITING, expires_at=lifetime)\n if len(text) < 3000:\n text += '%s %s %s\\n' % (str(did_type), did['scope'], did['name'])\n else:\n truncated_message = True\n try:\n new_exception.save(session=session, flush=False)\n except IntegrityError as error:\n if match('.*ORA-00001.*', str(error.args[0]))\\\n or match('.*IntegrityError.*UNIQUE constraint failed.*', str(error.args[0]))\\\n or match('.*1062.*Duplicate entry.*for key.*', str(error.args[0]))\\\n or match('.*sqlite3.IntegrityError.*are not unique.*', error.args[0]):\n raise LifetimeExceptionDuplicate()\n raise RucioException(error.args[0])\n if truncated_message:\n text += '...\\n'\n text += 'List too long. 
Truncated\\n'\n text += '\\n'\n text += 'Approve: https://rucio-ui.cern.ch/lifetime_exception?id=%s&action=approve\\n' % str(exception_id)\n text += 'Deny: https://rucio-ui.cern.ch/lifetime_exception?id=%s&action=deny\\n' % str(exception_id)\n approvers_email = []\n try:\n approvers_email = get('lifetime_model', 'approvers_email', session=session)\n approvers_email = approvers_email.split(',') # pylint: disable=no-member\n except ConfigNotFound:\n approvers_email = []\n\n add_message(event_type='email',\n payload={'body': text, 'to': approvers_email,\n 'subject': '[RUCIO] Request to approve lifetime exception %s' % str(exception_id)},\n session=session)\n return exception_id\n\n\n@transactional_session\ndef update_exception(exception_id, state, session=None):\n \"\"\"\n Update exceptions state to Lifetime Model.\n\n :param exception_id: The id of the exception\n :param state: The states to filter\n :param session: The database session in use.\n \"\"\"\n query = session.query(models.LifetimeExceptions).filter_by(id=exception_id)\n try:\n query.first()\n except NoResultFound:\n raise LifetimeExceptionNotFound\n\n if state in [LifetimeExceptionsState.APPROVED, LifetimeExceptionsState.REJECTED]:\n query.update({'state': state, 'updated_at': datetime.utcnow()}, synchronize_session=False)\n else:\n raise UnsupportedOperation\n\n\n@read_session\ndef define_eol(scope, name, rses, session=None):\n \"\"\"\n ATLAS policy for rules on SCRATCHDISK\n\n :param scope: Scope of the DID.\n :param name: Name of the DID.\n :param rses: List of RSEs.\n :param session: The database session in use.\n \"\"\"\n policy = rucio.common.policy.get_policy()\n if policy != 'atlas':\n return None\n\n # Check if on ATLAS managed space\n if [rse for rse in rses if list_rse_attributes(rse=None, rse_id=rse['id'], session=session).get('type') in ['LOCALGROUPDISK', 'LOCALGROUPTAPE', 'GROUPDISK', 'GROUPTAPE']]:\n return None\n # Now check the lifetime policy\n try:\n did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope,\n models.DataIdentifier.name == name).one()\n except NoResultFound:\n return None\n policy_dict = rucio.common.policy.get_lifetime_policy()\n did_type = 'other'\n if scope.startswith('mc'):\n did_type = 'mc'\n elif scope.startswith('data'):\n did_type = 'data'\n elif scope.startswith('valid'):\n did_type = 'valid'\n else:\n did_type = 'other'\n for policy in policy_dict[did_type]:\n if 'exclude' in policy:\n to_exclude = False\n for key in policy['exclude']:\n meta_key = None\n if key not in ['datatype', 'project', ]:\n if key == 'stream':\n meta_key = 'stream_name'\n elif key == 'tags':\n meta_key = 'version'\n else:\n meta_key = key\n values = policy['exclude'][key]\n for value in values:\n value = value.replace('%', '.*')\n if meta_key and did[meta_key] and value and match(value, did[meta_key]):\n to_exclude = True\n break\n if to_exclude:\n break\n if to_exclude:\n continue\n if 'include' in policy:\n match_policy = True\n for key in policy['include']:\n meta_key = None\n if key not in ['datatype', 'project', ]:\n if key == 'stream':\n meta_key = 'stream_name'\n elif key == 'tags':\n meta_key = 'version'\n else:\n continue\n else:\n meta_key = key\n values = policy['include'][key]\n to_keep = False\n for value in values:\n value = value.replace('%', '.*')\n if meta_key and did[meta_key] and value and match(value, did[meta_key]):\n to_keep = True\n break\n match_policy = match_policy and to_keep\n if not to_keep:\n match_policy = False\n break\n if match_policy:\n if 
int(policy['age']) >= 12:\n years = int(int(policy['age']) / 12)\n months = int(policy['age']) - years * 12\n lifetime_value = 365 * years + 30 * months\n else:\n lifetime_value = int(policy['age']) * 30\n if int(policy['extension']) >= 12:\n years = int(int(policy['extension']) / 12)\n months = int(policy['extension']) - years * 12\n extension = 365 * years + 30 * months\n else:\n extension = int(policy['extension']) * 30\n\n default_eol_at = did.created_at + timedelta(days=lifetime_value)\n eol_at = default_eol_at\n if did.accessed_at:\n eol_at = did.accessed_at + timedelta(days=extension)\n if eol_at < default_eol_at:\n eol_at = default_eol_at\n return eol_at\n return None\n", "path": "lib/rucio/core/lifetime_exception.py"}]}
3489
186
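The `define_eol` policy math in the row above converts a policy age given in months into days, counting whole years as 365 days and leftover months as 30 days each. A minimal, self-contained sketch of that conversion (the function name is ours, not rucio's):

```python
def policy_months_to_days(age_months: int) -> int:
    # Same arithmetic as define_eol above: whole years count as 365 days,
    # any remaining months as 30 days each. For ages under 12 months this
    # reduces to 30 * age_months, matching the else-branch in the source.
    years, months = divmod(int(age_months), 12)
    return 365 * years + 30 * months

print(policy_months_to_days(6))   # 180
print(policy_months_to_days(18))  # 545 (365 + 6 * 30)
```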
gh_patches_debug_4479
rasdani/github-patches
git_diff
freedomofpress__securedrop-3709
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [functional testing] Fix staging CI job on tbb-0.9.0 We removed the application/functional test run from the staging environment in #3697. We should also update the testinfra test references and remove the application test run from CI, otherwise we get a few testinfra test failures due to pip deps, and an error when we attempt to run the application tests in CI: ``` TASK [Run application tests] *************************************************** Friday 10 August 2018 19:28:17 +0000 (0:00:00.037) 0:01:08.223 ********* fatal: [app-staging]: FAILED! => {"changed": true, "msg": "non-zero return code", "rc": 127, "stderr": "Shared connection to 52.36.194.59 closed.\r\n", "stdout": "/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found\r\n", "stdout_lines": ["/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found"]} ...ignoring ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/create-dev-data.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import datetime 5 import os 6 import sys 7 import argparse 8 from sqlalchemy.exc import IntegrityError 9 10 os.environ["SECUREDROP_ENV"] = "dev" # noqa 11 import journalist_app 12 from sdconfig import config 13 from db import db 14 from models import Journalist, Source, Submission 15 16 17 def add_test_user(username, password, otp_secret, is_admin=False): 18 context = journalist_app.create_app(config).app_context() 19 context.push() 20 21 try: 22 user = Journalist(username=username, 23 password=password, 24 is_admin=is_admin) 25 user.otp_secret = otp_secret 26 db.session.add(user) 27 db.session.commit() 28 print('Test user successfully added: ' 29 'username={}, password={}, otp_secret={}, is_admin={}' 30 ''.format(username, password, otp_secret, is_admin)) 31 except IntegrityError: 32 print("Test user already added") 33 db.session.rollback() 34 35 context.pop() 36 37 38 def create_source_and_submissions(num_submissions=2): 39 app = journalist_app.create_app(config) 40 41 with app.app_context(): 42 # Store source in database 43 codename = app.crypto_util.genrandomid() 44 filesystem_id = app.crypto_util.hash_codename(codename) 45 journalist_designation = app.crypto_util.display_id() 46 source = Source(filesystem_id, journalist_designation) 47 source.pending = False 48 db.session.add(source) 49 db.session.commit() 50 51 # Generate submissions directory and generate source key 52 os.mkdir(app.storage.path(source.filesystem_id)) 53 app.crypto_util.genkeypair(source.filesystem_id, codename) 54 55 # Generate some test submissions 56 for _ in range(num_submissions): 57 source.interaction_count += 1 58 fpath = app.storage.save_message_submission( 59 source.filesystem_id, 60 source.interaction_count, 61 source.journalist_filename, 62 'test submission!' 
63 ) 64 source.last_updated = datetime.datetime.utcnow() 65 submission = Submission(source, fpath) 66 db.session.add(submission) 67 68 db.session.commit() 69 print("Test source '{}' added with {} submissions".format( 70 journalist_designation, num_submissions) 71 ) 72 73 74 if __name__ == "__main__": # pragma: no cover 75 # Add two test users 76 test_password = "correct horse battery staple profanity oil chewy" 77 test_otp_secret = "JHCOGO7VCER3EJ4L" 78 79 parser = argparse.ArgumentParser() 80 parser.add_argument("--staging", help="Adding user for staging tests.", 81 action="store_true") 82 args = parser.parse_args() 83 add_test_user("journalist", 84 test_password, 85 test_otp_secret, 86 is_admin=True) 87 88 # If staging, we only need the journalist user (admin) 89 if args.staging: 90 sys.exit(0) 91 92 add_test_user("dellsberg", 93 test_password, 94 test_otp_secret, 95 is_admin=False) 96 97 # Add test sources and submissions 98 num_sources = 2 99 for _ in range(num_sources): 100 create_source_and_submissions() 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/create-dev-data.py b/securedrop/create-dev-data.py --- a/securedrop/create-dev-data.py +++ b/securedrop/create-dev-data.py @@ -78,7 +78,7 @@ parser = argparse.ArgumentParser() parser.add_argument("--staging", help="Adding user for staging tests.", - action="store_true") + action="store_true") args = parser.parse_args() add_test_user("journalist", test_password,
{"golden_diff": "diff --git a/securedrop/create-dev-data.py b/securedrop/create-dev-data.py\n--- a/securedrop/create-dev-data.py\n+++ b/securedrop/create-dev-data.py\n@@ -78,7 +78,7 @@\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--staging\", help=\"Adding user for staging tests.\",\n- action=\"store_true\")\n+ action=\"store_true\")\n args = parser.parse_args()\n add_test_user(\"journalist\",\n test_password,\n", "issue": "[functional testing] Fix staging CI job on tbb-0.9.0\nWe removed the application/functional test run from the staging environment in #3697. We should also update the testinfra test references and remove the application test run from CI, otherwise we get a few testinfra test failures due to pip deps, and an error when we attempt to run the application tests in CI: \r\n\r\n```\r\nTASK [Run application tests] ***************************************************\r\n Friday 10 August 2018 19:28:17 +0000 (0:00:00.037) 0:01:08.223 *********\r\n fatal: [app-staging]: FAILED! => {\"changed\": true, \"msg\": \"non-zero return code\", \"rc\": 127, \"stderr\": \"Shared connection to 52.36.194.59 closed.\\r\\n\", \"stdout\": \"/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found\\r\\n\", \"stdout_lines\": [\"/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found\"]}\r\n ...ignoring\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport os\nimport sys\nimport argparse\nfrom sqlalchemy.exc import IntegrityError\n\nos.environ[\"SECUREDROP_ENV\"] = \"dev\" # noqa\nimport journalist_app\nfrom sdconfig import config\nfrom db import db\nfrom models import Journalist, Source, Submission\n\n\ndef add_test_user(username, password, otp_secret, is_admin=False):\n context = journalist_app.create_app(config).app_context()\n context.push()\n\n try:\n user = Journalist(username=username,\n password=password,\n is_admin=is_admin)\n user.otp_secret = otp_secret\n db.session.add(user)\n db.session.commit()\n print('Test user successfully added: '\n 'username={}, password={}, otp_secret={}, is_admin={}'\n ''.format(username, password, otp_secret, is_admin))\n except IntegrityError:\n print(\"Test user already added\")\n db.session.rollback()\n\n context.pop()\n\n\ndef create_source_and_submissions(num_submissions=2):\n app = journalist_app.create_app(config)\n\n with app.app_context():\n # Store source in database\n codename = app.crypto_util.genrandomid()\n filesystem_id = app.crypto_util.hash_codename(codename)\n journalist_designation = app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n source.pending = False\n db.session.add(source)\n db.session.commit()\n\n # Generate submissions directory and generate source key\n os.mkdir(app.storage.path(source.filesystem_id))\n app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n # Generate some test submissions\n for _ in range(num_submissions):\n source.interaction_count += 1\n fpath = app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n 'test submission!'\n )\n source.last_updated = datetime.datetime.utcnow()\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n db.session.commit()\n print(\"Test source '{}' added with {} submissions\".format(\n journalist_designation, num_submissions)\n )\n\n\nif __name__ == \"__main__\": # 
pragma: no cover\n # Add two test users\n test_password = \"correct horse battery staple profanity oil chewy\"\n test_otp_secret = \"JHCOGO7VCER3EJ4L\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--staging\", help=\"Adding user for staging tests.\",\n action=\"store_true\")\n args = parser.parse_args()\n add_test_user(\"journalist\",\n test_password,\n test_otp_secret,\n is_admin=True)\n\n # If staging, we only need the journalist user (admin)\n if args.staging:\n sys.exit(0)\n\n add_test_user(\"dellsberg\",\n test_password,\n test_otp_secret,\n is_admin=False)\n\n # Add test sources and submissions\n num_sources = 2\n for _ in range(num_sources):\n create_source_and_submissions()\n", "path": "securedrop/create-dev-data.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport os\nimport sys\nimport argparse\nfrom sqlalchemy.exc import IntegrityError\n\nos.environ[\"SECUREDROP_ENV\"] = \"dev\" # noqa\nimport journalist_app\nfrom sdconfig import config\nfrom db import db\nfrom models import Journalist, Source, Submission\n\n\ndef add_test_user(username, password, otp_secret, is_admin=False):\n context = journalist_app.create_app(config).app_context()\n context.push()\n\n try:\n user = Journalist(username=username,\n password=password,\n is_admin=is_admin)\n user.otp_secret = otp_secret\n db.session.add(user)\n db.session.commit()\n print('Test user successfully added: '\n 'username={}, password={}, otp_secret={}, is_admin={}'\n ''.format(username, password, otp_secret, is_admin))\n except IntegrityError:\n print(\"Test user already added\")\n db.session.rollback()\n\n context.pop()\n\n\ndef create_source_and_submissions(num_submissions=2):\n app = journalist_app.create_app(config)\n\n with app.app_context():\n # Store source in database\n codename = app.crypto_util.genrandomid()\n filesystem_id = app.crypto_util.hash_codename(codename)\n journalist_designation = app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n source.pending = False\n db.session.add(source)\n db.session.commit()\n\n # Generate submissions directory and generate source key\n os.mkdir(app.storage.path(source.filesystem_id))\n app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n # Generate some test submissions\n for _ in range(num_submissions):\n source.interaction_count += 1\n fpath = app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n 'test submission!'\n )\n source.last_updated = datetime.datetime.utcnow()\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n db.session.commit()\n print(\"Test source '{}' added with {} submissions\".format(\n journalist_designation, num_submissions)\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n # Add two test users\n test_password = \"correct horse battery staple profanity oil chewy\"\n test_otp_secret = \"JHCOGO7VCER3EJ4L\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--staging\", help=\"Adding user for staging tests.\",\n action=\"store_true\")\n args = parser.parse_args()\n add_test_user(\"journalist\",\n test_password,\n test_otp_secret,\n is_admin=True)\n\n # If staging, we only need the journalist user (admin)\n if args.staging:\n sys.exit(0)\n\n add_test_user(\"dellsberg\",\n test_password,\n test_otp_secret,\n is_admin=False)\n\n # Add test sources and submissions\n num_sources = 2\n for _ in range(num_sources):\n 
create_source_and_submissions()\n", "path": "securedrop/create-dev-data.py"}]}
1458
108
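The golden diff in this row is purely cosmetic: it re-aligns the `action="store_true"` continuation line with the opening parenthesis, per PEP 8. A minimal sketch of the corrected argparse call in isolation (the empty argv is ours, so the snippet runs anywhere):

```python
import argparse

parser = argparse.ArgumentParser()
# Continuation arguments are hang-indented to the opening parenthesis,
# which is the alignment the patch restores.
parser.add_argument("--staging", help="Adding user for staging tests.",
                    action="store_true")
args = parser.parse_args([])  # empty argv so the sketch runs anywhere
print(args.staging)  # False unless --staging is passed
```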
gh_patches_debug_3856
rasdani/github-patches
git_diff
googleapis__google-auth-library-python-385
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docs: compute_engine.credentials doesn't render note for service_account_email On this doc item: https://google-auth.readthedocs.io/en/latest/reference/google.auth.compute_engine.credentials.html#google.auth.compute_engine.credentials.Credentials.service_account_email The rendered docs say simply `The service account email.` However, the [docstring](https://google-auth.readthedocs.io/en/latest/_modules/google/auth/compute_engine/credentials.html#Credentials.refresh) has more information: ``` """The service account email. .. note: This is not guaranteed to be set until :meth`refresh` has been called. """ ``` Why is this important note not rendered? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/auth/compute_engine/credentials.py` Content: ``` 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Google Compute Engine credentials. 16 17 This module provides authentication for application running on Google Compute 18 Engine using the Compute Engine metadata server. 19 20 """ 21 22 import datetime 23 24 import six 25 26 from google.auth import _helpers 27 from google.auth import credentials 28 from google.auth import exceptions 29 from google.auth import iam 30 from google.auth import jwt 31 from google.auth.compute_engine import _metadata 32 from google.oauth2 import _client 33 34 35 class Credentials(credentials.ReadOnlyScoped, credentials.Credentials): 36 """Compute Engine Credentials. 37 38 These credentials use the Google Compute Engine metadata server to obtain 39 OAuth 2.0 access tokens associated with the instance's service account. 40 41 For more information about Compute Engine authentication, including how 42 to configure scopes, see the `Compute Engine authentication 43 documentation`_. 44 45 .. note:: Compute Engine instances can be created with scopes and therefore 46 these credentials are considered to be 'scoped'. However, you can 47 not use :meth:`~google.auth.credentials.ScopedCredentials.with_scopes` 48 because it is not possible to change the scopes that the instance 49 has. Also note that 50 :meth:`~google.auth.credentials.ScopedCredentials.has_scopes` will not 51 work until the credentials have been refreshed. 52 53 .. _Compute Engine authentication documentation: 54 https://cloud.google.com/compute/docs/authentication#using 55 """ 56 57 def __init__(self, service_account_email="default"): 58 """ 59 Args: 60 service_account_email (str): The service account email to use, or 61 'default'. A Compute Engine instance may have multiple service 62 accounts. 63 """ 64 super(Credentials, self).__init__() 65 self._service_account_email = service_account_email 66 67 def _retrieve_info(self, request): 68 """Retrieve information about the service account. 69 70 Updates the scopes and retrieves the full service account email. 
71 72 Args: 73 request (google.auth.transport.Request): The object used to make 74 HTTP requests. 75 """ 76 info = _metadata.get_service_account_info( 77 request, service_account=self._service_account_email 78 ) 79 80 self._service_account_email = info["email"] 81 self._scopes = info["scopes"] 82 83 def refresh(self, request): 84 """Refresh the access token and scopes. 85 86 Args: 87 request (google.auth.transport.Request): The object used to make 88 HTTP requests. 89 90 Raises: 91 google.auth.exceptions.RefreshError: If the Compute Engine metadata 92 service can't be reached if if the instance has not 93 credentials. 94 """ 95 try: 96 self._retrieve_info(request) 97 self.token, self.expiry = _metadata.get_service_account_token( 98 request, service_account=self._service_account_email 99 ) 100 except exceptions.TransportError as caught_exc: 101 new_exc = exceptions.RefreshError(caught_exc) 102 six.raise_from(new_exc, caught_exc) 103 104 @property 105 def service_account_email(self): 106 """The service account email. 107 108 .. note: This is not guaranteed to be set until :meth`refresh` has been 109 called. 110 """ 111 return self._service_account_email 112 113 @property 114 def requires_scopes(self): 115 """False: Compute Engine credentials can not be scoped.""" 116 return False 117 118 119 _DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds 120 _DEFAULT_TOKEN_URI = "https://www.googleapis.com/oauth2/v4/token" 121 122 123 class IDTokenCredentials(credentials.Credentials, credentials.Signing): 124 """Open ID Connect ID Token-based service account credentials. 125 126 These credentials relies on the default service account of a GCE instance. 127 128 In order for this to work, the GCE instance must have been started with 129 a service account that has access to the IAM Cloud API. 130 """ 131 132 def __init__( 133 self, 134 request, 135 target_audience, 136 token_uri=_DEFAULT_TOKEN_URI, 137 additional_claims=None, 138 service_account_email=None, 139 ): 140 """ 141 Args: 142 request (google.auth.transport.Request): The object used to make 143 HTTP requests. 144 target_audience (str): The intended audience for these credentials, 145 used when requesting the ID Token. The ID Token's ``aud`` claim 146 will be set to this string. 147 token_uri (str): The OAuth 2.0 Token URI. 148 additional_claims (Mapping[str, str]): Any additional claims for 149 the JWT assertion used in the authorization grant. 150 service_account_email (str): Optional explicit service account to 151 use to sign JWT tokens. 152 By default, this is the default GCE service account. 153 """ 154 super(IDTokenCredentials, self).__init__() 155 156 if service_account_email is None: 157 sa_info = _metadata.get_service_account_info(request) 158 service_account_email = sa_info["email"] 159 self._service_account_email = service_account_email 160 161 self._signer = iam.Signer( 162 request=request, 163 credentials=Credentials(), 164 service_account_email=service_account_email, 165 ) 166 167 self._token_uri = token_uri 168 self._target_audience = target_audience 169 170 if additional_claims is not None: 171 self._additional_claims = additional_claims 172 else: 173 self._additional_claims = {} 174 175 def with_target_audience(self, target_audience): 176 """Create a copy of these credentials with the specified target 177 audience. 178 Args: 179 target_audience (str): The intended audience for these credentials, 180 used when requesting the ID Token. 
181 Returns: 182 google.auth.service_account.IDTokenCredentials: A new credentials 183 instance. 184 """ 185 return self.__class__( 186 self._signer, 187 service_account_email=self._service_account_email, 188 token_uri=self._token_uri, 189 target_audience=target_audience, 190 additional_claims=self._additional_claims.copy(), 191 ) 192 193 def _make_authorization_grant_assertion(self): 194 """Create the OAuth 2.0 assertion. 195 This assertion is used during the OAuth 2.0 grant to acquire an 196 ID token. 197 Returns: 198 bytes: The authorization grant assertion. 199 """ 200 now = _helpers.utcnow() 201 lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS) 202 expiry = now + lifetime 203 204 payload = { 205 "iat": _helpers.datetime_to_secs(now), 206 "exp": _helpers.datetime_to_secs(expiry), 207 # The issuer must be the service account email. 208 "iss": self.service_account_email, 209 # The audience must be the auth token endpoint's URI 210 "aud": self._token_uri, 211 # The target audience specifies which service the ID token is 212 # intended for. 213 "target_audience": self._target_audience, 214 } 215 216 payload.update(self._additional_claims) 217 218 token = jwt.encode(self._signer, payload) 219 220 return token 221 222 @_helpers.copy_docstring(credentials.Credentials) 223 def refresh(self, request): 224 assertion = self._make_authorization_grant_assertion() 225 access_token, expiry, _ = _client.id_token_jwt_grant( 226 request, self._token_uri, assertion 227 ) 228 self.token = access_token 229 self.expiry = expiry 230 231 @property 232 @_helpers.copy_docstring(credentials.Signing) 233 def signer(self): 234 return self._signer 235 236 @_helpers.copy_docstring(credentials.Signing) 237 def sign_bytes(self, message): 238 return self._signer.sign(message) 239 240 @property 241 def service_account_email(self): 242 """The service account email.""" 243 return self._service_account_email 244 245 @property 246 def signer_email(self): 247 return self._service_account_email 248 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/auth/compute_engine/credentials.py b/google/auth/compute_engine/credentials.py --- a/google/auth/compute_engine/credentials.py +++ b/google/auth/compute_engine/credentials.py @@ -105,7 +105,7 @@ def service_account_email(self): """The service account email. - .. note: This is not guaranteed to be set until :meth`refresh` has been + .. note:: This is not guaranteed to be set until :meth:`refresh` has been called. """ return self._service_account_email
{"golden_diff": "diff --git a/google/auth/compute_engine/credentials.py b/google/auth/compute_engine/credentials.py\n--- a/google/auth/compute_engine/credentials.py\n+++ b/google/auth/compute_engine/credentials.py\n@@ -105,7 +105,7 @@\n def service_account_email(self):\n \"\"\"The service account email.\n \n- .. note: This is not guaranteed to be set until :meth`refresh` has been\n+ .. note:: This is not guaranteed to be set until :meth:`refresh` has been\n called.\n \"\"\"\n return self._service_account_email\n", "issue": "Docs: compute_engine.credentials doesn't render note for service_account_email\nOn this doc item: https://google-auth.readthedocs.io/en/latest/reference/google.auth.compute_engine.credentials.html#google.auth.compute_engine.credentials.Credentials.service_account_email\r\n\r\nThe rendered docs say simply `The service account email.`\r\n\r\nHowever, the [docstring](https://google-auth.readthedocs.io/en/latest/_modules/google/auth/compute_engine/credentials.html#Credentials.refresh) has more information:\r\n\r\n```\r\n \"\"\"The service account email.\r\n\r\n .. note: This is not guaranteed to be set until :meth`refresh` has been\r\n called.\r\n \"\"\"\r\n```\r\n\r\nWhy is this important note not rendered?\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Compute Engine credentials.\n\nThis module provides authentication for application running on Google Compute\nEngine using the Compute Engine metadata server.\n\n\"\"\"\n\nimport datetime\n\nimport six\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.auth import exceptions\nfrom google.auth import iam\nfrom google.auth import jwt\nfrom google.auth.compute_engine import _metadata\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.ReadOnlyScoped, credentials.Credentials):\n \"\"\"Compute Engine Credentials.\n\n These credentials use the Google Compute Engine metadata server to obtain\n OAuth 2.0 access tokens associated with the instance's service account.\n\n For more information about Compute Engine authentication, including how\n to configure scopes, see the `Compute Engine authentication\n documentation`_.\n\n .. note:: Compute Engine instances can be created with scopes and therefore\n these credentials are considered to be 'scoped'. However, you can\n not use :meth:`~google.auth.credentials.ScopedCredentials.with_scopes`\n because it is not possible to change the scopes that the instance\n has. Also note that\n :meth:`~google.auth.credentials.ScopedCredentials.has_scopes` will not\n work until the credentials have been refreshed.\n\n .. _Compute Engine authentication documentation:\n https://cloud.google.com/compute/docs/authentication#using\n \"\"\"\n\n def __init__(self, service_account_email=\"default\"):\n \"\"\"\n Args:\n service_account_email (str): The service account email to use, or\n 'default'. 
A Compute Engine instance may have multiple service\n accounts.\n \"\"\"\n super(Credentials, self).__init__()\n self._service_account_email = service_account_email\n\n def _retrieve_info(self, request):\n \"\"\"Retrieve information about the service account.\n\n Updates the scopes and retrieves the full service account email.\n\n Args:\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n \"\"\"\n info = _metadata.get_service_account_info(\n request, service_account=self._service_account_email\n )\n\n self._service_account_email = info[\"email\"]\n self._scopes = info[\"scopes\"]\n\n def refresh(self, request):\n \"\"\"Refresh the access token and scopes.\n\n Args:\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n\n Raises:\n google.auth.exceptions.RefreshError: If the Compute Engine metadata\n service can't be reached if if the instance has not\n credentials.\n \"\"\"\n try:\n self._retrieve_info(request)\n self.token, self.expiry = _metadata.get_service_account_token(\n request, service_account=self._service_account_email\n )\n except exceptions.TransportError as caught_exc:\n new_exc = exceptions.RefreshError(caught_exc)\n six.raise_from(new_exc, caught_exc)\n\n @property\n def service_account_email(self):\n \"\"\"The service account email.\n\n .. note: This is not guaranteed to be set until :meth`refresh` has been\n called.\n \"\"\"\n return self._service_account_email\n\n @property\n def requires_scopes(self):\n \"\"\"False: Compute Engine credentials can not be scoped.\"\"\"\n return False\n\n\n_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n_DEFAULT_TOKEN_URI = \"https://www.googleapis.com/oauth2/v4/token\"\n\n\nclass IDTokenCredentials(credentials.Credentials, credentials.Signing):\n \"\"\"Open ID Connect ID Token-based service account credentials.\n\n These credentials relies on the default service account of a GCE instance.\n\n In order for this to work, the GCE instance must have been started with\n a service account that has access to the IAM Cloud API.\n \"\"\"\n\n def __init__(\n self,\n request,\n target_audience,\n token_uri=_DEFAULT_TOKEN_URI,\n additional_claims=None,\n service_account_email=None,\n ):\n \"\"\"\n Args:\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n target_audience (str): The intended audience for these credentials,\n used when requesting the ID Token. 
The ID Token's ``aud`` claim\n will be set to this string.\n token_uri (str): The OAuth 2.0 Token URI.\n additional_claims (Mapping[str, str]): Any additional claims for\n the JWT assertion used in the authorization grant.\n service_account_email (str): Optional explicit service account to\n use to sign JWT tokens.\n By default, this is the default GCE service account.\n \"\"\"\n super(IDTokenCredentials, self).__init__()\n\n if service_account_email is None:\n sa_info = _metadata.get_service_account_info(request)\n service_account_email = sa_info[\"email\"]\n self._service_account_email = service_account_email\n\n self._signer = iam.Signer(\n request=request,\n credentials=Credentials(),\n service_account_email=service_account_email,\n )\n\n self._token_uri = token_uri\n self._target_audience = target_audience\n\n if additional_claims is not None:\n self._additional_claims = additional_claims\n else:\n self._additional_claims = {}\n\n def with_target_audience(self, target_audience):\n \"\"\"Create a copy of these credentials with the specified target\n audience.\n Args:\n target_audience (str): The intended audience for these credentials,\n used when requesting the ID Token.\n Returns:\n google.auth.service_account.IDTokenCredentials: A new credentials\n instance.\n \"\"\"\n return self.__class__(\n self._signer,\n service_account_email=self._service_account_email,\n token_uri=self._token_uri,\n target_audience=target_audience,\n additional_claims=self._additional_claims.copy(),\n )\n\n def _make_authorization_grant_assertion(self):\n \"\"\"Create the OAuth 2.0 assertion.\n This assertion is used during the OAuth 2.0 grant to acquire an\n ID token.\n Returns:\n bytes: The authorization grant assertion.\n \"\"\"\n now = _helpers.utcnow()\n lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)\n expiry = now + lifetime\n\n payload = {\n \"iat\": _helpers.datetime_to_secs(now),\n \"exp\": _helpers.datetime_to_secs(expiry),\n # The issuer must be the service account email.\n \"iss\": self.service_account_email,\n # The audience must be the auth token endpoint's URI\n \"aud\": self._token_uri,\n # The target audience specifies which service the ID token is\n # intended for.\n \"target_audience\": self._target_audience,\n }\n\n payload.update(self._additional_claims)\n\n token = jwt.encode(self._signer, payload)\n\n return token\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n assertion = self._make_authorization_grant_assertion()\n access_token, expiry, _ = _client.id_token_jwt_grant(\n request, self._token_uri, assertion\n )\n self.token = access_token\n self.expiry = expiry\n\n @property\n @_helpers.copy_docstring(credentials.Signing)\n def signer(self):\n return self._signer\n\n @_helpers.copy_docstring(credentials.Signing)\n def sign_bytes(self, message):\n return self._signer.sign(message)\n\n @property\n def service_account_email(self):\n \"\"\"The service account email.\"\"\"\n return self._service_account_email\n\n @property\n def signer_email(self):\n return self._service_account_email\n", "path": "google/auth/compute_engine/credentials.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License 
is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Compute Engine credentials.\n\nThis module provides authentication for application running on Google Compute\nEngine using the Compute Engine metadata server.\n\n\"\"\"\n\nimport datetime\n\nimport six\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.auth import exceptions\nfrom google.auth import iam\nfrom google.auth import jwt\nfrom google.auth.compute_engine import _metadata\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.ReadOnlyScoped, credentials.Credentials):\n \"\"\"Compute Engine Credentials.\n\n These credentials use the Google Compute Engine metadata server to obtain\n OAuth 2.0 access tokens associated with the instance's service account.\n\n For more information about Compute Engine authentication, including how\n to configure scopes, see the `Compute Engine authentication\n documentation`_.\n\n .. note:: Compute Engine instances can be created with scopes and therefore\n these credentials are considered to be 'scoped'. However, you can\n not use :meth:`~google.auth.credentials.ScopedCredentials.with_scopes`\n because it is not possible to change the scopes that the instance\n has. Also note that\n :meth:`~google.auth.credentials.ScopedCredentials.has_scopes` will not\n work until the credentials have been refreshed.\n\n .. _Compute Engine authentication documentation:\n https://cloud.google.com/compute/docs/authentication#using\n \"\"\"\n\n def __init__(self, service_account_email=\"default\"):\n \"\"\"\n Args:\n service_account_email (str): The service account email to use, or\n 'default'. A Compute Engine instance may have multiple service\n accounts.\n \"\"\"\n super(Credentials, self).__init__()\n self._service_account_email = service_account_email\n\n def _retrieve_info(self, request):\n \"\"\"Retrieve information about the service account.\n\n Updates the scopes and retrieves the full service account email.\n\n Args:\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n \"\"\"\n info = _metadata.get_service_account_info(\n request, service_account=self._service_account_email\n )\n\n self._service_account_email = info[\"email\"]\n self._scopes = info[\"scopes\"]\n\n def refresh(self, request):\n \"\"\"Refresh the access token and scopes.\n\n Args:\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n\n Raises:\n google.auth.exceptions.RefreshError: If the Compute Engine metadata\n service can't be reached if if the instance has not\n credentials.\n \"\"\"\n try:\n self._retrieve_info(request)\n self.token, self.expiry = _metadata.get_service_account_token(\n request, service_account=self._service_account_email\n )\n except exceptions.TransportError as caught_exc:\n new_exc = exceptions.RefreshError(caught_exc)\n six.raise_from(new_exc, caught_exc)\n\n @property\n def service_account_email(self):\n \"\"\"The service account email.\n\n .. 
note:: This is not guaranteed to be set until :meth:`refresh` has been\n called.\n \"\"\"\n return self._service_account_email\n\n @property\n def requires_scopes(self):\n \"\"\"False: Compute Engine credentials can not be scoped.\"\"\"\n return False\n\n\n_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n_DEFAULT_TOKEN_URI = \"https://www.googleapis.com/oauth2/v4/token\"\n\n\nclass IDTokenCredentials(credentials.Credentials, credentials.Signing):\n \"\"\"Open ID Connect ID Token-based service account credentials.\n\n These credentials relies on the default service account of a GCE instance.\n\n In order for this to work, the GCE instance must have been started with\n a service account that has access to the IAM Cloud API.\n \"\"\"\n\n def __init__(\n self,\n request,\n target_audience,\n token_uri=_DEFAULT_TOKEN_URI,\n additional_claims=None,\n service_account_email=None,\n ):\n \"\"\"\n Args:\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n target_audience (str): The intended audience for these credentials,\n used when requesting the ID Token. The ID Token's ``aud`` claim\n will be set to this string.\n token_uri (str): The OAuth 2.0 Token URI.\n additional_claims (Mapping[str, str]): Any additional claims for\n the JWT assertion used in the authorization grant.\n service_account_email (str): Optional explicit service account to\n use to sign JWT tokens.\n By default, this is the default GCE service account.\n \"\"\"\n super(IDTokenCredentials, self).__init__()\n\n if service_account_email is None:\n sa_info = _metadata.get_service_account_info(request)\n service_account_email = sa_info[\"email\"]\n self._service_account_email = service_account_email\n\n self._signer = iam.Signer(\n request=request,\n credentials=Credentials(),\n service_account_email=service_account_email,\n )\n\n self._token_uri = token_uri\n self._target_audience = target_audience\n\n if additional_claims is not None:\n self._additional_claims = additional_claims\n else:\n self._additional_claims = {}\n\n def with_target_audience(self, target_audience):\n \"\"\"Create a copy of these credentials with the specified target\n audience.\n Args:\n target_audience (str): The intended audience for these credentials,\n used when requesting the ID Token.\n Returns:\n google.auth.service_account.IDTokenCredentials: A new credentials\n instance.\n \"\"\"\n return self.__class__(\n self._signer,\n service_account_email=self._service_account_email,\n token_uri=self._token_uri,\n target_audience=target_audience,\n additional_claims=self._additional_claims.copy(),\n )\n\n def _make_authorization_grant_assertion(self):\n \"\"\"Create the OAuth 2.0 assertion.\n This assertion is used during the OAuth 2.0 grant to acquire an\n ID token.\n Returns:\n bytes: The authorization grant assertion.\n \"\"\"\n now = _helpers.utcnow()\n lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)\n expiry = now + lifetime\n\n payload = {\n \"iat\": _helpers.datetime_to_secs(now),\n \"exp\": _helpers.datetime_to_secs(expiry),\n # The issuer must be the service account email.\n \"iss\": self.service_account_email,\n # The audience must be the auth token endpoint's URI\n \"aud\": self._token_uri,\n # The target audience specifies which service the ID token is\n # intended for.\n \"target_audience\": self._target_audience,\n }\n\n payload.update(self._additional_claims)\n\n token = jwt.encode(self._signer, payload)\n\n return token\n\n @_helpers.copy_docstring(credentials.Credentials)\n def 
refresh(self, request):\n assertion = self._make_authorization_grant_assertion()\n access_token, expiry, _ = _client.id_token_jwt_grant(\n request, self._token_uri, assertion\n )\n self.token = access_token\n self.expiry = expiry\n\n @property\n @_helpers.copy_docstring(credentials.Signing)\n def signer(self):\n return self._signer\n\n @_helpers.copy_docstring(credentials.Signing)\n def sign_bytes(self, message):\n return self._signer.sign(message)\n\n @property\n def service_account_email(self):\n \"\"\"The service account email.\"\"\"\n return self._service_account_email\n\n @property\n def signer_email(self):\n return self._service_account_email\n", "path": "google/auth/compute_engine/credentials.py"}]}
2802
128
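The fix in this row is a two-character change in reStructuredText: `.. note:` with a single colon is parsed as a comment and silently dropped by Sphinx, while `.. note::` is the admonition directive that actually renders; likewise the cross-reference role needs both colons, `:meth:`refresh``. A sketch of the corrected docstring shape in a stripped-down class (the `__init__` is ours, added so the snippet runs on its own):

```python
class Credentials:
    def __init__(self, service_account_email="default"):
        self._service_account_email = service_account_email

    @property
    def service_account_email(self):
        """The service account email.

        .. note:: This is not guaranteed to be set until :meth:`refresh`
            has been called.
        """
        return self._service_account_email

print(Credentials().service_account_email)  # default
```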
gh_patches_debug_57973
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-1191
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [INF/CI] Add `--cov-append` for `pytest` <!-- Thank you for your PR! BEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title. The abbreviations can be: - [DOC]: Documentation fixes. - [ENH]: Code contributions and new features. - [TST]: Test-related contributions. - [INF]: Infrastructure-related contributions. Also, do not forget to tag the relevant issue here as well. Finally, as commits come in, don't forget to regularly rebase! --> # PR Description Please describe the changes proposed in the pull request: > Another reason code coverage failed is that pytest doesn't add `--cov-append` option. `--cov-append` can get a sum coverage. I'll add this option in the next PR. First let us merge `codecov.yml` into `tests.yml`. Keep the same test logic for the dev branch or a PR. _Originally posted by @Zeroto521 in https://github.com/pyjanitor-devs/pyjanitor/issues/1185#issuecomment-1296479926_ <!-- Doing so provides maintainers with context on what the PR is, and can help us more effectively review your PR. --> <!-- Please also identify below which issue that has been raised that you are going to close. --> <!-- As you go down the PR template, please feel free to delete sections that are irrelevant. --> # PR Checklist <!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with the project, please feel free to delete this section. --> Please ensure that you have done the following: 1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`. <!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. --> 2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`. <!-- We'd like to acknowledge your contributions! --> 3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is "on deck") describing the contribution. - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line. # Automatic checks There will be automatic checks run on the PR. These include: - Building a preview of the docs on Netlify - Automatically linting the code - Making sure the code is documented - Making sure that all tests are passed - Making sure that code coverage doesn't go down. # Relevant Reviewers <!-- Finally, please tag relevant maintainers to review. --> Please tag maintainers to review. - @ericmjl --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `janitor/accessors/__init__.py` Content: ``` 1 """Miscellaneous mathematical operators. 2 3 Lazy loading used here to speed up imports. 4 """ 5 6 import warnings 7 from typing import Tuple 8 9 10 import lazy_loader as lazy 11 12 scipy_special = lazy.load("scipy.special") 13 ss = lazy.load("scipy.stats") 14 pf = lazy.load("pandas_flavor") 15 pd = lazy.load("pandas") 16 np = lazy.load("numpy") 17 pdtypes = lazy.load("pandas.api.types") 18 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/janitor/accessors/__init__.py b/janitor/accessors/__init__.py --- a/janitor/accessors/__init__.py +++ b/janitor/accessors/__init__.py @@ -1,17 +1,3 @@ -"""Miscellaneous mathematical operators. +"""Miscellaneous mathematical operators.""" -Lazy loading used here to speed up imports. -""" - -import warnings -from typing import Tuple - - -import lazy_loader as lazy - -scipy_special = lazy.load("scipy.special") -ss = lazy.load("scipy.stats") -pf = lazy.load("pandas_flavor") -pd = lazy.load("pandas") -np = lazy.load("numpy") -pdtypes = lazy.load("pandas.api.types") +from janitor.accessors.data_description import DataDescription # noqa: F401
{"golden_diff": "diff --git a/janitor/accessors/__init__.py b/janitor/accessors/__init__.py\n--- a/janitor/accessors/__init__.py\n+++ b/janitor/accessors/__init__.py\n@@ -1,17 +1,3 @@\n-\"\"\"Miscellaneous mathematical operators.\n+\"\"\"Miscellaneous mathematical operators.\"\"\"\n \n-Lazy loading used here to speed up imports.\n-\"\"\"\n-\n-import warnings\n-from typing import Tuple\n-\n-\n-import lazy_loader as lazy\n-\n-scipy_special = lazy.load(\"scipy.special\")\n-ss = lazy.load(\"scipy.stats\")\n-pf = lazy.load(\"pandas_flavor\")\n-pd = lazy.load(\"pandas\")\n-np = lazy.load(\"numpy\")\n-pdtypes = lazy.load(\"pandas.api.types\")\n+from janitor.accessors.data_description import DataDescription # noqa: F401\n", "issue": "[INF/CI] Add `--cov-append` for `pytest`\n<!-- Thank you for your PR!\r\n\r\nBEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.\r\n\r\nThe abbreviations can be:\r\n- [DOC]: Documentation fixes.\r\n- [ENH]: Code contributions and new features.\r\n- [TST]: Test-related contributions.\r\n- [INF]: Infrastructure-related contributions.\r\n\r\nAlso, do not forget to tag the relevant issue here as well.\r\n\r\nFinally, as commits come in, don't forget to regularly rebase!\r\n-->\r\n\r\n# PR Description\r\n\r\nPlease describe the changes proposed in the pull request:\r\n\r\n> Another reason code coverage failed is that pytest doesn't add `--cov-append` option.\r\n`--cov-append` can get a sum coverage. I'll add this option in the next PR.\r\nFirst let us merge `codecov.yml` into `tests.yml`. Keep the same test logic for the dev branch or a PR.\r\n\r\n_Originally posted by @Zeroto521 in https://github.com/pyjanitor-devs/pyjanitor/issues/1185#issuecomment-1296479926_\r\n\r\n<!-- Doing so provides maintainers with context on what the PR is, and can help us more effectively review your PR. -->\r\n\r\n<!-- Please also identify below which issue that has been raised that you are going to close. -->\r\n\r\n<!-- As you go down the PR template, please feel free to delete sections that are irrelevant. -->\r\n\r\n# PR Checklist\r\n\r\n<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with\r\nthe project, please feel free to delete this section. -->\r\n\r\nPlease ensure that you have done the following:\r\n\r\n1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.\r\n<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->\r\n2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.\r\n<!-- We'd like to acknowledge your contributions! -->\r\n3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is \"on deck\") describing the contribution.\r\n - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.\r\n\r\n# Automatic checks\r\n\r\nThere will be automatic checks run on the PR. These include:\r\n\r\n- Building a preview of the docs on Netlify\r\n- Automatically linting the code\r\n- Making sure the code is documented\r\n- Making sure that all tests are passed\r\n- Making sure that code coverage doesn't go down.\r\n\r\n# Relevant Reviewers\r\n\r\n<!-- Finally, please tag relevant maintainers to review. 
-->\r\n\r\nPlease tag maintainers to review.\r\n\r\n- @ericmjl\r\n\n", "before_files": [{"content": "\"\"\"Miscellaneous mathematical operators.\n\nLazy loading used here to speed up imports.\n\"\"\"\n\nimport warnings\nfrom typing import Tuple\n\n\nimport lazy_loader as lazy\n\nscipy_special = lazy.load(\"scipy.special\")\nss = lazy.load(\"scipy.stats\")\npf = lazy.load(\"pandas_flavor\")\npd = lazy.load(\"pandas\")\nnp = lazy.load(\"numpy\")\npdtypes = lazy.load(\"pandas.api.types\")\n", "path": "janitor/accessors/__init__.py"}], "after_files": [{"content": "\"\"\"Miscellaneous mathematical operators.\"\"\"\n\nfrom janitor.accessors.data_description import DataDescription # noqa: F401\n", "path": "janitor/accessors/__init__.py"}]}
972
185
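The `--cov-append` flag this row's PR adds tells pytest-cov to accumulate into the existing `.coverage` file instead of overwriting it, so split test runs report one combined total. A hedged sketch of the effect, driving pytest from Python (assumes pytest and pytest-cov are installed; the test paths are illustrative, not pyjanitor's real layout):

```python
import pytest

# First pass writes a fresh .coverage file; the empty --cov-report=
# suppresses intermediate terminal output.
pytest.main(["tests/unit", "--cov=janitor", "--cov-report="])
# Second pass appends to the same .coverage file instead of clobbering
# it, so the final report covers both runs.
pytest.main(["tests/functions", "--cov=janitor", "--cov-append",
             "--cov-report=term"])
```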
gh_patches_debug_38001
rasdani/github-patches
git_diff
biolab__orange3-1907
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TypeError: OneClassSVMLearner doesn't support multiple class variables https://sentry.io/biolab/orange3/issues/202498152/ ``` TypeError: OneClassSVMLearner doesn't support multiple class variables Module "Orange.widgets.data.owoutliers", line 141, in commit y_pred = self.detect_outliers() Module "Orange.widgets.data.owoutliers", line 169, in detect_outliers model = learner(self.data) Module "Orange.base", line 253, in __call__ m = super().__call__(data) Module "Orange.base", line 52, in __call__ self.__class__.__name__) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Orange/widgets/data/owoutliers.py` Content: ``` 1 import sys 2 3 import numpy as np 4 from AnyQt.QtWidgets import QLayout 5 6 from Orange.base import SklLearner 7 from Orange.classification import OneClassSVMLearner, EllipticEnvelopeLearner 8 from Orange.data import Table, Domain, ContinuousVariable 9 from Orange.widgets import widget, gui 10 from Orange.widgets.settings import Setting 11 from Orange.widgets.utils.sql import check_sql_input 12 13 14 class OWOutliers(widget.OWWidget): 15 name = "Outliers" 16 description = "Detect outliers." 17 icon = "icons/Outliers.svg" 18 priority = 3000 19 category = "Data" 20 keywords = ["data", "outlier", "inlier"] 21 22 inputs = [("Data", Table, "set_data")] 23 outputs = [("Inliers", Table), ("Outliers", Table)] 24 25 want_main_area = False 26 27 OneClassSVM, Covariance = range(2) 28 29 outlier_method = Setting(OneClassSVM) 30 nu = Setting(50) 31 gamma = Setting(0.01) 32 cont = Setting(10) 33 empirical_covariance = Setting(False) 34 support_fraction = Setting(1) 35 36 data_info_default = 'No data on input.' 
37 in_out_info_default = ' ' 38 39 def __init__(self): 40 super().__init__() 41 self.data = None 42 self.n_inliers = self.n_outliers = None 43 44 box = gui.vBox(self.controlArea, "Information") 45 self.data_info_label = gui.widgetLabel(box, self.data_info_default) 46 self.in_out_info_label = gui.widgetLabel(box, 47 self.in_out_info_default) 48 49 box = gui.vBox(self.controlArea, "Outlier Detection Method") 50 detection = gui.radioButtons(box, self, "outlier_method") 51 52 gui.appendRadioButton(detection, 53 "One class SVM with non-linear kernel (RBF)") 54 ibox = gui.indentedBox(detection) 55 tooltip = "An upper bound on the fraction of training errors and a " \ 56 "lower bound of the fraction of support vectors" 57 gui.widgetLabel(ibox, 'Nu:', tooltip=tooltip) 58 self.nu_slider = gui.hSlider( 59 ibox, self, "nu", minValue=1, maxValue=100, ticks=10, 60 labelFormat="%d %%", callback=self.nu_changed, tooltip=tooltip) 61 self.gamma_spin = gui.spin( 62 ibox, self, "gamma", label="Kernel coefficient:", step=1e-2, 63 spinType=float, minv=0.01, maxv=10, callback=self.gamma_changed) 64 gui.separator(detection, 12) 65 66 self.rb_cov = gui.appendRadioButton(detection, "Covariance estimator") 67 ibox = gui.indentedBox(detection) 68 self.l_cov = gui.widgetLabel(ibox, 'Contamination:') 69 self.cont_slider = gui.hSlider( 70 ibox, self, "cont", minValue=0, maxValue=100, ticks=10, 71 labelFormat="%d %%", callback=self.cont_changed) 72 73 ebox = gui.hBox(ibox) 74 self.cb_emp_cov = gui.checkBox( 75 ebox, self, "empirical_covariance", 76 "Support fraction:", callback=self.empirical_changed) 77 self.support_fraction_spin = gui.spin( 78 ebox, self, "support_fraction", step=1e-1, spinType=float, 79 minv=0.1, maxv=10, callback=self.support_fraction_changed) 80 81 gui.separator(detection, 12) 82 83 gui.button(self.buttonsArea, self, "Detect Outliers", 84 callback=self.commit) 85 self.layout().setSizeConstraint(QLayout.SetFixedSize) 86 87 def nu_changed(self): 88 self.outlier_method = self.OneClassSVM 89 90 def gamma_changed(self): 91 self.outlier_method = self.OneClassSVM 92 93 def cont_changed(self): 94 self.outlier_method = self.Covariance 95 96 def support_fraction_changed(self): 97 self.outlier_method = self.Covariance 98 99 def empirical_changed(self): 100 self.outlier_method = self.Covariance 101 102 def disable_covariance(self): 103 self.outlier_method = self.OneClassSVM 104 self.rb_cov.setDisabled(True) 105 self.l_cov.setDisabled(True) 106 self.cont_slider.setDisabled(True) 107 self.cb_emp_cov.setDisabled(True) 108 self.support_fraction_spin.setDisabled(True) 109 self.warning('Too many features for covariance estimation.') 110 111 def enable_covariance(self): 112 self.rb_cov.setDisabled(False) 113 self.l_cov.setDisabled(False) 114 self.cont_slider.setDisabled(False) 115 self.cb_emp_cov.setDisabled(False) 116 self.support_fraction_spin.setDisabled(False) 117 self.warning() 118 119 @check_sql_input 120 def set_data(self, dataset): 121 self.data = dataset 122 if self.data is None: 123 self.data_info_label.setText(self.data_info_default) 124 self.in_out_info_label.setText(self.in_out_info_default) 125 else: 126 self.data_info_label.setText('%d instances' % len(self.data)) 127 self.in_out_info_label.setText(' ') 128 129 self.enable_covariance() 130 if self.data and len(self.data.domain.attributes) > 1500: 131 self.disable_covariance() 132 133 self.commit() 134 135 def commit(self): 136 self.error() 137 inliers = outliers = None 138 self.n_inliers = self.n_outliers = None 139 if self.data is not None and 
len(self.data) > 0: 140 try: 141 y_pred = self.detect_outliers() 142 except ValueError: 143 self.error("Singular covariance matrix.") 144 self.in_out_info_label.setText(self.in_out_info_default) 145 else: 146 inliers_ind = np.where(y_pred == 1)[0] 147 outliers_ind = np.where(y_pred == -1)[0] 148 inliers = Table(self.new_domain, self.new_data, inliers_ind) 149 outliers = Table(self.new_domain, 150 self.new_data, outliers_ind) 151 self.in_out_info_label.setText('%d inliers, %d outliers' % 152 (len(inliers), len(outliers))) 153 self.n_inliers = len(inliers) 154 self.n_outliers = len(outliers) 155 156 self.send("Inliers", inliers) 157 self.send("Outliers", outliers) 158 159 def detect_outliers(self): 160 if self.outlier_method == self.OneClassSVM: 161 learner = OneClassSVMLearner( 162 gamma=self.gamma, nu=self.nu / 100, 163 preprocessors=SklLearner.preprocessors) 164 else: 165 learner = EllipticEnvelopeLearner( 166 support_fraction=self.support_fraction 167 if self.empirical_covariance else None, 168 contamination=self.cont / 100.) 169 model = learner(self.data) 170 y_pred = model(self.data) 171 self.add_metas(model) 172 return np.array(y_pred) 173 174 def add_metas(self, model): 175 if self.outlier_method == self.Covariance: 176 mahal = model.mahalanobis(self.data.X) 177 mahal = mahal.reshape(len(self.data), 1) 178 attrs = self.data.domain.attributes 179 classes = self.data.domain.class_vars 180 new_metas = list(self.data.domain.metas) + \ 181 [ContinuousVariable(name="Mahalanobis")] 182 self.new_domain = Domain(attrs, classes, new_metas) 183 self.new_data = Table(self.new_domain, self.data) 184 self.new_data.metas = np.hstack((self.data.metas, mahal)) 185 else: 186 self.new_domain = self.data.domain 187 self.new_data = self.data 188 189 def send_report(self): 190 if self.n_outliers is None or self.n_inliers is None: 191 return 192 self.report_items("Data", 193 (("Input instances", len(self.data)), 194 ("Inliers", self.n_inliers), 195 ("Outliers", self.n_outliers))) 196 if self.outlier_method == 0: 197 self.report_items( 198 "Detection", 199 (("Detection method", 200 "One class SVM with non-linear kernel (RBF)"), 201 ("Regularization (nu)", self.nu), 202 ("Kernel coefficient", self.gamma))) 203 else: 204 self.report_items( 205 "Detection", 206 (("Detection method", "Covariance estimator"), 207 ("Contamination", self.cont), 208 ("Support fraction", self.support_fraction))) 209 210 def test_main(): 211 from AnyQt.QtWidgets import QApplication 212 app = QApplication([]) 213 data = Table("iris") 214 w = OWOutliers() 215 w.set_data(data) 216 w.commit() 217 w.show() 218 return app.exec_() 219 220 221 if __name__ == "__main__": 222 sys.exit(test_main()) 223 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Orange/widgets/data/owoutliers.py b/Orange/widgets/data/owoutliers.py --- a/Orange/widgets/data/owoutliers.py +++ b/Orange/widgets/data/owoutliers.py @@ -8,6 +8,7 @@ from Orange.data import Table, Domain, ContinuousVariable from Orange.widgets import widget, gui from Orange.widgets.settings import Setting +from Orange.widgets.widget import Msg from Orange.widgets.utils.sql import check_sql_input @@ -36,6 +37,10 @@ data_info_default = 'No data on input.' in_out_info_default = ' ' + class Error(widget.OWWidget.Error): + singular_cov = Msg("Singular covariance matrix.") + multiclass_error = Msg("Multiple class data is not supported") + def __init__(self): super().__init__() self.data = None @@ -133,25 +138,29 @@ self.commit() def commit(self): - self.error() + self.clear_messages() inliers = outliers = None self.n_inliers = self.n_outliers = None if self.data is not None and len(self.data) > 0: - try: - y_pred = self.detect_outliers() - except ValueError: - self.error("Singular covariance matrix.") - self.in_out_info_label.setText(self.in_out_info_default) + if self.data.Y.ndim > 1: + self.Error.multiclass_error() else: - inliers_ind = np.where(y_pred == 1)[0] - outliers_ind = np.where(y_pred == -1)[0] - inliers = Table(self.new_domain, self.new_data, inliers_ind) - outliers = Table(self.new_domain, - self.new_data, outliers_ind) - self.in_out_info_label.setText('%d inliers, %d outliers' % - (len(inliers), len(outliers))) - self.n_inliers = len(inliers) - self.n_outliers = len(outliers) + try: + y_pred = self.detect_outliers() + except ValueError: + self.Error.singular_cov() + self.in_out_info_label.setText(self.in_out_info_default) + else: + inliers_ind = np.where(y_pred == 1)[0] + outliers_ind = np.where(y_pred == -1)[0] + inliers = Table(self.new_domain, self.new_data, inliers_ind) + outliers = Table(self.new_domain, + self.new_data, outliers_ind) + self.in_out_info_label.setText( + "{} inliers, {} outliers".format(len(inliers), + len(outliers))) + self.n_inliers = len(inliers) + self.n_outliers = len(outliers) self.send("Inliers", inliers) self.send("Outliers", outliers)
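The diff above replaces the widget's free-form `self.error("...")` calls with Orange's declarative message classes and adds an up-front guard for multi-target data. A minimal sketch of that pattern, reduced to the error path: the `Msg` and `OWWidget.Error` API is taken from the diff's imports, while `OWSketch`, its `data` attribute, and the stubbed `detect_outliers` are placeholders, not Orange code.

```python
from Orange.widgets import widget
from Orange.widgets.widget import Msg


class OWSketch(widget.OWWidget):
    """Placeholder widget showing only the message pattern from the patch."""
    name = "Sketch"

    class Error(widget.OWWidget.Error):
        # Each Msg becomes a callable that shows its text in the widget's
        # message area; clear_messages() hides them again.
        singular_cov = Msg("Singular covariance matrix.")
        multiclass_error = Msg("Multiple class data is not supported")

    data = None  # set by set_data() in the real widget

    def detect_outliers(self):
        # Stub standing in for the learner call; a singular covariance
        # matrix surfaces as a ValueError, as in the real widget.
        raise ValueError

    def commit(self):
        self.clear_messages()             # reset messages from the last run
        if self.data is None or not len(self.data):
            return
        if self.data.Y.ndim > 1:          # the new multi-target guard
            self.Error.multiclass_error()
            return
        try:
            y_pred = self.detect_outliers()
        except ValueError:
            self.Error.singular_cov()
```

Activating a message is just calling it, so the patch can drop the string argument that `self.error(...)` used to carry and keep all user-facing text declared in one place on the class.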
{"golden_diff": "diff --git a/Orange/widgets/data/owoutliers.py b/Orange/widgets/data/owoutliers.py\n--- a/Orange/widgets/data/owoutliers.py\n+++ b/Orange/widgets/data/owoutliers.py\n@@ -8,6 +8,7 @@\n from Orange.data import Table, Domain, ContinuousVariable\n from Orange.widgets import widget, gui\n from Orange.widgets.settings import Setting\n+from Orange.widgets.widget import Msg\n from Orange.widgets.utils.sql import check_sql_input\n \n \n@@ -36,6 +37,10 @@\n data_info_default = 'No data on input.'\n in_out_info_default = ' '\n \n+ class Error(widget.OWWidget.Error):\n+ singular_cov = Msg(\"Singular covariance matrix.\")\n+ multiclass_error = Msg(\"Multiple class data is not supported\")\n+\n def __init__(self):\n super().__init__()\n self.data = None\n@@ -133,25 +138,29 @@\n self.commit()\n \n def commit(self):\n- self.error()\n+ self.clear_messages()\n inliers = outliers = None\n self.n_inliers = self.n_outliers = None\n if self.data is not None and len(self.data) > 0:\n- try:\n- y_pred = self.detect_outliers()\n- except ValueError:\n- self.error(\"Singular covariance matrix.\")\n- self.in_out_info_label.setText(self.in_out_info_default)\n+ if self.data.Y.ndim > 1:\n+ self.Error.multiclass_error()\n else:\n- inliers_ind = np.where(y_pred == 1)[0]\n- outliers_ind = np.where(y_pred == -1)[0]\n- inliers = Table(self.new_domain, self.new_data, inliers_ind)\n- outliers = Table(self.new_domain,\n- self.new_data, outliers_ind)\n- self.in_out_info_label.setText('%d inliers, %d outliers' %\n- (len(inliers), len(outliers)))\n- self.n_inliers = len(inliers)\n- self.n_outliers = len(outliers)\n+ try:\n+ y_pred = self.detect_outliers()\n+ except ValueError:\n+ self.Error.singular_cov()\n+ self.in_out_info_label.setText(self.in_out_info_default)\n+ else:\n+ inliers_ind = np.where(y_pred == 1)[0]\n+ outliers_ind = np.where(y_pred == -1)[0]\n+ inliers = Table(self.new_domain, self.new_data, inliers_ind)\n+ outliers = Table(self.new_domain,\n+ self.new_data, outliers_ind)\n+ self.in_out_info_label.setText(\n+ \"{} inliers, {} outliers\".format(len(inliers),\n+ len(outliers)))\n+ self.n_inliers = len(inliers)\n+ self.n_outliers = len(outliers)\n \n self.send(\"Inliers\", inliers)\n self.send(\"Outliers\", outliers)\n", "issue": "TypeError: OneClassSVMLearner doesn't support multiple class variables\nhttps://sentry.io/biolab/orange3/issues/202498152/\n\n```\nTypeError: OneClassSVMLearner doesn't support multiple class variables\n Module \"Orange.widgets.data.owoutliers\", line 141, in commit\n\n y_pred = self.detect_outliers()\n Module \"Orange.widgets.data.owoutliers\", line 169, in detect_outliers\n\n model = learner(self.data)\n Module \"Orange.base\", line 253, in __call__\n\n m = super().__call__(data)\n Module \"Orange.base\", line 52, in __call__\n\n self.__class__.__name__)\n```\n", "before_files": [{"content": "import sys\n\nimport numpy as np\nfrom AnyQt.QtWidgets import QLayout\n\nfrom Orange.base import SklLearner\nfrom Orange.classification import OneClassSVMLearner, EllipticEnvelopeLearner\nfrom Orange.data import Table, Domain, ContinuousVariable\nfrom Orange.widgets import widget, gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils.sql import check_sql_input\n\n\nclass OWOutliers(widget.OWWidget):\n name = \"Outliers\"\n description = \"Detect outliers.\"\n icon = \"icons/Outliers.svg\"\n priority = 3000\n category = \"Data\"\n keywords = [\"data\", \"outlier\", \"inlier\"]\n\n inputs = [(\"Data\", Table, \"set_data\")]\n outputs = [(\"Inliers\", Table), 
(\"Outliers\", Table)]\n\n want_main_area = False\n\n OneClassSVM, Covariance = range(2)\n\n outlier_method = Setting(OneClassSVM)\n nu = Setting(50)\n gamma = Setting(0.01)\n cont = Setting(10)\n empirical_covariance = Setting(False)\n support_fraction = Setting(1)\n\n data_info_default = 'No data on input.'\n in_out_info_default = ' '\n\n def __init__(self):\n super().__init__()\n self.data = None\n self.n_inliers = self.n_outliers = None\n\n box = gui.vBox(self.controlArea, \"Information\")\n self.data_info_label = gui.widgetLabel(box, self.data_info_default)\n self.in_out_info_label = gui.widgetLabel(box,\n self.in_out_info_default)\n\n box = gui.vBox(self.controlArea, \"Outlier Detection Method\")\n detection = gui.radioButtons(box, self, \"outlier_method\")\n\n gui.appendRadioButton(detection,\n \"One class SVM with non-linear kernel (RBF)\")\n ibox = gui.indentedBox(detection)\n tooltip = \"An upper bound on the fraction of training errors and a \" \\\n \"lower bound of the fraction of support vectors\"\n gui.widgetLabel(ibox, 'Nu:', tooltip=tooltip)\n self.nu_slider = gui.hSlider(\n ibox, self, \"nu\", minValue=1, maxValue=100, ticks=10,\n labelFormat=\"%d %%\", callback=self.nu_changed, tooltip=tooltip)\n self.gamma_spin = gui.spin(\n ibox, self, \"gamma\", label=\"Kernel coefficient:\", step=1e-2,\n spinType=float, minv=0.01, maxv=10, callback=self.gamma_changed)\n gui.separator(detection, 12)\n\n self.rb_cov = gui.appendRadioButton(detection, \"Covariance estimator\")\n ibox = gui.indentedBox(detection)\n self.l_cov = gui.widgetLabel(ibox, 'Contamination:')\n self.cont_slider = gui.hSlider(\n ibox, self, \"cont\", minValue=0, maxValue=100, ticks=10,\n labelFormat=\"%d %%\", callback=self.cont_changed)\n\n ebox = gui.hBox(ibox)\n self.cb_emp_cov = gui.checkBox(\n ebox, self, \"empirical_covariance\",\n \"Support fraction:\", callback=self.empirical_changed)\n self.support_fraction_spin = gui.spin(\n ebox, self, \"support_fraction\", step=1e-1, spinType=float,\n minv=0.1, maxv=10, callback=self.support_fraction_changed)\n\n gui.separator(detection, 12)\n\n gui.button(self.buttonsArea, self, \"Detect Outliers\",\n callback=self.commit)\n self.layout().setSizeConstraint(QLayout.SetFixedSize)\n\n def nu_changed(self):\n self.outlier_method = self.OneClassSVM\n\n def gamma_changed(self):\n self.outlier_method = self.OneClassSVM\n\n def cont_changed(self):\n self.outlier_method = self.Covariance\n\n def support_fraction_changed(self):\n self.outlier_method = self.Covariance\n\n def empirical_changed(self):\n self.outlier_method = self.Covariance\n\n def disable_covariance(self):\n self.outlier_method = self.OneClassSVM\n self.rb_cov.setDisabled(True)\n self.l_cov.setDisabled(True)\n self.cont_slider.setDisabled(True)\n self.cb_emp_cov.setDisabled(True)\n self.support_fraction_spin.setDisabled(True)\n self.warning('Too many features for covariance estimation.')\n\n def enable_covariance(self):\n self.rb_cov.setDisabled(False)\n self.l_cov.setDisabled(False)\n self.cont_slider.setDisabled(False)\n self.cb_emp_cov.setDisabled(False)\n self.support_fraction_spin.setDisabled(False)\n self.warning()\n\n @check_sql_input\n def set_data(self, dataset):\n self.data = dataset\n if self.data is None:\n self.data_info_label.setText(self.data_info_default)\n self.in_out_info_label.setText(self.in_out_info_default)\n else:\n self.data_info_label.setText('%d instances' % len(self.data))\n self.in_out_info_label.setText(' ')\n\n self.enable_covariance()\n if self.data and 
len(self.data.domain.attributes) > 1500:\n self.disable_covariance()\n\n self.commit()\n\n def commit(self):\n self.error()\n inliers = outliers = None\n self.n_inliers = self.n_outliers = None\n if self.data is not None and len(self.data) > 0:\n try:\n y_pred = self.detect_outliers()\n except ValueError:\n self.error(\"Singular covariance matrix.\")\n self.in_out_info_label.setText(self.in_out_info_default)\n else:\n inliers_ind = np.where(y_pred == 1)[0]\n outliers_ind = np.where(y_pred == -1)[0]\n inliers = Table(self.new_domain, self.new_data, inliers_ind)\n outliers = Table(self.new_domain,\n self.new_data, outliers_ind)\n self.in_out_info_label.setText('%d inliers, %d outliers' %\n (len(inliers), len(outliers)))\n self.n_inliers = len(inliers)\n self.n_outliers = len(outliers)\n\n self.send(\"Inliers\", inliers)\n self.send(\"Outliers\", outliers)\n\n def detect_outliers(self):\n if self.outlier_method == self.OneClassSVM:\n learner = OneClassSVMLearner(\n gamma=self.gamma, nu=self.nu / 100,\n preprocessors=SklLearner.preprocessors)\n else:\n learner = EllipticEnvelopeLearner(\n support_fraction=self.support_fraction\n if self.empirical_covariance else None,\n contamination=self.cont / 100.)\n model = learner(self.data)\n y_pred = model(self.data)\n self.add_metas(model)\n return np.array(y_pred)\n\n def add_metas(self, model):\n if self.outlier_method == self.Covariance:\n mahal = model.mahalanobis(self.data.X)\n mahal = mahal.reshape(len(self.data), 1)\n attrs = self.data.domain.attributes\n classes = self.data.domain.class_vars\n new_metas = list(self.data.domain.metas) + \\\n [ContinuousVariable(name=\"Mahalanobis\")]\n self.new_domain = Domain(attrs, classes, new_metas)\n self.new_data = Table(self.new_domain, self.data)\n self.new_data.metas = np.hstack((self.data.metas, mahal))\n else:\n self.new_domain = self.data.domain\n self.new_data = self.data\n\n def send_report(self):\n if self.n_outliers is None or self.n_inliers is None:\n return\n self.report_items(\"Data\",\n ((\"Input instances\", len(self.data)),\n (\"Inliers\", self.n_inliers),\n (\"Outliers\", self.n_outliers)))\n if self.outlier_method == 0:\n self.report_items(\n \"Detection\",\n ((\"Detection method\",\n \"One class SVM with non-linear kernel (RBF)\"),\n (\"Regularization (nu)\", self.nu),\n (\"Kernel coefficient\", self.gamma)))\n else:\n self.report_items(\n \"Detection\",\n ((\"Detection method\", \"Covariance estimator\"),\n (\"Contamination\", self.cont),\n (\"Support fraction\", self.support_fraction)))\n\ndef test_main():\n from AnyQt.QtWidgets import QApplication\n app = QApplication([])\n data = Table(\"iris\")\n w = OWOutliers()\n w.set_data(data)\n w.commit()\n w.show()\n return app.exec_()\n\n\nif __name__ == \"__main__\":\n sys.exit(test_main())\n", "path": "Orange/widgets/data/owoutliers.py"}], "after_files": [{"content": "import sys\n\nimport numpy as np\nfrom AnyQt.QtWidgets import QLayout\n\nfrom Orange.base import SklLearner\nfrom Orange.classification import OneClassSVMLearner, EllipticEnvelopeLearner\nfrom Orange.data import Table, Domain, ContinuousVariable\nfrom Orange.widgets import widget, gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.widget import Msg\nfrom Orange.widgets.utils.sql import check_sql_input\n\n\nclass OWOutliers(widget.OWWidget):\n name = \"Outliers\"\n description = \"Detect outliers.\"\n icon = \"icons/Outliers.svg\"\n priority = 3000\n category = \"Data\"\n keywords = [\"data\", \"outlier\", \"inlier\"]\n\n inputs = [(\"Data\", Table, 
\"set_data\")]\n outputs = [(\"Inliers\", Table), (\"Outliers\", Table)]\n\n want_main_area = False\n\n OneClassSVM, Covariance = range(2)\n\n outlier_method = Setting(OneClassSVM)\n nu = Setting(50)\n gamma = Setting(0.01)\n cont = Setting(10)\n empirical_covariance = Setting(False)\n support_fraction = Setting(1)\n\n data_info_default = 'No data on input.'\n in_out_info_default = ' '\n\n class Error(widget.OWWidget.Error):\n singular_cov = Msg(\"Singular covariance matrix.\")\n multiclass_error = Msg(\"Multiple class data is not supported\")\n\n def __init__(self):\n super().__init__()\n self.data = None\n self.n_inliers = self.n_outliers = None\n\n box = gui.vBox(self.controlArea, \"Information\")\n self.data_info_label = gui.widgetLabel(box, self.data_info_default)\n self.in_out_info_label = gui.widgetLabel(box,\n self.in_out_info_default)\n\n box = gui.vBox(self.controlArea, \"Outlier Detection Method\")\n detection = gui.radioButtons(box, self, \"outlier_method\")\n\n gui.appendRadioButton(detection,\n \"One class SVM with non-linear kernel (RBF)\")\n ibox = gui.indentedBox(detection)\n tooltip = \"An upper bound on the fraction of training errors and a \" \\\n \"lower bound of the fraction of support vectors\"\n gui.widgetLabel(ibox, 'Nu:', tooltip=tooltip)\n self.nu_slider = gui.hSlider(\n ibox, self, \"nu\", minValue=1, maxValue=100, ticks=10,\n labelFormat=\"%d %%\", callback=self.nu_changed, tooltip=tooltip)\n self.gamma_spin = gui.spin(\n ibox, self, \"gamma\", label=\"Kernel coefficient:\", step=1e-2,\n spinType=float, minv=0.01, maxv=10, callback=self.gamma_changed)\n gui.separator(detection, 12)\n\n self.rb_cov = gui.appendRadioButton(detection, \"Covariance estimator\")\n ibox = gui.indentedBox(detection)\n self.l_cov = gui.widgetLabel(ibox, 'Contamination:')\n self.cont_slider = gui.hSlider(\n ibox, self, \"cont\", minValue=0, maxValue=100, ticks=10,\n labelFormat=\"%d %%\", callback=self.cont_changed)\n\n ebox = gui.hBox(ibox)\n self.cb_emp_cov = gui.checkBox(\n ebox, self, \"empirical_covariance\",\n \"Support fraction:\", callback=self.empirical_changed)\n self.support_fraction_spin = gui.spin(\n ebox, self, \"support_fraction\", step=1e-1, spinType=float,\n minv=0.1, maxv=10, callback=self.support_fraction_changed)\n\n gui.separator(detection, 12)\n\n gui.button(self.buttonsArea, self, \"Detect Outliers\",\n callback=self.commit)\n self.layout().setSizeConstraint(QLayout.SetFixedSize)\n\n def nu_changed(self):\n self.outlier_method = self.OneClassSVM\n\n def gamma_changed(self):\n self.outlier_method = self.OneClassSVM\n\n def cont_changed(self):\n self.outlier_method = self.Covariance\n\n def support_fraction_changed(self):\n self.outlier_method = self.Covariance\n\n def empirical_changed(self):\n self.outlier_method = self.Covariance\n\n def disable_covariance(self):\n self.outlier_method = self.OneClassSVM\n self.rb_cov.setDisabled(True)\n self.l_cov.setDisabled(True)\n self.cont_slider.setDisabled(True)\n self.cb_emp_cov.setDisabled(True)\n self.support_fraction_spin.setDisabled(True)\n self.warning('Too many features for covariance estimation.')\n\n def enable_covariance(self):\n self.rb_cov.setDisabled(False)\n self.l_cov.setDisabled(False)\n self.cont_slider.setDisabled(False)\n self.cb_emp_cov.setDisabled(False)\n self.support_fraction_spin.setDisabled(False)\n self.warning()\n\n @check_sql_input\n def set_data(self, dataset):\n self.data = dataset\n if self.data is None:\n self.data_info_label.setText(self.data_info_default)\n 
self.in_out_info_label.setText(self.in_out_info_default)\n else:\n self.data_info_label.setText('%d instances' % len(self.data))\n self.in_out_info_label.setText(' ')\n\n self.enable_covariance()\n if self.data and len(self.data.domain.attributes) > 1500:\n self.disable_covariance()\n\n self.commit()\n\n def commit(self):\n self.clear_messages()\n inliers = outliers = None\n self.n_inliers = self.n_outliers = None\n if self.data is not None and len(self.data) > 0:\n if self.data.Y.ndim > 1:\n self.Error.multiclass_error()\n else:\n try:\n y_pred = self.detect_outliers()\n except ValueError:\n self.Error.singular_cov()\n self.in_out_info_label.setText(self.in_out_info_default)\n else:\n inliers_ind = np.where(y_pred == 1)[0]\n outliers_ind = np.where(y_pred == -1)[0]\n inliers = Table(self.new_domain, self.new_data, inliers_ind)\n outliers = Table(self.new_domain,\n self.new_data, outliers_ind)\n self.in_out_info_label.setText(\n \"{} inliers, {} outliers\".format(len(inliers),\n len(outliers)))\n self.n_inliers = len(inliers)\n self.n_outliers = len(outliers)\n\n self.send(\"Inliers\", inliers)\n self.send(\"Outliers\", outliers)\n\n def detect_outliers(self):\n if self.outlier_method == self.OneClassSVM:\n learner = OneClassSVMLearner(\n gamma=self.gamma, nu=self.nu / 100,\n preprocessors=SklLearner.preprocessors)\n else:\n learner = EllipticEnvelopeLearner(\n support_fraction=self.support_fraction\n if self.empirical_covariance else None,\n contamination=self.cont / 100.)\n model = learner(self.data)\n y_pred = model(self.data)\n self.add_metas(model)\n return np.array(y_pred)\n\n def add_metas(self, model):\n if self.outlier_method == self.Covariance:\n mahal = model.mahalanobis(self.data.X)\n mahal = mahal.reshape(len(self.data), 1)\n attrs = self.data.domain.attributes\n classes = self.data.domain.class_vars\n new_metas = list(self.data.domain.metas) + \\\n [ContinuousVariable(name=\"Mahalanobis\")]\n self.new_domain = Domain(attrs, classes, new_metas)\n self.new_data = Table(self.new_domain, self.data)\n self.new_data.metas = np.hstack((self.data.metas, mahal))\n else:\n self.new_domain = self.data.domain\n self.new_data = self.data\n\n def send_report(self):\n if self.n_outliers is None or self.n_inliers is None:\n return\n self.report_items(\"Data\",\n ((\"Input instances\", len(self.data)),\n (\"Inliers\", self.n_inliers),\n (\"Outliers\", self.n_outliers)))\n if self.outlier_method == 0:\n self.report_items(\n \"Detection\",\n ((\"Detection method\",\n \"One class SVM with non-linear kernel (RBF)\"),\n (\"Regularization (nu)\", self.nu),\n (\"Kernel coefficient\", self.gamma)))\n else:\n self.report_items(\n \"Detection\",\n ((\"Detection method\", \"Covariance estimator\"),\n (\"Contamination\", self.cont),\n (\"Support fraction\", self.support_fraction)))\n\ndef test_main():\n from AnyQt.QtWidgets import QApplication\n app = QApplication([])\n data = Table(\"iris\")\n w = OWOutliers()\n w.set_data(data)\n w.commit()\n w.show()\n return app.exec_()\n\n\nif __name__ == \"__main__\":\n sys.exit(test_main())\n", "path": "Orange/widgets/data/owoutliers.py"}]}
2,848
632
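The blob ending `"path": "Orange/widgets/data/owoutliers.py"}]}` above is a single JSON object with the fields `golden_diff`, `issue`, `before_files`, and `after_files`, where the two file lists pair up by path. A sketch of unpacking it; `verification_info` is an assumed variable holding that blob as one string:

```python
import json

record = json.loads(verification_info)        # the JSON object printed above
print(record["issue"].splitlines()[0])        # headline of the bug report
patch = record["golden_diff"]                 # same text as the diff shown earlier
for before, after in zip(record["before_files"], record["after_files"]):
    assert before["path"] == after["path"]    # pre/post versions of one file
    print(before["path"], len(before["content"]), "->", len(after["content"]))
```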
gh_patches_debug_6944
rasdani/github-patches
git_diff
OpenNMT__OpenNMT-py-1805
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Logger is not passed to translator When building the translator, there is a logger created but not passed to the translator: https://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L18 This results in a log file that only contains a single entry: https://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L24 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `onmt/bin/translate.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 from __future__ import unicode_literals 5 6 from onmt.utils.logging import init_logger 7 from onmt.utils.misc import split_corpus 8 from onmt.translate.translator import build_translator 9 10 import onmt.opts as opts 11 from onmt.utils.parse import ArgumentParser 12 13 14 def translate(opt): 15 ArgumentParser.validate_translate_opts(opt) 16 logger = init_logger(opt.log_file) 17 18 translator = build_translator(opt, report_score=True) 19 src_shards = split_corpus(opt.src, opt.shard_size) 20 tgt_shards = split_corpus(opt.tgt, opt.shard_size) 21 shard_pairs = zip(src_shards, tgt_shards) 22 23 for i, (src_shard, tgt_shard) in enumerate(shard_pairs): 24 logger.info("Translating shard %d." % i) 25 translator.translate( 26 src=src_shard, 27 tgt=tgt_shard, 28 src_dir=opt.src_dir, 29 batch_size=opt.batch_size, 30 batch_type=opt.batch_type, 31 attn_debug=opt.attn_debug, 32 align_debug=opt.align_debug 33 ) 34 35 36 def _get_parser(): 37 parser = ArgumentParser(description='translate.py') 38 39 opts.config_opts(parser) 40 opts.translate_opts(parser) 41 return parser 42 43 44 def main(): 45 parser = _get_parser() 46 47 opt = parser.parse_args() 48 translate(opt) 49 50 51 if __name__ == "__main__": 52 main() 53 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/onmt/bin/translate.py b/onmt/bin/translate.py --- a/onmt/bin/translate.py +++ b/onmt/bin/translate.py @@ -15,7 +15,7 @@ ArgumentParser.validate_translate_opts(opt) logger = init_logger(opt.log_file) - translator = build_translator(opt, report_score=True) + translator = build_translator(opt, logger=logger, report_score=True) src_shards = split_corpus(opt.src, opt.shard_size) tgt_shards = split_corpus(opt.tgt, opt.shard_size) shard_pairs = zip(src_shards, tgt_shards)
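The one-line change above is plain dependency injection: `translate()` already builds the file-backed logger, so handing it to `build_translator` lets the translator log through the configured handlers instead of whatever default it would otherwise use. A stripped-down illustration of why `opt.log_file` stayed nearly empty; `init_logger` and `Translator` here are stand-ins, not OpenNMT code:

```python
import logging


def init_logger(log_file=None):
    # Stand-in: the file handler is attached to this logger object only.
    logger = logging.getLogger("translate")
    logger.setLevel(logging.INFO)
    if log_file:
        logger.addHandler(logging.FileHandler(log_file))
    return logger


class Translator:
    def __init__(self, logger=None):
        # Without injection this falls back to a logger that has no file
        # handler, so the translator's own messages never reach log_file.
        self.logger = logger or logging.getLogger("translator")


logger = init_logger("run.log")
Translator().logger.info("dropped: no handler writes to run.log")
Translator(logger=logger).logger.info("reaches run.log")  # the patch, in miniature
```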
{"golden_diff": "diff --git a/onmt/bin/translate.py b/onmt/bin/translate.py\n--- a/onmt/bin/translate.py\n+++ b/onmt/bin/translate.py\n@@ -15,7 +15,7 @@\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n \n- translator = build_translator(opt, report_score=True)\n+ translator = build_translator(opt, logger=logger, report_score=True)\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n", "issue": "Logger is not passed to translator\nWhen building the translator, there is a logger created but not passed to the translator:\r\nhttps://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L18\r\nThis results in a log file that only contains a single entry:\r\nhttps://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L24\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import split_corpus\nfrom onmt.translate.translator import build_translator\n\nimport onmt.opts as opts\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef translate(opt):\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n\n translator = build_translator(opt, report_score=True)\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n\n for i, (src_shard, tgt_shard) in enumerate(shard_pairs):\n logger.info(\"Translating shard %d.\" % i)\n translator.translate(\n src=src_shard,\n tgt=tgt_shard,\n src_dir=opt.src_dir,\n batch_size=opt.batch_size,\n batch_type=opt.batch_type,\n attn_debug=opt.attn_debug,\n align_debug=opt.align_debug\n )\n\n\ndef _get_parser():\n parser = ArgumentParser(description='translate.py')\n\n opts.config_opts(parser)\n opts.translate_opts(parser)\n return parser\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n translate(opt)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "onmt/bin/translate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import split_corpus\nfrom onmt.translate.translator import build_translator\n\nimport onmt.opts as opts\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef translate(opt):\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n\n translator = build_translator(opt, logger=logger, report_score=True)\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n\n for i, (src_shard, tgt_shard) in enumerate(shard_pairs):\n logger.info(\"Translating shard %d.\" % i)\n translator.translate(\n src=src_shard,\n tgt=tgt_shard,\n src_dir=opt.src_dir,\n batch_size=opt.batch_size,\n batch_type=opt.batch_type,\n attn_debug=opt.attn_debug,\n align_debug=opt.align_debug\n )\n\n\ndef _get_parser():\n parser = ArgumentParser(description='translate.py')\n\n opts.config_opts(parser)\n opts.translate_opts(parser)\n return parser\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n translate(opt)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "onmt/bin/translate.py"}]}
833
140
gh_patches_debug_15331
rasdani/github-patches
git_diff
napari__napari-1578
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plugin Errors not work in develop ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> $ NAPARI_CATCH_ERRORS=0 python -m napari 16:11:17 ERROR Unhandled exception: Traceback (most recent call last): File "/home/czaki/Dokumenty/projekty/napari/napari/_qt/qt_main_window.py", line 300, in _show_plugin_err_reporter plugin_sorter = QtPluginErrReporter(parent=self._qt_window) File "/home/czaki/Dokumenty/projekty/napari/napari/_qt/dialogs/qt_plugin_report.py", line 62, in __init__ from ..plugins import plugin_manager as _pm ModuleNotFoundError: No module named 'napari._qt.plugins' ## To Reproduce Steps to reproduce the behavior: 1. install last napari from github 2. try open "Plugin Errors" dialog <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> ## Expected behavior <!-- A clear and concise description of what you expected to happen. --> ## Environment - Please copy and paste the information at napari info option in help menubar here: napari: 0.3.7.dev30+gf101ef7 Platform: Linux-4.15.0-112-generic-x86_64-with-debian-buster-sid Python: 3.7.6 (default, Feb 20 2020, 10:04:45) [GCC 7.4.0] Qt: 5.14.2 PyQt5: 5.14.2 NumPy: 1.18.5 SciPy: 1.4.1 Dask: 2.17.2 VisPy: 0.6.4 GL version: 4.6.0 NVIDIA 440.100 MAX_TEXTURE_SIZE: 32768 Plugins: - napari-plugin-engine: 0.1.5 - svg: 0.1.3 - Any other relevant information: ## Additional context <!-- Add any other context about the problem here. --> @sofroniewn @jni --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napari/_qt/dialogs/qt_plugin_report.py` Content: ``` 1 """Provides a QtPluginErrReporter that allows the user report plugin errors. 2 """ 3 from typing import Optional 4 5 from napari_plugin_engine import PluginManager, standard_metadata 6 from qtpy.QtCore import Qt 7 from qtpy.QtGui import QGuiApplication 8 from qtpy.QtWidgets import ( 9 QComboBox, 10 QDialog, 11 QHBoxLayout, 12 QLabel, 13 QPushButton, 14 QTextEdit, 15 QVBoxLayout, 16 QWidget, 17 ) 18 19 from ...plugins.exceptions import format_exceptions 20 21 22 class QtPluginErrReporter(QDialog): 23 """Dialog that allows users to review and report PluginError tracebacks. 24 25 Parameters 26 ---------- 27 parent : QWidget, optional 28 Optional parent widget for this widget. 29 initial_plugin : str, optional 30 If provided, errors from ``initial_plugin`` will be shown when the 31 dialog is created, by default None 32 33 Attributes 34 ---------- 35 text_area : qtpy.QtWidgets.QTextEdit 36 The text area where traceback information will be shown. 37 plugin_combo : qtpy.QtWidgets.QComboBox 38 The dropdown menu used to select the current plugin 39 github_button : qtpy.QtWidgets.QPushButton 40 A button that, when pressed, will open an issue at the current plugin's 41 github issue tracker, prepopulated with a formatted traceback. Button 42 is only visible if a github URL is detected in the package metadata for 43 the current plugin. 44 clipboard_button : qtpy.QtWidgets.QPushButton 45 A button that, when pressed, copies the current traceback information 46 to the clipboard. (HTML tags are removed in the copied text.) 47 plugin_meta : qtpy.QtWidgets.QLabel 48 A label that will show available plugin metadata (such as home page). 49 """ 50 51 NULL_OPTION = 'select plugin... 
' 52 53 def __init__( 54 self, 55 plugin_manager: Optional[PluginManager] = None, 56 *, 57 parent: Optional[QWidget] = None, 58 initial_plugin: Optional[str] = None, 59 ) -> None: 60 super().__init__(parent) 61 if not plugin_manager: 62 from ..plugins import plugin_manager as _pm 63 64 self.plugin_manager = _pm 65 else: 66 self.plugin_manager = plugin_manager 67 68 self.setWindowTitle('Recorded Plugin Exceptions') 69 self.setWindowModality(Qt.NonModal) 70 self.layout = QVBoxLayout() 71 self.layout.setSpacing(0) 72 self.layout.setContentsMargins(10, 10, 10, 10) 73 self.setLayout(self.layout) 74 75 self.text_area = QTextEdit() 76 self.text_area.setTextInteractionFlags(Qt.TextSelectableByMouse) 77 self.text_area.setMinimumWidth(360) 78 79 # Create plugin dropdown menu 80 self.plugin_combo = QComboBox() 81 self.plugin_combo.addItem(self.NULL_OPTION) 82 bad_plugins = [e.plugin_name for e in self.plugin_manager.get_errors()] 83 self.plugin_combo.addItems(list(sorted(set(bad_plugins)))) 84 self.plugin_combo.currentTextChanged.connect(self.set_plugin) 85 self.plugin_combo.setCurrentText(self.NULL_OPTION) 86 87 # create github button (gets connected in self.set_plugin) 88 self.github_button = QPushButton('Open issue on GitHub', self) 89 self.github_button.setToolTip( 90 "Open a web browser to submit this error log\n" 91 "to the developer's GitHub issue tracker" 92 ) 93 self.github_button.hide() 94 95 # create copy to clipboard button 96 self.clipboard_button = QPushButton() 97 self.clipboard_button.hide() 98 self.clipboard_button.setObjectName("QtCopyToClipboardButton") 99 self.clipboard_button.setToolTip("Copy error log to clipboard") 100 self.clipboard_button.clicked.connect(self.copyToClipboard) 101 102 # plugin_meta contains a URL to the home page, (and/or other details) 103 self.plugin_meta = QLabel('', parent=self) 104 self.plugin_meta.setObjectName("pluginInfo") 105 self.plugin_meta.setTextFormat(Qt.RichText) 106 self.plugin_meta.setTextInteractionFlags(Qt.TextBrowserInteraction) 107 self.plugin_meta.setOpenExternalLinks(True) 108 self.plugin_meta.setAlignment(Qt.AlignRight) 109 110 # make layout 111 row_1_layout = QHBoxLayout() 112 row_1_layout.setContentsMargins(11, 5, 10, 0) 113 row_1_layout.addStretch(1) 114 row_1_layout.addWidget(self.plugin_meta) 115 row_2_layout = QHBoxLayout() 116 row_2_layout.setContentsMargins(11, 5, 10, 0) 117 row_2_layout.addWidget(self.plugin_combo) 118 row_2_layout.addStretch(1) 119 row_2_layout.addWidget(self.github_button) 120 row_2_layout.addWidget(self.clipboard_button) 121 row_2_layout.setSpacing(5) 122 self.layout.addLayout(row_1_layout) 123 self.layout.addLayout(row_2_layout) 124 self.layout.addWidget(self.text_area, 1) 125 self.setMinimumWidth(750) 126 self.setMinimumHeight(600) 127 128 if initial_plugin: 129 self.set_plugin(initial_plugin) 130 131 def set_plugin(self, plugin: str) -> None: 132 """Set the current plugin shown in the dropdown and text area. 133 134 Parameters 135 ---------- 136 plugin : str 137 name of a plugin that has created an error this session. 
138 """ 139 self.github_button.hide() 140 self.clipboard_button.hide() 141 try: 142 self.github_button.clicked.disconnect() 143 # when disconnecting a non-existent signal 144 # PySide2 raises runtimeError, PyQt5 raises TypeError 145 except (RuntimeError, TypeError): 146 pass 147 148 if not plugin or (plugin == self.NULL_OPTION): 149 self.plugin_meta.setText('') 150 self.text_area.setHtml('') 151 return 152 153 if not self.plugin_manager.get_errors(plugin): 154 raise ValueError(f"No errors reported for plugin '{plugin}'") 155 self.plugin_combo.setCurrentText(plugin) 156 157 err_string = format_exceptions(plugin, as_html=True) 158 self.text_area.setHtml(err_string) 159 self.clipboard_button.show() 160 161 # set metadata and outbound links/buttons 162 err0 = self.plugin_manager.get_errors(plugin)[0] 163 meta = standard_metadata(err0.plugin) if err0.plugin else {} 164 meta_text = '' 165 if not meta: 166 self.plugin_meta.setText(meta_text) 167 return 168 169 url = meta.get('url') 170 if url: 171 meta_text += ( 172 '<span style="color:#999;">plugin home page:&nbsp;&nbsp;' 173 f'</span><a href="{url}" style="color:#999">{url}</a>' 174 ) 175 if 'github.com' in url: 176 177 def onclick(): 178 import webbrowser 179 180 err = format_exceptions(plugin, as_html=False) 181 err = ( 182 "<!--Provide detail on the error here-->\n\n\n\n" 183 "<details>\n<summary>Traceback from napari</summary>" 184 f"\n\n```\n{err}\n```\n</details>" 185 ) 186 url = f'{meta.get("url")}/issues/new?&body={err}' 187 webbrowser.open(url, new=2) 188 189 self.github_button.clicked.connect(onclick) 190 self.github_button.show() 191 self.plugin_meta.setText(meta_text) 192 193 def copyToClipboard(self) -> None: 194 """Copy current plugin traceback info to clipboard as plain text.""" 195 plugin = self.plugin_combo.currentText() 196 err_string = format_exceptions(plugin, as_html=False) 197 cb = QGuiApplication.clipboard() 198 cb.setText(err_string) 199 ``` Path: `napari/_qt/dialogs/qt_plugin_table.py` Content: ``` 1 from qtpy.QtCore import Qt 2 from qtpy.QtWidgets import QAbstractItemView, QDialog, QLabel, QVBoxLayout 3 4 from ..widgets.qt_dict_table import QtDictTable 5 6 7 class QtPluginTable(QDialog): 8 def __init__(self, parent, plugin_manager=None): 9 super().__init__(parent) 10 if not plugin_manager: 11 from ..plugins import plugin_manager 12 13 self.setMaximumHeight(800) 14 self.setMaximumWidth(1280) 15 layout = QVBoxLayout() 16 # maybe someday add a search bar here? 
17 title = QLabel("Installed Plugins") 18 title.setObjectName("h2") 19 layout.addWidget(title) 20 # get metadata for successfully registered plugins 21 plugin_manager.discover() 22 data = plugin_manager.list_plugin_metadata() 23 data = list(filter(lambda x: x['plugin_name'] != 'builtins', data)) 24 # create a table for it 25 self.table = QtDictTable( 26 parent, 27 data, 28 headers=[ 29 'plugin_name', 30 'package', 31 'version', 32 'url', 33 'author', 34 'license', 35 ], 36 min_section_width=60, 37 ) 38 self.table.setObjectName("pluginTable") 39 self.table.horizontalHeader().setObjectName("pluginTableHeader") 40 self.table.verticalHeader().setObjectName("pluginTableHeader") 41 self.table.setGridStyle(Qt.NoPen) 42 # prevent editing of table 43 self.table.setEditTriggers(QAbstractItemView.NoEditTriggers) 44 layout.addWidget(self.table) 45 self.setLayout(layout) 46 self.setAttribute(Qt.WA_DeleteOnClose) 47 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napari/_qt/dialogs/qt_plugin_report.py b/napari/_qt/dialogs/qt_plugin_report.py --- a/napari/_qt/dialogs/qt_plugin_report.py +++ b/napari/_qt/dialogs/qt_plugin_report.py @@ -59,7 +59,7 @@ ) -> None: super().__init__(parent) if not plugin_manager: - from ..plugins import plugin_manager as _pm + from ...plugins import plugin_manager as _pm self.plugin_manager = _pm else: diff --git a/napari/_qt/dialogs/qt_plugin_table.py b/napari/_qt/dialogs/qt_plugin_table.py --- a/napari/_qt/dialogs/qt_plugin_table.py +++ b/napari/_qt/dialogs/qt_plugin_table.py @@ -8,7 +8,7 @@ def __init__(self, parent, plugin_manager=None): super().__init__(parent) if not plugin_manager: - from ..plugins import plugin_manager + from ...plugins import plugin_manager self.setMaximumHeight(800) self.setMaximumWidth(1280)
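The entire fix above is one extra leading dot per import. In a relative import each dot strips one package from the importing module's location, and these dialogs sit in `napari._qt.dialogs`, one level deeper than the two-dot form assumes, which is exactly what the `ModuleNotFoundError` in the issue spells out:

```python
# Inside napari/_qt/dialogs/qt_plugin_report.py (package napari._qt.dialogs):
#
#   from ..plugins  import plugin_manager   # resolves to napari._qt.plugins,
#                                           # the missing module from the log
#   from ...plugins import plugin_manager   # resolves to napari.plugins
#
# Only meaningful when executed as part of the napari package:
from ...plugins import plugin_manager as _pm  # the line the patch installs
```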
{"golden_diff": "diff --git a/napari/_qt/dialogs/qt_plugin_report.py b/napari/_qt/dialogs/qt_plugin_report.py\n--- a/napari/_qt/dialogs/qt_plugin_report.py\n+++ b/napari/_qt/dialogs/qt_plugin_report.py\n@@ -59,7 +59,7 @@\n ) -> None:\n super().__init__(parent)\n if not plugin_manager:\n- from ..plugins import plugin_manager as _pm\n+ from ...plugins import plugin_manager as _pm\n \n self.plugin_manager = _pm\n else:\ndiff --git a/napari/_qt/dialogs/qt_plugin_table.py b/napari/_qt/dialogs/qt_plugin_table.py\n--- a/napari/_qt/dialogs/qt_plugin_table.py\n+++ b/napari/_qt/dialogs/qt_plugin_table.py\n@@ -8,7 +8,7 @@\n def __init__(self, parent, plugin_manager=None):\n super().__init__(parent)\n if not plugin_manager:\n- from ..plugins import plugin_manager\n+ from ...plugins import plugin_manager\n \n self.setMaximumHeight(800)\n self.setMaximumWidth(1280)\n", "issue": "Plugin Errors not work in develop \n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n$ NAPARI_CATCH_ERRORS=0 python -m napari\r\n16:11:17 ERROR Unhandled exception:\r\nTraceback (most recent call last):\r\n File \"/home/czaki/Dokumenty/projekty/napari/napari/_qt/qt_main_window.py\", line 300, in _show_plugin_err_reporter\r\n plugin_sorter = QtPluginErrReporter(parent=self._qt_window)\r\n File \"/home/czaki/Dokumenty/projekty/napari/napari/_qt/dialogs/qt_plugin_report.py\", line 62, in __init__\r\n from ..plugins import plugin_manager as _pm\r\nModuleNotFoundError: No module named 'napari._qt.plugins'\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. install last napari from github\r\n2. try open \"Plugin Errors\" dialog\r\n\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n## Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\n - Please copy and paste the information at napari info option in help menubar here:\r\n\r\nnapari: 0.3.7.dev30+gf101ef7\r\nPlatform: Linux-4.15.0-112-generic-x86_64-with-debian-buster-sid\r\nPython: 3.7.6 (default, Feb 20 2020, 10:04:45) [GCC 7.4.0]\r\nQt: 5.14.2\r\nPyQt5: 5.14.2\r\nNumPy: 1.18.5\r\nSciPy: 1.4.1\r\nDask: 2.17.2\r\nVisPy: 0.6.4\r\n\r\nGL version: 4.6.0 NVIDIA 440.100\r\nMAX_TEXTURE_SIZE: 32768\r\n\r\nPlugins:\r\n- napari-plugin-engine: 0.1.5\r\n- svg: 0.1.3\r\n\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n@sofroniewn @jni \n", "before_files": [{"content": "\"\"\"Provides a QtPluginErrReporter that allows the user report plugin errors.\n\"\"\"\nfrom typing import Optional\n\nfrom napari_plugin_engine import PluginManager, standard_metadata\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtGui import QGuiApplication\nfrom qtpy.QtWidgets import (\n QComboBox,\n QDialog,\n QHBoxLayout,\n QLabel,\n QPushButton,\n QTextEdit,\n QVBoxLayout,\n QWidget,\n)\n\nfrom ...plugins.exceptions import format_exceptions\n\n\nclass QtPluginErrReporter(QDialog):\n \"\"\"Dialog that allows users to review and report PluginError tracebacks.\n\n Parameters\n ----------\n parent : QWidget, optional\n Optional parent widget for this widget.\n initial_plugin : str, optional\n If provided, errors from ``initial_plugin`` will be shown when the\n dialog is created, by default None\n\n Attributes\n ----------\n text_area : qtpy.QtWidgets.QTextEdit\n The text area where traceback information will be shown.\n plugin_combo : qtpy.QtWidgets.QComboBox\n The dropdown menu used to select the current plugin\n github_button : qtpy.QtWidgets.QPushButton\n A button that, when pressed, will open an issue at the current plugin's\n github issue tracker, prepopulated with a formatted traceback. Button\n is only visible if a github URL is detected in the package metadata for\n the current plugin.\n clipboard_button : qtpy.QtWidgets.QPushButton\n A button that, when pressed, copies the current traceback information\n to the clipboard. (HTML tags are removed in the copied text.)\n plugin_meta : qtpy.QtWidgets.QLabel\n A label that will show available plugin metadata (such as home page).\n \"\"\"\n\n NULL_OPTION = 'select plugin... '\n\n def __init__(\n self,\n plugin_manager: Optional[PluginManager] = None,\n *,\n parent: Optional[QWidget] = None,\n initial_plugin: Optional[str] = None,\n ) -> None:\n super().__init__(parent)\n if not plugin_manager:\n from ..plugins import plugin_manager as _pm\n\n self.plugin_manager = _pm\n else:\n self.plugin_manager = plugin_manager\n\n self.setWindowTitle('Recorded Plugin Exceptions')\n self.setWindowModality(Qt.NonModal)\n self.layout = QVBoxLayout()\n self.layout.setSpacing(0)\n self.layout.setContentsMargins(10, 10, 10, 10)\n self.setLayout(self.layout)\n\n self.text_area = QTextEdit()\n self.text_area.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.text_area.setMinimumWidth(360)\n\n # Create plugin dropdown menu\n self.plugin_combo = QComboBox()\n self.plugin_combo.addItem(self.NULL_OPTION)\n bad_plugins = [e.plugin_name for e in self.plugin_manager.get_errors()]\n self.plugin_combo.addItems(list(sorted(set(bad_plugins))))\n self.plugin_combo.currentTextChanged.connect(self.set_plugin)\n self.plugin_combo.setCurrentText(self.NULL_OPTION)\n\n # create github button (gets connected in self.set_plugin)\n self.github_button = QPushButton('Open issue on GitHub', self)\n self.github_button.setToolTip(\n \"Open a web browser to submit this error log\\n\"\n \"to the developer's GitHub issue tracker\"\n )\n self.github_button.hide()\n\n # create copy to clipboard button\n self.clipboard_button = QPushButton()\n self.clipboard_button.hide()\n self.clipboard_button.setObjectName(\"QtCopyToClipboardButton\")\n self.clipboard_button.setToolTip(\"Copy error log to clipboard\")\n self.clipboard_button.clicked.connect(self.copyToClipboard)\n\n # plugin_meta contains a URL to the home page, (and/or other details)\n self.plugin_meta = QLabel('', parent=self)\n 
self.plugin_meta.setObjectName(\"pluginInfo\")\n self.plugin_meta.setTextFormat(Qt.RichText)\n self.plugin_meta.setTextInteractionFlags(Qt.TextBrowserInteraction)\n self.plugin_meta.setOpenExternalLinks(True)\n self.plugin_meta.setAlignment(Qt.AlignRight)\n\n # make layout\n row_1_layout = QHBoxLayout()\n row_1_layout.setContentsMargins(11, 5, 10, 0)\n row_1_layout.addStretch(1)\n row_1_layout.addWidget(self.plugin_meta)\n row_2_layout = QHBoxLayout()\n row_2_layout.setContentsMargins(11, 5, 10, 0)\n row_2_layout.addWidget(self.plugin_combo)\n row_2_layout.addStretch(1)\n row_2_layout.addWidget(self.github_button)\n row_2_layout.addWidget(self.clipboard_button)\n row_2_layout.setSpacing(5)\n self.layout.addLayout(row_1_layout)\n self.layout.addLayout(row_2_layout)\n self.layout.addWidget(self.text_area, 1)\n self.setMinimumWidth(750)\n self.setMinimumHeight(600)\n\n if initial_plugin:\n self.set_plugin(initial_plugin)\n\n def set_plugin(self, plugin: str) -> None:\n \"\"\"Set the current plugin shown in the dropdown and text area.\n\n Parameters\n ----------\n plugin : str\n name of a plugin that has created an error this session.\n \"\"\"\n self.github_button.hide()\n self.clipboard_button.hide()\n try:\n self.github_button.clicked.disconnect()\n # when disconnecting a non-existent signal\n # PySide2 raises runtimeError, PyQt5 raises TypeError\n except (RuntimeError, TypeError):\n pass\n\n if not plugin or (plugin == self.NULL_OPTION):\n self.plugin_meta.setText('')\n self.text_area.setHtml('')\n return\n\n if not self.plugin_manager.get_errors(plugin):\n raise ValueError(f\"No errors reported for plugin '{plugin}'\")\n self.plugin_combo.setCurrentText(plugin)\n\n err_string = format_exceptions(plugin, as_html=True)\n self.text_area.setHtml(err_string)\n self.clipboard_button.show()\n\n # set metadata and outbound links/buttons\n err0 = self.plugin_manager.get_errors(plugin)[0]\n meta = standard_metadata(err0.plugin) if err0.plugin else {}\n meta_text = ''\n if not meta:\n self.plugin_meta.setText(meta_text)\n return\n\n url = meta.get('url')\n if url:\n meta_text += (\n '<span style=\"color:#999;\">plugin home page:&nbsp;&nbsp;'\n f'</span><a href=\"{url}\" style=\"color:#999\">{url}</a>'\n )\n if 'github.com' in url:\n\n def onclick():\n import webbrowser\n\n err = format_exceptions(plugin, as_html=False)\n err = (\n \"<!--Provide detail on the error here-->\\n\\n\\n\\n\"\n \"<details>\\n<summary>Traceback from napari</summary>\"\n f\"\\n\\n```\\n{err}\\n```\\n</details>\"\n )\n url = f'{meta.get(\"url\")}/issues/new?&body={err}'\n webbrowser.open(url, new=2)\n\n self.github_button.clicked.connect(onclick)\n self.github_button.show()\n self.plugin_meta.setText(meta_text)\n\n def copyToClipboard(self) -> None:\n \"\"\"Copy current plugin traceback info to clipboard as plain text.\"\"\"\n plugin = self.plugin_combo.currentText()\n err_string = format_exceptions(plugin, as_html=False)\n cb = QGuiApplication.clipboard()\n cb.setText(err_string)\n", "path": "napari/_qt/dialogs/qt_plugin_report.py"}, {"content": "from qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QAbstractItemView, QDialog, QLabel, QVBoxLayout\n\nfrom ..widgets.qt_dict_table import QtDictTable\n\n\nclass QtPluginTable(QDialog):\n def __init__(self, parent, plugin_manager=None):\n super().__init__(parent)\n if not plugin_manager:\n from ..plugins import plugin_manager\n\n self.setMaximumHeight(800)\n self.setMaximumWidth(1280)\n layout = QVBoxLayout()\n # maybe someday add a search bar here?\n title = QLabel(\"Installed 
Plugins\")\n title.setObjectName(\"h2\")\n layout.addWidget(title)\n # get metadata for successfully registered plugins\n plugin_manager.discover()\n data = plugin_manager.list_plugin_metadata()\n data = list(filter(lambda x: x['plugin_name'] != 'builtins', data))\n # create a table for it\n self.table = QtDictTable(\n parent,\n data,\n headers=[\n 'plugin_name',\n 'package',\n 'version',\n 'url',\n 'author',\n 'license',\n ],\n min_section_width=60,\n )\n self.table.setObjectName(\"pluginTable\")\n self.table.horizontalHeader().setObjectName(\"pluginTableHeader\")\n self.table.verticalHeader().setObjectName(\"pluginTableHeader\")\n self.table.setGridStyle(Qt.NoPen)\n # prevent editing of table\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n layout.addWidget(self.table)\n self.setLayout(layout)\n self.setAttribute(Qt.WA_DeleteOnClose)\n", "path": "napari/_qt/dialogs/qt_plugin_table.py"}], "after_files": [{"content": "\"\"\"Provides a QtPluginErrReporter that allows the user report plugin errors.\n\"\"\"\nfrom typing import Optional\n\nfrom napari_plugin_engine import PluginManager, standard_metadata\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtGui import QGuiApplication\nfrom qtpy.QtWidgets import (\n QComboBox,\n QDialog,\n QHBoxLayout,\n QLabel,\n QPushButton,\n QTextEdit,\n QVBoxLayout,\n QWidget,\n)\n\nfrom ...plugins.exceptions import format_exceptions\n\n\nclass QtPluginErrReporter(QDialog):\n \"\"\"Dialog that allows users to review and report PluginError tracebacks.\n\n Parameters\n ----------\n parent : QWidget, optional\n Optional parent widget for this widget.\n initial_plugin : str, optional\n If provided, errors from ``initial_plugin`` will be shown when the\n dialog is created, by default None\n\n Attributes\n ----------\n text_area : qtpy.QtWidgets.QTextEdit\n The text area where traceback information will be shown.\n plugin_combo : qtpy.QtWidgets.QComboBox\n The dropdown menu used to select the current plugin\n github_button : qtpy.QtWidgets.QPushButton\n A button that, when pressed, will open an issue at the current plugin's\n github issue tracker, prepopulated with a formatted traceback. Button\n is only visible if a github URL is detected in the package metadata for\n the current plugin.\n clipboard_button : qtpy.QtWidgets.QPushButton\n A button that, when pressed, copies the current traceback information\n to the clipboard. (HTML tags are removed in the copied text.)\n plugin_meta : qtpy.QtWidgets.QLabel\n A label that will show available plugin metadata (such as home page).\n \"\"\"\n\n NULL_OPTION = 'select plugin... 
'\n\n def __init__(\n self,\n plugin_manager: Optional[PluginManager] = None,\n *,\n parent: Optional[QWidget] = None,\n initial_plugin: Optional[str] = None,\n ) -> None:\n super().__init__(parent)\n if not plugin_manager:\n from ...plugins import plugin_manager as _pm\n\n self.plugin_manager = _pm\n else:\n self.plugin_manager = plugin_manager\n\n self.setWindowTitle('Recorded Plugin Exceptions')\n self.setWindowModality(Qt.NonModal)\n self.layout = QVBoxLayout()\n self.layout.setSpacing(0)\n self.layout.setContentsMargins(10, 10, 10, 10)\n self.setLayout(self.layout)\n\n self.text_area = QTextEdit()\n self.text_area.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.text_area.setMinimumWidth(360)\n\n # Create plugin dropdown menu\n self.plugin_combo = QComboBox()\n self.plugin_combo.addItem(self.NULL_OPTION)\n bad_plugins = [e.plugin_name for e in self.plugin_manager.get_errors()]\n self.plugin_combo.addItems(list(sorted(set(bad_plugins))))\n self.plugin_combo.currentTextChanged.connect(self.set_plugin)\n self.plugin_combo.setCurrentText(self.NULL_OPTION)\n\n # create github button (gets connected in self.set_plugin)\n self.github_button = QPushButton('Open issue on GitHub', self)\n self.github_button.setToolTip(\n \"Open a web browser to submit this error log\\n\"\n \"to the developer's GitHub issue tracker\"\n )\n self.github_button.hide()\n\n # create copy to clipboard button\n self.clipboard_button = QPushButton()\n self.clipboard_button.hide()\n self.clipboard_button.setObjectName(\"QtCopyToClipboardButton\")\n self.clipboard_button.setToolTip(\"Copy error log to clipboard\")\n self.clipboard_button.clicked.connect(self.copyToClipboard)\n\n # plugin_meta contains a URL to the home page, (and/or other details)\n self.plugin_meta = QLabel('', parent=self)\n self.plugin_meta.setObjectName(\"pluginInfo\")\n self.plugin_meta.setTextFormat(Qt.RichText)\n self.plugin_meta.setTextInteractionFlags(Qt.TextBrowserInteraction)\n self.plugin_meta.setOpenExternalLinks(True)\n self.plugin_meta.setAlignment(Qt.AlignRight)\n\n # make layout\n row_1_layout = QHBoxLayout()\n row_1_layout.setContentsMargins(11, 5, 10, 0)\n row_1_layout.addStretch(1)\n row_1_layout.addWidget(self.plugin_meta)\n row_2_layout = QHBoxLayout()\n row_2_layout.setContentsMargins(11, 5, 10, 0)\n row_2_layout.addWidget(self.plugin_combo)\n row_2_layout.addStretch(1)\n row_2_layout.addWidget(self.github_button)\n row_2_layout.addWidget(self.clipboard_button)\n row_2_layout.setSpacing(5)\n self.layout.addLayout(row_1_layout)\n self.layout.addLayout(row_2_layout)\n self.layout.addWidget(self.text_area, 1)\n self.setMinimumWidth(750)\n self.setMinimumHeight(600)\n\n if initial_plugin:\n self.set_plugin(initial_plugin)\n\n def set_plugin(self, plugin: str) -> None:\n \"\"\"Set the current plugin shown in the dropdown and text area.\n\n Parameters\n ----------\n plugin : str\n name of a plugin that has created an error this session.\n \"\"\"\n self.github_button.hide()\n self.clipboard_button.hide()\n try:\n self.github_button.clicked.disconnect()\n # when disconnecting a non-existent signal\n # PySide2 raises runtimeError, PyQt5 raises TypeError\n except (RuntimeError, TypeError):\n pass\n\n if not plugin or (plugin == self.NULL_OPTION):\n self.plugin_meta.setText('')\n self.text_area.setHtml('')\n return\n\n if not self.plugin_manager.get_errors(plugin):\n raise ValueError(f\"No errors reported for plugin '{plugin}'\")\n self.plugin_combo.setCurrentText(plugin)\n\n err_string = format_exceptions(plugin, as_html=True)\n 
self.text_area.setHtml(err_string)\n self.clipboard_button.show()\n\n # set metadata and outbound links/buttons\n err0 = self.plugin_manager.get_errors(plugin)[0]\n meta = standard_metadata(err0.plugin) if err0.plugin else {}\n meta_text = ''\n if not meta:\n self.plugin_meta.setText(meta_text)\n return\n\n url = meta.get('url')\n if url:\n meta_text += (\n '<span style=\"color:#999;\">plugin home page:&nbsp;&nbsp;'\n f'</span><a href=\"{url}\" style=\"color:#999\">{url}</a>'\n )\n if 'github.com' in url:\n\n def onclick():\n import webbrowser\n\n err = format_exceptions(plugin, as_html=False)\n err = (\n \"<!--Provide detail on the error here-->\\n\\n\\n\\n\"\n \"<details>\\n<summary>Traceback from napari</summary>\"\n f\"\\n\\n```\\n{err}\\n```\\n</details>\"\n )\n url = f'{meta.get(\"url\")}/issues/new?&body={err}'\n webbrowser.open(url, new=2)\n\n self.github_button.clicked.connect(onclick)\n self.github_button.show()\n self.plugin_meta.setText(meta_text)\n\n def copyToClipboard(self) -> None:\n \"\"\"Copy current plugin traceback info to clipboard as plain text.\"\"\"\n plugin = self.plugin_combo.currentText()\n err_string = format_exceptions(plugin, as_html=False)\n cb = QGuiApplication.clipboard()\n cb.setText(err_string)\n", "path": "napari/_qt/dialogs/qt_plugin_report.py"}, {"content": "from qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QAbstractItemView, QDialog, QLabel, QVBoxLayout\n\nfrom ..widgets.qt_dict_table import QtDictTable\n\n\nclass QtPluginTable(QDialog):\n def __init__(self, parent, plugin_manager=None):\n super().__init__(parent)\n if not plugin_manager:\n from ...plugins import plugin_manager\n\n self.setMaximumHeight(800)\n self.setMaximumWidth(1280)\n layout = QVBoxLayout()\n # maybe someday add a search bar here?\n title = QLabel(\"Installed Plugins\")\n title.setObjectName(\"h2\")\n layout.addWidget(title)\n # get metadata for successfully registered plugins\n plugin_manager.discover()\n data = plugin_manager.list_plugin_metadata()\n data = list(filter(lambda x: x['plugin_name'] != 'builtins', data))\n # create a table for it\n self.table = QtDictTable(\n parent,\n data,\n headers=[\n 'plugin_name',\n 'package',\n 'version',\n 'url',\n 'author',\n 'license',\n ],\n min_section_width=60,\n )\n self.table.setObjectName(\"pluginTable\")\n self.table.horizontalHeader().setObjectName(\"pluginTableHeader\")\n self.table.verticalHeader().setObjectName(\"pluginTableHeader\")\n self.table.setGridStyle(Qt.NoPen)\n # prevent editing of table\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n layout.addWidget(self.table)\n self.setLayout(layout)\n self.setAttribute(Qt.WA_DeleteOnClose)\n", "path": "napari/_qt/dialogs/qt_plugin_table.py"}]}
3,268
250
gh_patches_debug_17769
rasdani/github-patches
git_diff
kivy__python-for-android-2340
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Webview app crashes when trying to request permissions ### Checklist - [X] the issue is indeed a bug and not a support request - [X] issue doesn't already exist: https://github.com/kivy/python-for-android/issues - [X] I have a short, runnable example that reproduces the issue - [X] I reproduced the problem with the latest development version (`p4a.branch = develop`) - [X] I used the grave accent (aka backticks) to format code or logs when appropriated ### Versions - Python: 3.6.6 - OS: Ubuntu 20.04 - python-for-android: 2020.6.2 ### Description I'm building with: ``` HERE=$(pwd) cd app export ANDROIDSDK="$HERE/android_sdks" # Have also tried with # export ANDROIDNDK="$HERE/android_sdks/android-ndk-r19c" export ANDROIDNDK="$HERE/android_sdks/android-ndk-r21d" export ANDROIDAPI="27" # Target API version of your application export NDKAPI="21" # Minimum supported API version of your application p4a apk --private "." --package=org.anarres.pdfreader --name "pdf reader" --version 0.1 --bootstrap=webview --requirements=python3,flask --port=5000 --orientation=sensor --blacklist="../blacklist.txt" --permission READ_EXTERNAL_STORAGE --permission WRITE_EXTERNAL_STORAGE ``` My python code is just: ``` from android.permissions import request_permissions, Permission request_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE]) from flask import Flask, render_template app = Flask(__name__) @app.route('/') def index(): return render_template('index.html') if __name__ == '__main__': app.run(port=5000) ``` If I comment out: ``` from android.permissions import request_permissions, Permission request_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE]) ``` I don't see the error, but then of course I can't access the external storage. ### Logs ``` ImportError: dlopen failed: cannot locate symbol "SDL_ANDROID_GetJNIEnv" referenced by "/data/data/org.anarres.pdfreader/files/app/_python_bundle/site-packages/android/_android.so"... ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pythonforandroid/recipes/android/src/setup.py` Content: ``` 1 from distutils.core import setup, Extension 2 import os 3 4 library_dirs = ['libs/' + os.environ['ARCH']] 5 lib_dict = { 6 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf'] 7 } 8 sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], []) 9 10 modules = [Extension('android._android', 11 ['android/_android.c', 'android/_android_jni.c'], 12 libraries=sdl_libs + ['log'], 13 library_dirs=library_dirs), 14 Extension('android._android_billing', 15 ['android/_android_billing.c', 'android/_android_billing_jni.c'], 16 libraries=['log'], 17 library_dirs=library_dirs)] 18 19 setup(name='android', 20 version='1.0', 21 packages=['android'], 22 package_dir={'android': 'android'}, 23 ext_modules=modules 24 ) 25 ``` Path: `pythonforandroid/recipes/android/__init__.py` Content: ``` 1 from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour 2 from pythonforandroid.util import current_directory 3 from pythonforandroid import logger 4 5 from os.path import join 6 7 8 class AndroidRecipe(IncludedFilesBehaviour, CythonRecipe): 9 # name = 'android' 10 version = None 11 url = None 12 13 src_filename = 'src' 14 15 depends = [('sdl2', 'genericndkbuild'), 'pyjnius'] 16 17 config_env = {} 18 19 def get_recipe_env(self, arch): 20 env = super().get_recipe_env(arch) 21 env.update(self.config_env) 22 return env 23 24 def prebuild_arch(self, arch): 25 super().prebuild_arch(arch) 26 ctx_bootstrap = self.ctx.bootstrap.name 27 28 # define macros for Cython, C, Python 29 tpxi = 'DEF {} = {}\n' 30 th = '#define {} {}\n' 31 tpy = '{} = {}\n' 32 33 # make sure bootstrap name is in unicode 34 if isinstance(ctx_bootstrap, bytes): 35 ctx_bootstrap = ctx_bootstrap.decode('utf-8') 36 bootstrap = bootstrap_name = ctx_bootstrap 37 is_sdl2 = (bootstrap_name == "sdl2") 38 if bootstrap_name in ["sdl2", "webview", "service_only", "service_library"]: 39 java_ns = u'org.kivy.android' 40 jni_ns = u'org/kivy/android' 41 else: 42 logger.error(( 43 'unsupported bootstrap for android recipe: {}' 44 ''.format(bootstrap_name) 45 )) 46 exit(1) 47 48 config = { 49 'BOOTSTRAP': bootstrap, 50 'IS_SDL2': int(is_sdl2), 51 'PY2': 0, 52 'JAVA_NAMESPACE': java_ns, 53 'JNI_NAMESPACE': jni_ns, 54 'ACTIVITY_CLASS_NAME': self.ctx.activity_class_name, 55 'ACTIVITY_CLASS_NAMESPACE': self.ctx.activity_class_name.replace('.', '/'), 56 } 57 58 # create config files for Cython, C and Python 59 with ( 60 current_directory(self.get_build_dir(arch.arch))), ( 61 open(join('android', 'config.pxi'), 'w')) as fpxi, ( 62 open(join('android', 'config.h'), 'w')) as fh, ( 63 open(join('android', 'config.py'), 'w')) as fpy: 64 65 for key, value in config.items(): 66 fpxi.write(tpxi.format(key, repr(value))) 67 fpy.write(tpy.format(key, repr(value))) 68 69 fh.write(th.format( 70 key, 71 value if isinstance(value, int) else '"{}"'.format(value) 72 )) 73 self.config_env[key] = str(value) 74 75 if is_sdl2: 76 fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\n') 77 fh.write( 78 '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\n' 79 ) 80 81 82 recipe = AndroidRecipe() 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pythonforandroid/recipes/android/__init__.py b/pythonforandroid/recipes/android/__init__.py --- a/pythonforandroid/recipes/android/__init__.py +++ b/pythonforandroid/recipes/android/__init__.py @@ -77,6 +77,11 @@ fh.write( '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\n' ) + else: + fh.write('JNIEnv *WebView_AndroidGetJNIEnv(void);\n') + fh.write( + '#define SDL_ANDROID_GetJNIEnv WebView_AndroidGetJNIEnv\n' + ) recipe = AndroidRecipe() diff --git a/pythonforandroid/recipes/android/src/setup.py b/pythonforandroid/recipes/android/src/setup.py --- a/pythonforandroid/recipes/android/src/setup.py +++ b/pythonforandroid/recipes/android/src/setup.py @@ -5,7 +5,7 @@ lib_dict = { 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf'] } -sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], []) +sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], ['main']) modules = [Extension('android._android', ['android/_android.c', 'android/_android_jni.c'],
{"golden_diff": "diff --git a/pythonforandroid/recipes/android/__init__.py b/pythonforandroid/recipes/android/__init__.py\n--- a/pythonforandroid/recipes/android/__init__.py\n+++ b/pythonforandroid/recipes/android/__init__.py\n@@ -77,6 +77,11 @@\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\\n'\n )\n+ else:\n+ fh.write('JNIEnv *WebView_AndroidGetJNIEnv(void);\\n')\n+ fh.write(\n+ '#define SDL_ANDROID_GetJNIEnv WebView_AndroidGetJNIEnv\\n'\n+ )\n \n \n recipe = AndroidRecipe()\ndiff --git a/pythonforandroid/recipes/android/src/setup.py b/pythonforandroid/recipes/android/src/setup.py\n--- a/pythonforandroid/recipes/android/src/setup.py\n+++ b/pythonforandroid/recipes/android/src/setup.py\n@@ -5,7 +5,7 @@\n lib_dict = {\n 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']\n }\n-sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], [])\n+sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], ['main'])\n \n modules = [Extension('android._android',\n ['android/_android.c', 'android/_android_jni.c'],\n", "issue": "Webview app crashes when trying to request permissions\n### Checklist\r\n\r\n- [X] the issue is indeed a bug and not a support request\r\n- [X] issue doesn't already exist: https://github.com/kivy/python-for-android/issues\r\n- [X] I have a short, runnable example that reproduces the issue\r\n- [X] I reproduced the problem with the latest development version (`p4a.branch = develop`)\r\n- [X] I used the grave accent (aka backticks) to format code or logs when appropriated\r\n\r\n### Versions\r\n\r\n- Python: 3.6.6\r\n- OS: Ubuntu 20.04\r\n- python-for-android: 2020.6.2\r\n\r\n### Description\r\n\r\nI'm building with:\r\n\r\n```\r\nHERE=$(pwd)\r\ncd app\r\n\r\nexport ANDROIDSDK=\"$HERE/android_sdks\"\r\n# Have also tried with\r\n# export ANDROIDNDK=\"$HERE/android_sdks/android-ndk-r19c\"\r\nexport ANDROIDNDK=\"$HERE/android_sdks/android-ndk-r21d\"\r\nexport ANDROIDAPI=\"27\" # Target API version of your application\r\nexport NDKAPI=\"21\" # Minimum supported API version of your application\r\n\r\np4a apk --private \".\" --package=org.anarres.pdfreader --name \"pdf reader\" --version 0.1 --bootstrap=webview --requirements=python3,flask --port=5000 --orientation=sensor --blacklist=\"../blacklist.txt\" --permission READ_EXTERNAL_STORAGE --permission WRITE_EXTERNAL_STORAGE\r\n```\r\n\r\nMy python code is just:\r\n\r\n```\r\nfrom android.permissions import request_permissions, Permission\r\nrequest_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])\r\n\r\nfrom flask import Flask, render_template\r\n\r\napp = Flask(__name__)\r\n\r\[email protected]('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\nif __name__ == '__main__':\r\n app.run(port=5000)\r\n```\r\n\r\nIf I comment out:\r\n\r\n```\r\nfrom android.permissions import request_permissions, Permission\r\nrequest_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])\r\n```\r\n\r\nI don't see the error, but then of course I can't access the external storage.\r\n\r\n### Logs\r\n\r\n```\r\nImportError: dlopen failed: cannot locate symbol \"SDL_ANDROID_GetJNIEnv\" referenced by \"/data/data/org.anarres.pdfreader/files/app/_python_bundle/site-packages/android/_android.so\"...\r\n```\r\n\n", "before_files": [{"content": "from distutils.core import setup, Extension\nimport os\n\nlibrary_dirs = ['libs/' + os.environ['ARCH']]\nlib_dict = {\n 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']\n}\nsdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], 
[])\n\nmodules = [Extension('android._android',\n ['android/_android.c', 'android/_android_jni.c'],\n libraries=sdl_libs + ['log'],\n library_dirs=library_dirs),\n Extension('android._android_billing',\n ['android/_android_billing.c', 'android/_android_billing_jni.c'],\n libraries=['log'],\n library_dirs=library_dirs)]\n\nsetup(name='android',\n version='1.0',\n packages=['android'],\n package_dir={'android': 'android'},\n ext_modules=modules\n )\n", "path": "pythonforandroid/recipes/android/src/setup.py"}, {"content": "from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour\nfrom pythonforandroid.util import current_directory\nfrom pythonforandroid import logger\n\nfrom os.path import join\n\n\nclass AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):\n # name = 'android'\n version = None\n url = None\n\n src_filename = 'src'\n\n depends = [('sdl2', 'genericndkbuild'), 'pyjnius']\n\n config_env = {}\n\n def get_recipe_env(self, arch):\n env = super().get_recipe_env(arch)\n env.update(self.config_env)\n return env\n\n def prebuild_arch(self, arch):\n super().prebuild_arch(arch)\n ctx_bootstrap = self.ctx.bootstrap.name\n\n # define macros for Cython, C, Python\n tpxi = 'DEF {} = {}\\n'\n th = '#define {} {}\\n'\n tpy = '{} = {}\\n'\n\n # make sure bootstrap name is in unicode\n if isinstance(ctx_bootstrap, bytes):\n ctx_bootstrap = ctx_bootstrap.decode('utf-8')\n bootstrap = bootstrap_name = ctx_bootstrap\n is_sdl2 = (bootstrap_name == \"sdl2\")\n if bootstrap_name in [\"sdl2\", \"webview\", \"service_only\", \"service_library\"]:\n java_ns = u'org.kivy.android'\n jni_ns = u'org/kivy/android'\n else:\n logger.error((\n 'unsupported bootstrap for android recipe: {}'\n ''.format(bootstrap_name)\n ))\n exit(1)\n\n config = {\n 'BOOTSTRAP': bootstrap,\n 'IS_SDL2': int(is_sdl2),\n 'PY2': 0,\n 'JAVA_NAMESPACE': java_ns,\n 'JNI_NAMESPACE': jni_ns,\n 'ACTIVITY_CLASS_NAME': self.ctx.activity_class_name,\n 'ACTIVITY_CLASS_NAMESPACE': self.ctx.activity_class_name.replace('.', '/'),\n }\n\n # create config files for Cython, C and Python\n with (\n current_directory(self.get_build_dir(arch.arch))), (\n open(join('android', 'config.pxi'), 'w')) as fpxi, (\n open(join('android', 'config.h'), 'w')) as fh, (\n open(join('android', 'config.py'), 'w')) as fpy:\n\n for key, value in config.items():\n fpxi.write(tpxi.format(key, repr(value)))\n fpy.write(tpy.format(key, repr(value)))\n\n fh.write(th.format(\n key,\n value if isinstance(value, int) else '\"{}\"'.format(value)\n ))\n self.config_env[key] = str(value)\n\n if is_sdl2:\n fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\\n')\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\\n'\n )\n\n\nrecipe = AndroidRecipe()\n", "path": "pythonforandroid/recipes/android/__init__.py"}], "after_files": [{"content": "from distutils.core import setup, Extension\nimport os\n\nlibrary_dirs = ['libs/' + os.environ['ARCH']]\nlib_dict = {\n 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']\n}\nsdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], ['main'])\n\nmodules = [Extension('android._android',\n ['android/_android.c', 'android/_android_jni.c'],\n libraries=sdl_libs + ['log'],\n library_dirs=library_dirs),\n Extension('android._android_billing',\n ['android/_android_billing.c', 'android/_android_billing_jni.c'],\n libraries=['log'],\n library_dirs=library_dirs)]\n\nsetup(name='android',\n version='1.0',\n packages=['android'],\n package_dir={'android': 'android'},\n ext_modules=modules\n )\n", "path": 
"pythonforandroid/recipes/android/src/setup.py"}, {"content": "from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour\nfrom pythonforandroid.util import current_directory\nfrom pythonforandroid import logger\n\nfrom os.path import join\n\n\nclass AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):\n # name = 'android'\n version = None\n url = None\n\n src_filename = 'src'\n\n depends = [('sdl2', 'genericndkbuild'), 'pyjnius']\n\n config_env = {}\n\n def get_recipe_env(self, arch):\n env = super().get_recipe_env(arch)\n env.update(self.config_env)\n return env\n\n def prebuild_arch(self, arch):\n super().prebuild_arch(arch)\n ctx_bootstrap = self.ctx.bootstrap.name\n\n # define macros for Cython, C, Python\n tpxi = 'DEF {} = {}\\n'\n th = '#define {} {}\\n'\n tpy = '{} = {}\\n'\n\n # make sure bootstrap name is in unicode\n if isinstance(ctx_bootstrap, bytes):\n ctx_bootstrap = ctx_bootstrap.decode('utf-8')\n bootstrap = bootstrap_name = ctx_bootstrap\n is_sdl2 = (bootstrap_name == \"sdl2\")\n if bootstrap_name in [\"sdl2\", \"webview\", \"service_only\", \"service_library\"]:\n java_ns = u'org.kivy.android'\n jni_ns = u'org/kivy/android'\n else:\n logger.error((\n 'unsupported bootstrap for android recipe: {}'\n ''.format(bootstrap_name)\n ))\n exit(1)\n\n config = {\n 'BOOTSTRAP': bootstrap,\n 'IS_SDL2': int(is_sdl2),\n 'PY2': 0,\n 'JAVA_NAMESPACE': java_ns,\n 'JNI_NAMESPACE': jni_ns,\n 'ACTIVITY_CLASS_NAME': self.ctx.activity_class_name,\n 'ACTIVITY_CLASS_NAMESPACE': self.ctx.activity_class_name.replace('.', '/'),\n }\n\n # create config files for Cython, C and Python\n with (\n current_directory(self.get_build_dir(arch.arch))), (\n open(join('android', 'config.pxi'), 'w')) as fpxi, (\n open(join('android', 'config.h'), 'w')) as fh, (\n open(join('android', 'config.py'), 'w')) as fpy:\n\n for key, value in config.items():\n fpxi.write(tpxi.format(key, repr(value)))\n fpy.write(tpy.format(key, repr(value)))\n\n fh.write(th.format(\n key,\n value if isinstance(value, int) else '\"{}\"'.format(value)\n ))\n self.config_env[key] = str(value)\n\n if is_sdl2:\n fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\\n')\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\\n'\n )\n else:\n fh.write('JNIEnv *WebView_AndroidGetJNIEnv(void);\\n')\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv WebView_AndroidGetJNIEnv\\n'\n )\n\n\nrecipe = AndroidRecipe()\n", "path": "pythonforandroid/recipes/android/__init__.py"}]}
1800
283
gh_patches_debug_40385
rasdani/github-patches
git_diff
learningequality__kolibri-8277
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make learner polling indicator conditional for only SoUD For the new polling indicator on the dropdown menu, this should be updated to appear and poll only when the user is a learner who is logged in on a subset of users device. <img width="270" alt="Screen Shot 2021-08-05 at 10 58 50 AM" src="https://user-images.githubusercontent.com/17235236/128372343-da85c951-2258-4d5e-ade3-a15b753e6806.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/plugins/facility/kolibri_plugin.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import print_function 3 from __future__ import unicode_literals 4 5 from kolibri.core.auth.constants.user_kinds import ADMIN 6 from kolibri.core.hooks import NavigationHook 7 from kolibri.core.hooks import RoleBasedRedirectHook 8 from kolibri.core.webpack.hooks import WebpackBundleHook 9 from kolibri.plugins import KolibriPluginBase 10 from kolibri.plugins.hooks import register_hook 11 12 13 class FacilityManagementPlugin(KolibriPluginBase): 14 translated_view_urls = "urls" 15 untranslated_view_urls = "api_urls" 16 17 18 @register_hook 19 class FacilityManagementAsset(WebpackBundleHook): 20 bundle_id = "app" 21 22 23 @register_hook 24 class FacilityRedirect(RoleBasedRedirectHook): 25 roles = (ADMIN,) 26 27 @property 28 def url(self): 29 return self.plugin_url(FacilityManagementPlugin, "facility_management") 30 31 32 @register_hook 33 class FacilityManagementNavItem(NavigationHook): 34 bundle_id = "side_nav" 35 ``` Path: `kolibri/plugins/coach/kolibri_plugin.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import print_function 3 from __future__ import unicode_literals 4 5 from kolibri.core.auth.constants.user_kinds import COACH 6 from kolibri.core.hooks import NavigationHook 7 from kolibri.core.hooks import RoleBasedRedirectHook 8 from kolibri.core.webpack import hooks as webpack_hooks 9 from kolibri.plugins import KolibriPluginBase 10 from kolibri.plugins.hooks import register_hook 11 12 13 class Coach(KolibriPluginBase): 14 untranslated_view_urls = "api_urls" 15 translated_view_urls = "urls" 16 17 18 @register_hook 19 class CoachRedirect(RoleBasedRedirectHook): 20 roles = (COACH,) 21 22 @property 23 def url(self): 24 return self.plugin_url(Coach, "coach") 25 26 27 @register_hook 28 class CoachNavItem(NavigationHook): 29 bundle_id = "side_nav" 30 31 32 @register_hook 33 class CoachAsset(webpack_hooks.WebpackBundleHook): 34 bundle_id = "app" 35 ``` Path: `kolibri/core/kolibri_plugin.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import print_function 3 from __future__ import unicode_literals 4 5 from django.conf import settings 6 from django.contrib.staticfiles.templatetags.staticfiles import static 7 from django.core.urlresolvers import get_resolver 8 from django.core.urlresolvers import reverse 9 from django.template.loader import render_to_string 10 from django.utils.html import mark_safe 11 from django.utils.translation import get_language 12 from django.utils.translation import get_language_bidi 13 from django.utils.translation import get_language_info 14 from django_js_reverse.core import _safe_json 15 from django_js_reverse.core import generate_json 16 from django_js_reverse.rjsmin import jsmin 17 18 import kolibri 19 from kolibri.core.content.utils.paths import 
get_content_storage_url 20 from kolibri.core.content.utils.paths import get_hashi_path 21 from kolibri.core.content.utils.paths import get_zip_content_base_path 22 from kolibri.core.content.utils.paths import get_zip_content_config 23 from kolibri.core.device.models import ContentCacheKey 24 from kolibri.core.device.utils import allow_other_browsers_to_connect 25 from kolibri.core.hooks import NavigationHook 26 from kolibri.core.oidc_provider_hook import OIDCProviderHook 27 from kolibri.core.theme_hook import ThemeHook 28 from kolibri.core.webpack.hooks import WebpackBundleHook 29 from kolibri.plugins.app.utils import interface 30 from kolibri.plugins.hooks import register_hook 31 from kolibri.utils import i18n 32 from kolibri.utils.conf import OPTIONS 33 34 35 @register_hook 36 class FrontEndCoreAppAssetHook(WebpackBundleHook): 37 bundle_id = "default_frontend" 38 39 def url_tag(self): 40 # Modified from: 41 # https://github.com/ierror/django-js-reverse/blob/master/django_js_reverse/core.py#L101 42 js_name = "window.kolibriPluginDataGlobal['{bundle}'].urls".format( 43 bundle=self.unique_id 44 ) 45 default_urlresolver = get_resolver(None) 46 47 data = generate_json(default_urlresolver) 48 49 # Generate the JS that exposes functions to reverse all Django URLs 50 # in the frontend. 51 js = render_to_string( 52 "django_js_reverse/urls_js.tpl", 53 {"data": _safe_json(data), "js_name": "__placeholder__"}, 54 # For some reason the js_name gets escaped going into the template 55 # so this was the easiest way to inject it. 56 ).replace("__placeholder__", js_name) 57 zip_content_origin, zip_content_port = get_zip_content_config() 58 return [ 59 mark_safe( 60 """<script type="text/javascript">""" 61 # Minify the generated Javascript 62 + jsmin(js) 63 # Add URL references for our base static URL, the Django media URL 64 # and our content storage URL - this allows us to calculate 65 # the path at which to access a local file on the frontend if needed. 66 + """ 67 {js_name}.__staticUrl = '{static_url}'; 68 {js_name}.__mediaUrl = '{media_url}'; 69 {js_name}.__contentUrl = '{content_url}'; 70 {js_name}.__zipContentUrl = '{zip_content_url}'; 71 {js_name}.__hashiUrl = '{hashi_url}'; 72 {js_name}.__zipContentOrigin = '{zip_content_origin}'; 73 {js_name}.__zipContentPort = {zip_content_port}; 74 </script> 75 """.format( 76 js_name=js_name, 77 static_url=settings.STATIC_URL, 78 media_url=settings.MEDIA_URL, 79 content_url=get_content_storage_url( 80 baseurl=OPTIONS["Deployment"]["URL_PATH_PREFIX"] 81 ), 82 zip_content_url=get_zip_content_base_path(), 83 hashi_url=get_hashi_path(), 84 zip_content_origin=zip_content_origin, 85 zip_content_port=zip_content_port, 86 ) 87 ) 88 ] 89 90 def navigation_tags(self): 91 return [ 92 hook.render_to_page_load_sync_html() 93 for hook in NavigationHook.registered_hooks 94 ] 95 96 def render_to_page_load_sync_html(self): 97 """ 98 Don't render the frontend message files in the usual way 99 as the global object to register them does not exist yet. 100 Instead they are loaded through plugin data. 
101 """ 102 tags = ( 103 self.plugin_data_tag() 104 + self.url_tag() 105 + list(self.js_and_css_tags()) 106 + self.navigation_tags() 107 ) 108 109 return mark_safe("\n".join(tags)) 110 111 @property 112 def plugin_data(self): 113 language_code = get_language() 114 static_root = static("assets/fonts/noto-full") 115 full_file = "{}.{}.{}.css?v={}" 116 return { 117 "fullCSSFileModern": full_file.format( 118 static_root, language_code, "modern", kolibri.__version__ 119 ), 120 "fullCSSFileBasic": full_file.format( 121 static_root, language_code, "basic", kolibri.__version__ 122 ), 123 "allowRemoteAccess": allow_other_browsers_to_connect() 124 or not interface.enabled, 125 "appCapabilities": interface.capabilities, 126 "contentCacheKey": ContentCacheKey.get_cache_key(), 127 "languageGlobals": self.language_globals(), 128 "oidcProviderEnabled": OIDCProviderHook.is_enabled(), 129 "kolibriTheme": ThemeHook.get_theme(), 130 } 131 132 def language_globals(self): 133 language_code = get_language() 134 lang_dir = "rtl" if get_language_bidi() else "ltr" 135 136 languages = {} 137 for code, language_name in settings.LANGUAGES: 138 lang_info = i18n.KOLIBRI_LANGUAGE_INFO[code] 139 languages[code] = { 140 # Format to match the schema of the content Language model 141 "id": code, 142 "lang_name": language_name, 143 "english_name": lang_info["english_name"] 144 if lang_info 145 else get_language_info(code)["name"], 146 "lang_direction": get_language_info(code)["bidi"], 147 } 148 return { 149 "coreLanguageMessages": self.frontend_messages(), 150 "languageCode": language_code, 151 "languageDir": lang_dir, 152 "languages": languages, 153 } 154 155 156 @register_hook 157 class FrontendHeadAssetsHook(WebpackBundleHook): 158 """ 159 Render these assets in the <head> tag of base.html, before other JS and assets. 160 """ 161 162 bundle_id = "frontend_head_assets" 163 inline = True 164 165 def render_to_page_load_sync_html(self): 166 """ 167 Add in the extra language font file tags needed 168 for preloading our custom font files. 169 """ 170 tags = ( 171 self.plugin_data_tag() 172 + self.language_font_file_tags() 173 + self.frontend_message_tag() 174 + list(self.js_and_css_tags()) 175 ) 176 177 return mark_safe("\n".join(tags)) 178 179 def language_font_file_tags(self): 180 language_code = get_language() 181 common_file = static("assets/fonts/noto-common.css") 182 subset_file = static("assets/fonts/noto-subset.{}.css".format(language_code)) 183 return [ 184 '<link type="text/css" href="{common_css_file}?v={version}" rel="stylesheet"/>'.format( 185 common_css_file=common_file, version=kolibri.__version__ 186 ), 187 '<link type="text/css" href="{subset_css_file}?v={version}" rel="stylesheet"/>'.format( 188 subset_css_file=subset_file, version=kolibri.__version__ 189 ), 190 ] 191 192 @property 193 def plugin_data(self): 194 return {"unsupportedUrl": reverse("kolibri:core:unsupported")} 195 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/core/kolibri_plugin.py b/kolibri/core/kolibri_plugin.py --- a/kolibri/core/kolibri_plugin.py +++ b/kolibri/core/kolibri_plugin.py @@ -22,6 +22,7 @@ from kolibri.core.content.utils.paths import get_zip_content_config from kolibri.core.device.models import ContentCacheKey from kolibri.core.device.utils import allow_other_browsers_to_connect +from kolibri.core.device.utils import get_device_setting from kolibri.core.hooks import NavigationHook from kolibri.core.oidc_provider_hook import OIDCProviderHook from kolibri.core.theme_hook import ThemeHook @@ -127,6 +128,7 @@ "languageGlobals": self.language_globals(), "oidcProviderEnabled": OIDCProviderHook.is_enabled(), "kolibriTheme": ThemeHook.get_theme(), + "isSubsetOfUsersDevice": get_device_setting("subset_of_users_device"), } def language_globals(self): diff --git a/kolibri/plugins/coach/kolibri_plugin.py b/kolibri/plugins/coach/kolibri_plugin.py --- a/kolibri/plugins/coach/kolibri_plugin.py +++ b/kolibri/plugins/coach/kolibri_plugin.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals from kolibri.core.auth.constants.user_kinds import COACH +from kolibri.core.device.utils import get_device_setting from kolibri.core.hooks import NavigationHook from kolibri.core.hooks import RoleBasedRedirectHook from kolibri.core.webpack import hooks as webpack_hooks @@ -12,7 +13,14 @@ class Coach(KolibriPluginBase): untranslated_view_urls = "api_urls" - translated_view_urls = "urls" + + @property + def translated_view_urls(self): + # On an SoUD this plugin should be disabled. In lieu of properly + # disabling the plugin, we will just not register any urls for now + if not get_device_setting("subset_of_users_device", False): + return "urls" + return None @register_hook diff --git a/kolibri/plugins/facility/kolibri_plugin.py b/kolibri/plugins/facility/kolibri_plugin.py --- a/kolibri/plugins/facility/kolibri_plugin.py +++ b/kolibri/plugins/facility/kolibri_plugin.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals from kolibri.core.auth.constants.user_kinds import ADMIN +from kolibri.core.device.utils import get_device_setting from kolibri.core.hooks import NavigationHook from kolibri.core.hooks import RoleBasedRedirectHook from kolibri.core.webpack.hooks import WebpackBundleHook @@ -11,9 +12,16 @@ class FacilityManagementPlugin(KolibriPluginBase): - translated_view_urls = "urls" untranslated_view_urls = "api_urls" + @property + def translated_view_urls(self): + # On an SoUD this plugin should be disabled. In lieu of properly + # disabling the plugin, we will just not register any urls for now + if not get_device_setting("subset_of_users_device", False): + return "urls" + return None + @register_hook class FacilityManagementAsset(WebpackBundleHook):
{"golden_diff": "diff --git a/kolibri/core/kolibri_plugin.py b/kolibri/core/kolibri_plugin.py\n--- a/kolibri/core/kolibri_plugin.py\n+++ b/kolibri/core/kolibri_plugin.py\n@@ -22,6 +22,7 @@\n from kolibri.core.content.utils.paths import get_zip_content_config\n from kolibri.core.device.models import ContentCacheKey\n from kolibri.core.device.utils import allow_other_browsers_to_connect\n+from kolibri.core.device.utils import get_device_setting\n from kolibri.core.hooks import NavigationHook\n from kolibri.core.oidc_provider_hook import OIDCProviderHook\n from kolibri.core.theme_hook import ThemeHook\n@@ -127,6 +128,7 @@\n \"languageGlobals\": self.language_globals(),\n \"oidcProviderEnabled\": OIDCProviderHook.is_enabled(),\n \"kolibriTheme\": ThemeHook.get_theme(),\n+ \"isSubsetOfUsersDevice\": get_device_setting(\"subset_of_users_device\"),\n }\n \n def language_globals(self):\ndiff --git a/kolibri/plugins/coach/kolibri_plugin.py b/kolibri/plugins/coach/kolibri_plugin.py\n--- a/kolibri/plugins/coach/kolibri_plugin.py\n+++ b/kolibri/plugins/coach/kolibri_plugin.py\n@@ -3,6 +3,7 @@\n from __future__ import unicode_literals\n \n from kolibri.core.auth.constants.user_kinds import COACH\n+from kolibri.core.device.utils import get_device_setting\n from kolibri.core.hooks import NavigationHook\n from kolibri.core.hooks import RoleBasedRedirectHook\n from kolibri.core.webpack import hooks as webpack_hooks\n@@ -12,7 +13,14 @@\n \n class Coach(KolibriPluginBase):\n untranslated_view_urls = \"api_urls\"\n- translated_view_urls = \"urls\"\n+\n+ @property\n+ def translated_view_urls(self):\n+ # On an SoUD this plugin should be disabled. In lieu of properly\n+ # disabling the plugin, we will just not register any urls for now\n+ if not get_device_setting(\"subset_of_users_device\", False):\n+ return \"urls\"\n+ return None\n \n \n @register_hook\ndiff --git a/kolibri/plugins/facility/kolibri_plugin.py b/kolibri/plugins/facility/kolibri_plugin.py\n--- a/kolibri/plugins/facility/kolibri_plugin.py\n+++ b/kolibri/plugins/facility/kolibri_plugin.py\n@@ -3,6 +3,7 @@\n from __future__ import unicode_literals\n \n from kolibri.core.auth.constants.user_kinds import ADMIN\n+from kolibri.core.device.utils import get_device_setting\n from kolibri.core.hooks import NavigationHook\n from kolibri.core.hooks import RoleBasedRedirectHook\n from kolibri.core.webpack.hooks import WebpackBundleHook\n@@ -11,9 +12,16 @@\n \n \n class FacilityManagementPlugin(KolibriPluginBase):\n- translated_view_urls = \"urls\"\n untranslated_view_urls = \"api_urls\"\n \n+ @property\n+ def translated_view_urls(self):\n+ # On an SoUD this plugin should be disabled. In lieu of properly\n+ # disabling the plugin, we will just not register any urls for now\n+ if not get_device_setting(\"subset_of_users_device\", False):\n+ return \"urls\"\n+ return None\n+\n \n @register_hook\n class FacilityManagementAsset(WebpackBundleHook):\n", "issue": "Make learner polling indicator conditional for only SoUD\nFor the new polling indicator on the dropdown menu, this should be updated to appear and poll only when the user is a learner who is logged in on a subset of users device. 
\r\n\r\n<img width=\"270\" alt=\"Screen Shot 2021-08-05 at 10 58 50 AM\" src=\"https://user-images.githubusercontent.com/17235236/128372343-da85c951-2258-4d5e-ade3-a15b753e6806.png\">\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom kolibri.core.auth.constants.user_kinds import ADMIN\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\nfrom kolibri.core.webpack.hooks import WebpackBundleHook\nfrom kolibri.plugins import KolibriPluginBase\nfrom kolibri.plugins.hooks import register_hook\n\n\nclass FacilityManagementPlugin(KolibriPluginBase):\n translated_view_urls = \"urls\"\n untranslated_view_urls = \"api_urls\"\n\n\n@register_hook\nclass FacilityManagementAsset(WebpackBundleHook):\n bundle_id = \"app\"\n\n\n@register_hook\nclass FacilityRedirect(RoleBasedRedirectHook):\n roles = (ADMIN,)\n\n @property\n def url(self):\n return self.plugin_url(FacilityManagementPlugin, \"facility_management\")\n\n\n@register_hook\nclass FacilityManagementNavItem(NavigationHook):\n bundle_id = \"side_nav\"\n", "path": "kolibri/plugins/facility/kolibri_plugin.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom kolibri.core.auth.constants.user_kinds import COACH\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\nfrom kolibri.core.webpack import hooks as webpack_hooks\nfrom kolibri.plugins import KolibriPluginBase\nfrom kolibri.plugins.hooks import register_hook\n\n\nclass Coach(KolibriPluginBase):\n untranslated_view_urls = \"api_urls\"\n translated_view_urls = \"urls\"\n\n\n@register_hook\nclass CoachRedirect(RoleBasedRedirectHook):\n roles = (COACH,)\n\n @property\n def url(self):\n return self.plugin_url(Coach, \"coach\")\n\n\n@register_hook\nclass CoachNavItem(NavigationHook):\n bundle_id = \"side_nav\"\n\n\n@register_hook\nclass CoachAsset(webpack_hooks.WebpackBundleHook):\n bundle_id = \"app\"\n", "path": "kolibri/plugins/coach/kolibri_plugin.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import get_resolver\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.html import mark_safe\nfrom django.utils.translation import get_language\nfrom django.utils.translation import get_language_bidi\nfrom django.utils.translation import get_language_info\nfrom django_js_reverse.core import _safe_json\nfrom django_js_reverse.core import generate_json\nfrom django_js_reverse.rjsmin import jsmin\n\nimport kolibri\nfrom kolibri.core.content.utils.paths import get_content_storage_url\nfrom kolibri.core.content.utils.paths import get_hashi_path\nfrom kolibri.core.content.utils.paths import get_zip_content_base_path\nfrom kolibri.core.content.utils.paths import get_zip_content_config\nfrom kolibri.core.device.models import ContentCacheKey\nfrom kolibri.core.device.utils import allow_other_browsers_to_connect\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.oidc_provider_hook import OIDCProviderHook\nfrom kolibri.core.theme_hook import ThemeHook\nfrom kolibri.core.webpack.hooks import WebpackBundleHook\nfrom 
kolibri.plugins.app.utils import interface\nfrom kolibri.plugins.hooks import register_hook\nfrom kolibri.utils import i18n\nfrom kolibri.utils.conf import OPTIONS\n\n\n@register_hook\nclass FrontEndCoreAppAssetHook(WebpackBundleHook):\n bundle_id = \"default_frontend\"\n\n def url_tag(self):\n # Modified from:\n # https://github.com/ierror/django-js-reverse/blob/master/django_js_reverse/core.py#L101\n js_name = \"window.kolibriPluginDataGlobal['{bundle}'].urls\".format(\n bundle=self.unique_id\n )\n default_urlresolver = get_resolver(None)\n\n data = generate_json(default_urlresolver)\n\n # Generate the JS that exposes functions to reverse all Django URLs\n # in the frontend.\n js = render_to_string(\n \"django_js_reverse/urls_js.tpl\",\n {\"data\": _safe_json(data), \"js_name\": \"__placeholder__\"},\n # For some reason the js_name gets escaped going into the template\n # so this was the easiest way to inject it.\n ).replace(\"__placeholder__\", js_name)\n zip_content_origin, zip_content_port = get_zip_content_config()\n return [\n mark_safe(\n \"\"\"<script type=\"text/javascript\">\"\"\"\n # Minify the generated Javascript\n + jsmin(js)\n # Add URL references for our base static URL, the Django media URL\n # and our content storage URL - this allows us to calculate\n # the path at which to access a local file on the frontend if needed.\n + \"\"\"\n {js_name}.__staticUrl = '{static_url}';\n {js_name}.__mediaUrl = '{media_url}';\n {js_name}.__contentUrl = '{content_url}';\n {js_name}.__zipContentUrl = '{zip_content_url}';\n {js_name}.__hashiUrl = '{hashi_url}';\n {js_name}.__zipContentOrigin = '{zip_content_origin}';\n {js_name}.__zipContentPort = {zip_content_port};\n </script>\n \"\"\".format(\n js_name=js_name,\n static_url=settings.STATIC_URL,\n media_url=settings.MEDIA_URL,\n content_url=get_content_storage_url(\n baseurl=OPTIONS[\"Deployment\"][\"URL_PATH_PREFIX\"]\n ),\n zip_content_url=get_zip_content_base_path(),\n hashi_url=get_hashi_path(),\n zip_content_origin=zip_content_origin,\n zip_content_port=zip_content_port,\n )\n )\n ]\n\n def navigation_tags(self):\n return [\n hook.render_to_page_load_sync_html()\n for hook in NavigationHook.registered_hooks\n ]\n\n def render_to_page_load_sync_html(self):\n \"\"\"\n Don't render the frontend message files in the usual way\n as the global object to register them does not exist yet.\n Instead they are loaded through plugin data.\n \"\"\"\n tags = (\n self.plugin_data_tag()\n + self.url_tag()\n + list(self.js_and_css_tags())\n + self.navigation_tags()\n )\n\n return mark_safe(\"\\n\".join(tags))\n\n @property\n def plugin_data(self):\n language_code = get_language()\n static_root = static(\"assets/fonts/noto-full\")\n full_file = \"{}.{}.{}.css?v={}\"\n return {\n \"fullCSSFileModern\": full_file.format(\n static_root, language_code, \"modern\", kolibri.__version__\n ),\n \"fullCSSFileBasic\": full_file.format(\n static_root, language_code, \"basic\", kolibri.__version__\n ),\n \"allowRemoteAccess\": allow_other_browsers_to_connect()\n or not interface.enabled,\n \"appCapabilities\": interface.capabilities,\n \"contentCacheKey\": ContentCacheKey.get_cache_key(),\n \"languageGlobals\": self.language_globals(),\n \"oidcProviderEnabled\": OIDCProviderHook.is_enabled(),\n \"kolibriTheme\": ThemeHook.get_theme(),\n }\n\n def language_globals(self):\n language_code = get_language()\n lang_dir = \"rtl\" if get_language_bidi() else \"ltr\"\n\n languages = {}\n for code, language_name in settings.LANGUAGES:\n lang_info = 
i18n.KOLIBRI_LANGUAGE_INFO[code]\n languages[code] = {\n # Format to match the schema of the content Language model\n \"id\": code,\n \"lang_name\": language_name,\n \"english_name\": lang_info[\"english_name\"]\n if lang_info\n else get_language_info(code)[\"name\"],\n \"lang_direction\": get_language_info(code)[\"bidi\"],\n }\n return {\n \"coreLanguageMessages\": self.frontend_messages(),\n \"languageCode\": language_code,\n \"languageDir\": lang_dir,\n \"languages\": languages,\n }\n\n\n@register_hook\nclass FrontendHeadAssetsHook(WebpackBundleHook):\n \"\"\"\n Render these assets in the <head> tag of base.html, before other JS and assets.\n \"\"\"\n\n bundle_id = \"frontend_head_assets\"\n inline = True\n\n def render_to_page_load_sync_html(self):\n \"\"\"\n Add in the extra language font file tags needed\n for preloading our custom font files.\n \"\"\"\n tags = (\n self.plugin_data_tag()\n + self.language_font_file_tags()\n + self.frontend_message_tag()\n + list(self.js_and_css_tags())\n )\n\n return mark_safe(\"\\n\".join(tags))\n\n def language_font_file_tags(self):\n language_code = get_language()\n common_file = static(\"assets/fonts/noto-common.css\")\n subset_file = static(\"assets/fonts/noto-subset.{}.css\".format(language_code))\n return [\n '<link type=\"text/css\" href=\"{common_css_file}?v={version}\" rel=\"stylesheet\"/>'.format(\n common_css_file=common_file, version=kolibri.__version__\n ),\n '<link type=\"text/css\" href=\"{subset_css_file}?v={version}\" rel=\"stylesheet\"/>'.format(\n subset_css_file=subset_file, version=kolibri.__version__\n ),\n ]\n\n @property\n def plugin_data(self):\n return {\"unsupportedUrl\": reverse(\"kolibri:core:unsupported\")}\n", "path": "kolibri/core/kolibri_plugin.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom kolibri.core.auth.constants.user_kinds import ADMIN\nfrom kolibri.core.device.utils import get_device_setting\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\nfrom kolibri.core.webpack.hooks import WebpackBundleHook\nfrom kolibri.plugins import KolibriPluginBase\nfrom kolibri.plugins.hooks import register_hook\n\n\nclass FacilityManagementPlugin(KolibriPluginBase):\n untranslated_view_urls = \"api_urls\"\n\n @property\n def translated_view_urls(self):\n # On an SoUD this plugin should be disabled. 
In lieu of properly\n # disabling the plugin, we will just not register any urls for now\n if not get_device_setting(\"subset_of_users_device\", False):\n return \"urls\"\n return None\n\n\n@register_hook\nclass FacilityManagementAsset(WebpackBundleHook):\n bundle_id = \"app\"\n\n\n@register_hook\nclass FacilityRedirect(RoleBasedRedirectHook):\n roles = (ADMIN,)\n\n @property\n def url(self):\n return self.plugin_url(FacilityManagementPlugin, \"facility_management\")\n\n\n@register_hook\nclass FacilityManagementNavItem(NavigationHook):\n bundle_id = \"side_nav\"\n", "path": "kolibri/plugins/facility/kolibri_plugin.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom kolibri.core.auth.constants.user_kinds import COACH\nfrom kolibri.core.device.utils import get_device_setting\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.hooks import RoleBasedRedirectHook\nfrom kolibri.core.webpack import hooks as webpack_hooks\nfrom kolibri.plugins import KolibriPluginBase\nfrom kolibri.plugins.hooks import register_hook\n\n\nclass Coach(KolibriPluginBase):\n untranslated_view_urls = \"api_urls\"\n\n @property\n def translated_view_urls(self):\n # On an SoUD this plugin should be disabled. In lieu of properly\n # disabling the plugin, we will just not register any urls for now\n if not get_device_setting(\"subset_of_users_device\", False):\n return \"urls\"\n return None\n\n\n@register_hook\nclass CoachRedirect(RoleBasedRedirectHook):\n roles = (COACH,)\n\n @property\n def url(self):\n return self.plugin_url(Coach, \"coach\")\n\n\n@register_hook\nclass CoachNavItem(NavigationHook):\n bundle_id = \"side_nav\"\n\n\n@register_hook\nclass CoachAsset(webpack_hooks.WebpackBundleHook):\n bundle_id = \"app\"\n", "path": "kolibri/plugins/coach/kolibri_plugin.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import get_resolver\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.html import mark_safe\nfrom django.utils.translation import get_language\nfrom django.utils.translation import get_language_bidi\nfrom django.utils.translation import get_language_info\nfrom django_js_reverse.core import _safe_json\nfrom django_js_reverse.core import generate_json\nfrom django_js_reverse.rjsmin import jsmin\n\nimport kolibri\nfrom kolibri.core.content.utils.paths import get_content_storage_url\nfrom kolibri.core.content.utils.paths import get_hashi_path\nfrom kolibri.core.content.utils.paths import get_zip_content_base_path\nfrom kolibri.core.content.utils.paths import get_zip_content_config\nfrom kolibri.core.device.models import ContentCacheKey\nfrom kolibri.core.device.utils import allow_other_browsers_to_connect\nfrom kolibri.core.device.utils import get_device_setting\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.oidc_provider_hook import OIDCProviderHook\nfrom kolibri.core.theme_hook import ThemeHook\nfrom kolibri.core.webpack.hooks import WebpackBundleHook\nfrom kolibri.plugins.app.utils import interface\nfrom kolibri.plugins.hooks import register_hook\nfrom kolibri.utils import i18n\nfrom kolibri.utils.conf import OPTIONS\n\n\n@register_hook\nclass FrontEndCoreAppAssetHook(WebpackBundleHook):\n 
bundle_id = \"default_frontend\"\n\n def url_tag(self):\n # Modified from:\n # https://github.com/ierror/django-js-reverse/blob/master/django_js_reverse/core.py#L101\n js_name = \"window.kolibriPluginDataGlobal['{bundle}'].urls\".format(\n bundle=self.unique_id\n )\n default_urlresolver = get_resolver(None)\n\n data = generate_json(default_urlresolver)\n\n # Generate the JS that exposes functions to reverse all Django URLs\n # in the frontend.\n js = render_to_string(\n \"django_js_reverse/urls_js.tpl\",\n {\"data\": _safe_json(data), \"js_name\": \"__placeholder__\"},\n # For some reason the js_name gets escaped going into the template\n # so this was the easiest way to inject it.\n ).replace(\"__placeholder__\", js_name)\n zip_content_origin, zip_content_port = get_zip_content_config()\n return [\n mark_safe(\n \"\"\"<script type=\"text/javascript\">\"\"\"\n # Minify the generated Javascript\n + jsmin(js)\n # Add URL references for our base static URL, the Django media URL\n # and our content storage URL - this allows us to calculate\n # the path at which to access a local file on the frontend if needed.\n + \"\"\"\n {js_name}.__staticUrl = '{static_url}';\n {js_name}.__mediaUrl = '{media_url}';\n {js_name}.__contentUrl = '{content_url}';\n {js_name}.__zipContentUrl = '{zip_content_url}';\n {js_name}.__hashiUrl = '{hashi_url}';\n {js_name}.__zipContentOrigin = '{zip_content_origin}';\n {js_name}.__zipContentPort = {zip_content_port};\n </script>\n \"\"\".format(\n js_name=js_name,\n static_url=settings.STATIC_URL,\n media_url=settings.MEDIA_URL,\n content_url=get_content_storage_url(\n baseurl=OPTIONS[\"Deployment\"][\"URL_PATH_PREFIX\"]\n ),\n zip_content_url=get_zip_content_base_path(),\n hashi_url=get_hashi_path(),\n zip_content_origin=zip_content_origin,\n zip_content_port=zip_content_port,\n )\n )\n ]\n\n def navigation_tags(self):\n return [\n hook.render_to_page_load_sync_html()\n for hook in NavigationHook.registered_hooks\n ]\n\n def render_to_page_load_sync_html(self):\n \"\"\"\n Don't render the frontend message files in the usual way\n as the global object to register them does not exist yet.\n Instead they are loaded through plugin data.\n \"\"\"\n tags = (\n self.plugin_data_tag()\n + self.url_tag()\n + list(self.js_and_css_tags())\n + self.navigation_tags()\n )\n\n return mark_safe(\"\\n\".join(tags))\n\n @property\n def plugin_data(self):\n language_code = get_language()\n static_root = static(\"assets/fonts/noto-full\")\n full_file = \"{}.{}.{}.css?v={}\"\n return {\n \"fullCSSFileModern\": full_file.format(\n static_root, language_code, \"modern\", kolibri.__version__\n ),\n \"fullCSSFileBasic\": full_file.format(\n static_root, language_code, \"basic\", kolibri.__version__\n ),\n \"allowRemoteAccess\": allow_other_browsers_to_connect()\n or not interface.enabled,\n \"appCapabilities\": interface.capabilities,\n \"contentCacheKey\": ContentCacheKey.get_cache_key(),\n \"languageGlobals\": self.language_globals(),\n \"oidcProviderEnabled\": OIDCProviderHook.is_enabled(),\n \"kolibriTheme\": ThemeHook.get_theme(),\n \"isSubsetOfUsersDevice\": get_device_setting(\"subset_of_users_device\"),\n }\n\n def language_globals(self):\n language_code = get_language()\n lang_dir = \"rtl\" if get_language_bidi() else \"ltr\"\n\n languages = {}\n for code, language_name in settings.LANGUAGES:\n lang_info = i18n.KOLIBRI_LANGUAGE_INFO[code]\n languages[code] = {\n # Format to match the schema of the content Language model\n \"id\": code,\n \"lang_name\": language_name,\n 
\"english_name\": lang_info[\"english_name\"]\n if lang_info\n else get_language_info(code)[\"name\"],\n \"lang_direction\": get_language_info(code)[\"bidi\"],\n }\n return {\n \"coreLanguageMessages\": self.frontend_messages(),\n \"languageCode\": language_code,\n \"languageDir\": lang_dir,\n \"languages\": languages,\n }\n\n\n@register_hook\nclass FrontendHeadAssetsHook(WebpackBundleHook):\n \"\"\"\n Render these assets in the <head> tag of base.html, before other JS and assets.\n \"\"\"\n\n bundle_id = \"frontend_head_assets\"\n inline = True\n\n def render_to_page_load_sync_html(self):\n \"\"\"\n Add in the extra language font file tags needed\n for preloading our custom font files.\n \"\"\"\n tags = (\n self.plugin_data_tag()\n + self.language_font_file_tags()\n + self.frontend_message_tag()\n + list(self.js_and_css_tags())\n )\n\n return mark_safe(\"\\n\".join(tags))\n\n def language_font_file_tags(self):\n language_code = get_language()\n common_file = static(\"assets/fonts/noto-common.css\")\n subset_file = static(\"assets/fonts/noto-subset.{}.css\".format(language_code))\n return [\n '<link type=\"text/css\" href=\"{common_css_file}?v={version}\" rel=\"stylesheet\"/>'.format(\n common_css_file=common_file, version=kolibri.__version__\n ),\n '<link type=\"text/css\" href=\"{subset_css_file}?v={version}\" rel=\"stylesheet\"/>'.format(\n subset_css_file=subset_file, version=kolibri.__version__\n ),\n ]\n\n @property\n def plugin_data(self):\n return {\"unsupportedUrl\": reverse(\"kolibri:core:unsupported\")}\n", "path": "kolibri/core/kolibri_plugin.py"}]}
3050
743
gh_patches_debug_35092
rasdani/github-patches
git_diff
lightly-ai__lightly-539
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- data.LightlyDataset.from_torch_dataset does not work properly Hello, I am trying to use Lightly with a dataset of images in `.npz` format. I have created a torch dataset class that loads the sample images and use `data.LightlyDataset.from_torch_dataset ` to create a Lightly dataset from it. However, it seems that the transformations that are passed to the `data.LightlyDataset.from_torch_dataset ` function are not properly performed. and subsequently the `Dataloader` does not work as expected. Can you help me figure out how to handle this? thanks ``` dataset_test_pl = data.LightlyDataset.from_torch_dataset(dataset_test, transform=test_transform) dataloader_test = torch.utils.data.DataLoader( dataset_test_pl, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers ) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lightly/data/dataset.py` Content: ``` 1 """ Lightly Dataset """ 2 3 # Copyright (c) 2020. Lightly AG and its affiliates. 4 # All Rights Reserved 5 6 import os 7 import bisect 8 import shutil 9 import tempfile 10 11 from PIL import Image 12 from typing import List, Union, Callable 13 14 import torchvision.datasets as datasets 15 from torchvision import transforms 16 17 from lightly.data._helpers import _load_dataset_from_folder 18 from lightly.data._helpers import DatasetFolder 19 from lightly.data._video import VideoDataset 20 from lightly.utils.io import check_filenames 21 22 23 def _get_filename_by_index(dataset, index): 24 """Default function which maps the index of an image to a filename. 25 26 """ 27 if isinstance(dataset, datasets.ImageFolder): 28 # filename is the path of the image relative to the dataset root 29 full_path = dataset.imgs[index][0] 30 return os.path.relpath(full_path, dataset.root) 31 elif isinstance(dataset, DatasetFolder): 32 # filename is the path of the image relative to the dataset root 33 full_path = dataset.samples[index][0] 34 return os.path.relpath(full_path, dataset.root) 35 elif isinstance(dataset, VideoDataset): 36 # filename is constructed by the video dataset 37 return dataset.get_filename(index) 38 else: 39 # dummy to prevent crashes 40 return str(index) 41 42 43 def _ensure_dir(path): 44 """Makes sure that the directory at path exists. 45 46 """ 47 dirname = os.path.dirname(path) 48 os.makedirs(dirname, exist_ok=True) 49 50 51 def _copy_image(input_dir, output_dir, filename): 52 """Copies an image from the input directory to the output directory. 53 54 """ 55 source = os.path.join(input_dir, filename) 56 target = os.path.join(output_dir, filename) 57 _ensure_dir(target) 58 shutil.copyfile(source, target) 59 60 61 def _save_image(image, output_dir, filename, fmt): 62 """Saves an image in the output directory. 63 64 """ 65 target = os.path.join(output_dir, filename) 66 _ensure_dir(target) 67 try: 68 # try to save the image with the specified format or 69 # derive the format from the filename (if format=None) 70 image.save(target, format=fmt) 71 except ValueError: 72 # could not determine format from filename 73 image.save(target, format='png') 74 75 76 def _dump_image(dataset, output_dir, filename, index, fmt): 77 """Saves a single image to the output directory. 78 79 Will copy the image from the input directory to the output directory 80 if possible. If not (e.g. 
for VideoDatasets), will load the image and 81 then save it to the output directory with the specified format. 82 83 """ 84 85 if isinstance(dataset, datasets.ImageFolder): 86 # can safely copy the image from the input to the output directory 87 _copy_image(dataset.root, output_dir, filename) 88 elif isinstance(dataset, DatasetFolder): 89 # can safely copy the image from the input to the output directory 90 _copy_image(dataset.root, output_dir, filename) 91 else: 92 # need to load the image and save it to the output directory 93 image, _ = dataset[index] 94 _save_image(image, output_dir, filename, fmt) 95 96 97 class LightlyDataset: 98 """Provides a uniform data interface for the embedding models. 99 100 Should be used for all models and functions in the lightly package. 101 Returns a tuple (sample, target, fname) when accessed using __getitem__. 102 103 The LightlyDataset supports different input sources. You can use it 104 on a folder of images. You can also use it on a folder with subfolders 105 with images (ImageNet style). If the input_dir has subfolders, 106 each subfolder gets its own target label. 107 You can also work with videos (requires pyav). 108 If there are multiple videos in the input_dir each video gets a different 109 target label assigned. If input_dir contains images and videos 110 only the videos are used. 111 112 Can also be used in combination with the `from_torch_dataset` method 113 to load a dataset offered by torchvision (e.g. cifar10). 114 115 Args: 116 input_dir: 117 Path to directory holding the images or videos to load. 118 transform: 119 Image transforms (as in torchvision). 120 index_to_filename: 121 Function which takes the dataset and index as input and returns 122 the filename of the file at the index. If None, uses default. 123 filenames: 124 If not None, it filters the dataset in the input directory 125 by the given filenames. 126 127 Examples: 128 >>> # load a dataset consisting of images from a local folder 129 >>> # mydata/ 130 >>> # `- img1.png 131 >>> # `- img2.png 132 >>> # `- ... 133 >>> import lightly.data as data 134 >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/') 135 >>> sample, target, fname = dataset[0] 136 >>> 137 >>> # also works with subfolders 138 >>> # mydata/ 139 >>> # `- subfolder1 140 >>> # `- img1.png 141 >>> # `- subfolder2 142 >>> # ... 143 >>> 144 >>> # also works with videos 145 >>> # mydata/ 146 >>> # `- video1.mp4 147 >>> # `- video2.mp4 148 >>> # `- ... 
149 """ 150 151 def __init__(self, 152 input_dir: Union[str, None], 153 transform: transforms.Compose = None, 154 index_to_filename: 155 Callable[[datasets.VisionDataset, int], str] = None, 156 filenames: List[str] = None, 157 ): 158 159 # can pass input_dir=None to create an "empty" dataset 160 self.input_dir = input_dir 161 if filenames is not None: 162 filepaths = [ 163 os.path.join(input_dir, filename) 164 for filename in filenames 165 ] 166 filepaths = set(filepaths) 167 168 def is_valid_file(filepath: str): 169 return filepath in filepaths 170 else: 171 is_valid_file = None 172 if self.input_dir is not None: 173 self.dataset = _load_dataset_from_folder( 174 self.input_dir, transform, is_valid_file=is_valid_file 175 ) 176 177 # initialize function to get filename of image 178 self.index_to_filename = _get_filename_by_index 179 if index_to_filename is not None: 180 self.index_to_filename = index_to_filename 181 182 # if created from an input directory with filenames, check if they 183 # are valid 184 if input_dir: 185 check_filenames(self.get_filenames()) 186 187 @classmethod 188 def from_torch_dataset(cls, 189 dataset, 190 transform=None, 191 index_to_filename=None): 192 """Builds a LightlyDataset from a PyTorch (or torchvision) dataset. 193 194 Args: 195 dataset: 196 PyTorch/torchvision dataset. 197 transform: 198 Image transforms (as in torchvision). 199 index_to_filename: 200 Function which takes the dataset and index as input and returns 201 the filename of the file at the index. If None, uses default. 202 203 Returns: 204 A LightlyDataset object. 205 206 Examples: 207 >>> # load cifar10 from torchvision 208 >>> import torchvision 209 >>> import lightly.data as data 210 >>> base = torchvision.datasets.CIFAR10(root='./') 211 >>> dataset = data.LightlyDataset.from_torch_dataset(base) 212 213 """ 214 # create an "empty" dataset object 215 dataset_obj = cls( 216 None, 217 transform=transform, 218 index_to_filename=index_to_filename 219 ) 220 221 # populate it with the torch dataset 222 dataset_obj.dataset = dataset 223 return dataset_obj 224 225 def __getitem__(self, index: int): 226 """Returns (sample, target, fname) of item at index. 227 228 Args: 229 index: 230 Index of the queried item. 231 232 Returns: 233 The image, target, and filename of the item at index. 234 235 """ 236 fname = self.index_to_filename(self.dataset, index) 237 sample, target = self.dataset.__getitem__(index) 238 239 return sample, target, fname 240 241 def __len__(self): 242 """Returns the length of the dataset. 243 244 """ 245 return len(self.dataset) 246 247 def __add__(self, other): 248 """Adds another item to the dataset. 249 250 """ 251 raise NotImplementedError() 252 253 def get_filenames(self) -> List[str]: 254 """Returns all filenames in the dataset. 255 256 """ 257 list_of_filenames = [] 258 for index in range(len(self)): 259 fname = self.index_to_filename(self.dataset, index) 260 list_of_filenames.append(fname) 261 return list_of_filenames 262 263 def dump(self, 264 output_dir: str, 265 filenames: Union[List[str], None] = None, 266 format: Union[str, None] = None): 267 """Saves images in the dataset to the output directory. 268 269 Will copy the images from the input directory to the output directory 270 if possible. If not (e.g. for VideoDatasets), will load the images and 271 then save them to the output directory with the specified format. 272 273 Args: 274 output_dir: 275 Output directory where the image is stored. 276 filenames: 277 Filenames of the images to store. If None, stores all images. 
278 format: 279 Image format. Can be any pillow image format (png, jpg, ...). 280 By default we try to use the same format as the input data. If 281 not possible (e.g. for videos) we dump the image 282 as a png image to prevent compression artifacts. 283 284 """ 285 286 if self.dataset.transform is not None: 287 raise RuntimeError('Cannot dump dataset which applies transforms!') 288 289 # create directory if it doesn't exist yet 290 os.makedirs(output_dir, exist_ok=True) 291 292 # dump all the files if no filenames were passed, otherwise dump only 293 # the ones referenced in the list 294 if filenames is None: 295 indices = [i for i in range(self.__len__())] 296 filenames = self.get_filenames() 297 else: 298 indices = [] 299 filenames = sorted(filenames) 300 all_filenames = self.get_filenames() 301 for index, filename in enumerate(all_filenames): 302 filename_index = bisect.bisect_left(filenames, filename) 303 # make sure the filename exists in filenames 304 if filename_index < len(filenames) and \ 305 filenames[filename_index] == filename: 306 indices.append(index) 307 308 # dump images 309 for i, filename in zip(indices, filenames): 310 _dump_image(self.dataset, output_dir, filename, i, fmt=format) 311 312 def get_filepath_from_filename(self, filename: str, image: Image = None): 313 """Returns the filepath given the filename of the image 314 315 There are three cases: 316 - The dataset is a regular dataset with the images in the input dir. 317 - The dataset is a video dataset, thus the images have to be saved in a 318 temporary folder. 319 - The dataset is a torch dataset, thus the images have to be saved in a 320 temporary folder. 321 Args: 322 filename: 323 The filename of the image 324 image: 325 The image corresponding to the filename 326 327 Returns: 328 The filename to the image, either the existing one (case 1) or a 329 newly created jpg (case 2, 3) 330 331 """ 332 333 has_input_dir = hasattr(self, 'input_dir') and \ 334 isinstance(self.input_dir, str) 335 if has_input_dir: 336 path_to_image = os.path.join(self.input_dir, filename) 337 if os.path.isfile(path_to_image): 338 # the file exists, return its filepath 339 return path_to_image 340 341 if image is None: 342 raise ValueError( 343 'The parameter image must not be None for' 344 'VideoDatasets and TorchDatasets' 345 ) 346 347 # the file doesn't exist, save it as a jpg and return filepath 348 folder_path = tempfile.mkdtemp() 349 filepath = os.path.join(folder_path, filename) + '.jpg' 350 351 if os.path.dirname(filepath): 352 os.makedirs(os.path.dirname(filepath), exist_ok=True) 353 354 image.save(filepath) 355 return filepath 356 357 @property 358 def transform(self): 359 """Getter for the transform of the dataset. 360 361 """ 362 return self.dataset.transform 363 364 @transform.setter 365 def transform(self, t): 366 """Setter for the transform of the dataset. 367 368 """ 369 self.dataset.transform = t 370 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py --- a/lightly/data/dataset.py +++ b/lightly/data/dataset.py @@ -10,6 +10,7 @@ from PIL import Image from typing import List, Union, Callable +from torch._C import Value import torchvision.datasets as datasets from torchvision import transforms @@ -153,8 +154,7 @@ transform: transforms.Compose = None, index_to_filename: Callable[[datasets.VisionDataset, int], str] = None, - filenames: List[str] = None, - ): + filenames: List[str] = None): # can pass input_dir=None to create an "empty" dataset self.input_dir = input_dir @@ -169,10 +169,16 @@ return filepath in filepaths else: is_valid_file = None + if self.input_dir is not None: self.dataset = _load_dataset_from_folder( self.input_dir, transform, is_valid_file=is_valid_file ) + elif transform is not None: + raise ValueError( + 'transform must be None when input_dir is None but is ' + f'{transform}', + ) # initialize function to get filename of image self.index_to_filename = _get_filename_by_index @@ -214,12 +220,12 @@ # create an "empty" dataset object dataset_obj = cls( None, - transform=transform, - index_to_filename=index_to_filename + index_to_filename=index_to_filename, ) # populate it with the torch dataset dataset_obj.dataset = dataset + dataset_obj.transform = transform return dataset_obj def __getitem__(self, index: int):
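Note: a minimal usage sketch of the pattern this diff introduces, assuming the patched lightly package is installed. The `./` root and the CIFAR10 choice are illustrative, not part of the record.

```python
# Sketch: after the patch, from_torch_dataset attaches the transform via the
# property setter, so it lands on the wrapped torch dataset and is applied
# by __getitem__ instead of being silently dropped.
import torchvision
import torchvision.transforms as T
import lightly.data as data

base = torchvision.datasets.CIFAR10(root='./', download=True)
transform = T.Compose([T.RandomHorizontalFlip(), T.ToTensor()])

dataset = data.LightlyDataset.from_torch_dataset(base, transform=transform)
sample, target, fname = dataset[0]
print(type(sample))  # expected to be a torch.Tensor once the transform runs
```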
{"golden_diff": "diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py\n--- a/lightly/data/dataset.py\n+++ b/lightly/data/dataset.py\n@@ -10,6 +10,7 @@\n \n from PIL import Image\n from typing import List, Union, Callable\n+from torch._C import Value\n \n import torchvision.datasets as datasets\n from torchvision import transforms\n@@ -153,8 +154,7 @@\n transform: transforms.Compose = None,\n index_to_filename:\n Callable[[datasets.VisionDataset, int], str] = None,\n- filenames: List[str] = None,\n- ):\n+ filenames: List[str] = None):\n \n # can pass input_dir=None to create an \"empty\" dataset\n self.input_dir = input_dir\n@@ -169,10 +169,16 @@\n return filepath in filepaths\n else:\n is_valid_file = None\n+\n if self.input_dir is not None:\n self.dataset = _load_dataset_from_folder(\n self.input_dir, transform, is_valid_file=is_valid_file\n )\n+ elif transform is not None:\n+ raise ValueError(\n+ 'transform must be None when input_dir is None but is '\n+ f'{transform}',\n+ )\n \n # initialize function to get filename of image\n self.index_to_filename = _get_filename_by_index\n@@ -214,12 +220,12 @@\n # create an \"empty\" dataset object\n dataset_obj = cls(\n None,\n- transform=transform,\n- index_to_filename=index_to_filename\n+ index_to_filename=index_to_filename,\n )\n \n # populate it with the torch dataset\n dataset_obj.dataset = dataset\n+ dataset_obj.transform = transform\n return dataset_obj\n \n def __getitem__(self, index: int):\n", "issue": "data.LightlyDataset.from_torch_dataset does not work properly\nHello,\r\n\r\nI am trying to use Lightly with a dataset of images in `.npz` format. I have created a torch dataset class that loads the sample images and use `data.LightlyDataset.from_torch_dataset ` to create a Lightly dataset from it. However, it seems that the transformations that are passed to the `data.LightlyDataset.from_torch_dataset ` function are not properly performed. and subsequently the `Dataloader` does not work as expected. Can you help me figure out how to handle this? thanks\r\n\r\n```\r\ndataset_test_pl = data.LightlyDataset.from_torch_dataset(dataset_test, transform=test_transform)\r\ndataloader_test = torch.utils.data.DataLoader(\r\n dataset_test_pl,\r\n batch_size=batch_size,\r\n shuffle=False,\r\n drop_last=False,\r\n num_workers=num_workers\r\n)\r\n```\n", "before_files": [{"content": "\"\"\" Lightly Dataset \"\"\"\n\n# Copyright (c) 2020. 
Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nimport bisect\nimport shutil\nimport tempfile\n\nfrom PIL import Image\nfrom typing import List, Union, Callable\n\nimport torchvision.datasets as datasets\nfrom torchvision import transforms\n\nfrom lightly.data._helpers import _load_dataset_from_folder\nfrom lightly.data._helpers import DatasetFolder\nfrom lightly.data._video import VideoDataset\nfrom lightly.utils.io import check_filenames\n\n\ndef _get_filename_by_index(dataset, index):\n \"\"\"Default function which maps the index of an image to a filename.\n\n \"\"\"\n if isinstance(dataset, datasets.ImageFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.imgs[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, DatasetFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.samples[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, VideoDataset):\n # filename is constructed by the video dataset\n return dataset.get_filename(index)\n else:\n # dummy to prevent crashes\n return str(index)\n\n\ndef _ensure_dir(path):\n \"\"\"Makes sure that the directory at path exists.\n\n \"\"\"\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n\ndef _copy_image(input_dir, output_dir, filename):\n \"\"\"Copies an image from the input directory to the output directory.\n\n \"\"\"\n source = os.path.join(input_dir, filename)\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n shutil.copyfile(source, target)\n\n\ndef _save_image(image, output_dir, filename, fmt):\n \"\"\"Saves an image in the output directory.\n\n \"\"\"\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n try:\n # try to save the image with the specified format or\n # derive the format from the filename (if format=None)\n image.save(target, format=fmt)\n except ValueError:\n # could not determine format from filename\n image.save(target, format='png')\n\n\ndef _dump_image(dataset, output_dir, filename, index, fmt):\n \"\"\"Saves a single image to the output directory.\n\n Will copy the image from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the image and\n then save it to the output directory with the specified format.\n\n \"\"\"\n\n if isinstance(dataset, datasets.ImageFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n elif isinstance(dataset, DatasetFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n else:\n # need to load the image and save it to the output directory\n image, _ = dataset[index]\n _save_image(image, output_dir, filename, fmt)\n\n\nclass LightlyDataset:\n \"\"\"Provides a uniform data interface for the embedding models.\n\n Should be used for all models and functions in the lightly package.\n Returns a tuple (sample, target, fname) when accessed using __getitem__.\n\n The LightlyDataset supports different input sources. You can use it\n on a folder of images. You can also use it on a folder with subfolders\n with images (ImageNet style). If the input_dir has subfolders,\n each subfolder gets its own target label.\n You can also work with videos (requires pyav).\n If there are multiple videos in the input_dir each video gets a different\n target label assigned. 
If input_dir contains images and videos\n only the videos are used.\n\n Can also be used in combination with the `from_torch_dataset` method\n to load a dataset offered by torchvision (e.g. cifar10).\n\n Args:\n input_dir:\n Path to directory holding the images or videos to load.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. If None, uses default.\n filenames:\n If not None, it filters the dataset in the input directory\n by the given filenames.\n\n Examples:\n >>> # load a dataset consisting of images from a local folder\n >>> # mydata/\n >>> # `- img1.png\n >>> # `- img2.png\n >>> # `- ...\n >>> import lightly.data as data\n >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/')\n >>> sample, target, fname = dataset[0]\n >>>\n >>> # also works with subfolders\n >>> # mydata/\n >>> # `- subfolder1\n >>> # `- img1.png\n >>> # `- subfolder2\n >>> # ...\n >>>\n >>> # also works with videos\n >>> # mydata/\n >>> # `- video1.mp4\n >>> # `- video2.mp4\n >>> # `- ...\n \"\"\"\n\n def __init__(self,\n input_dir: Union[str, None],\n transform: transforms.Compose = None,\n index_to_filename:\n Callable[[datasets.VisionDataset, int], str] = None,\n filenames: List[str] = None,\n ):\n\n # can pass input_dir=None to create an \"empty\" dataset\n self.input_dir = input_dir\n if filenames is not None:\n filepaths = [\n os.path.join(input_dir, filename)\n for filename in filenames\n ]\n filepaths = set(filepaths)\n\n def is_valid_file(filepath: str):\n return filepath in filepaths\n else:\n is_valid_file = None\n if self.input_dir is not None:\n self.dataset = _load_dataset_from_folder(\n self.input_dir, transform, is_valid_file=is_valid_file\n )\n\n # initialize function to get filename of image\n self.index_to_filename = _get_filename_by_index\n if index_to_filename is not None:\n self.index_to_filename = index_to_filename\n\n # if created from an input directory with filenames, check if they\n # are valid\n if input_dir:\n check_filenames(self.get_filenames())\n\n @classmethod\n def from_torch_dataset(cls,\n dataset,\n transform=None,\n index_to_filename=None):\n \"\"\"Builds a LightlyDataset from a PyTorch (or torchvision) dataset.\n\n Args:\n dataset:\n PyTorch/torchvision dataset.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. 
If None, uses default.\n\n Returns:\n A LightlyDataset object.\n\n Examples:\n >>> # load cifar10 from torchvision\n >>> import torchvision\n >>> import lightly.data as data\n >>> base = torchvision.datasets.CIFAR10(root='./')\n >>> dataset = data.LightlyDataset.from_torch_dataset(base)\n\n \"\"\"\n # create an \"empty\" dataset object\n dataset_obj = cls(\n None,\n transform=transform,\n index_to_filename=index_to_filename\n )\n\n # populate it with the torch dataset\n dataset_obj.dataset = dataset\n return dataset_obj\n\n def __getitem__(self, index: int):\n \"\"\"Returns (sample, target, fname) of item at index.\n\n Args:\n index:\n Index of the queried item.\n\n Returns:\n The image, target, and filename of the item at index.\n\n \"\"\"\n fname = self.index_to_filename(self.dataset, index)\n sample, target = self.dataset.__getitem__(index)\n\n return sample, target, fname\n\n def __len__(self):\n \"\"\"Returns the length of the dataset.\n\n \"\"\"\n return len(self.dataset)\n\n def __add__(self, other):\n \"\"\"Adds another item to the dataset.\n\n \"\"\"\n raise NotImplementedError()\n\n def get_filenames(self) -> List[str]:\n \"\"\"Returns all filenames in the dataset.\n\n \"\"\"\n list_of_filenames = []\n for index in range(len(self)):\n fname = self.index_to_filename(self.dataset, index)\n list_of_filenames.append(fname)\n return list_of_filenames\n\n def dump(self,\n output_dir: str,\n filenames: Union[List[str], None] = None,\n format: Union[str, None] = None):\n \"\"\"Saves images in the dataset to the output directory.\n\n Will copy the images from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the images and\n then save them to the output directory with the specified format.\n\n Args:\n output_dir:\n Output directory where the image is stored.\n filenames:\n Filenames of the images to store. If None, stores all images.\n format:\n Image format. Can be any pillow image format (png, jpg, ...).\n By default we try to use the same format as the input data. If\n not possible (e.g. 
for videos) we dump the image \n as a png image to prevent compression artifacts.\n\n \"\"\"\n\n if self.dataset.transform is not None:\n raise RuntimeError('Cannot dump dataset which applies transforms!')\n\n # create directory if it doesn't exist yet\n os.makedirs(output_dir, exist_ok=True)\n\n # dump all the files if no filenames were passed, otherwise dump only\n # the ones referenced in the list\n if filenames is None:\n indices = [i for i in range(self.__len__())]\n filenames = self.get_filenames()\n else:\n indices = []\n filenames = sorted(filenames)\n all_filenames = self.get_filenames()\n for index, filename in enumerate(all_filenames):\n filename_index = bisect.bisect_left(filenames, filename)\n # make sure the filename exists in filenames\n if filename_index < len(filenames) and \\\n filenames[filename_index] == filename:\n indices.append(index)\n\n # dump images\n for i, filename in zip(indices, filenames):\n _dump_image(self.dataset, output_dir, filename, i, fmt=format)\n\n def get_filepath_from_filename(self, filename: str, image: Image = None):\n \"\"\"Returns the filepath given the filename of the image\n\n There are three cases:\n - The dataset is a regular dataset with the images in the input dir.\n - The dataset is a video dataset, thus the images have to be saved in a\n temporary folder.\n - The dataset is a torch dataset, thus the images have to be saved in a\n temporary folder.\n Args:\n filename:\n The filename of the image\n image:\n The image corresponding to the filename\n\n Returns:\n The filename to the image, either the existing one (case 1) or a\n newly created jpg (case 2, 3)\n\n \"\"\"\n\n has_input_dir = hasattr(self, 'input_dir') and \\\n isinstance(self.input_dir, str)\n if has_input_dir:\n path_to_image = os.path.join(self.input_dir, filename)\n if os.path.isfile(path_to_image):\n # the file exists, return its filepath\n return path_to_image\n\n if image is None:\n raise ValueError(\n 'The parameter image must not be None for'\n 'VideoDatasets and TorchDatasets'\n )\n\n # the file doesn't exist, save it as a jpg and return filepath\n folder_path = tempfile.mkdtemp()\n filepath = os.path.join(folder_path, filename) + '.jpg'\n \n if os.path.dirname(filepath):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n image.save(filepath)\n return filepath\n\n @property\n def transform(self):\n \"\"\"Getter for the transform of the dataset.\n\n \"\"\"\n return self.dataset.transform\n\n @transform.setter\n def transform(self, t):\n \"\"\"Setter for the transform of the dataset.\n\n \"\"\"\n self.dataset.transform = t\n", "path": "lightly/data/dataset.py"}], "after_files": [{"content": "\"\"\" Lightly Dataset \"\"\"\n\n# Copyright (c) 2020. 
Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nimport bisect\nimport shutil\nimport tempfile\n\nfrom PIL import Image\nfrom typing import List, Union, Callable\nfrom torch._C import Value\n\nimport torchvision.datasets as datasets\nfrom torchvision import transforms\n\nfrom lightly.data._helpers import _load_dataset_from_folder\nfrom lightly.data._helpers import DatasetFolder\nfrom lightly.data._video import VideoDataset\nfrom lightly.utils.io import check_filenames\n\n\ndef _get_filename_by_index(dataset, index):\n \"\"\"Default function which maps the index of an image to a filename.\n\n \"\"\"\n if isinstance(dataset, datasets.ImageFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.imgs[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, DatasetFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.samples[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, VideoDataset):\n # filename is constructed by the video dataset\n return dataset.get_filename(index)\n else:\n # dummy to prevent crashes\n return str(index)\n\n\ndef _ensure_dir(path):\n \"\"\"Makes sure that the directory at path exists.\n\n \"\"\"\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n\ndef _copy_image(input_dir, output_dir, filename):\n \"\"\"Copies an image from the input directory to the output directory.\n\n \"\"\"\n source = os.path.join(input_dir, filename)\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n shutil.copyfile(source, target)\n\n\ndef _save_image(image, output_dir, filename, fmt):\n \"\"\"Saves an image in the output directory.\n\n \"\"\"\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n try:\n # try to save the image with the specified format or\n # derive the format from the filename (if format=None)\n image.save(target, format=fmt)\n except ValueError:\n # could not determine format from filename\n image.save(target, format='png')\n\n\ndef _dump_image(dataset, output_dir, filename, index, fmt):\n \"\"\"Saves a single image to the output directory.\n\n Will copy the image from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the image and\n then save it to the output directory with the specified format.\n\n \"\"\"\n\n if isinstance(dataset, datasets.ImageFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n elif isinstance(dataset, DatasetFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n else:\n # need to load the image and save it to the output directory\n image, _ = dataset[index]\n _save_image(image, output_dir, filename, fmt)\n\n\nclass LightlyDataset:\n \"\"\"Provides a uniform data interface for the embedding models.\n\n Should be used for all models and functions in the lightly package.\n Returns a tuple (sample, target, fname) when accessed using __getitem__.\n\n The LightlyDataset supports different input sources. You can use it\n on a folder of images. You can also use it on a folder with subfolders\n with images (ImageNet style). 
If the input_dir has subfolders,\n each subfolder gets its own target label.\n You can also work with videos (requires pyav).\n If there are multiple videos in the input_dir each video gets a different\n target label assigned. If input_dir contains images and videos\n only the videos are used.\n\n Can also be used in combination with the `from_torch_dataset` method\n to load a dataset offered by torchvision (e.g. cifar10).\n\n Args:\n input_dir:\n Path to directory holding the images or videos to load.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. If None, uses default.\n filenames:\n If not None, it filters the dataset in the input directory\n by the given filenames.\n\n Examples:\n >>> # load a dataset consisting of images from a local folder\n >>> # mydata/\n >>> # `- img1.png\n >>> # `- img2.png\n >>> # `- ...\n >>> import lightly.data as data\n >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/')\n >>> sample, target, fname = dataset[0]\n >>>\n >>> # also works with subfolders\n >>> # mydata/\n >>> # `- subfolder1\n >>> # `- img1.png\n >>> # `- subfolder2\n >>> # ...\n >>>\n >>> # also works with videos\n >>> # mydata/\n >>> # `- video1.mp4\n >>> # `- video2.mp4\n >>> # `- ...\n \"\"\"\n\n def __init__(self,\n input_dir: Union[str, None],\n transform: transforms.Compose = None,\n index_to_filename:\n Callable[[datasets.VisionDataset, int], str] = None,\n filenames: List[str] = None):\n\n # can pass input_dir=None to create an \"empty\" dataset\n self.input_dir = input_dir\n if filenames is not None:\n filepaths = [\n os.path.join(input_dir, filename)\n for filename in filenames\n ]\n filepaths = set(filepaths)\n\n def is_valid_file(filepath: str):\n return filepath in filepaths\n else:\n is_valid_file = None\n\n if self.input_dir is not None:\n self.dataset = _load_dataset_from_folder(\n self.input_dir, transform, is_valid_file=is_valid_file\n )\n elif transform is not None:\n raise ValueError(\n 'transform must be None when input_dir is None but is '\n f'{transform}',\n )\n\n # initialize function to get filename of image\n self.index_to_filename = _get_filename_by_index\n if index_to_filename is not None:\n self.index_to_filename = index_to_filename\n\n # if created from an input directory with filenames, check if they\n # are valid\n if input_dir:\n check_filenames(self.get_filenames())\n\n @classmethod\n def from_torch_dataset(cls,\n dataset,\n transform=None,\n index_to_filename=None):\n \"\"\"Builds a LightlyDataset from a PyTorch (or torchvision) dataset.\n\n Args:\n dataset:\n PyTorch/torchvision dataset.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. 
If None, uses default.\n\n Returns:\n A LightlyDataset object.\n\n Examples:\n >>> # load cifar10 from torchvision\n >>> import torchvision\n >>> import lightly.data as data\n >>> base = torchvision.datasets.CIFAR10(root='./')\n >>> dataset = data.LightlyDataset.from_torch_dataset(base)\n\n \"\"\"\n # create an \"empty\" dataset object\n dataset_obj = cls(\n None,\n index_to_filename=index_to_filename,\n )\n\n # populate it with the torch dataset\n dataset_obj.dataset = dataset\n dataset_obj.transform = transform\n return dataset_obj\n\n def __getitem__(self, index: int):\n \"\"\"Returns (sample, target, fname) of item at index.\n\n Args:\n index:\n Index of the queried item.\n\n Returns:\n The image, target, and filename of the item at index.\n\n \"\"\"\n fname = self.index_to_filename(self.dataset, index)\n sample, target = self.dataset.__getitem__(index)\n\n return sample, target, fname\n\n def __len__(self):\n \"\"\"Returns the length of the dataset.\n\n \"\"\"\n return len(self.dataset)\n\n def __add__(self, other):\n \"\"\"Adds another item to the dataset.\n\n \"\"\"\n raise NotImplementedError()\n\n def get_filenames(self) -> List[str]:\n \"\"\"Returns all filenames in the dataset.\n\n \"\"\"\n list_of_filenames = []\n for index in range(len(self)):\n fname = self.index_to_filename(self.dataset, index)\n list_of_filenames.append(fname)\n return list_of_filenames\n\n def dump(self,\n output_dir: str,\n filenames: Union[List[str], None] = None,\n format: Union[str, None] = None):\n \"\"\"Saves images in the dataset to the output directory.\n\n Will copy the images from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the images and\n then save them to the output directory with the specified format.\n\n Args:\n output_dir:\n Output directory where the image is stored.\n filenames:\n Filenames of the images to store. If None, stores all images.\n format:\n Image format. Can be any pillow image format (png, jpg, ...).\n By default we try to use the same format as the input data. If\n not possible (e.g. 
for videos) we dump the image \n as a png image to prevent compression artifacts.\n\n \"\"\"\n\n if self.dataset.transform is not None:\n raise RuntimeError('Cannot dump dataset which applies transforms!')\n\n # create directory if it doesn't exist yet\n os.makedirs(output_dir, exist_ok=True)\n\n # dump all the files if no filenames were passed, otherwise dump only\n # the ones referenced in the list\n if filenames is None:\n indices = [i for i in range(self.__len__())]\n filenames = self.get_filenames()\n else:\n indices = []\n filenames = sorted(filenames)\n all_filenames = self.get_filenames()\n for index, filename in enumerate(all_filenames):\n filename_index = bisect.bisect_left(filenames, filename)\n # make sure the filename exists in filenames\n if filename_index < len(filenames) and \\\n filenames[filename_index] == filename:\n indices.append(index)\n\n # dump images\n for i, filename in zip(indices, filenames):\n _dump_image(self.dataset, output_dir, filename, i, fmt=format)\n\n def get_filepath_from_filename(self, filename: str, image: Image = None):\n \"\"\"Returns the filepath given the filename of the image\n\n There are three cases:\n - The dataset is a regular dataset with the images in the input dir.\n - The dataset is a video dataset, thus the images have to be saved in a\n temporary folder.\n - The dataset is a torch dataset, thus the images have to be saved in a\n temporary folder.\n Args:\n filename:\n The filename of the image\n image:\n The image corresponding to the filename\n\n Returns:\n The filename to the image, either the existing one (case 1) or a\n newly created jpg (case 2, 3)\n\n \"\"\"\n\n has_input_dir = hasattr(self, 'input_dir') and \\\n isinstance(self.input_dir, str)\n if has_input_dir:\n path_to_image = os.path.join(self.input_dir, filename)\n if os.path.isfile(path_to_image):\n # the file exists, return its filepath\n return path_to_image\n\n if image is None:\n raise ValueError(\n 'The parameter image must not be None for'\n 'VideoDatasets and TorchDatasets'\n )\n\n # the file doesn't exist, save it as a jpg and return filepath\n folder_path = tempfile.mkdtemp()\n filepath = os.path.join(folder_path, filename) + '.jpg'\n \n if os.path.dirname(filepath):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n image.save(filepath)\n return filepath\n\n @property\n def transform(self):\n \"\"\"Getter for the transform of the dataset.\n\n \"\"\"\n return self.dataset.transform\n\n @transform.setter\n def transform(self, t):\n \"\"\"Setter for the transform of the dataset.\n\n \"\"\"\n self.dataset.transform = t\n", "path": "lightly/data/dataset.py"}]}
4,077
402
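Note: the after_files above also add a guard in `__init__`; a hypothetical check of that behavior, assuming the patched version is the one installed, is sketched below.

```python
# Sketch: constructing an "empty" LightlyDataset (input_dir=None) with a
# transform should now raise, because the transform would otherwise be lost.
import torchvision.transforms as T
import lightly.data as data

try:
    data.LightlyDataset(input_dir=None, transform=T.ToTensor())
except ValueError as exc:
    print('raised as expected:', exc)
```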
gh_patches_debug_25408
rasdani/github-patches
git_diff
liberapay__liberapay.com-1241
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Paypal email is not verified. Although I verified it. I see you added Paypal as a payment option which is great. However when I add my paypal email - which is different from liberapay email - it still says it is not verified although I received an email and clicked "verify". Is this a bug? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `liberapay/payin/paypal.py` Content: ``` 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 3 import logging 4 5 import requests 6 7 from ..exceptions import PaymentError 8 from ..utils.currencies import Money 9 from ..website import website 10 from .common import update_payin, update_payin_transfer 11 12 13 PAYMENT_STATES_MAP = { 14 'approved': 'succeeded', 15 'created': 'pending', 16 'failed': 'failed', 17 } 18 SALE_STATES_MAP = { 19 'completed': 'succeeded', 20 'denied': 'failed', 21 'pending': 'pending', 22 } 23 24 logger = logging.Logger('paypal') 25 26 session = requests.Session() 27 28 29 def _init_session(): 30 # TODO switch to bearer tokens to reduce the risk of exposing the long-lived secret 31 if 'Authentication' in session.headers: 32 return session 33 from base64 import b64encode 34 session.headers.update({ 35 'Authorization': 'Basic ' + b64encode(( 36 '%s:%s' % (website.app_conf.paypal_id, website.app_conf.paypal_secret) 37 ).encode('ascii')).decode('ascii'), 38 }) 39 return session 40 41 42 def create_payment(db, payin, payer, return_url, state): 43 """Create a Payment. 44 45 Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_create 46 47 Note: even though the API expects a list of transactions it rejects the 48 request if the list contains more than one transaction. 
49 """ 50 transfers = db.all(""" 51 SELECT pt.* 52 , recipient.username AS recipient_username 53 , team.username AS team_name 54 , a.id AS merchant_id 55 FROM payin_transfers pt 56 JOIN participants recipient ON recipient.id = pt.recipient 57 LEFT JOIN participants team ON team.id = pt.team 58 JOIN payment_accounts a ON a.pk = pt.destination 59 WHERE pt.payin = %s 60 ORDER BY pt.id 61 """, (payin.id,)) 62 assert transfers 63 locale, _ = state['locale'], state['_'] 64 data = { 65 "intent": "sale", 66 "application_context": { 67 "brand_name": "Liberapay", 68 "locale": locale.language, 69 "landing_page": "Login", 70 "shipping_preference": "NO_SHIPPING", 71 "user_action": "commit", 72 }, 73 "payer": { 74 "payment_method": "paypal" 75 }, 76 "transactions": [{ 77 "amount": { 78 "total": str(pt.amount.amount), 79 "currency": pt.amount.currency 80 }, 81 "description": ( 82 _("donation to {0} for their role in the {1} team", 83 pt.recipient_username, pt.team_name) 84 if pt.team_name else 85 _("donation to {0}", pt.recipient_username) 86 ), 87 "invoice_number": str(pt.id), 88 "note_to_payee": ( 89 "donation via Liberapay for your role in the %s team" % pt.team_name 90 if pt.team_name else 91 "donation via Liberapay" 92 ), 93 "payee": { 94 "email": pt.merchant_id, 95 }, 96 "payment_options": { 97 "allowed_payment_method": "UNRESTRICTED" 98 }, 99 "soft_descriptor": "Liberapay", 100 "item_list": { 101 "items": [{ 102 "name": 103 _("One week of the donation") if pt.period == 'weekly' else 104 _("One month of the donation") if pt.period == 'monthly' else 105 _("One year of the donation"), 106 "quantity": pt.n_units, 107 "price": str(pt.unit_amount.amount), 108 "currency": pt.unit_amount.currency 109 }] 110 } 111 } for pt in transfers], 112 "redirect_urls": { 113 "return_url": return_url, 114 "cancel_url": return_url 115 } 116 } 117 url = 'https://api.%s/v1/payments/payment' % website.app_conf.paypal_domain 118 headers = { 119 'PayPal-Request-Id': 'payin_%i' % payin.id 120 } 121 response = _init_session().post(url, json=data, headers=headers) 122 if response.status_code != 201: 123 error = response.text # for Sentry 124 logger.debug(error) 125 raise PaymentError('PayPal', error) 126 payment = response.json() 127 status = PAYMENT_STATES_MAP[payment['state']] 128 error = payment.get('failure_reason') 129 payin = update_payin(db, payin.id, payment['id'], status, error) 130 if payin.status == 'pending': 131 redirect_url = [l['href'] for l in payment['links'] if l['rel'] == 'approval_url'][0] 132 raise state['response'].redirect(redirect_url) 133 return payin 134 135 136 def execute_payment(db, payin, payer_id): 137 """Execute a previously approved payment. 
138 139 Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_execute 140 """ 141 url = 'https://api.%s/v1/payments/payment/%s/execute' % ( 142 website.app_conf.paypal_domain, payin.remote_id 143 ) 144 headers = {'PayPal-Request-Id': 'payin_execute_%i' % payin.id} 145 data = {"payer_id": payer_id} 146 response = _init_session().post(url, json=data, headers=headers) 147 if response.status_code != 200: 148 error = response.text # for Sentry 149 logger.debug(error) 150 raise PaymentError('PayPal') 151 payment = response.json() 152 153 # Update the payin 154 status = PAYMENT_STATES_MAP[payment['state']] 155 error = payment.get('failure_reason') 156 payin = update_payin(db, payin.id, payment['id'], status, error) 157 158 # Update the payin transfers 159 for tr in payment['transactions']: 160 sale = tr.get('related_resources', [{}])[0].get('sale') 161 if sale: 162 pt_id = tr['invoice_number'] 163 pt_remote_id = sale['id'] 164 pt_status = SALE_STATES_MAP[sale['state']] 165 pt_error = sale.get('reason_code') 166 pt_fee = Money(sale['transaction_fee']['value'], sale['transaction_fee']['currency']) 167 charge_amount = Money(sale['amount']['total'], sale['amount']['currency']) 168 net_amount = charge_amount - pt_fee 169 update_payin_transfer( 170 db, pt_id, pt_remote_id, pt_status, pt_error, 171 amount=net_amount, fee=pt_fee 172 ) 173 174 return payin 175 176 177 def sync_payment(db, payin): 178 """Fetch the payment's data and update our database. 179 180 Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_get 181 """ 182 url = 'https://api.%s/v1/payments/payment/%s' % ( 183 website.app_conf.paypal_domain, payin.remote_id 184 ) 185 response = _init_session().get(url) 186 if response.status_code != 200: 187 error = response.text # for Sentry 188 logger.debug(error) 189 raise PaymentError('PayPal') 190 payment = response.json() 191 status = PAYMENT_STATES_MAP[payment['state']] 192 error = payment.get('failure_reason') 193 return update_payin(db, payin.id, payment['id'], status, error) 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/liberapay/payin/paypal.py b/liberapay/payin/paypal.py --- a/liberapay/payin/paypal.py +++ b/liberapay/payin/paypal.py @@ -107,7 +107,7 @@ "price": str(pt.unit_amount.amount), "currency": pt.unit_amount.currency }] - } + } if pt.n_units >= 1 else {} } for pt in transfers], "redirect_urls": { "return_url": return_url, @@ -163,9 +163,11 @@ pt_remote_id = sale['id'] pt_status = SALE_STATES_MAP[sale['state']] pt_error = sale.get('reason_code') - pt_fee = Money(sale['transaction_fee']['value'], sale['transaction_fee']['currency']) + pt_fee = sale.get('transaction_fee') + if pt_fee: + pt_fee = Money(pt_fee['value'], pt_fee['currency']) charge_amount = Money(sale['amount']['total'], sale['amount']['currency']) - net_amount = charge_amount - pt_fee + net_amount = charge_amount - (pt_fee or 0) update_payin_transfer( db, pt_id, pt_remote_id, pt_status, pt_error, amount=net_amount, fee=pt_fee
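Note: a standalone sketch of the defensive fee arithmetic this diff adds; `Decimal` stands in for liberapay's `Money` class, an assumption made so the example is self-contained.

```python
# Sketch: PayPal's sale object may omit 'transaction_fee', so the fee is
# treated as optional and the net amount falls back to the full total.
from decimal import Decimal

def net_of_fee(sale):
    fee = sale.get('transaction_fee')
    fee_amount = Decimal(fee['value']) if fee else Decimal('0')
    return Decimal(sale['amount']['total']) - fee_amount

print(net_of_fee({'amount': {'total': '10.00'}}))         # 10.00 (no fee key)
print(net_of_fee({'amount': {'total': '10.00'},
                  'transaction_fee': {'value': '0.59'}}))  # 9.41
```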
{"golden_diff": "diff --git a/liberapay/payin/paypal.py b/liberapay/payin/paypal.py\n--- a/liberapay/payin/paypal.py\n+++ b/liberapay/payin/paypal.py\n@@ -107,7 +107,7 @@\n \"price\": str(pt.unit_amount.amount),\n \"currency\": pt.unit_amount.currency\n }]\n- }\n+ } if pt.n_units >= 1 else {}\n } for pt in transfers],\n \"redirect_urls\": {\n \"return_url\": return_url,\n@@ -163,9 +163,11 @@\n pt_remote_id = sale['id']\n pt_status = SALE_STATES_MAP[sale['state']]\n pt_error = sale.get('reason_code')\n- pt_fee = Money(sale['transaction_fee']['value'], sale['transaction_fee']['currency'])\n+ pt_fee = sale.get('transaction_fee')\n+ if pt_fee:\n+ pt_fee = Money(pt_fee['value'], pt_fee['currency'])\n charge_amount = Money(sale['amount']['total'], sale['amount']['currency'])\n- net_amount = charge_amount - pt_fee\n+ net_amount = charge_amount - (pt_fee or 0)\n update_payin_transfer(\n db, pt_id, pt_remote_id, pt_status, pt_error,\n amount=net_amount, fee=pt_fee\n", "issue": "Paypal email is not verified. Although I verified it.\nI see you added Paypal as a payment option which is great. However when I add my paypal email - which is different from liberapay email - it still says it is not verified although I received an email and clicked \"verify\". Is this a bug?\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport requests\n\nfrom ..exceptions import PaymentError\nfrom ..utils.currencies import Money\nfrom ..website import website\nfrom .common import update_payin, update_payin_transfer\n\n\nPAYMENT_STATES_MAP = {\n 'approved': 'succeeded',\n 'created': 'pending',\n 'failed': 'failed',\n}\nSALE_STATES_MAP = {\n 'completed': 'succeeded',\n 'denied': 'failed',\n 'pending': 'pending',\n}\n\nlogger = logging.Logger('paypal')\n\nsession = requests.Session()\n\n\ndef _init_session():\n # TODO switch to bearer tokens to reduce the risk of exposing the long-lived secret\n if 'Authentication' in session.headers:\n return session\n from base64 import b64encode\n session.headers.update({\n 'Authorization': 'Basic ' + b64encode((\n '%s:%s' % (website.app_conf.paypal_id, website.app_conf.paypal_secret)\n ).encode('ascii')).decode('ascii'),\n })\n return session\n\n\ndef create_payment(db, payin, payer, return_url, state):\n \"\"\"Create a Payment.\n\n Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_create\n\n Note: even though the API expects a list of transactions it rejects the\n request if the list contains more than one transaction.\n \"\"\"\n transfers = db.all(\"\"\"\n SELECT pt.*\n , recipient.username AS recipient_username\n , team.username AS team_name\n , a.id AS merchant_id\n FROM payin_transfers pt\n JOIN participants recipient ON recipient.id = pt.recipient\n LEFT JOIN participants team ON team.id = pt.team\n JOIN payment_accounts a ON a.pk = pt.destination\n WHERE pt.payin = %s\n ORDER BY pt.id\n \"\"\", (payin.id,))\n assert transfers\n locale, _ = state['locale'], state['_']\n data = {\n \"intent\": \"sale\",\n \"application_context\": {\n \"brand_name\": \"Liberapay\",\n \"locale\": locale.language,\n \"landing_page\": \"Login\",\n \"shipping_preference\": \"NO_SHIPPING\",\n \"user_action\": \"commit\",\n },\n \"payer\": {\n \"payment_method\": \"paypal\"\n },\n \"transactions\": [{\n \"amount\": {\n \"total\": str(pt.amount.amount),\n \"currency\": pt.amount.currency\n },\n \"description\": (\n _(\"donation to {0} for their role in the {1} team\",\n pt.recipient_username, 
pt.team_name)\n if pt.team_name else\n _(\"donation to {0}\", pt.recipient_username)\n ),\n \"invoice_number\": str(pt.id),\n \"note_to_payee\": (\n \"donation via Liberapay for your role in the %s team\" % pt.team_name\n if pt.team_name else\n \"donation via Liberapay\"\n ),\n \"payee\": {\n \"email\": pt.merchant_id,\n },\n \"payment_options\": {\n \"allowed_payment_method\": \"UNRESTRICTED\"\n },\n \"soft_descriptor\": \"Liberapay\",\n \"item_list\": {\n \"items\": [{\n \"name\":\n _(\"One week of the donation\") if pt.period == 'weekly' else\n _(\"One month of the donation\") if pt.period == 'monthly' else\n _(\"One year of the donation\"),\n \"quantity\": pt.n_units,\n \"price\": str(pt.unit_amount.amount),\n \"currency\": pt.unit_amount.currency\n }]\n }\n } for pt in transfers],\n \"redirect_urls\": {\n \"return_url\": return_url,\n \"cancel_url\": return_url\n }\n }\n url = 'https://api.%s/v1/payments/payment' % website.app_conf.paypal_domain\n headers = {\n 'PayPal-Request-Id': 'payin_%i' % payin.id\n }\n response = _init_session().post(url, json=data, headers=headers)\n if response.status_code != 201:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal', error)\n payment = response.json()\n status = PAYMENT_STATES_MAP[payment['state']]\n error = payment.get('failure_reason')\n payin = update_payin(db, payin.id, payment['id'], status, error)\n if payin.status == 'pending':\n redirect_url = [l['href'] for l in payment['links'] if l['rel'] == 'approval_url'][0]\n raise state['response'].redirect(redirect_url)\n return payin\n\n\ndef execute_payment(db, payin, payer_id):\n \"\"\"Execute a previously approved payment.\n\n Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_execute\n \"\"\"\n url = 'https://api.%s/v1/payments/payment/%s/execute' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n headers = {'PayPal-Request-Id': 'payin_execute_%i' % payin.id}\n data = {\"payer_id\": payer_id}\n response = _init_session().post(url, json=data, headers=headers)\n if response.status_code != 200:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n payment = response.json()\n\n # Update the payin\n status = PAYMENT_STATES_MAP[payment['state']]\n error = payment.get('failure_reason')\n payin = update_payin(db, payin.id, payment['id'], status, error)\n\n # Update the payin transfers\n for tr in payment['transactions']:\n sale = tr.get('related_resources', [{}])[0].get('sale')\n if sale:\n pt_id = tr['invoice_number']\n pt_remote_id = sale['id']\n pt_status = SALE_STATES_MAP[sale['state']]\n pt_error = sale.get('reason_code')\n pt_fee = Money(sale['transaction_fee']['value'], sale['transaction_fee']['currency'])\n charge_amount = Money(sale['amount']['total'], sale['amount']['currency'])\n net_amount = charge_amount - pt_fee\n update_payin_transfer(\n db, pt_id, pt_remote_id, pt_status, pt_error,\n amount=net_amount, fee=pt_fee\n )\n\n return payin\n\n\ndef sync_payment(db, payin):\n \"\"\"Fetch the payment's data and update our database.\n\n Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_get\n \"\"\"\n url = 'https://api.%s/v1/payments/payment/%s' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n response = _init_session().get(url)\n if response.status_code != 200:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n payment = response.json()\n status = PAYMENT_STATES_MAP[payment['state']]\n error = payment.get('failure_reason')\n 
return update_payin(db, payin.id, payment['id'], status, error)\n", "path": "liberapay/payin/paypal.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport requests\n\nfrom ..exceptions import PaymentError\nfrom ..utils.currencies import Money\nfrom ..website import website\nfrom .common import update_payin, update_payin_transfer\n\n\nPAYMENT_STATES_MAP = {\n 'approved': 'succeeded',\n 'created': 'pending',\n 'failed': 'failed',\n}\nSALE_STATES_MAP = {\n 'completed': 'succeeded',\n 'denied': 'failed',\n 'pending': 'pending',\n}\n\nlogger = logging.Logger('paypal')\n\nsession = requests.Session()\n\n\ndef _init_session():\n # TODO switch to bearer tokens to reduce the risk of exposing the long-lived secret\n if 'Authentication' in session.headers:\n return session\n from base64 import b64encode\n session.headers.update({\n 'Authorization': 'Basic ' + b64encode((\n '%s:%s' % (website.app_conf.paypal_id, website.app_conf.paypal_secret)\n ).encode('ascii')).decode('ascii'),\n })\n return session\n\n\ndef create_payment(db, payin, payer, return_url, state):\n \"\"\"Create a Payment.\n\n Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_create\n\n Note: even though the API expects a list of transactions it rejects the\n request if the list contains more than one transaction.\n \"\"\"\n transfers = db.all(\"\"\"\n SELECT pt.*\n , recipient.username AS recipient_username\n , team.username AS team_name\n , a.id AS merchant_id\n FROM payin_transfers pt\n JOIN participants recipient ON recipient.id = pt.recipient\n LEFT JOIN participants team ON team.id = pt.team\n JOIN payment_accounts a ON a.pk = pt.destination\n WHERE pt.payin = %s\n ORDER BY pt.id\n \"\"\", (payin.id,))\n assert transfers\n locale, _ = state['locale'], state['_']\n data = {\n \"intent\": \"sale\",\n \"application_context\": {\n \"brand_name\": \"Liberapay\",\n \"locale\": locale.language,\n \"landing_page\": \"Login\",\n \"shipping_preference\": \"NO_SHIPPING\",\n \"user_action\": \"commit\",\n },\n \"payer\": {\n \"payment_method\": \"paypal\"\n },\n \"transactions\": [{\n \"amount\": {\n \"total\": str(pt.amount.amount),\n \"currency\": pt.amount.currency\n },\n \"description\": (\n _(\"donation to {0} for their role in the {1} team\",\n pt.recipient_username, pt.team_name)\n if pt.team_name else\n _(\"donation to {0}\", pt.recipient_username)\n ),\n \"invoice_number\": str(pt.id),\n \"note_to_payee\": (\n \"donation via Liberapay for your role in the %s team\" % pt.team_name\n if pt.team_name else\n \"donation via Liberapay\"\n ),\n \"payee\": {\n \"email\": pt.merchant_id,\n },\n \"payment_options\": {\n \"allowed_payment_method\": \"UNRESTRICTED\"\n },\n \"soft_descriptor\": \"Liberapay\",\n \"item_list\": {\n \"items\": [{\n \"name\":\n _(\"One week of the donation\") if pt.period == 'weekly' else\n _(\"One month of the donation\") if pt.period == 'monthly' else\n _(\"One year of the donation\"),\n \"quantity\": pt.n_units,\n \"price\": str(pt.unit_amount.amount),\n \"currency\": pt.unit_amount.currency\n }]\n } if pt.n_units >= 1 else {}\n } for pt in transfers],\n \"redirect_urls\": {\n \"return_url\": return_url,\n \"cancel_url\": return_url\n }\n }\n url = 'https://api.%s/v1/payments/payment' % website.app_conf.paypal_domain\n headers = {\n 'PayPal-Request-Id': 'payin_%i' % payin.id\n }\n response = _init_session().post(url, json=data, headers=headers)\n if response.status_code != 201:\n error = response.text # for 
Sentry\n logger.debug(error)\n raise PaymentError('PayPal', error)\n payment = response.json()\n status = PAYMENT_STATES_MAP[payment['state']]\n error = payment.get('failure_reason')\n payin = update_payin(db, payin.id, payment['id'], status, error)\n if payin.status == 'pending':\n redirect_url = [l['href'] for l in payment['links'] if l['rel'] == 'approval_url'][0]\n raise state['response'].redirect(redirect_url)\n return payin\n\n\ndef execute_payment(db, payin, payer_id):\n \"\"\"Execute a previously approved payment.\n\n Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_execute\n \"\"\"\n url = 'https://api.%s/v1/payments/payment/%s/execute' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n headers = {'PayPal-Request-Id': 'payin_execute_%i' % payin.id}\n data = {\"payer_id\": payer_id}\n response = _init_session().post(url, json=data, headers=headers)\n if response.status_code != 200:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n payment = response.json()\n\n # Update the payin\n status = PAYMENT_STATES_MAP[payment['state']]\n error = payment.get('failure_reason')\n payin = update_payin(db, payin.id, payment['id'], status, error)\n\n # Update the payin transfers\n for tr in payment['transactions']:\n sale = tr.get('related_resources', [{}])[0].get('sale')\n if sale:\n pt_id = tr['invoice_number']\n pt_remote_id = sale['id']\n pt_status = SALE_STATES_MAP[sale['state']]\n pt_error = sale.get('reason_code')\n pt_fee = sale.get('transaction_fee')\n if pt_fee:\n pt_fee = Money(pt_fee['value'], pt_fee['currency'])\n charge_amount = Money(sale['amount']['total'], sale['amount']['currency'])\n net_amount = charge_amount - (pt_fee or 0)\n update_payin_transfer(\n db, pt_id, pt_remote_id, pt_status, pt_error,\n amount=net_amount, fee=pt_fee\n )\n\n return payin\n\n\ndef sync_payment(db, payin):\n \"\"\"Fetch the payment's data and update our database.\n\n Doc: https://developer.paypal.com/docs/api/payments/v1/#payment_get\n \"\"\"\n url = 'https://api.%s/v1/payments/payment/%s' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n response = _init_session().get(url)\n if response.status_code != 200:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n payment = response.json()\n status = PAYMENT_STATES_MAP[payment['state']]\n error = payment.get('failure_reason')\n return update_payin(db, payin.id, payment['id'], status, error)\n", "path": "liberapay/payin/paypal.py"}]}
2,373
296
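Note: the other half of the patch above makes `item_list` conditional on the number of units; a small sketch of that construction, with a hypothetical helper name and illustrative currency, follows.

```python
# Sketch: include the PayPal item_list only when at least one unit is billed,
# otherwise send an empty dict, mirroring `if pt.n_units >= 1 else {}`.
def build_item_list(n_units, unit_price, name):
    if n_units >= 1:
        return {"items": [{"name": name,
                           "quantity": n_units,
                           "price": str(unit_price),
                           "currency": "EUR"}]}  # currency is illustrative
    return {}

print(build_item_list(0, '1.00', 'One week of the donation'))  # {}
print(build_item_list(4, '1.00', 'One week of the donation'))  # populated
```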
gh_patches_debug_19739
rasdani/github-patches
git_diff
cocotb__cocotb-278
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- getvalue() in handle.py should configure the returned Binary value When getting a value from a handle a "BinaryValue()" object is created. In my case, I tried to slice that value to get the upper bits, but the call failed because the returned value is not configured. I believe at a minimum, the Handle should set the number of bits, but ideally it would just configure everything the BinaryValue() object needs before returning it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cocotb/handle.py` Content: ``` 1 #!/usr/bin/env python 2 3 ''' Copyright (c) 2013 Potential Ventures Ltd 4 Copyright (c) 2013 SolarFlare Communications Inc 5 All rights reserved. 6 7 Redistribution and use in source and binary forms, with or without 8 modification, are permitted provided that the following conditions are met: 9 * Redistributions of source code must retain the above copyright 10 notice, this list of conditions and the following disclaimer. 11 * Redistributions in binary form must reproduce the above copyright 12 notice, this list of conditions and the following disclaimer in the 13 documentation and/or other materials provided with the distribution. 14 * Neither the name of Potential Ventures Ltd, 15 SolarFlare Communications Inc nor the 16 names of its contributors may be used to endorse or promote products 17 derived from this software without specific prior written permission. 18 19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY 23 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' 29 30 # -*- coding: utf-8 -*- 31 32 import logging 33 import ctypes 34 import traceback 35 import sys 36 # from StringIO import StringIO 37 38 from io import StringIO, BytesIO 39 40 import os 41 42 # For autodocumentation don't need the extension modules 43 if "SPHINX_BUILD" in os.environ: 44 simulator = None 45 else: 46 import simulator 47 48 import cocotb 49 from cocotb.binary import BinaryValue 50 from cocotb.log import SimLog 51 from cocotb.result import TestError 52 from cocotb.triggers import _RisingEdge, _FallingEdge 53 from cocotb.utils import get_python_integer_types 54 55 56 class SimHandle(object): 57 58 def __init__(self, handle): 59 """ 60 Args: 61 _handle [integer] : vpi/vhpi handle to the simulator object 62 """ 63 self._handle = handle # handle used for future simulator transactions 64 self._sub_handles = {} # Dict. of SimHandle objects created by getattr 65 self._len = None 66 67 self.name = simulator.get_name_string(self._handle) 68 self.fullname = '%s(%s)' % (self.name, 69 simulator.get_type_string(self._handle)) 70 self.log = SimLog('cocotb.' 
+ self.name) 71 self.log.debug("Created!") 72 self._r_edge = _RisingEdge(self) 73 self._f_edge = _FallingEdge(self) 74 75 def __hash__(self): 76 return self._handle 77 78 def __str__(self): 79 return "%s @0x%x" % (self.name, self._handle) 80 81 def __getattr__(self, name): 82 """ Query the simulator for a object with the specified name 83 and cache the result to build a tree 84 """ 85 # python's builtin dir and IPython's dir2 search for these, 86 # raise an AttributeError to avoid incorrect calls to _raise_testerror 87 if name in ["__methods__","__members__","trait_names","_getAttributeNames"]: 88 raise AttributeError(name) 89 if name in self._sub_handles: 90 return self._sub_handles[name] 91 new_handle = simulator.get_handle_by_name(self._handle, name) 92 if not new_handle: 93 self._raise_testerror("%s contains no object named %s" % 94 (self.name, name)) 95 self._sub_handles[name] = SimHandle(new_handle) 96 return self._sub_handles[name] 97 98 def _raise_testerror(self, msg): 99 lastframe = sys._getframe(2) 100 if sys.version_info[0] >= 3: 101 buff = StringIO() 102 traceback.print_stack(lastframe, file=buff) 103 else: 104 buff_bytes = BytesIO() 105 traceback.print_stack(lastframe, file=buff_bytes) 106 buff = StringIO(buff_bytes.getvalue().decode("UTF8")) 107 self.log.error("%s\n%s" % (msg, buff.getvalue())) 108 exception = TestError(msg) 109 exception.stderr.write(buff.getvalue()) 110 buff.close() 111 raise exception 112 113 def __setattr__(self, name, value): 114 """Provide transparent access to signals""" 115 if (not name.startswith('_') and 116 not name in ["name", "fullname", "log", "value"] and 117 self.__hasattr__(name)): 118 getattr(self, name).setcachedvalue(value) 119 return 120 object.__setattr__(self, name, value) 121 122 def __hasattr__(self, name): 123 """ 124 Since calling hasattr(handle, "something") will print out a 125 backtrace to the log since usually attempting to access a 126 non-existent member is an error we provide a 'peek function 127 128 We still add the found handle to our dictionary to prevent leaking 129 handles. 130 """ 131 if name in self._sub_handles: 132 return self._sub_handles[name] 133 new_handle = simulator.get_handle_by_name(self._handle, name) 134 if new_handle: 135 self._sub_handles[name] = SimHandle(new_handle) 136 return new_handle 137 138 def __getitem__(self, index): 139 if index in self._sub_handles: 140 return self._sub_handles[index] 141 new_handle = simulator.get_handle_by_index(self._handle, index) 142 if not new_handle: 143 self._raise_testerror("%s contains no object at index %d" % 144 (self.name, index)) 145 self._sub_handles[index] = SimHandle(new_handle) 146 return self._sub_handles[index] 147 148 def __setitem__(self, index, value): 149 """Provide transparent assignment to bit index""" 150 self.__getitem__(index).setcachedvalue(value) 151 152 def getvalue(self): 153 result = BinaryValue() 154 result.binstr = self._get_value_str() 155 return result 156 157 def setimmediatevalue(self, value): 158 """ 159 Set the value of the underlying simulation object to value. 160 161 Args: 162 value (ctypes.Structure, cocotb.binary.BinaryValue, int) 163 The value to drive onto the simulator object 164 165 Raises: 166 TypeError 167 168 This operation will fail unless the handle refers to a modifiable 169 object eg net, signal or variable. 
170 171 We determine the library call to make based on the type of the value 172 173 Assigning integers less than 32-bits is faster 174 """ 175 if (isinstance(value, get_python_integer_types()) and 176 value < 0x7fffffff): 177 simulator.set_signal_val(self._handle, value) 178 return 179 180 if isinstance(value, ctypes.Structure): 181 value = BinaryValue(value=cocotb.utils.pack(value), bits=len(self)) 182 elif isinstance(value, get_python_integer_types()): 183 value = BinaryValue(value=value, bits=len(self), bigEndian=False) 184 elif not isinstance(value, BinaryValue): 185 self.log.critical("Unsupported type for value assignment: %s (%s)" 186 % (type(value), repr(value))) 187 raise TypeError("Unable to set simulator value with type %s" % 188 (type(value))) 189 190 simulator.set_signal_val_str(self._handle, value.binstr) 191 192 def setcachedvalue(self, value): 193 """Intercept the store of a value and hold in cache. 194 195 This operation is to enable all of the scheduled callbacks to completed 196 with the same read data and for the writes to occour on the next 197 sim time""" 198 cocotb.scheduler.save_write(self, value) 199 200 # We want to maintain compatability with python 2.5 so we can't use 201 # @property with a setter 202 value = property(getvalue, setcachedvalue, None, 203 "A reference to the value") 204 205 def _get_value_str(self): 206 return simulator.get_signal_val(self._handle) 207 208 def __le__(self, value): 209 """Overload the less than or equal to operator to 210 provide an hdl-like shortcut 211 module.signal <= 2 212 """ 213 self.value = value 214 215 def __len__(self): 216 """Returns the 'length' of the underlying object. 217 218 For vectors this is the number of bits. 219 220 TODO: Handle other types (loops, generate etc) 221 """ 222 if self._len is None: 223 self._len = len(self._get_value_str()) 224 return self._len 225 226 def __cmp__(self, other): 227 228 # Permits comparison of handles i.e. if clk == dut.clk 229 if isinstance(other, SimHandle): 230 if self._handle == other._handle: 231 return 0 232 return 1 233 234 # Use the comparison method of the other object against our value 235 return self.value.__cmp__(other) 236 237 def __iter__(self): 238 """Iterates over all known types defined by simulator module""" 239 for handle_type in [simulator.MODULE, 240 simulator.PARAMETER, 241 simulator.REG, 242 simulator.NET, 243 simulator.NETARRAY]: 244 iterator = simulator.iterate(handle_type, self._handle) 245 while True: 246 try: 247 thing = simulator.next(iterator) 248 except StopIteration: 249 break 250 hdl = SimHandle(thing) 251 self._sub_handles[hdl.name] = hdl 252 yield hdl 253 254 def __int__(self): 255 return int(self.value) 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cocotb/handle.py b/cocotb/handle.py --- a/cocotb/handle.py +++ b/cocotb/handle.py @@ -82,7 +82,7 @@ """ Query the simulator for a object with the specified name and cache the result to build a tree """ - # python's builtin dir and IPython's dir2 search for these, + # python's builtin dir and IPython's dir2 search for these, # raise an AttributeError to avoid incorrect calls to _raise_testerror if name in ["__methods__","__members__","trait_names","_getAttributeNames"]: raise AttributeError(name) @@ -150,7 +150,7 @@ self.__getitem__(index).setcachedvalue(value) def getvalue(self): - result = BinaryValue() + result = BinaryValue(bits=len(self)) result.binstr = self._get_value_str() return result
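The one-line change is sufficient because `len(self)` already queries the simulator for the signal's width via `_get_value_str()` and caches it in `self._len`; passing `bits=len(self)` therefore gives the returned `BinaryValue` the configuration the issue reporter found missing. Below is a minimal sketch of the behavior this unlocks; `dut.data_bus` is a hypothetical 8-bit signal invented for illustration, and the usual MSB-first ordering of `binstr` is assumed:

```python
# Illustrative sketch only -- `dut.data_bus` is a made-up 8-bit signal,
# not something taken from the issue or the patch.
value = dut.data_bus.value      # getvalue() now returns BinaryValue(bits=8)
upper_bits = value.binstr[:4]   # plain string slice of the bit string:
                                # the upper four bits, as the reporter wanted
```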
{"golden_diff": "diff --git a/cocotb/handle.py b/cocotb/handle.py\n--- a/cocotb/handle.py\n+++ b/cocotb/handle.py\n@@ -82,7 +82,7 @@\n \"\"\" Query the simulator for a object with the specified name\n and cache the result to build a tree\n \"\"\"\n- # python's builtin dir and IPython's dir2 search for these, \n+ # python's builtin dir and IPython's dir2 search for these,\n # raise an AttributeError to avoid incorrect calls to _raise_testerror\n if name in [\"__methods__\",\"__members__\",\"trait_names\",\"_getAttributeNames\"]:\n raise AttributeError(name)\n@@ -150,7 +150,7 @@\n self.__getitem__(index).setcachedvalue(value)\n \n def getvalue(self):\n- result = BinaryValue()\n+ result = BinaryValue(bits=len(self))\n result.binstr = self._get_value_str()\n return result\n", "issue": "getvalue() in handle.py should configure the returned Binary value\nWhen getting a value from a handle a \"BinaryValue()\" object is created. In my case, I tried to slice that value to get the upper bits, but the call failed because the returned value is not configured.\n\nI believe at a minimum, the Handle should set the number of bits, but ideally it would just configure everything the BinaryValue() object needs before returning it.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''\n\n# -*- coding: utf-8 -*-\n\nimport logging\nimport ctypes\nimport traceback\nimport sys\n# from StringIO import StringIO\n\nfrom io import StringIO, BytesIO\n\nimport os\n\n# For autodocumentation don't need the extension modules\nif \"SPHINX_BUILD\" in os.environ:\n simulator = None\nelse:\n import simulator\n\nimport cocotb\nfrom cocotb.binary import BinaryValue\nfrom cocotb.log import SimLog\nfrom cocotb.result import TestError\nfrom cocotb.triggers import _RisingEdge, _FallingEdge\nfrom cocotb.utils import get_python_integer_types\n\n\nclass SimHandle(object):\n\n def __init__(self, handle):\n \"\"\"\n Args:\n _handle [integer] : vpi/vhpi handle to the simulator object\n \"\"\"\n self._handle = handle # handle used for future simulator transactions\n self._sub_handles = {} # Dict. of SimHandle objects created by getattr\n self._len = None\n\n self.name = simulator.get_name_string(self._handle)\n self.fullname = '%s(%s)' % (self.name,\n simulator.get_type_string(self._handle))\n self.log = SimLog('cocotb.' + self.name)\n self.log.debug(\"Created!\")\n self._r_edge = _RisingEdge(self)\n self._f_edge = _FallingEdge(self)\n\n def __hash__(self):\n return self._handle\n\n def __str__(self):\n return \"%s @0x%x\" % (self.name, self._handle)\n\n def __getattr__(self, name):\n \"\"\" Query the simulator for a object with the specified name\n and cache the result to build a tree\n \"\"\"\n # python's builtin dir and IPython's dir2 search for these, \n # raise an AttributeError to avoid incorrect calls to _raise_testerror\n if name in [\"__methods__\",\"__members__\",\"trait_names\",\"_getAttributeNames\"]:\n raise AttributeError(name)\n if name in self._sub_handles:\n return self._sub_handles[name]\n new_handle = simulator.get_handle_by_name(self._handle, name)\n if not new_handle:\n self._raise_testerror(\"%s contains no object named %s\" %\n (self.name, name))\n self._sub_handles[name] = SimHandle(new_handle)\n return self._sub_handles[name]\n\n def _raise_testerror(self, msg):\n lastframe = sys._getframe(2)\n if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_stack(lastframe, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_stack(lastframe, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF8\"))\n self.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n buff.close()\n raise exception\n\n def __setattr__(self, name, value):\n \"\"\"Provide transparent access to signals\"\"\"\n if (not name.startswith('_') and\n not name in [\"name\", \"fullname\", \"log\", \"value\"] and\n self.__hasattr__(name)):\n getattr(self, name).setcachedvalue(value)\n return\n object.__setattr__(self, name, value)\n\n def __hasattr__(self, name):\n \"\"\"\n Since calling hasattr(handle, \"something\") will print out a\n backtrace to the log since usually attempting to access a\n non-existent member is an error we provide a 'peek function\n\n We still add the found handle to our dictionary to prevent leaking\n handles.\n \"\"\"\n if name in self._sub_handles:\n return self._sub_handles[name]\n new_handle = simulator.get_handle_by_name(self._handle, name)\n if new_handle:\n self._sub_handles[name] = SimHandle(new_handle)\n return new_handle\n\n def __getitem__(self, index):\n if index in self._sub_handles:\n return self._sub_handles[index]\n new_handle = simulator.get_handle_by_index(self._handle, index)\n if not new_handle:\n self._raise_testerror(\"%s contains no object at index %d\" %\n 
(self.name, index))\n self._sub_handles[index] = SimHandle(new_handle)\n return self._sub_handles[index]\n\n def __setitem__(self, index, value):\n \"\"\"Provide transparent assignment to bit index\"\"\"\n self.__getitem__(index).setcachedvalue(value)\n\n def getvalue(self):\n result = BinaryValue()\n result.binstr = self._get_value_str()\n return result\n\n def setimmediatevalue(self, value):\n \"\"\"\n Set the value of the underlying simulation object to value.\n\n Args:\n value (ctypes.Structure, cocotb.binary.BinaryValue, int)\n The value to drive onto the simulator object\n\n Raises:\n TypeError\n\n This operation will fail unless the handle refers to a modifiable\n object eg net, signal or variable.\n\n We determine the library call to make based on the type of the value\n\n Assigning integers less than 32-bits is faster\n \"\"\"\n if (isinstance(value, get_python_integer_types()) and\n value < 0x7fffffff):\n simulator.set_signal_val(self._handle, value)\n return\n\n if isinstance(value, ctypes.Structure):\n value = BinaryValue(value=cocotb.utils.pack(value), bits=len(self))\n elif isinstance(value, get_python_integer_types()):\n value = BinaryValue(value=value, bits=len(self), bigEndian=False)\n elif not isinstance(value, BinaryValue):\n self.log.critical(\"Unsupported type for value assignment: %s (%s)\"\n % (type(value), repr(value)))\n raise TypeError(\"Unable to set simulator value with type %s\" %\n (type(value)))\n\n simulator.set_signal_val_str(self._handle, value.binstr)\n\n def setcachedvalue(self, value):\n \"\"\"Intercept the store of a value and hold in cache.\n\n This operation is to enable all of the scheduled callbacks to completed\n with the same read data and for the writes to occour on the next\n sim time\"\"\"\n cocotb.scheduler.save_write(self, value)\n\n # We want to maintain compatability with python 2.5 so we can't use\n # @property with a setter\n value = property(getvalue, setcachedvalue, None,\n \"A reference to the value\")\n\n def _get_value_str(self):\n return simulator.get_signal_val(self._handle)\n\n def __le__(self, value):\n \"\"\"Overload the less than or equal to operator to\n provide an hdl-like shortcut\n module.signal <= 2\n \"\"\"\n self.value = value\n\n def __len__(self):\n \"\"\"Returns the 'length' of the underlying object.\n\n For vectors this is the number of bits.\n\n TODO: Handle other types (loops, generate etc)\n \"\"\"\n if self._len is None:\n self._len = len(self._get_value_str())\n return self._len\n\n def __cmp__(self, other):\n\n # Permits comparison of handles i.e. 
if clk == dut.clk\n if isinstance(other, SimHandle):\n if self._handle == other._handle:\n return 0\n return 1\n\n # Use the comparison method of the other object against our value\n return self.value.__cmp__(other)\n\n def __iter__(self):\n \"\"\"Iterates over all known types defined by simulator module\"\"\"\n for handle_type in [simulator.MODULE,\n simulator.PARAMETER,\n simulator.REG,\n simulator.NET,\n simulator.NETARRAY]:\n iterator = simulator.iterate(handle_type, self._handle)\n while True:\n try:\n thing = simulator.next(iterator)\n except StopIteration:\n break\n hdl = SimHandle(thing)\n self._sub_handles[hdl.name] = hdl\n yield hdl\n\n def __int__(self):\n return int(self.value)\n", "path": "cocotb/handle.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n# -*- coding: utf-8 -*-\n\nimport logging\nimport ctypes\nimport traceback\nimport sys\n# from StringIO import StringIO\n\nfrom io import StringIO, BytesIO\n\nimport os\n\n# For autodocumentation don't need the extension modules\nif \"SPHINX_BUILD\" in os.environ:\n simulator = None\nelse:\n import simulator\n\nimport cocotb\nfrom cocotb.binary import BinaryValue\nfrom cocotb.log import SimLog\nfrom cocotb.result import TestError\nfrom cocotb.triggers import _RisingEdge, _FallingEdge\nfrom cocotb.utils import get_python_integer_types\n\n\nclass SimHandle(object):\n\n def __init__(self, handle):\n \"\"\"\n Args:\n _handle [integer] : vpi/vhpi handle to the simulator object\n \"\"\"\n self._handle = handle # handle used for future simulator transactions\n self._sub_handles = {} # Dict. of SimHandle objects created by getattr\n self._len = None\n\n self.name = simulator.get_name_string(self._handle)\n self.fullname = '%s(%s)' % (self.name,\n simulator.get_type_string(self._handle))\n self.log = SimLog('cocotb.' 
+ self.name)\n self.log.debug(\"Created!\")\n self._r_edge = _RisingEdge(self)\n self._f_edge = _FallingEdge(self)\n\n def __hash__(self):\n return self._handle\n\n def __str__(self):\n return \"%s @0x%x\" % (self.name, self._handle)\n\n def __getattr__(self, name):\n \"\"\" Query the simulator for a object with the specified name\n and cache the result to build a tree\n \"\"\"\n # python's builtin dir and IPython's dir2 search for these,\n # raise an AttributeError to avoid incorrect calls to _raise_testerror\n if name in [\"__methods__\",\"__members__\",\"trait_names\",\"_getAttributeNames\"]:\n raise AttributeError(name)\n if name in self._sub_handles:\n return self._sub_handles[name]\n new_handle = simulator.get_handle_by_name(self._handle, name)\n if not new_handle:\n self._raise_testerror(\"%s contains no object named %s\" %\n (self.name, name))\n self._sub_handles[name] = SimHandle(new_handle)\n return self._sub_handles[name]\n\n def _raise_testerror(self, msg):\n lastframe = sys._getframe(2)\n if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_stack(lastframe, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_stack(lastframe, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF8\"))\n self.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n buff.close()\n raise exception\n\n def __setattr__(self, name, value):\n \"\"\"Provide transparent access to signals\"\"\"\n if (not name.startswith('_') and\n not name in [\"name\", \"fullname\", \"log\", \"value\"] and\n self.__hasattr__(name)):\n getattr(self, name).setcachedvalue(value)\n return\n object.__setattr__(self, name, value)\n\n def __hasattr__(self, name):\n \"\"\"\n Since calling hasattr(handle, \"something\") will print out a\n backtrace to the log since usually attempting to access a\n non-existent member is an error we provide a 'peek function\n\n We still add the found handle to our dictionary to prevent leaking\n handles.\n \"\"\"\n if name in self._sub_handles:\n return self._sub_handles[name]\n new_handle = simulator.get_handle_by_name(self._handle, name)\n if new_handle:\n self._sub_handles[name] = SimHandle(new_handle)\n return new_handle\n\n def __getitem__(self, index):\n if index in self._sub_handles:\n return self._sub_handles[index]\n new_handle = simulator.get_handle_by_index(self._handle, index)\n if not new_handle:\n self._raise_testerror(\"%s contains no object at index %d\" %\n (self.name, index))\n self._sub_handles[index] = SimHandle(new_handle)\n return self._sub_handles[index]\n\n def __setitem__(self, index, value):\n \"\"\"Provide transparent assignment to bit index\"\"\"\n self.__getitem__(index).setcachedvalue(value)\n\n def getvalue(self):\n result = BinaryValue(bits=len(self))\n result.binstr = self._get_value_str()\n return result\n\n def setimmediatevalue(self, value):\n \"\"\"\n Set the value of the underlying simulation object to value.\n\n Args:\n value (ctypes.Structure, cocotb.binary.BinaryValue, int)\n The value to drive onto the simulator object\n\n Raises:\n TypeError\n\n This operation will fail unless the handle refers to a modifiable\n object eg net, signal or variable.\n\n We determine the library call to make based on the type of the value\n\n Assigning integers less than 32-bits is faster\n \"\"\"\n if (isinstance(value, get_python_integer_types()) and\n value < 0x7fffffff):\n simulator.set_signal_val(self._handle, value)\n return\n\n if isinstance(value, 
ctypes.Structure):\n value = BinaryValue(value=cocotb.utils.pack(value), bits=len(self))\n elif isinstance(value, get_python_integer_types()):\n value = BinaryValue(value=value, bits=len(self), bigEndian=False)\n elif not isinstance(value, BinaryValue):\n self.log.critical(\"Unsupported type for value assignment: %s (%s)\"\n % (type(value), repr(value)))\n raise TypeError(\"Unable to set simulator value with type %s\" %\n (type(value)))\n\n simulator.set_signal_val_str(self._handle, value.binstr)\n\n def setcachedvalue(self, value):\n \"\"\"Intercept the store of a value and hold in cache.\n\n This operation is to enable all of the scheduled callbacks to completed\n with the same read data and for the writes to occour on the next\n sim time\"\"\"\n cocotb.scheduler.save_write(self, value)\n\n # We want to maintain compatability with python 2.5 so we can't use\n # @property with a setter\n value = property(getvalue, setcachedvalue, None,\n \"A reference to the value\")\n\n def _get_value_str(self):\n return simulator.get_signal_val(self._handle)\n\n def __le__(self, value):\n \"\"\"Overload the less than or equal to operator to\n provide an hdl-like shortcut\n module.signal <= 2\n \"\"\"\n self.value = value\n\n def __len__(self):\n \"\"\"Returns the 'length' of the underlying object.\n\n For vectors this is the number of bits.\n\n TODO: Handle other types (loops, generate etc)\n \"\"\"\n if self._len is None:\n self._len = len(self._get_value_str())\n return self._len\n\n def __cmp__(self, other):\n\n # Permits comparison of handles i.e. if clk == dut.clk\n if isinstance(other, SimHandle):\n if self._handle == other._handle:\n return 0\n return 1\n\n # Use the comparison method of the other object against our value\n return self.value.__cmp__(other)\n\n def __iter__(self):\n \"\"\"Iterates over all known types defined by simulator module\"\"\"\n for handle_type in [simulator.MODULE,\n simulator.PARAMETER,\n simulator.REG,\n simulator.NET,\n simulator.NETARRAY]:\n iterator = simulator.iterate(handle_type, self._handle)\n while True:\n try:\n thing = simulator.next(iterator)\n except StopIteration:\n break\n hdl = SimHandle(thing)\n self._sub_handles[hdl.name] = hdl\n yield hdl\n\n def __int__(self):\n return int(self.value)\n", "path": "cocotb/handle.py"}]}
3,088
217
gh_patches_debug_42228
rasdani/github-patches
git_diff
easybuilders__easybuild-framework-2474
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GC3Pie backend submits jobs even if dependency build failed
When robot-building a bunch of software via the GC3Pie job backend with SLURM, GC3Pie happily submits follow-up jobs even if building of one (or more) of the dependencies failed. I am not entirely sure whether this is an issue in EasyBuild, in GC3Pie, or in the way the two are working together. I also don't know whether this also happens with other job schedulers.

Steps to reproduce:
- Start with an empty installdir
- Edit `M4-1.4.17.eb` and add `preconfigopts='false && '`
- Run `eb GCC-5.1.0.eb -r --job` using GC3Pie with SLURM
- Both jobs will be submitted, both fail (the first for obvious reasons, the second because the M4 module can't be found)

--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.

--- BEGIN FILES ---
Path: `easybuild/tools/job/gc3pie.py`
Content:
```
1 ##
2 # Copyright 2015-2018 Ghent University
3 # Copyright 2015 S3IT, University of Zurich
4 #
5 # This file is part of EasyBuild,
6 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
7 # with support of Ghent University (http://ugent.be/hpc),
8 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
9 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
10 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
11 #
12 # https://github.com/easybuilders/easybuild
13 #
14 # EasyBuild is free software: you can redistribute it and/or modify
15 # it under the terms of the GNU General Public License as published by
16 # the Free Software Foundation v2.
17 #
18 # EasyBuild is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
22 #
23 # You should have received a copy of the GNU General Public License
24 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
25 ##
26 """
27 Interface for submitting jobs via GC3Pie. 
28 29 :author: Riccardo Murri (University of Zurich) 30 :author: Kenneth Hoste (Ghent University) 31 """ 32 from distutils.version import LooseVersion 33 from time import gmtime, strftime 34 import re 35 import time 36 37 from vsc.utils import fancylogger 38 39 from easybuild.tools.build_log import EasyBuildError, print_msg 40 from easybuild.tools.config import build_option 41 from easybuild.tools.job.backend import JobBackend 42 from easybuild.tools.utilities import only_if_module_is_available 43 44 45 _log = fancylogger.getLogger('gc3pie', fname=False) 46 47 48 try: 49 import gc3libs 50 import gc3libs.exceptions 51 from gc3libs import Application, Run, create_engine 52 from gc3libs.core import Engine 53 from gc3libs.quantity import hours as hr 54 from gc3libs.workflow import DependentTaskCollection 55 56 # inject EasyBuild logger into GC3Pie 57 gc3libs.log = fancylogger.getLogger('gc3pie', fname=False) 58 # make handling of log.error compatible with stdlib logging 59 gc3libs.log.raiseError = False 60 61 # instruct GC3Pie to not ignore errors, but raise exceptions instead 62 gc3libs.UNIGNORE_ALL_ERRORS = True 63 64 except ImportError as err: 65 _log.debug("Failed to import gc3libs from GC3Pie." 66 " Silently ignoring, this is a real issue only when GC3Pie is used as backend for --job") 67 68 69 # eb --job --job-backend=GC3Pie 70 class GC3Pie(JobBackend): 71 """ 72 Use the GC3Pie framework to submit and monitor compilation jobs, 73 see http://gc3pie.readthedocs.org/. 74 75 In contrast with accessing an external service, GC3Pie implements 76 its own workflow manager, which means ``eb --job 77 --job-backend=GC3Pie`` will keep running until all jobs have 78 terminated. 79 """ 80 81 REQ_VERSION = '2.4.0' 82 VERSION_REGEX = re.compile(r'^(?P<version>\S*) version') 83 84 @only_if_module_is_available('gc3libs', pkgname='gc3pie') 85 def __init__(self, *args, **kwargs): 86 """GC3Pie JobBackend constructor.""" 87 super(GC3Pie, self).__init__(*args, **kwargs) 88 89 # _check_version is called by __init__, so guard it (too) with the decorator 90 @only_if_module_is_available('gc3libs', pkgname='gc3pie') 91 def _check_version(self): 92 """Check whether GC3Pie version complies with required version.""" 93 # location of __version__ to use may change, depending on the minimal required SVN revision for development versions 94 version_str = gc3libs.core.__version__ 95 96 match = self.VERSION_REGEX.search(version_str) 97 if match: 98 version = match.group('version') 99 self.log.debug("Parsed GC3Pie version info: '%s'", version) 100 101 if version == 'development': 102 # presume it's OK -- there's no way to check since GC3Pie switched to git 103 return True 104 105 if LooseVersion(version) < LooseVersion(self.REQ_VERSION): 106 raise EasyBuildError("Found GC3Pie version %s, but version %s or more recent is required", 107 version, self.REQ_VERSION) 108 else: 109 raise EasyBuildError("Failed to parse GC3Pie version string '%s' using pattern %s", 110 version_str, self.VERSION_REGEX.pattern) 111 112 def init(self): 113 """ 114 Initialise the GC3Pie job backend. 115 """ 116 # List of config files for GC3Pie; non-existing ones will be 117 # silently ignored. The list here copies GC3Pie's default, 118 # for the principle of minimal surprise, but there is no 119 # strict requirement that this be done and EB could actually 120 # choose to use a completely distinct set of conf. files. 
121 self.config_files = gc3libs.Default.CONFIG_FILE_LOCATIONS[:] 122 cfgfile = build_option('job_backend_config') 123 if cfgfile: 124 self.config_files.append(cfgfile) 125 126 self.output_dir = build_option('job_output_dir') 127 self.jobs = DependentTaskCollection(output_dir=self.output_dir) 128 self.job_cnt = 0 129 130 # after polling for job status, sleep for this time duration 131 # before polling again (in seconds) 132 self.poll_interval = build_option('job_polling_interval') 133 134 def make_job(self, script, name, env_vars=None, hours=None, cores=None): 135 """ 136 Create and return a job object with the given parameters. 137 138 First argument `server` is an instance of the corresponding 139 `JobBackend` class, i.e., a `GC3Pie`:class: instance in this case. 140 141 Second argument `script` is the content of the job script 142 itself, i.e., the sequence of shell commands that will be 143 executed. 144 145 Third argument `name` sets the job human-readable name. 146 147 Fourth (optional) argument `env_vars` is a dictionary with 148 key-value pairs of environment variables that should be passed 149 on to the job. 150 151 Fifth and sixth (optional) arguments `hours` and `cores` should be 152 integer values: 153 * hours must be in the range 1 .. MAX_WALLTIME; 154 * cores depends on which cluster the job is being run. 155 """ 156 named_args = { 157 'jobname': name, # job name in GC3Pie 158 'name': name, # job name in EasyBuild 159 } 160 161 # environment 162 if env_vars: 163 named_args['environment'] = env_vars 164 165 # input/output files for job (none) 166 named_args['inputs'] = [] 167 named_args['outputs'] = [] 168 169 # job logs 170 named_args.update({ 171 # join stdout/stderr in a single log 172 'join': True, 173 # location for log file 174 'output_dir': self.output_dir, 175 # log file name (including timestamp to try and ensure unique filename) 176 'stdout': 'eb-%s-gc3pie-job-%s.log' % (name, strftime("%Y%M%d-UTC-%H-%M-%S", gmtime())) 177 }) 178 179 # walltime 180 max_walltime = build_option('job_max_walltime') 181 if hours is None: 182 hours = max_walltime 183 if hours > max_walltime: 184 self.log.warn("Specified %s hours, but this is impossible. (resetting to %s hours)" % (hours, max_walltime)) 185 hours = max_walltime 186 named_args['requested_walltime'] = hours * hr 187 188 if cores: 189 named_args['requested_cores'] = cores 190 else: 191 self.log.warn("Number of cores to request not specified, falling back to whatever GC3Pie does by default") 192 193 return Application(['/bin/sh', '-c', script], **named_args) 194 195 def queue(self, job, dependencies=frozenset()): 196 """ 197 Add a job to the queue, optionally specifying dependencies. 198 199 :param dependencies: jobs on which this job depends. 200 """ 201 self.jobs.add(job, dependencies) 202 # since it's not trivial to determine the correct job count from self.jobs, we keep track of a count ourselves 203 self.job_cnt += 1 204 205 def complete(self): 206 """ 207 Complete a bulk job submission. 208 209 Create engine, and progress it until all jobs have terminated. 
210 """ 211 # create an instance of `Engine` using the list of configuration files 212 try: 213 self._engine = create_engine(*self.config_files, resource_errors_are_fatal=True) 214 215 except gc3libs.exceptions.Error as err: 216 raise EasyBuildError("Failed to create GC3Pie engine: %s", err) 217 218 # make sure that all job log files end up in the same directory, rather than renaming the output directory 219 # see https://gc3pie.readthedocs.org/en/latest/programmers/api/gc3libs/core.html#gc3libs.core.Engine 220 self._engine.retrieve_overwrites = True 221 222 # some sites may not be happy with flooding the cluster with build jobs... 223 self._engine.max_in_flight = build_option('job_max_jobs') 224 225 # `Engine.stats()` (which is used later on in `_print_status_report()`) 226 # changed between 2.4.2 and 2.5.0.dev -- make sure we stay compatible 227 # with both 228 try: 229 self._engine.init_stats_for(Application) 230 except AttributeError: 231 _log.debug("No `init_stats_for` method in the Engine class;" 232 " assuming pre-2.5.0 GC3Pie and ignoring error.") 233 234 # Add your application to the engine. This will NOT submit 235 # your application yet, but will make the engine *aware* of 236 # the application. 237 self._engine.add(self.jobs) 238 239 # in case you want to select a specific resource, call 240 target_resource = build_option('job_target_resource') 241 if target_resource: 242 res = self._engine.select_resource(target_resource) 243 if res == 0: 244 raise EasyBuildError("Failed to select target resource '%s' in GC3Pie", target_resource) 245 246 # Periodically check the status of your application. 247 while self.jobs.execution.state != Run.State.TERMINATED: 248 # `Engine.progress()` will do the GC3Pie magic: 249 # submit new jobs, update status of submitted jobs, get 250 # results of terminating jobs etc... 251 self._engine.progress() 252 253 # report progress 254 self._print_status_report() 255 256 # Wait a few seconds... 257 time.sleep(self.poll_interval) 258 259 # final status report 260 print_msg("Done processing jobs", log=self.log, silent=build_option('silent')) 261 self._print_status_report() 262 263 def _print_status_report(self): 264 """ 265 Print a job status report to STDOUT and the log file. 266 267 The number of jobs in each state is reported; the 268 figures are extracted from the `stats()` method of the 269 currently-running GC3Pie engine. 270 """ 271 stats = self._engine.stats(only=Application) 272 states = ', '.join(["%d %s" % (stats[s], s.lower()) for s in stats if s != 'total' and stats[s]]) 273 print_msg("GC3Pie job overview: %s (total: %s)" % (states, self.job_cnt), 274 log=self.log, silent=build_option('silent')) 275 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/easybuild/tools/job/gc3pie.py b/easybuild/tools/job/gc3pie.py --- a/easybuild/tools/job/gc3pie.py +++ b/easybuild/tools/job/gc3pie.py @@ -51,7 +51,7 @@ from gc3libs import Application, Run, create_engine from gc3libs.core import Engine from gc3libs.quantity import hours as hr - from gc3libs.workflow import DependentTaskCollection + from gc3libs.workflow import AbortOnError, DependentTaskCollection # inject EasyBuild logger into GC3Pie gc3libs.log = fancylogger.getLogger('gc3pie', fname=False) @@ -61,6 +61,16 @@ # instruct GC3Pie to not ignore errors, but raise exceptions instead gc3libs.UNIGNORE_ALL_ERRORS = True + # note: order of class inheritance is important! + class _BuildTaskCollection(AbortOnError, DependentTaskCollection): + """ + A `DependentTaskCollection`:class: that aborts execution upon error. + + This is used to stop the build process in case some dependency + fails. See also `<https://github.com/easybuilders/easybuild-framework/issues/1441>`_ + """ + pass + except ImportError as err: _log.debug("Failed to import gc3libs from GC3Pie." " Silently ignoring, this is a real issue only when GC3Pie is used as backend for --job") @@ -124,7 +134,7 @@ self.config_files.append(cfgfile) self.output_dir = build_option('job_output_dir') - self.jobs = DependentTaskCollection(output_dir=self.output_dir) + self.jobs = _BuildTaskCollection(output_dir=self.output_dir) self.job_cnt = 0 # after polling for job status, sleep for this time duration @@ -135,23 +145,20 @@ """ Create and return a job object with the given parameters. - First argument `server` is an instance of the corresponding - `JobBackend` class, i.e., a `GC3Pie`:class: instance in this case. - - Second argument `script` is the content of the job script + Argument *script* is the content of the job script itself, i.e., the sequence of shell commands that will be executed. - Third argument `name` sets the job human-readable name. + Argument *name* sets the job's human-readable name. - Fourth (optional) argument `env_vars` is a dictionary with + Optional argument *env_vars* is a dictionary with key-value pairs of environment variables that should be passed on to the job. - Fifth and sixth (optional) arguments `hours` and `cores` should be + Optional arguments *hours* and *cores* should be integer values: - * hours must be in the range 1 .. MAX_WALLTIME; - * cores depends on which cluster the job is being run. + - *hours* must be in the range 1 .. ``MAX_WALLTIME``; + - *cores* depends on which cluster the job is being run. """ named_args = { 'jobname': name, # job name in GC3Pie @@ -236,7 +243,7 @@ # the application. self._engine.add(self.jobs) - # in case you want to select a specific resource, call + # select a specific execution resource? target_resource = build_option('job_target_resource') if target_resource: res = self._engine.select_resource(target_resource)
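The heart of this patch is `_BuildTaskCollection`, and the inline note "order of class inheritance is important!" is the operative detail: `AbortOnError` must be listed before `DependentTaskCollection` so that its failure handling wins in Python's method resolution order. The standalone sketch below illustrates that mechanism with toy classes; the class and method names are invented for the example and are not gc3libs API:

```python
# Toy stand-ins for the gc3libs classes; only the MRO point carries over.
class KeepGoingCollection:
    def on_child_failed(self):
        return "keep submitting dependent jobs"

class AbortMixin:
    def on_child_failed(self):
        return "abort the remaining jobs"

class WrongOrder(KeepGoingCollection, AbortMixin):  # mixin last: no effect
    pass

class RightOrder(AbortMixin, KeepGoingCollection):  # mixin first: it wins
    pass

assert WrongOrder().on_child_failed() == "keep submitting dependent jobs"
assert RightOrder().on_child_failed() == "abort the remaining jobs"
```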
{"golden_diff": "diff --git a/easybuild/tools/job/gc3pie.py b/easybuild/tools/job/gc3pie.py\n--- a/easybuild/tools/job/gc3pie.py\n+++ b/easybuild/tools/job/gc3pie.py\n@@ -51,7 +51,7 @@\n from gc3libs import Application, Run, create_engine\n from gc3libs.core import Engine\n from gc3libs.quantity import hours as hr\n- from gc3libs.workflow import DependentTaskCollection\n+ from gc3libs.workflow import AbortOnError, DependentTaskCollection\n \n # inject EasyBuild logger into GC3Pie\n gc3libs.log = fancylogger.getLogger('gc3pie', fname=False)\n@@ -61,6 +61,16 @@\n # instruct GC3Pie to not ignore errors, but raise exceptions instead\n gc3libs.UNIGNORE_ALL_ERRORS = True\n \n+ # note: order of class inheritance is important!\n+ class _BuildTaskCollection(AbortOnError, DependentTaskCollection):\n+ \"\"\"\n+ A `DependentTaskCollection`:class: that aborts execution upon error.\n+\n+ This is used to stop the build process in case some dependency\n+ fails. See also `<https://github.com/easybuilders/easybuild-framework/issues/1441>`_\n+ \"\"\"\n+ pass\n+\n except ImportError as err:\n _log.debug(\"Failed to import gc3libs from GC3Pie.\"\n \" Silently ignoring, this is a real issue only when GC3Pie is used as backend for --job\")\n@@ -124,7 +134,7 @@\n self.config_files.append(cfgfile)\n \n self.output_dir = build_option('job_output_dir')\n- self.jobs = DependentTaskCollection(output_dir=self.output_dir)\n+ self.jobs = _BuildTaskCollection(output_dir=self.output_dir)\n self.job_cnt = 0\n \n # after polling for job status, sleep for this time duration\n@@ -135,23 +145,20 @@\n \"\"\"\n Create and return a job object with the given parameters.\n \n- First argument `server` is an instance of the corresponding\n- `JobBackend` class, i.e., a `GC3Pie`:class: instance in this case.\n-\n- Second argument `script` is the content of the job script\n+ Argument *script* is the content of the job script\n itself, i.e., the sequence of shell commands that will be\n executed.\n \n- Third argument `name` sets the job human-readable name.\n+ Argument *name* sets the job's human-readable name.\n \n- Fourth (optional) argument `env_vars` is a dictionary with\n+ Optional argument *env_vars* is a dictionary with\n key-value pairs of environment variables that should be passed\n on to the job.\n \n- Fifth and sixth (optional) arguments `hours` and `cores` should be\n+ Optional arguments *hours* and *cores* should be\n integer values:\n- * hours must be in the range 1 .. MAX_WALLTIME;\n- * cores depends on which cluster the job is being run.\n+ - *hours* must be in the range 1 .. ``MAX_WALLTIME``;\n+ - *cores* depends on which cluster the job is being run.\n \"\"\"\n named_args = {\n 'jobname': name, # job name in GC3Pie\n@@ -236,7 +243,7 @@\n # the application.\n self._engine.add(self.jobs)\n \n- # in case you want to select a specific resource, call\n+ # select a specific execution resource?\n target_resource = build_option('job_target_resource')\n if target_resource:\n res = self._engine.select_resource(target_resource)\n", "issue": "GC3Pie backend submits jobs even if dependency build failed\nWhen robot-building a bunch of software via the GC3Pie job backend with SLURM, GC3Pie happily submits follow-up jobs even if building of one (or more) of the dependencies failed. I am not entirely sure whether this is an issue in EasyBuild, in GC3Pie, or in the way the two are working together. 
I also don't know whether this also happens with other job schedulers.\n\nSteps to reproduce:\n- Start with an empty installdir\n- Edit \`M4-1.4.17.eb\` and add \`preconfigopts='false && '\`\n- Run \`eb GCC-5.1.0.eb -r --job\` using GC3Pie with SLURM\n- Both jobs will be submitted, both fail (the first for obvious reasons, the second because the M4 module can't be found)\n\n", "before_files": [{"content": "##\n# Copyright 2015-2018 Ghent University\n# Copyright 2015 S3IT, University of Zurich\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nInterface for submitting jobs via GC3Pie.\n\n:author: Riccardo Murri (University of Zurich)\n:author: Kenneth Hoste (Ghent University)\n\"\"\"\nfrom distutils.version import LooseVersion\nfrom time import gmtime, strftime\nimport re\nimport time\n\nfrom vsc.utils import fancylogger\n\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.job.backend import JobBackend\nfrom easybuild.tools.utilities import only_if_module_is_available\n\n\n_log = fancylogger.getLogger('gc3pie', fname=False)\n\n\ntry:\n import gc3libs\n import gc3libs.exceptions\n from gc3libs import Application, Run, create_engine\n from gc3libs.core import Engine\n from gc3libs.quantity import hours as hr\n from gc3libs.workflow import DependentTaskCollection\n\n # inject EasyBuild logger into GC3Pie\n gc3libs.log = fancylogger.getLogger('gc3pie', fname=False)\n # make handling of log.error compatible with stdlib logging\n gc3libs.log.raiseError = False\n\n # instruct GC3Pie to not ignore errors, but raise exceptions instead\n gc3libs.UNIGNORE_ALL_ERRORS = True\n\nexcept ImportError as err:\n _log.debug(\"Failed to import gc3libs from GC3Pie.\"\n \" Silently ignoring, this is a real issue only when GC3Pie is used as backend for --job\")\n\n\n# eb --job --job-backend=GC3Pie\nclass GC3Pie(JobBackend):\n \"\"\"\n Use the GC3Pie framework to submit and monitor compilation jobs,\n see http://gc3pie.readthedocs.org/.\n\n In contrast with accessing an external service, GC3Pie implements\n its own workflow manager, which means ``eb --job\n --job-backend=GC3Pie`` will keep running until all jobs have\n terminated.\n \"\"\"\n\n REQ_VERSION = '2.4.0'\n VERSION_REGEX = re.compile(r'^(?P<version>\\S*) version')\n\n @only_if_module_is_available('gc3libs', pkgname='gc3pie')\n def __init__(self, *args, **kwargs):\n \"\"\"GC3Pie JobBackend constructor.\"\"\"\n super(GC3Pie, self).__init__(*args, **kwargs)\n\n # _check_version is called by __init__, so guard it (too) with the decorator\n @only_if_module_is_available('gc3libs', pkgname='gc3pie')\n def _check_version(self):\n \"\"\"Check whether GC3Pie version complies with required version.\"\"\"\n # location of __version__ to use may change, depending on the minimal required SVN revision for development versions\n version_str = gc3libs.core.__version__\n\n match = self.VERSION_REGEX.search(version_str)\n if match:\n version = match.group('version')\n self.log.debug(\"Parsed GC3Pie version info: '%s'\", version)\n\n if version == 'development':\n # presume it's OK -- there's no way to check since GC3Pie switched to git\n return True\n\n if LooseVersion(version) < LooseVersion(self.REQ_VERSION):\n raise EasyBuildError(\"Found GC3Pie version %s, but version %s or more recent is required\",\n version, self.REQ_VERSION)\n else:\n raise EasyBuildError(\"Failed to parse GC3Pie version string '%s' using pattern %s\",\n version_str, self.VERSION_REGEX.pattern)\n\n def init(self):\n \"\"\"\n Initialise the GC3Pie job backend.\n \"\"\"\n # List of config files for GC3Pie; non-existing ones will be\n # silently ignored. The list here copies GC3Pie's default,\n # for the principle of minimal surprise, but there is no\n # strict requirement that this be done and EB could actually\n # choose to use a completely distinct set of conf. 
files.\n self.config_files = gc3libs.Default.CONFIG_FILE_LOCATIONS[:]\n cfgfile = build_option('job_backend_config')\n if cfgfile:\n self.config_files.append(cfgfile)\n\n self.output_dir = build_option('job_output_dir')\n self.jobs = DependentTaskCollection(output_dir=self.output_dir)\n self.job_cnt = 0\n\n # after polling for job status, sleep for this time duration\n # before polling again (in seconds)\n self.poll_interval = build_option('job_polling_interval')\n\n def make_job(self, script, name, env_vars=None, hours=None, cores=None):\n \"\"\"\n Create and return a job object with the given parameters.\n\n First argument `server` is an instance of the corresponding\n `JobBackend` class, i.e., a `GC3Pie`:class: instance in this case.\n\n Second argument `script` is the content of the job script\n itself, i.e., the sequence of shell commands that will be\n executed.\n\n Third argument `name` sets the job human-readable name.\n\n Fourth (optional) argument `env_vars` is a dictionary with\n key-value pairs of environment variables that should be passed\n on to the job.\n\n Fifth and sixth (optional) arguments `hours` and `cores` should be\n integer values:\n * hours must be in the range 1 .. MAX_WALLTIME;\n * cores depends on which cluster the job is being run.\n \"\"\"\n named_args = {\n 'jobname': name, # job name in GC3Pie\n 'name': name, # job name in EasyBuild\n }\n\n # environment\n if env_vars:\n named_args['environment'] = env_vars\n\n # input/output files for job (none)\n named_args['inputs'] = []\n named_args['outputs'] = []\n\n # job logs\n named_args.update({\n # join stdout/stderr in a single log\n 'join': True,\n # location for log file\n 'output_dir': self.output_dir,\n # log file name (including timestamp to try and ensure unique filename)\n 'stdout': 'eb-%s-gc3pie-job-%s.log' % (name, strftime(\"%Y%M%d-UTC-%H-%M-%S\", gmtime()))\n })\n\n # walltime\n max_walltime = build_option('job_max_walltime')\n if hours is None:\n hours = max_walltime\n if hours > max_walltime:\n self.log.warn(\"Specified %s hours, but this is impossible. 
(resetting to %s hours)\" % (hours, max_walltime))\n hours = max_walltime\n named_args['requested_walltime'] = hours * hr\n\n if cores:\n named_args['requested_cores'] = cores\n else:\n self.log.warn(\"Number of cores to request not specified, falling back to whatever GC3Pie does by default\")\n\n return Application(['/bin/sh', '-c', script], **named_args)\n\n def queue(self, job, dependencies=frozenset()):\n \"\"\"\n Add a job to the queue, optionally specifying dependencies.\n\n :param dependencies: jobs on which this job depends.\n \"\"\"\n self.jobs.add(job, dependencies)\n # since it's not trivial to determine the correct job count from self.jobs, we keep track of a count ourselves\n self.job_cnt += 1\n\n def complete(self):\n \"\"\"\n Complete a bulk job submission.\n\n Create engine, and progress it until all jobs have terminated.\n \"\"\"\n # create an instance of `Engine` using the list of configuration files\n try:\n self._engine = create_engine(*self.config_files, resource_errors_are_fatal=True)\n\n except gc3libs.exceptions.Error as err:\n raise EasyBuildError(\"Failed to create GC3Pie engine: %s\", err)\n\n # make sure that all job log files end up in the same directory, rather than renaming the output directory\n # see https://gc3pie.readthedocs.org/en/latest/programmers/api/gc3libs/core.html#gc3libs.core.Engine\n self._engine.retrieve_overwrites = True\n\n # some sites may not be happy with flooding the cluster with build jobs...\n self._engine.max_in_flight = build_option('job_max_jobs')\n\n # `Engine.stats()` (which is used later on in `_print_status_report()`)\n # changed between 2.4.2 and 2.5.0.dev -- make sure we stay compatible\n # with both\n try:\n self._engine.init_stats_for(Application)\n except AttributeError:\n _log.debug(\"No `init_stats_for` method in the Engine class;\"\n \" assuming pre-2.5.0 GC3Pie and ignoring error.\")\n\n # Add your application to the engine. 
This will NOT submit\n # your application yet, but will make the engine *aware* of\n # the application.\n self._engine.add(self.jobs)\n\n # in case you want to select a specific resource, call\n target_resource = build_option('job_target_resource')\n if target_resource:\n res = self._engine.select_resource(target_resource)\n if res == 0:\n raise EasyBuildError(\"Failed to select target resource '%s' in GC3Pie\", target_resource)\n\n # Periodically check the status of your application.\n while self.jobs.execution.state != Run.State.TERMINATED:\n # `Engine.progress()` will do the GC3Pie magic:\n # submit new jobs, update status of submitted jobs, get\n # results of terminating jobs etc...\n self._engine.progress()\n\n # report progress\n self._print_status_report()\n\n # Wait a few seconds...\n time.sleep(self.poll_interval)\n\n # final status report\n print_msg(\"Done processing jobs\", log=self.log, silent=build_option('silent'))\n self._print_status_report()\n\n def _print_status_report(self):\n \"\"\"\n Print a job status report to STDOUT and the log file.\n\n The number of jobs in each state is reported; the\n figures are extracted from the `stats()` method of the\n currently-running GC3Pie engine.\n \"\"\"\n stats = self._engine.stats(only=Application)\n states = ', '.join([\"%d %s\" % (stats[s], s.lower()) for s in stats if s != 'total' and stats[s]])\n print_msg(\"GC3Pie job overview: %s (total: %s)\" % (states, self.job_cnt),\n log=self.log, silent=build_option('silent'))\n", "path": "easybuild/tools/job/gc3pie.py"}], "after_files": [{"content": "##\n# Copyright 2015-2018 Ghent University\n# Copyright 2015 S3IT, University of Zurich\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nInterface for submitting jobs via GC3Pie.\n\n:author: Riccardo Murri (University of Zurich)\n:author: Kenneth Hoste (Ghent University)\n\"\"\"\nfrom distutils.version import LooseVersion\nfrom time import gmtime, strftime\nimport re\nimport time\n\nfrom vsc.utils import fancylogger\n\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.job.backend import JobBackend\nfrom easybuild.tools.utilities import only_if_module_is_available\n\n\n_log = fancylogger.getLogger('gc3pie', fname=False)\n\n\ntry:\n import gc3libs\n import gc3libs.exceptions\n from gc3libs import Application, Run, create_engine\n from gc3libs.core import Engine\n from gc3libs.quantity import hours as hr\n from gc3libs.workflow import AbortOnError, DependentTaskCollection\n\n # inject EasyBuild logger into GC3Pie\n gc3libs.log = fancylogger.getLogger('gc3pie', fname=False)\n # make handling of log.error compatible with stdlib logging\n gc3libs.log.raiseError = False\n\n # instruct GC3Pie to not ignore errors, but raise exceptions instead\n gc3libs.UNIGNORE_ALL_ERRORS = True\n\n # note: order of class inheritance is important!\n class _BuildTaskCollection(AbortOnError, DependentTaskCollection):\n \"\"\"\n A `DependentTaskCollection`:class: that aborts execution upon error.\n\n This is used to stop the build process in case some dependency\n fails. See also `<https://github.com/easybuilders/easybuild-framework/issues/1441>`_\n \"\"\"\n pass\n\nexcept ImportError as err:\n _log.debug(\"Failed to import gc3libs from GC3Pie.\"\n \" Silently ignoring, this is a real issue only when GC3Pie is used as backend for --job\")\n\n\n# eb --job --job-backend=GC3Pie\nclass GC3Pie(JobBackend):\n \"\"\"\n Use the GC3Pie framework to submit and monitor compilation jobs,\n see http://gc3pie.readthedocs.org/.\n\n In contrast with accessing an external service, GC3Pie implements\n its own workflow manager, which means ``eb --job\n --job-backend=GC3Pie`` will keep running until all jobs have\n terminated.\n \"\"\"\n\n REQ_VERSION = '2.4.0'\n VERSION_REGEX = re.compile(r'^(?P<version>\\S*) version')\n\n @only_if_module_is_available('gc3libs', pkgname='gc3pie')\n def __init__(self, *args, **kwargs):\n \"\"\"GC3Pie JobBackend constructor.\"\"\"\n super(GC3Pie, self).__init__(*args, **kwargs)\n\n # _check_version is called by __init__, so guard it (too) with the decorator\n @only_if_module_is_available('gc3libs', pkgname='gc3pie')\n def _check_version(self):\n \"\"\"Check whether GC3Pie version complies with required version.\"\"\"\n # location of __version__ to use may change, depending on the minimal required SVN revision for development versions\n version_str = gc3libs.core.__version__\n\n match = self.VERSION_REGEX.search(version_str)\n if match:\n version = match.group('version')\n self.log.debug(\"Parsed GC3Pie version info: '%s'\", version)\n\n if version == 'development':\n # presume it's OK -- there's no way to check since GC3Pie switched to git\n return True\n\n if LooseVersion(version) < LooseVersion(self.REQ_VERSION):\n raise EasyBuildError(\"Found GC3Pie version %s, but version %s or more recent is required\",\n version, self.REQ_VERSION)\n else:\n raise EasyBuildError(\"Failed to parse GC3Pie version string '%s' using pattern %s\",\n version_str, self.VERSION_REGEX.pattern)\n\n def init(self):\n \"\"\"\n Initialise the GC3Pie job backend.\n \"\"\"\n # List of config files for GC3Pie; non-existing 
ones will be\n # silently ignored. The list here copies GC3Pie's default,\n # for the principle of minimal surprise, but there is no\n # strict requirement that this be done and EB could actually\n # choose to use a completely distinct set of conf. files.\n self.config_files = gc3libs.Default.CONFIG_FILE_LOCATIONS[:]\n cfgfile = build_option('job_backend_config')\n if cfgfile:\n self.config_files.append(cfgfile)\n\n self.output_dir = build_option('job_output_dir')\n self.jobs = _BuildTaskCollection(output_dir=self.output_dir)\n self.job_cnt = 0\n\n # after polling for job status, sleep for this time duration\n # before polling again (in seconds)\n self.poll_interval = build_option('job_polling_interval')\n\n def make_job(self, script, name, env_vars=None, hours=None, cores=None):\n \"\"\"\n Create and return a job object with the given parameters.\n\n Argument *script* is the content of the job script\n itself, i.e., the sequence of shell commands that will be\n executed.\n\n Argument *name* sets the job's human-readable name.\n\n Optional argument *env_vars* is a dictionary with\n key-value pairs of environment variables that should be passed\n on to the job.\n\n Optional arguments *hours* and *cores* should be\n integer values:\n - *hours* must be in the range 1 .. ``MAX_WALLTIME``;\n - *cores* depends on which cluster the job is being run.\n \"\"\"\n named_args = {\n 'jobname': name, # job name in GC3Pie\n 'name': name, # job name in EasyBuild\n }\n\n # environment\n if env_vars:\n named_args['environment'] = env_vars\n\n # input/output files for job (none)\n named_args['inputs'] = []\n named_args['outputs'] = []\n\n # job logs\n named_args.update({\n # join stdout/stderr in a single log\n 'join': True,\n # location for log file\n 'output_dir': self.output_dir,\n # log file name (including timestamp to try and ensure unique filename)\n 'stdout': 'eb-%s-gc3pie-job-%s.log' % (name, strftime(\"%Y%M%d-UTC-%H-%M-%S\", gmtime()))\n })\n\n # walltime\n max_walltime = build_option('job_max_walltime')\n if hours is None:\n hours = max_walltime\n if hours > max_walltime:\n self.log.warn(\"Specified %s hours, but this is impossible. 
(resetting to %s hours)\" % (hours, max_walltime))\n hours = max_walltime\n named_args['requested_walltime'] = hours * hr\n\n if cores:\n named_args['requested_cores'] = cores\n else:\n self.log.warn(\"Number of cores to request not specified, falling back to whatever GC3Pie does by default\")\n\n return Application(['/bin/sh', '-c', script], **named_args)\n\n def queue(self, job, dependencies=frozenset()):\n \"\"\"\n Add a job to the queue, optionally specifying dependencies.\n\n :param dependencies: jobs on which this job depends.\n \"\"\"\n self.jobs.add(job, dependencies)\n # since it's not trivial to determine the correct job count from self.jobs, we keep track of a count ourselves\n self.job_cnt += 1\n\n def complete(self):\n \"\"\"\n Complete a bulk job submission.\n\n Create engine, and progress it until all jobs have terminated.\n \"\"\"\n # create an instance of `Engine` using the list of configuration files\n try:\n self._engine = create_engine(*self.config_files, resource_errors_are_fatal=True)\n\n except gc3libs.exceptions.Error as err:\n raise EasyBuildError(\"Failed to create GC3Pie engine: %s\", err)\n\n # make sure that all job log files end up in the same directory, rather than renaming the output directory\n # see https://gc3pie.readthedocs.org/en/latest/programmers/api/gc3libs/core.html#gc3libs.core.Engine\n self._engine.retrieve_overwrites = True\n\n # some sites may not be happy with flooding the cluster with build jobs...\n self._engine.max_in_flight = build_option('job_max_jobs')\n\n # `Engine.stats()` (which is used later on in `_print_status_report()`)\n # changed between 2.4.2 and 2.5.0.dev -- make sure we stay compatible\n # with both\n try:\n self._engine.init_stats_for(Application)\n except AttributeError:\n _log.debug(\"No `init_stats_for` method in the Engine class;\"\n \" assuming pre-2.5.0 GC3Pie and ignoring error.\")\n\n # Add your application to the engine. This will NOT submit\n # your application yet, but will make the engine *aware* of\n # the application.\n self._engine.add(self.jobs)\n\n # select a specific execution resource?\n target_resource = build_option('job_target_resource')\n if target_resource:\n res = self._engine.select_resource(target_resource)\n if res == 0:\n raise EasyBuildError(\"Failed to select target resource '%s' in GC3Pie\", target_resource)\n\n # Periodically check the status of your application.\n while self.jobs.execution.state != Run.State.TERMINATED:\n # `Engine.progress()` will do the GC3Pie magic:\n # submit new jobs, update status of submitted jobs, get\n # results of terminating jobs etc...\n self._engine.progress()\n\n # report progress\n self._print_status_report()\n\n # Wait a few seconds...\n time.sleep(self.poll_interval)\n\n # final status report\n print_msg(\"Done processing jobs\", log=self.log, silent=build_option('silent'))\n self._print_status_report()\n\n def _print_status_report(self):\n \"\"\"\n Print a job status report to STDOUT and the log file.\n\n The number of jobs in each state is reported; the\n figures are extracted from the `stats()` method of the\n currently-running GC3Pie engine.\n \"\"\"\n stats = self._engine.stats(only=Application)\n states = ', '.join([\"%d %s\" % (stats[s], s.lower()) for s in stats if s != 'total' and stats[s]])\n print_msg(\"GC3Pie job overview: %s (total: %s)\" % (states, self.job_cnt),\n log=self.log, silent=build_option('silent'))\n", "path": "easybuild/tools/job/gc3pie.py"}]}
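Taken together, the GC3Pie backend above reduces to one submit-and-poll pattern: register tasks with an engine, then call `progress()` in a loop until the collection reaches `TERMINATED`. A minimal sketch of that pattern follows, using only the `gc3libs` names that appear in the file itself; the job command, output directory, and poll interval are placeholder assumptions.

```python
import time

from gc3libs import Application, Run, create_engine

# Build one task, mirroring the keyword arguments make_job() assembles.
# The shell command and output_dir here are placeholder assumptions.
app = Application(
    ['/bin/sh', '-c', 'echo hello'],
    inputs=[],
    outputs=[],
    output_dir='/tmp/gc3pie-demo',
    jobname='demo',
)

engine = create_engine()  # with no arguments, reads GC3Pie's default config files
engine.add(app)           # makes the engine *aware* of the task; nothing is submitted yet

while app.execution.state != Run.State.TERMINATED:
    engine.progress()     # submit new jobs, refresh states, fetch results
    time.sleep(30)        # poll interval, standing in for build_option('job_polling_interval')
```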
num_tokens: 3914
num_tokens_diff: 826

problem_id: gh_patches_debug_20978
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-poetry__poetry-1635
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 1.0.0b5 removes my private source from the lockfile <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **OS version and name**: macOS 10.14.6 (18G103) - **Poetry version**: <!-- 1.0.0b5 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: <!-- Gist Link Here --> https://gist.github.com/JBKahn/4bbb040c0181d974e0b71407ad764b6c ## Issue When moving from `1.0.0b4` to `1.0.0b5` `poetry lock` is removing the ``` [package.source] reference = "private" type = "legacy" url = "https://private.url" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `poetry/packages/locker.py` Content: ``` 1 import json 2 import re 3 4 from hashlib import sha256 5 from typing import List 6 7 from tomlkit import document 8 from tomlkit import inline_table 9 from tomlkit import item 10 from tomlkit import table 11 from tomlkit.exceptions import TOMLKitError 12 13 import poetry.packages 14 import poetry.repositories 15 16 from poetry.utils._compat import Path 17 from poetry.utils.toml_file import TomlFile 18 from poetry.version.markers import parse_marker 19 20 21 class Locker(object): 22 23 _relevant_keys = ["dependencies", "dev-dependencies", "source", "extras"] 24 25 def __init__(self, lock, local_config): # type: (Path, dict) -> None 26 self._lock = TomlFile(lock) 27 self._local_config = local_config 28 self._lock_data = None 29 self._content_hash = self._get_content_hash() 30 31 @property 32 def lock(self): # type: () -> TomlFile 33 return self._lock 34 35 @property 36 def lock_data(self): 37 if self._lock_data is None: 38 self._lock_data = self._get_lock_data() 39 40 return self._lock_data 41 42 def is_locked(self): # type: () -> bool 43 """ 44 Checks whether the locker has been locked (lockfile found). 45 """ 46 if not self._lock.exists(): 47 return False 48 49 return "package" in self.lock_data 50 51 def is_fresh(self): # type: () -> bool 52 """ 53 Checks whether the lock file is still up to date with the current hash. 54 """ 55 lock = self._lock.read() 56 metadata = lock.get("metadata", {}) 57 58 if "content-hash" in metadata: 59 return self._content_hash == lock["metadata"]["content-hash"] 60 61 return False 62 63 def locked_repository( 64 self, with_dev_reqs=False 65 ): # type: (bool) -> poetry.repositories.Repository 66 """ 67 Searches and returns a repository of locked packages. 
68 """ 69 if not self.is_locked(): 70 return poetry.repositories.Repository() 71 72 lock_data = self.lock_data 73 packages = poetry.repositories.Repository() 74 75 if with_dev_reqs: 76 locked_packages = lock_data["package"] 77 else: 78 locked_packages = [ 79 p for p in lock_data["package"] if p["category"] == "main" 80 ] 81 82 if not locked_packages: 83 return packages 84 85 for info in locked_packages: 86 package = poetry.packages.Package( 87 info["name"], info["version"], info["version"] 88 ) 89 package.description = info.get("description", "") 90 package.category = info["category"] 91 package.optional = info["optional"] 92 if "hashes" in lock_data["metadata"]: 93 # Old lock so we create dummy files from the hashes 94 package.files = [ 95 {"name": h, "hash": h} 96 for h in lock_data["metadata"]["hashes"][info["name"]] 97 ] 98 else: 99 package.files = lock_data["metadata"]["files"][info["name"]] 100 101 package.python_versions = info["python-versions"] 102 extras = info.get("extras", {}) 103 if extras: 104 for name, deps in extras.items(): 105 package.extras[name] = [] 106 107 for dep in deps: 108 m = re.match(r"^(.+?)(?:\s+\((.+)\))?$", dep) 109 dep_name = m.group(1) 110 constraint = m.group(2) or "*" 111 112 package.extras[name].append( 113 poetry.packages.Dependency(dep_name, constraint) 114 ) 115 116 if "marker" in info: 117 package.marker = parse_marker(info["marker"]) 118 else: 119 # Compatibility for old locks 120 if "requirements" in info: 121 dep = poetry.packages.Dependency("foo", "0.0.0") 122 for name, value in info["requirements"].items(): 123 if name == "python": 124 dep.python_versions = value 125 elif name == "platform": 126 dep.platform = value 127 128 split_dep = dep.to_pep_508(False).split(";") 129 if len(split_dep) > 1: 130 package.marker = parse_marker(split_dep[1].strip()) 131 132 for dep_name, constraint in info.get("dependencies", {}).items(): 133 if isinstance(constraint, list): 134 for c in constraint: 135 package.add_dependency(dep_name, c) 136 137 continue 138 139 package.add_dependency(dep_name, constraint) 140 141 if "source" in info: 142 package.source_type = info["source"]["type"] 143 package.source_url = info["source"]["url"] 144 package.source_reference = info["source"]["reference"] 145 146 packages.add_package(package) 147 148 return packages 149 150 def set_lock_data(self, root, packages): # type: (...) 
-> bool 151 files = table() 152 packages = self._lock_packages(packages) 153 # Retrieving hashes 154 for package in packages: 155 if package["name"] not in files: 156 files[package["name"]] = [] 157 158 for f in package["files"]: 159 file_metadata = inline_table() 160 for k, v in sorted(f.items()): 161 file_metadata[k] = v 162 163 files[package["name"]].append(file_metadata) 164 165 if files[package["name"]]: 166 files[package["name"]] = item(files[package["name"]]).multiline(True) 167 168 del package["files"] 169 170 lock = document() 171 lock["package"] = packages 172 173 if root.extras: 174 lock["extras"] = { 175 extra: [dep.pretty_name for dep in deps] 176 for extra, deps in root.extras.items() 177 } 178 179 lock["metadata"] = { 180 "python-versions": root.python_versions, 181 "content-hash": self._content_hash, 182 "files": files, 183 } 184 185 if not self.is_locked() or lock != self.lock_data: 186 self._write_lock_data(lock) 187 188 return True 189 190 return False 191 192 def _write_lock_data(self, data): 193 self.lock.write(data) 194 195 # Checking lock file data consistency 196 if data != self.lock.read(): 197 raise RuntimeError("Inconsistent lock file data.") 198 199 self._lock_data = None 200 201 def _get_content_hash(self): # type: () -> str 202 """ 203 Returns the sha256 hash of the sorted content of the pyproject file. 204 """ 205 content = self._local_config 206 207 relevant_content = {} 208 for key in self._relevant_keys: 209 relevant_content[key] = content.get(key) 210 211 content_hash = sha256( 212 json.dumps(relevant_content, sort_keys=True).encode() 213 ).hexdigest() 214 215 return content_hash 216 217 def _get_lock_data(self): # type: () -> dict 218 if not self._lock.exists(): 219 raise RuntimeError("No lockfile found. Unable to read locked packages") 220 221 try: 222 return self._lock.read() 223 except TOMLKitError as e: 224 raise RuntimeError("Unable to read the lock file ({}).".format(e)) 225 226 def _lock_packages( 227 self, packages 228 ): # type: (List['poetry.packages.Package']) -> list 229 locked = [] 230 231 for package in sorted(packages, key=lambda x: x.name): 232 spec = self._dump_package(package) 233 234 locked.append(spec) 235 236 return locked 237 238 def _dump_package(self, package): # type: (poetry.packages.Package) -> dict 239 dependencies = {} 240 for dependency in sorted(package.requires, key=lambda d: d.name): 241 if dependency.is_optional() and not dependency.is_activated(): 242 continue 243 244 if dependency.pretty_name not in dependencies: 245 dependencies[dependency.pretty_name] = [] 246 247 constraint = {"version": str(dependency.pretty_constraint)} 248 249 if dependency.extras: 250 constraint["extras"] = dependency.extras 251 252 if dependency.is_optional(): 253 constraint["optional"] = True 254 255 if not dependency.python_constraint.is_any(): 256 constraint["python"] = str(dependency.python_constraint) 257 258 dependencies[dependency.pretty_name].append(constraint) 259 260 # All the constraints should have the same type, 261 # but we want to simplify them if it's possible 262 for dependency, constraints in tuple(dependencies.items()): 263 if all(len(constraint) == 1 for constraint in constraints): 264 dependencies[dependency] = [ 265 constraint["version"] for constraint in constraints 266 ] 267 268 data = { 269 "name": package.pretty_name, 270 "version": package.pretty_version, 271 "description": package.description or "", 272 "category": package.category, 273 "optional": package.optional, 274 "python-versions": package.python_versions, 275 
"files": sorted(package.files, key=lambda x: x["file"]), 276 } 277 if not package.marker.is_any(): 278 data["marker"] = str(package.marker) 279 280 if package.extras: 281 extras = {} 282 for name, deps in package.extras.items(): 283 extras[name] = [ 284 str(dep) if not dep.constraint.is_any() else dep.name 285 for dep in deps 286 ] 287 288 data["extras"] = extras 289 290 if dependencies: 291 for k, constraints in dependencies.items(): 292 if len(constraints) == 1: 293 dependencies[k] = constraints[0] 294 295 data["dependencies"] = dependencies 296 297 if package.source_type: 298 data["source"] = { 299 "type": package.source_type, 300 "url": package.source_url, 301 "reference": package.source_reference, 302 } 303 304 return data 305 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/poetry/packages/locker.py b/poetry/packages/locker.py --- a/poetry/packages/locker.py +++ b/poetry/packages/locker.py @@ -139,7 +139,7 @@ package.add_dependency(dep_name, constraint) if "source" in info: - package.source_type = info["source"]["type"] + package.source_type = info["source"].get("type", "") package.source_url = info["source"]["url"] package.source_reference = info["source"]["reference"] @@ -294,11 +294,12 @@ data["dependencies"] = dependencies - if package.source_type: + if package.source_url: data["source"] = { - "type": package.source_type, "url": package.source_url, "reference": package.source_reference, } + if package.source_type: + data["source"]["type"] = package.source_type return data
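The fix above has two halves: reading now tolerates lock files whose `[package.source]` table carries no `type` key, and writing keys the table on `source_url`, so sources like the reporter's private legacy index are serialized again. Below is a minimal sketch of that round-trip; `_Pkg` is a hypothetical stand-in, not poetry's real `Package` class.

```python
class _Pkg:
    """Hypothetical stand-in with the three source attributes the locker uses."""
    source_type = ""                      # empty for a plain/legacy source
    source_url = "https://private.url"
    source_reference = "private"


def dump_source(package):
    """Mirror the patched _dump_package(): gate on the URL, make type optional."""
    if not package.source_url:
        return None                       # no URL means no [package.source] table
    source = {
        "url": package.source_url,
        "reference": package.source_reference,
    }
    if package.source_type:
        source["type"] = package.source_type
    return source


def load_source(info, package):
    """Mirror the patched locked_repository(): 'type' may be absent."""
    if "source" in info:
        package.source_type = info["source"].get("type", "")
        package.source_url = info["source"]["url"]
        package.source_reference = info["source"]["reference"]


pkg = _Pkg()
print(dump_source(pkg))  # {'url': 'https://private.url', 'reference': 'private'}
# The entry survives the lock round-trip even without an explicit type.
```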
{"golden_diff": "diff --git a/poetry/packages/locker.py b/poetry/packages/locker.py\n--- a/poetry/packages/locker.py\n+++ b/poetry/packages/locker.py\n@@ -139,7 +139,7 @@\n package.add_dependency(dep_name, constraint)\n \n if \"source\" in info:\n- package.source_type = info[\"source\"][\"type\"]\n+ package.source_type = info[\"source\"].get(\"type\", \"\")\n package.source_url = info[\"source\"][\"url\"]\n package.source_reference = info[\"source\"][\"reference\"]\n \n@@ -294,11 +294,12 @@\n \n data[\"dependencies\"] = dependencies\n \n- if package.source_type:\n+ if package.source_url:\n data[\"source\"] = {\n- \"type\": package.source_type,\n \"url\": package.source_url,\n \"reference\": package.source_reference,\n }\n+ if package.source_type:\n+ data[\"source\"][\"type\"] = package.source_type\n \n return data\n", "issue": "1.0.0b5 removes my private source from the lockfile\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: macOS 10.14.6 (18G103)\r\n- **Poetry version**: <!-- 1.0.0b5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: <!-- Gist Link Here --> https://gist.github.com/JBKahn/4bbb040c0181d974e0b71407ad764b6c\r\n\r\n\r\n## Issue\r\nWhen moving from `1.0.0b4` to `1.0.0b5` `poetry lock` is removing the \r\n```\r\n[package.source]\r\nreference = \"private\"\r\ntype = \"legacy\"\r\nurl = \"https://private.url\"\r\n```\r\n\n", "before_files": [{"content": "import json\nimport re\n\nfrom hashlib import sha256\nfrom typing import List\n\nfrom tomlkit import document\nfrom tomlkit import inline_table\nfrom tomlkit import item\nfrom tomlkit import table\nfrom tomlkit.exceptions import TOMLKitError\n\nimport poetry.packages\nimport poetry.repositories\n\nfrom poetry.utils._compat import Path\nfrom poetry.utils.toml_file import TomlFile\nfrom poetry.version.markers import parse_marker\n\n\nclass Locker(object):\n\n _relevant_keys = [\"dependencies\", \"dev-dependencies\", \"source\", \"extras\"]\n\n def __init__(self, lock, local_config): # type: (Path, dict) -> None\n self._lock = TomlFile(lock)\n self._local_config = local_config\n self._lock_data = None\n self._content_hash = self._get_content_hash()\n\n @property\n def lock(self): # type: () -> TomlFile\n return self._lock\n\n @property\n def lock_data(self):\n if self._lock_data is None:\n self._lock_data = self._get_lock_data()\n\n return self._lock_data\n\n def is_locked(self): # type: () -> bool\n \"\"\"\n Checks whether the locker has been locked (lockfile found).\n \"\"\"\n if not self._lock.exists():\n return False\n\n return \"package\" in self.lock_data\n\n def is_fresh(self): # type: () -> bool\n \"\"\"\n Checks whether the lock file is still up to date with the current 
hash.\n \"\"\"\n lock = self._lock.read()\n metadata = lock.get(\"metadata\", {})\n\n if \"content-hash\" in metadata:\n return self._content_hash == lock[\"metadata\"][\"content-hash\"]\n\n return False\n\n def locked_repository(\n self, with_dev_reqs=False\n ): # type: (bool) -> poetry.repositories.Repository\n \"\"\"\n Searches and returns a repository of locked packages.\n \"\"\"\n if not self.is_locked():\n return poetry.repositories.Repository()\n\n lock_data = self.lock_data\n packages = poetry.repositories.Repository()\n\n if with_dev_reqs:\n locked_packages = lock_data[\"package\"]\n else:\n locked_packages = [\n p for p in lock_data[\"package\"] if p[\"category\"] == \"main\"\n ]\n\n if not locked_packages:\n return packages\n\n for info in locked_packages:\n package = poetry.packages.Package(\n info[\"name\"], info[\"version\"], info[\"version\"]\n )\n package.description = info.get(\"description\", \"\")\n package.category = info[\"category\"]\n package.optional = info[\"optional\"]\n if \"hashes\" in lock_data[\"metadata\"]:\n # Old lock so we create dummy files from the hashes\n package.files = [\n {\"name\": h, \"hash\": h}\n for h in lock_data[\"metadata\"][\"hashes\"][info[\"name\"]]\n ]\n else:\n package.files = lock_data[\"metadata\"][\"files\"][info[\"name\"]]\n\n package.python_versions = info[\"python-versions\"]\n extras = info.get(\"extras\", {})\n if extras:\n for name, deps in extras.items():\n package.extras[name] = []\n\n for dep in deps:\n m = re.match(r\"^(.+?)(?:\\s+\\((.+)\\))?$\", dep)\n dep_name = m.group(1)\n constraint = m.group(2) or \"*\"\n\n package.extras[name].append(\n poetry.packages.Dependency(dep_name, constraint)\n )\n\n if \"marker\" in info:\n package.marker = parse_marker(info[\"marker\"])\n else:\n # Compatibility for old locks\n if \"requirements\" in info:\n dep = poetry.packages.Dependency(\"foo\", \"0.0.0\")\n for name, value in info[\"requirements\"].items():\n if name == \"python\":\n dep.python_versions = value\n elif name == \"platform\":\n dep.platform = value\n\n split_dep = dep.to_pep_508(False).split(\";\")\n if len(split_dep) > 1:\n package.marker = parse_marker(split_dep[1].strip())\n\n for dep_name, constraint in info.get(\"dependencies\", {}).items():\n if isinstance(constraint, list):\n for c in constraint:\n package.add_dependency(dep_name, c)\n\n continue\n\n package.add_dependency(dep_name, constraint)\n\n if \"source\" in info:\n package.source_type = info[\"source\"][\"type\"]\n package.source_url = info[\"source\"][\"url\"]\n package.source_reference = info[\"source\"][\"reference\"]\n\n packages.add_package(package)\n\n return packages\n\n def set_lock_data(self, root, packages): # type: (...) 
-> bool\n files = table()\n packages = self._lock_packages(packages)\n # Retrieving hashes\n for package in packages:\n if package[\"name\"] not in files:\n files[package[\"name\"]] = []\n\n for f in package[\"files\"]:\n file_metadata = inline_table()\n for k, v in sorted(f.items()):\n file_metadata[k] = v\n\n files[package[\"name\"]].append(file_metadata)\n\n if files[package[\"name\"]]:\n files[package[\"name\"]] = item(files[package[\"name\"]]).multiline(True)\n\n del package[\"files\"]\n\n lock = document()\n lock[\"package\"] = packages\n\n if root.extras:\n lock[\"extras\"] = {\n extra: [dep.pretty_name for dep in deps]\n for extra, deps in root.extras.items()\n }\n\n lock[\"metadata\"] = {\n \"python-versions\": root.python_versions,\n \"content-hash\": self._content_hash,\n \"files\": files,\n }\n\n if not self.is_locked() or lock != self.lock_data:\n self._write_lock_data(lock)\n\n return True\n\n return False\n\n def _write_lock_data(self, data):\n self.lock.write(data)\n\n # Checking lock file data consistency\n if data != self.lock.read():\n raise RuntimeError(\"Inconsistent lock file data.\")\n\n self._lock_data = None\n\n def _get_content_hash(self): # type: () -> str\n \"\"\"\n Returns the sha256 hash of the sorted content of the pyproject file.\n \"\"\"\n content = self._local_config\n\n relevant_content = {}\n for key in self._relevant_keys:\n relevant_content[key] = content.get(key)\n\n content_hash = sha256(\n json.dumps(relevant_content, sort_keys=True).encode()\n ).hexdigest()\n\n return content_hash\n\n def _get_lock_data(self): # type: () -> dict\n if not self._lock.exists():\n raise RuntimeError(\"No lockfile found. Unable to read locked packages\")\n\n try:\n return self._lock.read()\n except TOMLKitError as e:\n raise RuntimeError(\"Unable to read the lock file ({}).\".format(e))\n\n def _lock_packages(\n self, packages\n ): # type: (List['poetry.packages.Package']) -> list\n locked = []\n\n for package in sorted(packages, key=lambda x: x.name):\n spec = self._dump_package(package)\n\n locked.append(spec)\n\n return locked\n\n def _dump_package(self, package): # type: (poetry.packages.Package) -> dict\n dependencies = {}\n for dependency in sorted(package.requires, key=lambda d: d.name):\n if dependency.is_optional() and not dependency.is_activated():\n continue\n\n if dependency.pretty_name not in dependencies:\n dependencies[dependency.pretty_name] = []\n\n constraint = {\"version\": str(dependency.pretty_constraint)}\n\n if dependency.extras:\n constraint[\"extras\"] = dependency.extras\n\n if dependency.is_optional():\n constraint[\"optional\"] = True\n\n if not dependency.python_constraint.is_any():\n constraint[\"python\"] = str(dependency.python_constraint)\n\n dependencies[dependency.pretty_name].append(constraint)\n\n # All the constraints should have the same type,\n # but we want to simplify them if it's possible\n for dependency, constraints in tuple(dependencies.items()):\n if all(len(constraint) == 1 for constraint in constraints):\n dependencies[dependency] = [\n constraint[\"version\"] for constraint in constraints\n ]\n\n data = {\n \"name\": package.pretty_name,\n \"version\": package.pretty_version,\n \"description\": package.description or \"\",\n \"category\": package.category,\n \"optional\": package.optional,\n \"python-versions\": package.python_versions,\n \"files\": sorted(package.files, key=lambda x: x[\"file\"]),\n }\n if not package.marker.is_any():\n data[\"marker\"] = str(package.marker)\n\n if package.extras:\n extras = {}\n for 
name, deps in package.extras.items():\n extras[name] = [\n str(dep) if not dep.constraint.is_any() else dep.name\n for dep in deps\n ]\n\n data[\"extras\"] = extras\n\n if dependencies:\n for k, constraints in dependencies.items():\n if len(constraints) == 1:\n dependencies[k] = constraints[0]\n\n data[\"dependencies\"] = dependencies\n\n if package.source_type:\n data[\"source\"] = {\n \"type\": package.source_type,\n \"url\": package.source_url,\n \"reference\": package.source_reference,\n }\n\n return data\n", "path": "poetry/packages/locker.py"}], "after_files": [{"content": "import json\nimport re\n\nfrom hashlib import sha256\nfrom typing import List\n\nfrom tomlkit import document\nfrom tomlkit import inline_table\nfrom tomlkit import item\nfrom tomlkit import table\nfrom tomlkit.exceptions import TOMLKitError\n\nimport poetry.packages\nimport poetry.repositories\n\nfrom poetry.utils._compat import Path\nfrom poetry.utils.toml_file import TomlFile\nfrom poetry.version.markers import parse_marker\n\n\nclass Locker(object):\n\n _relevant_keys = [\"dependencies\", \"dev-dependencies\", \"source\", \"extras\"]\n\n def __init__(self, lock, local_config): # type: (Path, dict) -> None\n self._lock = TomlFile(lock)\n self._local_config = local_config\n self._lock_data = None\n self._content_hash = self._get_content_hash()\n\n @property\n def lock(self): # type: () -> TomlFile\n return self._lock\n\n @property\n def lock_data(self):\n if self._lock_data is None:\n self._lock_data = self._get_lock_data()\n\n return self._lock_data\n\n def is_locked(self): # type: () -> bool\n \"\"\"\n Checks whether the locker has been locked (lockfile found).\n \"\"\"\n if not self._lock.exists():\n return False\n\n return \"package\" in self.lock_data\n\n def is_fresh(self): # type: () -> bool\n \"\"\"\n Checks whether the lock file is still up to date with the current hash.\n \"\"\"\n lock = self._lock.read()\n metadata = lock.get(\"metadata\", {})\n\n if \"content-hash\" in metadata:\n return self._content_hash == lock[\"metadata\"][\"content-hash\"]\n\n return False\n\n def locked_repository(\n self, with_dev_reqs=False\n ): # type: (bool) -> poetry.repositories.Repository\n \"\"\"\n Searches and returns a repository of locked packages.\n \"\"\"\n if not self.is_locked():\n return poetry.repositories.Repository()\n\n lock_data = self.lock_data\n packages = poetry.repositories.Repository()\n\n if with_dev_reqs:\n locked_packages = lock_data[\"package\"]\n else:\n locked_packages = [\n p for p in lock_data[\"package\"] if p[\"category\"] == \"main\"\n ]\n\n if not locked_packages:\n return packages\n\n for info in locked_packages:\n package = poetry.packages.Package(\n info[\"name\"], info[\"version\"], info[\"version\"]\n )\n package.description = info.get(\"description\", \"\")\n package.category = info[\"category\"]\n package.optional = info[\"optional\"]\n if \"hashes\" in lock_data[\"metadata\"]:\n # Old lock so we create dummy files from the hashes\n package.files = [\n {\"name\": h, \"hash\": h}\n for h in lock_data[\"metadata\"][\"hashes\"][info[\"name\"]]\n ]\n else:\n package.files = lock_data[\"metadata\"][\"files\"][info[\"name\"]]\n\n package.python_versions = info[\"python-versions\"]\n extras = info.get(\"extras\", {})\n if extras:\n for name, deps in extras.items():\n package.extras[name] = []\n\n for dep in deps:\n m = re.match(r\"^(.+?)(?:\\s+\\((.+)\\))?$\", dep)\n dep_name = m.group(1)\n constraint = m.group(2) or \"*\"\n\n package.extras[name].append(\n 
poetry.packages.Dependency(dep_name, constraint)\n )\n\n if \"marker\" in info:\n package.marker = parse_marker(info[\"marker\"])\n else:\n # Compatibility for old locks\n if \"requirements\" in info:\n dep = poetry.packages.Dependency(\"foo\", \"0.0.0\")\n for name, value in info[\"requirements\"].items():\n if name == \"python\":\n dep.python_versions = value\n elif name == \"platform\":\n dep.platform = value\n\n split_dep = dep.to_pep_508(False).split(\";\")\n if len(split_dep) > 1:\n package.marker = parse_marker(split_dep[1].strip())\n\n for dep_name, constraint in info.get(\"dependencies\", {}).items():\n if isinstance(constraint, list):\n for c in constraint:\n package.add_dependency(dep_name, c)\n\n continue\n\n package.add_dependency(dep_name, constraint)\n\n if \"source\" in info:\n package.source_type = info[\"source\"].get(\"type\", \"\")\n package.source_url = info[\"source\"][\"url\"]\n package.source_reference = info[\"source\"][\"reference\"]\n\n packages.add_package(package)\n\n return packages\n\n def set_lock_data(self, root, packages): # type: (...) -> bool\n files = table()\n packages = self._lock_packages(packages)\n # Retrieving hashes\n for package in packages:\n if package[\"name\"] not in files:\n files[package[\"name\"]] = []\n\n for f in package[\"files\"]:\n file_metadata = inline_table()\n for k, v in sorted(f.items()):\n file_metadata[k] = v\n\n files[package[\"name\"]].append(file_metadata)\n\n if files[package[\"name\"]]:\n files[package[\"name\"]] = item(files[package[\"name\"]]).multiline(True)\n\n del package[\"files\"]\n\n lock = document()\n lock[\"package\"] = packages\n\n if root.extras:\n lock[\"extras\"] = {\n extra: [dep.pretty_name for dep in deps]\n for extra, deps in root.extras.items()\n }\n\n lock[\"metadata\"] = {\n \"python-versions\": root.python_versions,\n \"content-hash\": self._content_hash,\n \"files\": files,\n }\n\n if not self.is_locked() or lock != self.lock_data:\n self._write_lock_data(lock)\n\n return True\n\n return False\n\n def _write_lock_data(self, data):\n self.lock.write(data)\n\n # Checking lock file data consistency\n if data != self.lock.read():\n raise RuntimeError(\"Inconsistent lock file data.\")\n\n self._lock_data = None\n\n def _get_content_hash(self): # type: () -> str\n \"\"\"\n Returns the sha256 hash of the sorted content of the pyproject file.\n \"\"\"\n content = self._local_config\n\n relevant_content = {}\n for key in self._relevant_keys:\n relevant_content[key] = content.get(key)\n\n content_hash = sha256(\n json.dumps(relevant_content, sort_keys=True).encode()\n ).hexdigest()\n\n return content_hash\n\n def _get_lock_data(self): # type: () -> dict\n if not self._lock.exists():\n raise RuntimeError(\"No lockfile found. 
Unable to read locked packages\")\n\n try:\n return self._lock.read()\n except TOMLKitError as e:\n raise RuntimeError(\"Unable to read the lock file ({}).\".format(e))\n\n def _lock_packages(\n self, packages\n ): # type: (List['poetry.packages.Package']) -> list\n locked = []\n\n for package in sorted(packages, key=lambda x: x.name):\n spec = self._dump_package(package)\n\n locked.append(spec)\n\n return locked\n\n def _dump_package(self, package): # type: (poetry.packages.Package) -> dict\n dependencies = {}\n for dependency in sorted(package.requires, key=lambda d: d.name):\n if dependency.is_optional() and not dependency.is_activated():\n continue\n\n if dependency.pretty_name not in dependencies:\n dependencies[dependency.pretty_name] = []\n\n constraint = {\"version\": str(dependency.pretty_constraint)}\n\n if dependency.extras:\n constraint[\"extras\"] = dependency.extras\n\n if dependency.is_optional():\n constraint[\"optional\"] = True\n\n if not dependency.python_constraint.is_any():\n constraint[\"python\"] = str(dependency.python_constraint)\n\n dependencies[dependency.pretty_name].append(constraint)\n\n # All the constraints should have the same type,\n # but we want to simplify them if it's possible\n for dependency, constraints in tuple(dependencies.items()):\n if all(len(constraint) == 1 for constraint in constraints):\n dependencies[dependency] = [\n constraint[\"version\"] for constraint in constraints\n ]\n\n data = {\n \"name\": package.pretty_name,\n \"version\": package.pretty_version,\n \"description\": package.description or \"\",\n \"category\": package.category,\n \"optional\": package.optional,\n \"python-versions\": package.python_versions,\n \"files\": sorted(package.files, key=lambda x: x[\"file\"]),\n }\n if not package.marker.is_any():\n data[\"marker\"] = str(package.marker)\n\n if package.extras:\n extras = {}\n for name, deps in package.extras.items():\n extras[name] = [\n str(dep) if not dep.constraint.is_any() else dep.name\n for dep in deps\n ]\n\n data[\"extras\"] = extras\n\n if dependencies:\n for k, constraints in dependencies.items():\n if len(constraints) == 1:\n dependencies[k] = constraints[0]\n\n data[\"dependencies\"] = dependencies\n\n if package.source_url:\n data[\"source\"] = {\n \"url\": package.source_url,\n \"reference\": package.source_reference,\n }\n if package.source_type:\n data[\"source\"][\"type\"] = package.source_type\n\n return data\n", "path": "poetry/packages/locker.py"}]}
num_tokens: 3500
num_tokens_diff: 219

problem_id: gh_patches_debug_42509
source: rasdani/github-patches
task_type: git_diff
in_source_id: napari__napari-1749
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Track vertex shader does not handle blending/opacity correctly ## 🐛 Bug Two somewhat related bugs in `_vispy_tracks_shader.py` in the `Tracks` layer: 1. The opacity slider only changes the vispy node opacity, which has no effect on the vertex shader used for rendering the tracks or graph. 2. The translucent mode renders parts of the track, which should not be rendered, as dark lines: ![bug](https://user-images.githubusercontent.com/8217795/96697072-64ef9b00-1383-11eb-9648-54c92319ae84.gif) ## To Reproduce Steps to reproduce the behavior: 1. run `examples\tracks_3d.py` 2. Change the blending mode to translucent in the `Tracks` layer 3. Change the opacity slider ## Expected behavior `opacity` should change the track opacity, and the `translucent` blending should not render parts of the track which should not be visible. ## Environment napari: 0.3.9.dev32+g911e095.d20201007 Platform: Linux-4.15.0-1100-oem-x86_64-with-debian-buster-sid Python: 3.7.7 (default, May 7 2020, 21:25:33) [GCC 7.3.0] Qt: 5.14.2 PySide2: 5.14.2.2 NumPy: 1.19.1 SciPy: 1.4.1 Dask: 2.17.2 VisPy: 0.6.4 GL version: 4.6.0 NVIDIA 450.80.02 MAX_TEXTURE_SIZE: 32768 Plugins: - napari-plugin-engine: 0.1.5 - svg: 0.1.3 ## Additional context This relates to the fact that the shader manipulates the alpha values of the vertices directly, late in the rendering pipeline. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napari/_vispy/vispy_tracks_layer.py` Content: ``` 1 from vispy.scene.visuals import Compound, Line, Text 2 3 from ._vispy_tracks_shader import TrackShader 4 from .vispy_base_layer import VispyBaseLayer 5 6 7 class VispyTracksLayer(VispyBaseLayer): 8 """VispyTracksLayer 9 10 Track layer for visualizing tracks. 
11 12 Components: 13 - Track lines (vispy.LineVisual) 14 - Track IDs (vispy.TextVisual) 15 - Graph edges (vispy.LineVisual) 16 17 """ 18 19 def __init__(self, layer): 20 node = Compound([Line(), Text(), Line()]) 21 super().__init__(layer, node) 22 23 self.layer.events.tail_width.connect(self._on_appearance_change) 24 self.layer.events.tail_length.connect(self._on_appearance_change) 25 self.layer.events.display_id.connect(self._on_appearance_change) 26 self.layer.events.display_tail.connect(self._on_appearance_change) 27 self.layer.events.display_graph.connect(self._on_appearance_change) 28 29 self.layer.events.color_by.connect(self._on_appearance_change) 30 self.layer.events.colormap.connect(self._on_appearance_change) 31 32 # these events are fired when changes occur to the tracks or the 33 # graph - as the vertex buffer of the shader needs to be updated 34 # alongside the actual vertex data 35 self.layer.events.rebuild_tracks.connect(self._on_tracks_change) 36 self.layer.events.rebuild_graph.connect(self._on_graph_change) 37 38 # build and attach the shader to the track 39 self.track_shader = TrackShader() 40 self.graph_shader = TrackShader() 41 node._subvisuals[0].attach(self.track_shader) 42 node._subvisuals[2].attach(self.graph_shader) 43 44 # text label properties 45 self.node._subvisuals[1].color = 'white' 46 self.node._subvisuals[1].font_size = 8 47 48 self._reset_base() 49 50 self._on_data_change() 51 self._on_appearance_change() 52 53 def _on_data_change(self, event=None): 54 """ update the display """ 55 56 # update the shaders 57 self.track_shader.current_time = self.layer.current_time 58 self.graph_shader.current_time = self.layer.current_time 59 60 # add text labels if they're visible 61 if self.node._subvisuals[1].visible: 62 labels_text, labels_pos = self.layer.track_labels 63 self.node._subvisuals[1].text = labels_text 64 self.node._subvisuals[1].pos = labels_pos 65 66 self.node.update() 67 # Call to update order of translation values with new dims: 68 self._on_matrix_change() 69 70 def _on_appearance_change(self, event=None): 71 """ change the appearance of the data """ 72 73 # update shader properties related to appearance 74 self.track_shader.use_fade = self.layer.use_fade 75 self.track_shader.tail_length = self.layer.tail_length 76 self.graph_shader.use_fade = self.layer.use_fade 77 self.graph_shader.tail_length = self.layer.tail_length 78 79 # set visibility of subvisuals 80 self.node._subvisuals[0].visible = self.layer.display_tail 81 self.node._subvisuals[1].visible = self.layer.display_id 82 self.node._subvisuals[2].visible = self.layer.display_graph 83 84 # set the width of the track tails 85 self.node._subvisuals[0].set_data( 86 width=self.layer.tail_width, color=self.layer.track_colors, 87 ) 88 self.node._subvisuals[2].set_data(width=self.layer.tail_width,) 89 90 def _on_tracks_change(self, event=None): 91 """ update the shader when the track data changes """ 92 93 self.track_shader.use_fade = self.layer.use_fade 94 self.track_shader.tail_length = self.layer.tail_length 95 self.track_shader.vertex_time = self.layer.track_times 96 97 # change the data to the vispy line visual 98 self.node._subvisuals[0].set_data( 99 pos=self.layer._view_data, 100 connect=self.layer.track_connex, 101 width=self.layer.tail_width, 102 color=self.layer.track_colors, 103 ) 104 105 # Call to update order of translation values with new dims: 106 self._on_matrix_change() 107 108 def _on_graph_change(self, event=None): 109 """ update the shader when the graph data changes """ 110 111 
self.graph_shader.use_fade = self.layer.use_fade 112 self.graph_shader.tail_length = self.layer.tail_length 113 self.graph_shader.vertex_time = self.layer.graph_times 114 115 # if the user clears a graph after it has been created, vispy offers 116 # no method to clear the data, therefore, we need to set private 117 # attributes to None to prevent errors 118 if self.layer._view_graph is None: 119 self.node._subvisuals[2]._pos = None 120 self.node._subvisuals[2]._connect = None 121 self.node.update() 122 return 123 124 self.node._subvisuals[2].set_data( 125 pos=self.layer._view_graph, 126 connect=self.layer.graph_connex, 127 width=self.layer.tail_width, 128 color='white', 129 ) 130 131 # Call to update order of translation values with new dims: 132 self._on_matrix_change() 133 ``` Path: `napari/_vispy/_vispy_tracks_shader.py` Content: ``` 1 from typing import List, Union 2 3 import numpy as np 4 from vispy.gloo import VertexBuffer 5 from vispy.visuals.filters.base_filter import Filter 6 7 8 class TrackShader(Filter): 9 """ TrackShader 10 11 Custom vertex and fragment shaders for visualizing tracks quickly with 12 vispy. The central assumption is that the tracks are rendered as a 13 continuous vispy Line segment, with connections and colors defined when 14 the visual is created. 15 16 The shader simply changes the visibility and/or fading of the data according 17 to the current_time and the associate time metadata for each vertex. This 18 is scaled according to the tail length. Points ahead of the current time 19 are rendered with alpha set to zero. 20 21 Parameters 22 ---------- 23 current_time : int, float 24 the current time, which is typically the frame index, although this 25 can be an arbitrary float 26 tail_length : int, float 27 the upper limit on length of the 'tail' 28 use_fade : bool 29 this will enable/disable tail fading with time 30 vertex_time : 1D array, list 31 a vector describing the time associated with each vertex 32 33 34 TODO 35 ---- 36 - the track is still displayed, albeit with fading, once the track has 37 finished but is still within the 'tail_length' window. Should it 38 disappear? 
39 - check the shader positioning within the GL pipeline, currently 40 overrides layer opacity settings 41 42 vertex_mask: 1D array, list 43 a vector describing whether to mask each vertex 44 45 """ 46 47 VERT_SHADER = """ 48 varying vec4 v_track_color; 49 void apply_track_shading() { 50 51 float alpha; 52 53 if ($a_vertex_time > $current_time) { 54 // this is a hack to minimize the frag shader rendering ahead 55 // of the current time point due to interpolation 56 if ($a_vertex_time <= $current_time + 1){ 57 alpha = -100.; 58 } else { 59 alpha = 0.; 60 } 61 } else { 62 // fade the track into the temporal distance, scaled by the 63 // maximum tail length from the gui 64 float fade = ($current_time - $a_vertex_time) / $tail_length; 65 alpha = clamp(1.0-fade, 0.0, 1.0); 66 } 67 68 // when use_fade is disabled, the entire track is visible 69 if ($use_fade == 0) { 70 alpha = 1.0; 71 } 72 73 // set the vertex alpha according to the fade 74 v_track_color.a = alpha; 75 } 76 """ 77 78 FRAG_SHADER = """ 79 varying vec4 v_track_color; 80 void apply_track_shading() { 81 // interpolate 82 gl_FragColor.a = clamp(v_track_color.a, 0.0, 1.0); 83 } 84 """ 85 86 def __init__( 87 self, 88 current_time=0, 89 tail_length=30, 90 use_fade: bool = True, 91 vertex_time: Union[List, np.ndarray] = None, 92 ): 93 94 super().__init__( 95 vcode=self.VERT_SHADER, vpos=3, fcode=self.FRAG_SHADER, fpos=9 96 ) 97 98 self.current_time = current_time 99 self.tail_length = tail_length 100 self.use_fade = use_fade 101 self.vertex_time = vertex_time 102 103 @property 104 def current_time(self) -> Union[int, float]: 105 return self._current_time 106 107 @current_time.setter 108 def current_time(self, n: Union[int, float]): 109 self._current_time = n 110 if isinstance(n, slice): 111 n = np.max(self._vertex_time) 112 self.vshader['current_time'] = float(n) 113 114 @property 115 def use_fade(self) -> bool: 116 return self._use_fade 117 118 @use_fade.setter 119 def use_fade(self, value: bool): 120 self._use_fade = value 121 self.vshader['use_fade'] = float(self._use_fade) 122 123 @property 124 def tail_length(self) -> Union[int, float]: 125 return self._tail_length 126 127 @tail_length.setter 128 def tail_length(self, tail_length: Union[int, float]): 129 self._tail_length = tail_length 130 self.vshader['tail_length'] = float(self._tail_length) 131 132 def _attach(self, visual): 133 super()._attach(visual) 134 135 @property 136 def vertex_time(self): 137 return self._vertex_time 138 139 @vertex_time.setter 140 def vertex_time(self, v_time): 141 self._vertex_time = np.array(v_time).reshape(-1, 1).astype(np.float32) 142 self.vshader['a_vertex_time'] = VertexBuffer(self.vertex_time) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
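The fade rule in `VERT_SHADER` above is simple enough to model in plain Python, which makes both the `-100` hack and the late `use_fade` override easier to see. This is an illustrative sketch, not napari code.

```python
def vertex_alpha(vertex_time, current_time, tail_length, use_fade=True):
    """Python rendering of the alpha computed per vertex in VERT_SHADER."""
    if vertex_time > current_time:
        # -100 is the shader's hack: fragments interpolated just ahead of the
        # playhead still end up with alpha <= 0 after interpolation
        alpha = -100.0 if vertex_time <= current_time + 1 else 0.0
    else:
        fade = (current_time - vertex_time) / tail_length
        alpha = max(0.0, min(1.0, 1.0 - fade))
    if not use_fade:
        alpha = 1.0  # the override comes last, so the entire track is shown
    return alpha


assert vertex_alpha(10, 20, tail_length=30) == 1.0 - 10 / 30   # fading tail
assert vertex_alpha(25, 20, tail_length=30) == 0.0             # far future
assert vertex_alpha(20.5, 20, tail_length=30) == -100.0        # near-future hack
```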
diff --git a/napari/_vispy/_vispy_tracks_shader.py b/napari/_vispy/_vispy_tracks_shader.py --- a/napari/_vispy/_vispy_tracks_shader.py +++ b/napari/_vispy/_vispy_tracks_shader.py @@ -6,7 +6,7 @@ class TrackShader(Filter): - """ TrackShader + """TrackShader. Custom vertex and fragment shaders for visualizing tracks quickly with vispy. The central assumption is that the tracks are rendered as a @@ -30,17 +30,11 @@ vertex_time : 1D array, list a vector describing the time associated with each vertex - TODO ---- - the track is still displayed, albeit with fading, once the track has finished but is still within the 'tail_length' window. Should it disappear? - - check the shader positioning within the GL pipeline, currently - overrides layer opacity settings - - vertex_mask: 1D array, list - a vector describing whether to mask each vertex """ @@ -78,15 +72,21 @@ FRAG_SHADER = """ varying vec4 v_track_color; void apply_track_shading() { + + // if the alpha is below the threshold, discard the fragment + if( v_track_color.a <= 0.0 ) { + discard; + } + // interpolate - gl_FragColor.a = clamp(v_track_color.a, 0.0, 1.0); + gl_FragColor.a = clamp(v_track_color.a * gl_FragColor.a, 0.0, 1.0); } """ def __init__( self, - current_time=0, - tail_length=30, + current_time: Union[int, float] = 0, + tail_length: Union[int, float] = 30, use_fade: bool = True, vertex_time: Union[List, np.ndarray] = None, ): diff --git a/napari/_vispy/vispy_tracks_layer.py b/napari/_vispy/vispy_tracks_layer.py --- a/napari/_vispy/vispy_tracks_layer.py +++ b/napari/_vispy/vispy_tracks_layer.py @@ -51,7 +51,7 @@ self._on_appearance_change() def _on_data_change(self, event=None): - """ update the display """ + """Update the display.""" # update the shaders self.track_shader.current_time = self.layer.current_time @@ -68,7 +68,7 @@ self._on_matrix_change() def _on_appearance_change(self, event=None): - """ change the appearance of the data """ + """Change the appearance of the data.""" # update shader properties related to appearance self.track_shader.use_fade = self.layer.use_fade @@ -88,7 +88,7 @@ self.node._subvisuals[2].set_data(width=self.layer.tail_width,) def _on_tracks_change(self, event=None): - """ update the shader when the track data changes """ + """Update the shader when the track data changes.""" self.track_shader.use_fade = self.layer.use_fade self.track_shader.tail_length = self.layer.tail_length @@ -106,7 +106,7 @@ self._on_matrix_change() def _on_graph_change(self, event=None): - """ update the shader when the graph data changes """ + """Update the shader when the graph data changes.""" self.graph_shader.use_fade = self.layer.use_fade self.graph_shader.tail_length = self.layer.tail_length
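The behavioral core of this diff sits in `FRAG_SHADER`: fragments whose track alpha is at or below zero are now discarded rather than drawn as dark lines, and the track alpha is multiplied into whatever alpha the pipeline has already set, so the layer opacity slider finally has an effect. A pure-Python model of that logic, with `None` standing in for GLSL's `discard`; illustrative only.

```python
def fragment_alpha(track_alpha, pipeline_alpha):
    """track_alpha: the per-vertex fade; pipeline_alpha: the alpha already set
    upstream, e.g. by the layer opacity slider. None models GLSL's discard."""
    if track_alpha <= 0.0:
        return None  # discarded: hidden segments no longer render as dark lines
    # multiply instead of overwrite, so layer opacity composes with the fade
    return max(0.0, min(1.0, track_alpha * pipeline_alpha))


assert fragment_alpha(-100.0, 1.0) is None   # segment ahead of the current time
assert fragment_alpha(1.0, 0.5) == 0.5       # the opacity slider now propagates
assert fragment_alpha(0.25, 0.5) == 0.125    # fade and opacity compose
```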
{"golden_diff": "diff --git a/napari/_vispy/_vispy_tracks_shader.py b/napari/_vispy/_vispy_tracks_shader.py\n--- a/napari/_vispy/_vispy_tracks_shader.py\n+++ b/napari/_vispy/_vispy_tracks_shader.py\n@@ -6,7 +6,7 @@\n \n \n class TrackShader(Filter):\n- \"\"\" TrackShader\n+ \"\"\"TrackShader.\n \n Custom vertex and fragment shaders for visualizing tracks quickly with\n vispy. The central assumption is that the tracks are rendered as a\n@@ -30,17 +30,11 @@\n vertex_time : 1D array, list\n a vector describing the time associated with each vertex\n \n-\n TODO\n ----\n - the track is still displayed, albeit with fading, once the track has\n finished but is still within the 'tail_length' window. Should it\n disappear?\n- - check the shader positioning within the GL pipeline, currently\n- overrides layer opacity settings\n-\n- vertex_mask: 1D array, list\n- a vector describing whether to mask each vertex\n \n \"\"\"\n \n@@ -78,15 +72,21 @@\n FRAG_SHADER = \"\"\"\n varying vec4 v_track_color;\n void apply_track_shading() {\n+\n+ // if the alpha is below the threshold, discard the fragment\n+ if( v_track_color.a <= 0.0 ) {\n+ discard;\n+ }\n+\n // interpolate\n- gl_FragColor.a = clamp(v_track_color.a, 0.0, 1.0);\n+ gl_FragColor.a = clamp(v_track_color.a * gl_FragColor.a, 0.0, 1.0);\n }\n \"\"\"\n \n def __init__(\n self,\n- current_time=0,\n- tail_length=30,\n+ current_time: Union[int, float] = 0,\n+ tail_length: Union[int, float] = 30,\n use_fade: bool = True,\n vertex_time: Union[List, np.ndarray] = None,\n ):\ndiff --git a/napari/_vispy/vispy_tracks_layer.py b/napari/_vispy/vispy_tracks_layer.py\n--- a/napari/_vispy/vispy_tracks_layer.py\n+++ b/napari/_vispy/vispy_tracks_layer.py\n@@ -51,7 +51,7 @@\n self._on_appearance_change()\n \n def _on_data_change(self, event=None):\n- \"\"\" update the display \"\"\"\n+ \"\"\"Update the display.\"\"\"\n \n # update the shaders\n self.track_shader.current_time = self.layer.current_time\n@@ -68,7 +68,7 @@\n self._on_matrix_change()\n \n def _on_appearance_change(self, event=None):\n- \"\"\" change the appearance of the data \"\"\"\n+ \"\"\"Change the appearance of the data.\"\"\"\n \n # update shader properties related to appearance\n self.track_shader.use_fade = self.layer.use_fade\n@@ -88,7 +88,7 @@\n self.node._subvisuals[2].set_data(width=self.layer.tail_width,)\n \n def _on_tracks_change(self, event=None):\n- \"\"\" update the shader when the track data changes \"\"\"\n+ \"\"\"Update the shader when the track data changes.\"\"\"\n \n self.track_shader.use_fade = self.layer.use_fade\n self.track_shader.tail_length = self.layer.tail_length\n@@ -106,7 +106,7 @@\n self._on_matrix_change()\n \n def _on_graph_change(self, event=None):\n- \"\"\" update the shader when the graph data changes \"\"\"\n+ \"\"\"Update the shader when the graph data changes.\"\"\"\n \n self.graph_shader.use_fade = self.layer.use_fade\n self.graph_shader.tail_length = self.layer.tail_length\n", "issue": "Track vertex shader does not handle blending/opacity correctly\n## \ud83d\udc1b Bug\r\n\r\nTwo somewhat related bugs in `_vispy_tracks_shader.py` in the `Tracks` layer:\r\n1. The opacity slider only changes the vispy node opacity, which has no effect on the vertex shader used for rendering the tracks or graph.\r\n2. 
The translucent mode renders parts of the track, which should not be rendered, as dark lines:\r\n\r\n![bug](https://user-images.githubusercontent.com/8217795/96697072-64ef9b00-1383-11eb-9648-54c92319ae84.gif)\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. run `examples\\tracks_3d.py`\r\n2. Change the blending mode to translucent in the `Tracks` layer\r\n3. Change the opacity slider \r\n\r\n## Expected behavior\r\n\r\n`opacity` should change the track opacity, and the `translucent` blending should not render parts of the track which should not be visible.\r\n\r\n## Environment\r\n\r\nnapari: 0.3.9.dev32+g911e095.d20201007\r\nPlatform: Linux-4.15.0-1100-oem-x86_64-with-debian-buster-sid\r\nPython: 3.7.7 (default, May 7 2020, 21:25:33) [GCC 7.3.0]\r\nQt: 5.14.2\r\nPySide2: 5.14.2.2\r\nNumPy: 1.19.1\r\nSciPy: 1.4.1\r\nDask: 2.17.2\r\nVisPy: 0.6.4\r\n\r\nGL version: 4.6.0 NVIDIA 450.80.02\r\nMAX_TEXTURE_SIZE: 32768\r\n\r\nPlugins:\r\n- napari-plugin-engine: 0.1.5\r\n- svg: 0.1.3\r\n\r\n## Additional context\r\nThis relates to the fact that the shader manipulates the alpha values of the vertices directly, late in the rendering pipeline.\n", "before_files": [{"content": "from vispy.scene.visuals import Compound, Line, Text\n\nfrom ._vispy_tracks_shader import TrackShader\nfrom .vispy_base_layer import VispyBaseLayer\n\n\nclass VispyTracksLayer(VispyBaseLayer):\n \"\"\"VispyTracksLayer\n\n Track layer for visualizing tracks.\n\n Components:\n - Track lines (vispy.LineVisual)\n - Track IDs (vispy.TextVisual)\n - Graph edges (vispy.LineVisual)\n\n \"\"\"\n\n def __init__(self, layer):\n node = Compound([Line(), Text(), Line()])\n super().__init__(layer, node)\n\n self.layer.events.tail_width.connect(self._on_appearance_change)\n self.layer.events.tail_length.connect(self._on_appearance_change)\n self.layer.events.display_id.connect(self._on_appearance_change)\n self.layer.events.display_tail.connect(self._on_appearance_change)\n self.layer.events.display_graph.connect(self._on_appearance_change)\n\n self.layer.events.color_by.connect(self._on_appearance_change)\n self.layer.events.colormap.connect(self._on_appearance_change)\n\n # these events are fired when changes occur to the tracks or the\n # graph - as the vertex buffer of the shader needs to be updated\n # alongside the actual vertex data\n self.layer.events.rebuild_tracks.connect(self._on_tracks_change)\n self.layer.events.rebuild_graph.connect(self._on_graph_change)\n\n # build and attach the shader to the track\n self.track_shader = TrackShader()\n self.graph_shader = TrackShader()\n node._subvisuals[0].attach(self.track_shader)\n node._subvisuals[2].attach(self.graph_shader)\n\n # text label properties\n self.node._subvisuals[1].color = 'white'\n self.node._subvisuals[1].font_size = 8\n\n self._reset_base()\n\n self._on_data_change()\n self._on_appearance_change()\n\n def _on_data_change(self, event=None):\n \"\"\" update the display \"\"\"\n\n # update the shaders\n self.track_shader.current_time = self.layer.current_time\n self.graph_shader.current_time = self.layer.current_time\n\n # add text labels if they're visible\n if self.node._subvisuals[1].visible:\n labels_text, labels_pos = self.layer.track_labels\n self.node._subvisuals[1].text = labels_text\n self.node._subvisuals[1].pos = labels_pos\n\n self.node.update()\n # Call to update order of translation values with new dims:\n self._on_matrix_change()\n\n def _on_appearance_change(self, event=None):\n \"\"\" change the appearance of the data \"\"\"\n\n # 
update shader properties related to appearance\n self.track_shader.use_fade = self.layer.use_fade\n self.track_shader.tail_length = self.layer.tail_length\n self.graph_shader.use_fade = self.layer.use_fade\n self.graph_shader.tail_length = self.layer.tail_length\n\n # set visibility of subvisuals\n self.node._subvisuals[0].visible = self.layer.display_tail\n self.node._subvisuals[1].visible = self.layer.display_id\n self.node._subvisuals[2].visible = self.layer.display_graph\n\n # set the width of the track tails\n self.node._subvisuals[0].set_data(\n width=self.layer.tail_width, color=self.layer.track_colors,\n )\n self.node._subvisuals[2].set_data(width=self.layer.tail_width,)\n\n def _on_tracks_change(self, event=None):\n \"\"\" update the shader when the track data changes \"\"\"\n\n self.track_shader.use_fade = self.layer.use_fade\n self.track_shader.tail_length = self.layer.tail_length\n self.track_shader.vertex_time = self.layer.track_times\n\n # change the data to the vispy line visual\n self.node._subvisuals[0].set_data(\n pos=self.layer._view_data,\n connect=self.layer.track_connex,\n width=self.layer.tail_width,\n color=self.layer.track_colors,\n )\n\n # Call to update order of translation values with new dims:\n self._on_matrix_change()\n\n def _on_graph_change(self, event=None):\n \"\"\" update the shader when the graph data changes \"\"\"\n\n self.graph_shader.use_fade = self.layer.use_fade\n self.graph_shader.tail_length = self.layer.tail_length\n self.graph_shader.vertex_time = self.layer.graph_times\n\n # if the user clears a graph after it has been created, vispy offers\n # no method to clear the data, therefore, we need to set private\n # attributes to None to prevent errors\n if self.layer._view_graph is None:\n self.node._subvisuals[2]._pos = None\n self.node._subvisuals[2]._connect = None\n self.node.update()\n return\n\n self.node._subvisuals[2].set_data(\n pos=self.layer._view_graph,\n connect=self.layer.graph_connex,\n width=self.layer.tail_width,\n color='white',\n )\n\n # Call to update order of translation values with new dims:\n self._on_matrix_change()\n", "path": "napari/_vispy/vispy_tracks_layer.py"}, {"content": "from typing import List, Union\n\nimport numpy as np\nfrom vispy.gloo import VertexBuffer\nfrom vispy.visuals.filters.base_filter import Filter\n\n\nclass TrackShader(Filter):\n \"\"\" TrackShader\n\n Custom vertex and fragment shaders for visualizing tracks quickly with\n vispy. The central assumption is that the tracks are rendered as a\n continuous vispy Line segment, with connections and colors defined when\n the visual is created.\n\n The shader simply changes the visibility and/or fading of the data according\n to the current_time and the associate time metadata for each vertex. This\n is scaled according to the tail length. Points ahead of the current time\n are rendered with alpha set to zero.\n\n Parameters\n ----------\n current_time : int, float\n the current time, which is typically the frame index, although this\n can be an arbitrary float\n tail_length : int, float\n the upper limit on length of the 'tail'\n use_fade : bool\n this will enable/disable tail fading with time\n vertex_time : 1D array, list\n a vector describing the time associated with each vertex\n\n\n TODO\n ----\n - the track is still displayed, albeit with fading, once the track has\n finished but is still within the 'tail_length' window. 
Should it\n disappear?\n - check the shader positioning within the GL pipeline, currently\n overrides layer opacity settings\n\n vertex_mask: 1D array, list\n a vector describing whether to mask each vertex\n\n \"\"\"\n\n VERT_SHADER = \"\"\"\n varying vec4 v_track_color;\n void apply_track_shading() {\n\n float alpha;\n\n if ($a_vertex_time > $current_time) {\n // this is a hack to minimize the frag shader rendering ahead\n // of the current time point due to interpolation\n if ($a_vertex_time <= $current_time + 1){\n alpha = -100.;\n } else {\n alpha = 0.;\n }\n } else {\n // fade the track into the temporal distance, scaled by the\n // maximum tail length from the gui\n float fade = ($current_time - $a_vertex_time) / $tail_length;\n alpha = clamp(1.0-fade, 0.0, 1.0);\n }\n\n // when use_fade is disabled, the entire track is visible\n if ($use_fade == 0) {\n alpha = 1.0;\n }\n\n // set the vertex alpha according to the fade\n v_track_color.a = alpha;\n }\n \"\"\"\n\n FRAG_SHADER = \"\"\"\n varying vec4 v_track_color;\n void apply_track_shading() {\n // interpolate\n gl_FragColor.a = clamp(v_track_color.a, 0.0, 1.0);\n }\n \"\"\"\n\n def __init__(\n self,\n current_time=0,\n tail_length=30,\n use_fade: bool = True,\n vertex_time: Union[List, np.ndarray] = None,\n ):\n\n super().__init__(\n vcode=self.VERT_SHADER, vpos=3, fcode=self.FRAG_SHADER, fpos=9\n )\n\n self.current_time = current_time\n self.tail_length = tail_length\n self.use_fade = use_fade\n self.vertex_time = vertex_time\n\n @property\n def current_time(self) -> Union[int, float]:\n return self._current_time\n\n @current_time.setter\n def current_time(self, n: Union[int, float]):\n self._current_time = n\n if isinstance(n, slice):\n n = np.max(self._vertex_time)\n self.vshader['current_time'] = float(n)\n\n @property\n def use_fade(self) -> bool:\n return self._use_fade\n\n @use_fade.setter\n def use_fade(self, value: bool):\n self._use_fade = value\n self.vshader['use_fade'] = float(self._use_fade)\n\n @property\n def tail_length(self) -> Union[int, float]:\n return self._tail_length\n\n @tail_length.setter\n def tail_length(self, tail_length: Union[int, float]):\n self._tail_length = tail_length\n self.vshader['tail_length'] = float(self._tail_length)\n\n def _attach(self, visual):\n super()._attach(visual)\n\n @property\n def vertex_time(self):\n return self._vertex_time\n\n @vertex_time.setter\n def vertex_time(self, v_time):\n self._vertex_time = np.array(v_time).reshape(-1, 1).astype(np.float32)\n self.vshader['a_vertex_time'] = VertexBuffer(self.vertex_time)\n", "path": "napari/_vispy/_vispy_tracks_shader.py"}], "after_files": [{"content": "from vispy.scene.visuals import Compound, Line, Text\n\nfrom ._vispy_tracks_shader import TrackShader\nfrom .vispy_base_layer import VispyBaseLayer\n\n\nclass VispyTracksLayer(VispyBaseLayer):\n \"\"\"VispyTracksLayer\n\n Track layer for visualizing tracks.\n\n Components:\n - Track lines (vispy.LineVisual)\n - Track IDs (vispy.TextVisual)\n - Graph edges (vispy.LineVisual)\n\n \"\"\"\n\n def __init__(self, layer):\n node = Compound([Line(), Text(), Line()])\n super().__init__(layer, node)\n\n self.layer.events.tail_width.connect(self._on_appearance_change)\n self.layer.events.tail_length.connect(self._on_appearance_change)\n self.layer.events.display_id.connect(self._on_appearance_change)\n self.layer.events.display_tail.connect(self._on_appearance_change)\n self.layer.events.display_graph.connect(self._on_appearance_change)\n\n 
self.layer.events.color_by.connect(self._on_appearance_change)\n self.layer.events.colormap.connect(self._on_appearance_change)\n\n # these events are fired when changes occur to the tracks or the\n # graph - as the vertex buffer of the shader needs to be updated\n # alongside the actual vertex data\n self.layer.events.rebuild_tracks.connect(self._on_tracks_change)\n self.layer.events.rebuild_graph.connect(self._on_graph_change)\n\n # build and attach the shader to the track\n self.track_shader = TrackShader()\n self.graph_shader = TrackShader()\n node._subvisuals[0].attach(self.track_shader)\n node._subvisuals[2].attach(self.graph_shader)\n\n # text label properties\n self.node._subvisuals[1].color = 'white'\n self.node._subvisuals[1].font_size = 8\n\n self._reset_base()\n\n self._on_data_change()\n self._on_appearance_change()\n\n def _on_data_change(self, event=None):\n \"\"\"Update the display.\"\"\"\n\n # update the shaders\n self.track_shader.current_time = self.layer.current_time\n self.graph_shader.current_time = self.layer.current_time\n\n # add text labels if they're visible\n if self.node._subvisuals[1].visible:\n labels_text, labels_pos = self.layer.track_labels\n self.node._subvisuals[1].text = labels_text\n self.node._subvisuals[1].pos = labels_pos\n\n self.node.update()\n # Call to update order of translation values with new dims:\n self._on_matrix_change()\n\n def _on_appearance_change(self, event=None):\n \"\"\"Change the appearance of the data.\"\"\"\n\n # update shader properties related to appearance\n self.track_shader.use_fade = self.layer.use_fade\n self.track_shader.tail_length = self.layer.tail_length\n self.graph_shader.use_fade = self.layer.use_fade\n self.graph_shader.tail_length = self.layer.tail_length\n\n # set visibility of subvisuals\n self.node._subvisuals[0].visible = self.layer.display_tail\n self.node._subvisuals[1].visible = self.layer.display_id\n self.node._subvisuals[2].visible = self.layer.display_graph\n\n # set the width of the track tails\n self.node._subvisuals[0].set_data(\n width=self.layer.tail_width, color=self.layer.track_colors,\n )\n self.node._subvisuals[2].set_data(width=self.layer.tail_width,)\n\n def _on_tracks_change(self, event=None):\n \"\"\"Update the shader when the track data changes.\"\"\"\n\n self.track_shader.use_fade = self.layer.use_fade\n self.track_shader.tail_length = self.layer.tail_length\n self.track_shader.vertex_time = self.layer.track_times\n\n # change the data to the vispy line visual\n self.node._subvisuals[0].set_data(\n pos=self.layer._view_data,\n connect=self.layer.track_connex,\n width=self.layer.tail_width,\n color=self.layer.track_colors,\n )\n\n # Call to update order of translation values with new dims:\n self._on_matrix_change()\n\n def _on_graph_change(self, event=None):\n \"\"\"Update the shader when the graph data changes.\"\"\"\n\n self.graph_shader.use_fade = self.layer.use_fade\n self.graph_shader.tail_length = self.layer.tail_length\n self.graph_shader.vertex_time = self.layer.graph_times\n\n # if the user clears a graph after it has been created, vispy offers\n # no method to clear the data, therefore, we need to set private\n # attributes to None to prevent errors\n if self.layer._view_graph is None:\n self.node._subvisuals[2]._pos = None\n self.node._subvisuals[2]._connect = None\n self.node.update()\n return\n\n self.node._subvisuals[2].set_data(\n pos=self.layer._view_graph,\n connect=self.layer.graph_connex,\n width=self.layer.tail_width,\n color='white',\n )\n\n # Call to update order of 
translation values with new dims:\n self._on_matrix_change()\n", "path": "napari/_vispy/vispy_tracks_layer.py"}, {"content": "from typing import List, Union\n\nimport numpy as np\nfrom vispy.gloo import VertexBuffer\nfrom vispy.visuals.filters.base_filter import Filter\n\n\nclass TrackShader(Filter):\n \"\"\"TrackShader.\n\n Custom vertex and fragment shaders for visualizing tracks quickly with\n vispy. The central assumption is that the tracks are rendered as a\n continuous vispy Line segment, with connections and colors defined when\n the visual is created.\n\n The shader simply changes the visibility and/or fading of the data according\n to the current_time and the associate time metadata for each vertex. This\n is scaled according to the tail length. Points ahead of the current time\n are rendered with alpha set to zero.\n\n Parameters\n ----------\n current_time : int, float\n the current time, which is typically the frame index, although this\n can be an arbitrary float\n tail_length : int, float\n the upper limit on length of the 'tail'\n use_fade : bool\n this will enable/disable tail fading with time\n vertex_time : 1D array, list\n a vector describing the time associated with each vertex\n\n TODO\n ----\n - the track is still displayed, albeit with fading, once the track has\n finished but is still within the 'tail_length' window. Should it\n disappear?\n\n \"\"\"\n\n VERT_SHADER = \"\"\"\n varying vec4 v_track_color;\n void apply_track_shading() {\n\n float alpha;\n\n if ($a_vertex_time > $current_time) {\n // this is a hack to minimize the frag shader rendering ahead\n // of the current time point due to interpolation\n if ($a_vertex_time <= $current_time + 1){\n alpha = -100.;\n } else {\n alpha = 0.;\n }\n } else {\n // fade the track into the temporal distance, scaled by the\n // maximum tail length from the gui\n float fade = ($current_time - $a_vertex_time) / $tail_length;\n alpha = clamp(1.0-fade, 0.0, 1.0);\n }\n\n // when use_fade is disabled, the entire track is visible\n if ($use_fade == 0) {\n alpha = 1.0;\n }\n\n // set the vertex alpha according to the fade\n v_track_color.a = alpha;\n }\n \"\"\"\n\n FRAG_SHADER = \"\"\"\n varying vec4 v_track_color;\n void apply_track_shading() {\n\n // if the alpha is below the threshold, discard the fragment\n if( v_track_color.a <= 0.0 ) {\n discard;\n }\n\n // interpolate\n gl_FragColor.a = clamp(v_track_color.a * gl_FragColor.a, 0.0, 1.0);\n }\n \"\"\"\n\n def __init__(\n self,\n current_time: Union[int, float] = 0,\n tail_length: Union[int, float] = 30,\n use_fade: bool = True,\n vertex_time: Union[List, np.ndarray] = None,\n ):\n\n super().__init__(\n vcode=self.VERT_SHADER, vpos=3, fcode=self.FRAG_SHADER, fpos=9\n )\n\n self.current_time = current_time\n self.tail_length = tail_length\n self.use_fade = use_fade\n self.vertex_time = vertex_time\n\n @property\n def current_time(self) -> Union[int, float]:\n return self._current_time\n\n @current_time.setter\n def current_time(self, n: Union[int, float]):\n self._current_time = n\n if isinstance(n, slice):\n n = np.max(self._vertex_time)\n self.vshader['current_time'] = float(n)\n\n @property\n def use_fade(self) -> bool:\n return self._use_fade\n\n @use_fade.setter\n def use_fade(self, value: bool):\n self._use_fade = value\n self.vshader['use_fade'] = float(self._use_fade)\n\n @property\n def tail_length(self) -> Union[int, float]:\n return self._tail_length\n\n @tail_length.setter\n def tail_length(self, tail_length: Union[int, float]):\n self._tail_length = tail_length\n 
self.vshader['tail_length'] = float(self._tail_length)\n\n def _attach(self, visual):\n super()._attach(visual)\n\n @property\n def vertex_time(self):\n return self._vertex_time\n\n @vertex_time.setter\n def vertex_time(self, v_time):\n self._vertex_time = np.array(v_time).reshape(-1, 1).astype(np.float32)\n self.vshader['a_vertex_time'] = VertexBuffer(self.vertex_time)\n", "path": "napari/_vispy/_vispy_tracks_shader.py"}]}
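The golden patch above fixes the translucent-blending artifact in two places: the patched FRAG_SHADER discards fully-faded fragments instead of drawing them (removing the dark lines), and it multiplies the track alpha into `gl_FragColor.a` so the layer opacity slider takes effect. As a rough illustration of which fragments the new `discard` branch removes, here is a plain-NumPy model of the VERT_SHADER fade logic (not napari code; the sample values are made up):

```python
import numpy as np

def track_alpha(vertex_time, current_time, tail_length, use_fade=True):
    """Python model of the per-vertex fade computed in VERT_SHADER."""
    vertex_time = np.asarray(vertex_time, dtype=np.float32)
    if not use_fade:
        return np.ones_like(vertex_time)
    fade = (current_time - vertex_time) / tail_length
    alpha = np.clip(1.0 - fade, 0.0, 1.0)
    alpha[vertex_time > current_time] = 0.0  # points ahead of "now" are hidden
    return alpha

# Vertices at t=30 and t=40 lie ahead of current_time=25, so alpha == 0:
# exactly the fragments the patched FRAG_SHADER now discards.
print(track_alpha([0, 10, 20, 30, 40], current_time=25, tail_length=30))
```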
num_tokens: 3,538
num_tokens_diff: 837

problem_id: gh_patches_debug_32652
source: rasdani/github-patches
task_type: git_diff
in_source_id: getnikola__nikola-1267
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to use Tidy on html Hi, I enabled the tidy filter on html file: from nikola import filters FILTERS = { ".html": [filters.tidy], } and now NIkola refuses to build my site: TaskError - taskid:render_tags:../categories/index.html PythonAction Error Traceback (most recent call last): File "/usr/lib64/python3.3/site-packages/doit/action.py", line 360, in execute returned_value = self.py_callable(_self.args, *_kwargs) File "/usr/lib64/python3.3/site-packages/nikola/utils.py", line 529, in unlessLink action(target) File "/usr/lib64/python3.3/site-packages/nikola/filters.py", line 144, in tidy for line in output.split("\n"): TypeError: Type str doesn't support the buffer AP Is there a problem with tidy ? Can I use another thing as a replacement ? Unable to use Tidy on html Hi, I enabled the tidy filter on html file: from nikola import filters FILTERS = { ".html": [filters.tidy], } and now NIkola refuses to build my site: TaskError - taskid:render_tags:../categories/index.html PythonAction Error Traceback (most recent call last): File "/usr/lib64/python3.3/site-packages/doit/action.py", line 360, in execute returned_value = self.py_callable(_self.args, *_kwargs) File "/usr/lib64/python3.3/site-packages/nikola/utils.py", line 529, in unlessLink action(target) File "/usr/lib64/python3.3/site-packages/nikola/filters.py", line 144, in tidy for line in output.split("\n"): TypeError: Type str doesn't support the buffer AP Is there a problem with tidy ? Can I use another thing as a replacement ? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nikola/filters.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2014 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Utility functions to help you run filters on files.""" 28 29 from .utils import req_missing 30 from functools import wraps 31 import os 32 import codecs 33 import re 34 import shutil 35 import subprocess 36 import tempfile 37 import shlex 38 39 try: 40 import typogrify.filters as typo 41 except ImportError: 42 typo = None # NOQA 43 44 45 def apply_to_binary_file(f): 46 """Take a function f that transforms a data argument, and returns 47 a function that takes a filename and applies f to the contents, 48 in place. 
Reads files in binary mode.""" 49 @wraps(f) 50 def f_in_file(fname): 51 with open(fname, 'rb') as inf: 52 data = inf.read() 53 data = f(data) 54 with open(fname, 'wb+') as outf: 55 outf.write(data) 56 57 return f_in_file 58 59 60 def apply_to_text_file(f): 61 """Take a function f that transforms a data argument, and returns 62 a function that takes a filename and applies f to the contents, 63 in place. Reads files in UTF-8.""" 64 @wraps(f) 65 def f_in_file(fname): 66 with codecs.open(fname, 'r', 'utf-8') as inf: 67 data = inf.read() 68 data = f(data) 69 with codecs.open(fname, 'w+', 'utf-8') as outf: 70 outf.write(data) 71 72 return f_in_file 73 74 75 def list_replace(the_list, find, replacement): 76 "Replace all occurrences of ``find`` with ``replacement`` in ``the_list``" 77 for i, v in enumerate(the_list): 78 if v == find: 79 the_list[i] = replacement 80 81 82 def runinplace(command, infile): 83 """Run a command in-place on a file. 84 85 command is a string of the form: "commandname %1 %2" and 86 it will be execed with infile as %1 and a temporary file 87 as %2. Then, that temporary file will be moved over %1. 88 89 Example usage: 90 91 runinplace("yui-compressor %1 -o %2", "myfile.css") 92 93 That will replace myfile.css with a minified version. 94 95 You can also supply command as a list. 96 """ 97 98 if not isinstance(command, list): 99 command = shlex.split(command) 100 101 tmpdir = None 102 103 if "%2" in command: 104 tmpdir = tempfile.mkdtemp(prefix="nikola") 105 tmpfname = os.path.join(tmpdir, os.path.basename(infile)) 106 107 try: 108 list_replace(command, "%1", infile) 109 if tmpdir: 110 list_replace(command, "%2", tmpfname) 111 112 subprocess.check_call(command) 113 114 if tmpdir: 115 shutil.move(tmpfname, infile) 116 finally: 117 if tmpdir: 118 shutil.rmtree(tmpdir) 119 120 121 def yui_compressor(infile): 122 yuicompressor = False 123 try: 124 subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')) 125 yuicompressor = 'yui-compressor' 126 except Exception: 127 pass 128 if not yuicompressor: 129 try: 130 subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')) 131 yuicompressor = 'yuicompressor' 132 except: 133 raise Exception("yui-compressor is not installed.") 134 return False 135 136 return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile) 137 138 139 def optipng(infile): 140 return runinplace(r"optipng -preserve -o2 -quiet %1", infile) 141 142 143 def jpegoptim(infile): 144 return runinplace(r"jpegoptim -p --strip-all -q %1", infile) 145 146 147 def tidy(inplace): 148 # Google site verifcation files are not HTML 149 if re.match(r"google[a-f0-9]+.html", os.path.basename(inplace)) \ 150 and open(inplace).readline().startswith( 151 "google-site-verification:"): 152 return 153 154 # Tidy will give error exits, that we will ignore. 155 output = subprocess.check_output( 156 "tidy -m -w 90 --indent no --quote-marks" 157 "no --keep-time yes --tidy-mark no " 158 "--force-output yes '{0}'; exit 0".format(inplace), stderr=subprocess.STDOUT, shell=True) 159 160 for line in output.split("\n"): 161 if "Warning:" in line: 162 if '<meta> proprietary attribute "charset"' in line: 163 # We want to set it though. 164 continue 165 elif '<meta> lacks "content" attribute' in line: 166 # False alarm to me. 167 continue 168 elif '<div> anchor' in line and 'already defined' in line: 169 # Some seeming problem with JavaScript terminators. 
170 continue 171 elif '<img> lacks "alt" attribute' in line: 172 # Happens in gallery code, probably can be tolerated. 173 continue 174 elif '<table> lacks "summary" attribute' in line: 175 # Happens for tables, TODO: Check this is normal. 176 continue 177 elif 'proprietary attribute "data-toggle"' in line or \ 178 'proprietary attribute "data-target"': 179 # Some of our own tricks 180 continue 181 else: 182 assert False, (inplace, line) 183 elif "Error:" in line: 184 if '<time> is not recognized' in line: 185 # False alarm, time is proper HTML5. 186 continue 187 else: 188 assert False, line 189 190 191 @apply_to_text_file 192 def typogrify(data): 193 if typo is None: 194 req_missing(['typogrify'], 'use the typogrify filter') 195 196 data = typo.amp(data) 197 data = typo.widont(data) 198 data = typo.smartypants(data) 199 # Disabled because of typogrify bug where it breaks <title> 200 # data = typo.caps(data) 201 data = typo.initial_quotes(data) 202 return data 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nikola/filters.py b/nikola/filters.py --- a/nikola/filters.py +++ b/nikola/filters.py @@ -30,7 +30,6 @@ from functools import wraps import os import codecs -import re import shutil import subprocess import tempfile @@ -144,50 +143,6 @@ return runinplace(r"jpegoptim -p --strip-all -q %1", infile) -def tidy(inplace): - # Google site verifcation files are not HTML - if re.match(r"google[a-f0-9]+.html", os.path.basename(inplace)) \ - and open(inplace).readline().startswith( - "google-site-verification:"): - return - - # Tidy will give error exits, that we will ignore. - output = subprocess.check_output( - "tidy -m -w 90 --indent no --quote-marks" - "no --keep-time yes --tidy-mark no " - "--force-output yes '{0}'; exit 0".format(inplace), stderr=subprocess.STDOUT, shell=True) - - for line in output.split("\n"): - if "Warning:" in line: - if '<meta> proprietary attribute "charset"' in line: - # We want to set it though. - continue - elif '<meta> lacks "content" attribute' in line: - # False alarm to me. - continue - elif '<div> anchor' in line and 'already defined' in line: - # Some seeming problem with JavaScript terminators. - continue - elif '<img> lacks "alt" attribute' in line: - # Happens in gallery code, probably can be tolerated. - continue - elif '<table> lacks "summary" attribute' in line: - # Happens for tables, TODO: Check this is normal. - continue - elif 'proprietary attribute "data-toggle"' in line or \ - 'proprietary attribute "data-target"': - # Some of our own tricks - continue - else: - assert False, (inplace, line) - elif "Error:" in line: - if '<time> is not recognized' in line: - # False alarm, time is proper HTML5. - continue - else: - assert False, line - - @apply_to_text_file def typogrify(data): if typo is None:
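The golden diff removes the `tidy` filter outright, but the traceback in the issue pinpoints a classic Python 3 bytes/str mismatch: `subprocess.check_output` returns `bytes`, so `output.split("\n")` raises `TypeError`. A minimal sketch of the decode fix that would have kept the filter working (illustrative only; `echo` stands in for the real tidy invocation so the snippet runs without tidy installed):

```python
import subprocess

output = subprocess.check_output(
    "echo 'Warning: demo line'; exit 0",  # stand-in for the tidy command
    stderr=subprocess.STDOUT,
    shell=True,
)
# Decode the bytes before splitting on a str separator (or split on b"\n").
for line in output.decode("utf-8", errors="replace").split("\n"):
    if "Warning:" in line:
        print("warning seen:", line)
```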
{"golden_diff": "diff --git a/nikola/filters.py b/nikola/filters.py\n--- a/nikola/filters.py\n+++ b/nikola/filters.py\n@@ -30,7 +30,6 @@\n from functools import wraps\n import os\n import codecs\n-import re\n import shutil\n import subprocess\n import tempfile\n@@ -144,50 +143,6 @@\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n \n \n-def tidy(inplace):\n- # Google site verifcation files are not HTML\n- if re.match(r\"google[a-f0-9]+.html\", os.path.basename(inplace)) \\\n- and open(inplace).readline().startswith(\n- \"google-site-verification:\"):\n- return\n-\n- # Tidy will give error exits, that we will ignore.\n- output = subprocess.check_output(\n- \"tidy -m -w 90 --indent no --quote-marks\"\n- \"no --keep-time yes --tidy-mark no \"\n- \"--force-output yes '{0}'; exit 0\".format(inplace), stderr=subprocess.STDOUT, shell=True)\n-\n- for line in output.split(\"\\n\"):\n- if \"Warning:\" in line:\n- if '<meta> proprietary attribute \"charset\"' in line:\n- # We want to set it though.\n- continue\n- elif '<meta> lacks \"content\" attribute' in line:\n- # False alarm to me.\n- continue\n- elif '<div> anchor' in line and 'already defined' in line:\n- # Some seeming problem with JavaScript terminators.\n- continue\n- elif '<img> lacks \"alt\" attribute' in line:\n- # Happens in gallery code, probably can be tolerated.\n- continue\n- elif '<table> lacks \"summary\" attribute' in line:\n- # Happens for tables, TODO: Check this is normal.\n- continue\n- elif 'proprietary attribute \"data-toggle\"' in line or \\\n- 'proprietary attribute \"data-target\"':\n- # Some of our own tricks\n- continue\n- else:\n- assert False, (inplace, line)\n- elif \"Error:\" in line:\n- if '<time> is not recognized' in line:\n- # False alarm, time is proper HTML5.\n- continue\n- else:\n- assert False, line\n-\n-\n @apply_to_text_file\n def typogrify(data):\n if typo is None:\n", "issue": "Unable to use Tidy on html\nHi, \n\nI enabled the tidy filter on html file:\n\nfrom nikola import filters\nFILTERS = {\n \".html\": [filters.tidy],\n }\n\nand now NIkola refuses to build my site: \n\nTaskError - taskid:render_tags:../categories/index.html\nPythonAction Error\nTraceback (most recent call last):\n File \"/usr/lib64/python3.3/site-packages/doit/action.py\", line 360, in execute\n returned_value = self.py_callable(_self.args, *_kwargs)\n File \"/usr/lib64/python3.3/site-packages/nikola/utils.py\", line 529, in unlessLink\n action(target)\n File \"/usr/lib64/python3.3/site-packages/nikola/filters.py\", line 144, in tidy\n for line in output.split(\"\\n\"):\nTypeError: Type str doesn't support the buffer AP\n\nIs there a problem with tidy ? Can I use another thing as a replacement ?\n\nUnable to use Tidy on html\nHi, \n\nI enabled the tidy filter on html file:\n\nfrom nikola import filters\nFILTERS = {\n \".html\": [filters.tidy],\n }\n\nand now NIkola refuses to build my site: \n\nTaskError - taskid:render_tags:../categories/index.html\nPythonAction Error\nTraceback (most recent call last):\n File \"/usr/lib64/python3.3/site-packages/doit/action.py\", line 360, in execute\n returned_value = self.py_callable(_self.args, *_kwargs)\n File \"/usr/lib64/python3.3/site-packages/nikola/utils.py\", line 529, in unlessLink\n action(target)\n File \"/usr/lib64/python3.3/site-packages/nikola/filters.py\", line 144, in tidy\n for line in output.split(\"\\n\"):\nTypeError: Type str doesn't support the buffer AP\n\nIs there a problem with tidy ? 
Can I use another thing as a replacement ?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help you run filters on files.\"\"\"\n\nfrom .utils import req_missing\nfrom functools import wraps\nimport os\nimport codecs\nimport re\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\n\n\ndef apply_to_binary_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in binary mode.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in UTF-8.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with codecs.open(fname, 'r', 'utf-8') as inf:\n data = inf.read()\n data = f(data)\n with codecs.open(fname, 'w+', 'utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. 
Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef optipng(infile):\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\ndef tidy(inplace):\n # Google site verifcation files are not HTML\n if re.match(r\"google[a-f0-9]+.html\", os.path.basename(inplace)) \\\n and open(inplace).readline().startswith(\n \"google-site-verification:\"):\n return\n\n # Tidy will give error exits, that we will ignore.\n output = subprocess.check_output(\n \"tidy -m -w 90 --indent no --quote-marks\"\n \"no --keep-time yes --tidy-mark no \"\n \"--force-output yes '{0}'; exit 0\".format(inplace), stderr=subprocess.STDOUT, shell=True)\n\n for line in output.split(\"\\n\"):\n if \"Warning:\" in line:\n if '<meta> proprietary attribute \"charset\"' in line:\n # We want to set it though.\n continue\n elif '<meta> lacks \"content\" attribute' in line:\n # False alarm to me.\n continue\n elif '<div> anchor' in line and 'already defined' in line:\n # Some seeming problem with JavaScript terminators.\n continue\n elif '<img> lacks \"alt\" attribute' in line:\n # Happens in gallery code, probably can be tolerated.\n continue\n elif '<table> lacks \"summary\" attribute' in line:\n # Happens for tables, TODO: Check this is normal.\n continue\n elif 'proprietary attribute \"data-toggle\"' in line or \\\n 'proprietary attribute \"data-target\"':\n # Some of our own tricks\n continue\n else:\n assert False, (inplace, line)\n elif \"Error:\" in line:\n if '<time> is not recognized' in line:\n # False alarm, time is proper HTML5.\n continue\n else:\n assert False, line\n\n\n@apply_to_text_file\ndef typogrify(data):\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n", "path": "nikola/filters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without 
restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help you run filters on files.\"\"\"\n\nfrom .utils import req_missing\nfrom functools import wraps\nimport os\nimport codecs\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\n\n\ndef apply_to_binary_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in binary mode.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in UTF-8.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with codecs.open(fname, 'r', 'utf-8') as inf:\n data = inf.read()\n data = f(data)\n with codecs.open(fname, 'w+', 'utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. 
Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef optipng(infile):\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\n@apply_to_text_file\ndef typogrify(data):\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n", "path": "nikola/filters.py"}]}
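For the reporter's follow-up question ("Can I use another thing as a replacement?"), the module's own `apply_to_text_file` decorator, present in both file listings above, is the natural hook for a custom filter. A toy sketch with a hypothetical filter name, wired into `conf.py` the same way the issue wired `filters.tidy`:

```python
from nikola.filters import apply_to_text_file

@apply_to_text_file
def strip_trailing_whitespace(data):
    # Hypothetical stand-in for tidy: only normalizes trailing spaces.
    return "\n".join(line.rstrip() for line in data.split("\n"))

# In conf.py, mirroring the FILTERS mapping from the issue:
FILTERS = {
    ".html": [strip_trailing_whitespace],
}
```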
num_tokens: 2,767
num_tokens_diff: 554

problem_id: gh_patches_debug_20428
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyodide__pyodide-1231
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Minor fix: add parameter for shallowCopyToJavascript Follow #1202 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conftest.py` Content: ``` 1 """ 2 Various common utilities for testing. 3 """ 4 5 import contextlib 6 import multiprocessing 7 import textwrap 8 import tempfile 9 import time 10 import os 11 import pathlib 12 import queue 13 import sys 14 import shutil 15 16 ROOT_PATH = pathlib.Path(__file__).parents[0].resolve() 17 TEST_PATH = ROOT_PATH / "src" / "tests" 18 BUILD_PATH = ROOT_PATH / "build" 19 20 sys.path.append(str(ROOT_PATH)) 21 22 from pyodide_build._fixes import _selenium_is_connectable # noqa: E402 23 import selenium.webdriver.common.utils # noqa: E402 24 25 # XXX: Temporary fix for ConnectionError in selenium 26 27 selenium.webdriver.common.utils.is_connectable = _selenium_is_connectable 28 29 try: 30 import pytest 31 32 def pytest_addoption(parser): 33 group = parser.getgroup("general") 34 group.addoption( 35 "--build-dir", 36 action="store", 37 default=BUILD_PATH, 38 help="Path to the build directory", 39 ) 40 group.addoption( 41 "--run-xfail", 42 action="store_true", 43 help="If provided, tests marked as xfail will be run", 44 ) 45 46 47 except ImportError: 48 pytest = None # type: ignore 49 50 51 class JavascriptException(Exception): 52 def __init__(self, msg, stack): 53 self.msg = msg 54 self.stack = stack 55 # In chrome the stack contains the message 56 if self.stack and self.stack.startswith(self.msg): 57 self.msg = "" 58 59 def __str__(self): 60 return "\n\n".join(x for x in [self.msg, self.stack] if x) 61 62 63 class SeleniumWrapper: 64 JavascriptException = JavascriptException 65 66 def __init__( 67 self, server_port, server_hostname="127.0.0.1", server_log=None, build_dir=None 68 ): 69 if build_dir is None: 70 build_dir = BUILD_PATH 71 72 self.driver = self.get_driver() 73 self.server_port = server_port 74 self.server_hostname = server_hostname 75 self.server_log = server_log 76 77 if not (pathlib.Path(build_dir) / "test.html").exists(): 78 # selenium does not expose HTTP response codes 79 raise ValueError( 80 f"{(build_dir / 'test.html').resolve()} " f"does not exist!" 
81 ) 82 self.driver.get(f"http://{server_hostname}:{server_port}/test.html") 83 self.run_js("Error.stackTraceLimit = Infinity") 84 self.run_js("await languagePluginLoader") 85 86 @property 87 def logs(self): 88 logs = self.driver.execute_script("return window.logs") 89 if logs is not None: 90 return "\n".join(str(x) for x in logs) 91 else: 92 return "" 93 94 def clean_logs(self): 95 self.driver.execute_script("window.logs = []") 96 97 def run(self, code): 98 return self.run_js( 99 f""" 100 let result = pyodide.runPython({code!r}); 101 if(result && result.deepCopyToJavascript){{ 102 let converted_result = result.deepCopyToJavascript(); 103 result.destroy(); 104 return converted_result; 105 }} 106 return result; 107 """ 108 ) 109 110 def run_async(self, code): 111 return self.run_js( 112 f""" 113 let result = await pyodide.runPythonAsync({code!r}); 114 if(result && result.deepCopyToJavascript){{ 115 let converted_result = result.deepCopyToJavascript(); 116 result.destroy(); 117 return converted_result; 118 }} 119 return result; 120 """ 121 ) 122 123 def run_js(self, code): 124 if isinstance(code, str) and code.startswith("\n"): 125 # we have a multiline string, fix indentation 126 code = textwrap.dedent(code) 127 128 wrapper = """ 129 let cb = arguments[arguments.length - 1]; 130 let run = async () => { %s } 131 (async () => { 132 try { 133 let result = await run(); 134 if(pyodide && pyodide._module && pyodide._module._PyErr_Occurred()){ 135 try { 136 pyodide._module._pythonexc2js(); 137 } catch(e){ 138 console.error(`Python exited with error flag set! Error was:\n{e.message}`); 139 // Don't put original error message in new one: we want 140 // "pytest.raises(xxx, match=msg)" to fail 141 throw new Error(`Python exited with error flag set!`); 142 } 143 } 144 cb([0, result]); 145 } catch (e) { 146 cb([1, e.toString(), e.stack]); 147 } 148 })() 149 """ 150 151 retval = self.driver.execute_async_script(wrapper % code) 152 153 if retval[0] == 0: 154 return retval[1] 155 else: 156 raise JavascriptException(retval[1], retval[2]) 157 158 def run_webworker(self, code): 159 if isinstance(code, str) and code.startswith("\n"): 160 # we have a multiline string, fix indentation 161 code = textwrap.dedent(code) 162 163 return self.run_js( 164 """ 165 let worker = new Worker( '{}' ); 166 worker.postMessage({{ python: {!r} }}); 167 return new Promise((res, rej) => {{ 168 worker.onerror = e => rej(e); 169 worker.onmessage = e => {{ 170 if (e.data.results) {{ 171 res(e.data.results); 172 }} else {{ 173 rej(e.data.error); 174 }} 175 }}; 176 }}) 177 """.format( 178 f"http://{self.server_hostname}:{self.server_port}/webworker_dev.js", 179 code, 180 ) 181 ) 182 183 def load_package(self, packages): 184 self.run_js("await pyodide.loadPackage({!r})".format(packages)) 185 186 @property 187 def urls(self): 188 for handle in self.driver.window_handles: 189 self.driver.switch_to.window(handle) 190 yield self.driver.current_url 191 192 193 class FirefoxWrapper(SeleniumWrapper): 194 195 browser = "firefox" 196 197 def get_driver(self): 198 from selenium.webdriver import Firefox 199 from selenium.webdriver.firefox.options import Options 200 201 options = Options() 202 options.add_argument("-headless") 203 204 return Firefox(executable_path="geckodriver", options=options) 205 206 207 class ChromeWrapper(SeleniumWrapper): 208 209 browser = "chrome" 210 211 def get_driver(self): 212 from selenium.webdriver import Chrome 213 from selenium.webdriver.chrome.options import Options 214 215 options = Options() 216 
options.add_argument("--headless") 217 options.add_argument("--no-sandbox") 218 219 return Chrome(options=options) 220 221 222 if pytest is not None: 223 224 @pytest.fixture(params=["firefox", "chrome"]) 225 def selenium_standalone(request, web_server_main): 226 server_hostname, server_port, server_log = web_server_main 227 if request.param == "firefox": 228 cls = FirefoxWrapper 229 elif request.param == "chrome": 230 cls = ChromeWrapper 231 selenium = cls( 232 build_dir=request.config.option.build_dir, 233 server_port=server_port, 234 server_hostname=server_hostname, 235 server_log=server_log, 236 ) 237 try: 238 yield selenium 239 finally: 240 print(selenium.logs) 241 selenium.driver.quit() 242 243 @pytest.fixture(params=["firefox", "chrome"], scope="module") 244 def _selenium_cached(request, web_server_main): 245 # Cached selenium instance. This is a copy-paste of 246 # selenium_standalone to avoid fixture scope issues 247 server_hostname, server_port, server_log = web_server_main 248 if request.param == "firefox": 249 cls = FirefoxWrapper 250 elif request.param == "chrome": 251 cls = ChromeWrapper 252 selenium = cls( 253 build_dir=request.config.option.build_dir, 254 server_port=server_port, 255 server_hostname=server_hostname, 256 server_log=server_log, 257 ) 258 try: 259 yield selenium 260 finally: 261 selenium.driver.quit() 262 263 @pytest.fixture 264 def selenium(_selenium_cached): 265 # selenium instance cached at the module level 266 try: 267 _selenium_cached.clean_logs() 268 yield _selenium_cached 269 finally: 270 print(_selenium_cached.logs) 271 272 273 @pytest.fixture(scope="session") 274 def web_server_main(request): 275 """Web server that serves files in the build/ directory""" 276 with spawn_web_server(request.config.option.build_dir) as output: 277 yield output 278 279 280 @pytest.fixture(scope="session") 281 def web_server_secondary(request): 282 """Secondary web server that serves files build/ directory""" 283 with spawn_web_server(request.config.option.build_dir) as output: 284 yield output 285 286 287 @pytest.fixture(scope="session") 288 def web_server_tst_data(request): 289 """Web server that serves files in the src/tests/data/ directory""" 290 with spawn_web_server(TEST_PATH / "data") as output: 291 yield output 292 293 294 @contextlib.contextmanager 295 def spawn_web_server(build_dir=None): 296 297 if build_dir is None: 298 build_dir = BUILD_PATH 299 300 tmp_dir = tempfile.mkdtemp() 301 log_path = pathlib.Path(tmp_dir) / "http-server.log" 302 q = multiprocessing.Queue() 303 p = multiprocessing.Process(target=run_web_server, args=(q, log_path, build_dir)) 304 305 try: 306 p.start() 307 port = q.get() 308 hostname = "127.0.0.1" 309 310 print( 311 f"Spawning webserver at http://{hostname}:{port} " 312 f"(see logs in {log_path})" 313 ) 314 yield hostname, port, log_path 315 finally: 316 q.put("TERMINATE") 317 p.join() 318 shutil.rmtree(tmp_dir) 319 320 321 def run_web_server(q, log_filepath, build_dir): 322 """Start the HTTP web server 323 324 Parameters 325 ---------- 326 q : Queue 327 communication queue 328 log_path : pathlib.Path 329 path to the file where to store the logs 330 """ 331 import http.server 332 import socketserver 333 334 os.chdir(build_dir) 335 336 log_fh = log_filepath.open("w", buffering=1) 337 sys.stdout = log_fh 338 sys.stderr = log_fh 339 340 test_prefix = "/src/tests/" 341 342 class Handler(http.server.SimpleHTTPRequestHandler): 343 def log_message(self, format_, *args): 344 print( 345 "[%s] source: %s:%s - %s" 346 % (self.log_date_time_string(), 
*self.client_address, format_ % args) 347 ) 348 349 def end_headers(self): 350 # Enable Cross-Origin Resource Sharing (CORS) 351 self.send_header("Access-Control-Allow-Origin", "*") 352 super().end_headers() 353 354 with socketserver.TCPServer(("", 0), Handler) as httpd: 355 host, port = httpd.server_address 356 print(f"Starting webserver at http://{host}:{port}") 357 httpd.server_name = "test-server" 358 httpd.server_port = port 359 q.put(port) 360 361 def service_actions(): 362 try: 363 if q.get(False) == "TERMINATE": 364 print("Stopping server...") 365 sys.exit(0) 366 except queue.Empty: 367 pass 368 369 httpd.service_actions = service_actions 370 httpd.serve_forever() 371 372 373 if ( 374 __name__ == "__main__" 375 and multiprocessing.current_process().name == "MainProcess" 376 and not hasattr(sys, "_pytest_session") 377 ): 378 with spawn_web_server(): 379 # run forever 380 while True: 381 time.sleep(1) 382 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conftest.py b/conftest.py --- a/conftest.py +++ b/conftest.py @@ -98,8 +98,8 @@ return self.run_js( f""" let result = pyodide.runPython({code!r}); - if(result && result.deepCopyToJavascript){{ - let converted_result = result.deepCopyToJavascript(); + if(result && result.toJs){{ + let converted_result = result.toJs(); result.destroy(); return converted_result; }} @@ -111,8 +111,8 @@ return self.run_js( f""" let result = await pyodide.runPythonAsync({code!r}); - if(result && result.deepCopyToJavascript){{ - let converted_result = result.deepCopyToJavascript(); + if(result && result.toJs){{ + let converted_result = result.toJs(); result.destroy(); return converted_result; }}
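The patch above is a mechanical rename: `deepCopyToJavascript()` becomes `toJs()` in both `run` and `run_async`, tracking the PyProxy conversion API consolidated in pyodide #1202. Because the identical conversion snippet is duplicated, one optional follow-up, sketched here only as an assumption (it relies on the `run_js` helper from the `SeleniumWrapper` above and is not part of the golden diff), is to hoist the snippet into a shared constant:

```python
class SeleniumWrapperSketch:
    """Hypothetical refactor of the patched methods; not what the diff does."""

    # Shared JS tail appended after the runPython / runPythonAsync call.
    _CONVERT_RESULT_JS = """
        if (result && result.toJs) {
            let converted_result = result.toJs();
            result.destroy();
            return converted_result;
        }
        return result;
    """

    def run(self, code):
        return self.run_js(
            f"let result = pyodide.runPython({code!r});" + self._CONVERT_RESULT_JS
        )

    def run_async(self, code):
        return self.run_js(
            f"let result = await pyodide.runPythonAsync({code!r});"
            + self._CONVERT_RESULT_JS
        )
```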
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -98,8 +98,8 @@\n return self.run_js(\n f\"\"\"\n let result = pyodide.runPython({code!r});\n- if(result && result.deepCopyToJavascript){{\n- let converted_result = result.deepCopyToJavascript();\n+ if(result && result.toJs){{\n+ let converted_result = result.toJs();\n result.destroy();\n return converted_result;\n }}\n@@ -111,8 +111,8 @@\n return self.run_js(\n f\"\"\"\n let result = await pyodide.runPythonAsync({code!r});\n- if(result && result.deepCopyToJavascript){{\n- let converted_result = result.deepCopyToJavascript();\n+ if(result && result.toJs){{\n+ let converted_result = result.toJs();\n result.destroy();\n return converted_result;\n }}\n", "issue": "Minor fix: add parameter for shallowCopyToJavascript \nFollow #1202 \r\n\r\n\n", "before_files": [{"content": "\"\"\"\nVarious common utilities for testing.\n\"\"\"\n\nimport contextlib\nimport multiprocessing\nimport textwrap\nimport tempfile\nimport time\nimport os\nimport pathlib\nimport queue\nimport sys\nimport shutil\n\nROOT_PATH = pathlib.Path(__file__).parents[0].resolve()\nTEST_PATH = ROOT_PATH / \"src\" / \"tests\"\nBUILD_PATH = ROOT_PATH / \"build\"\n\nsys.path.append(str(ROOT_PATH))\n\nfrom pyodide_build._fixes import _selenium_is_connectable # noqa: E402\nimport selenium.webdriver.common.utils # noqa: E402\n\n# XXX: Temporary fix for ConnectionError in selenium\n\nselenium.webdriver.common.utils.is_connectable = _selenium_is_connectable\n\ntry:\n import pytest\n\n def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--build-dir\",\n action=\"store\",\n default=BUILD_PATH,\n help=\"Path to the build directory\",\n )\n group.addoption(\n \"--run-xfail\",\n action=\"store_true\",\n help=\"If provided, tests marked as xfail will be run\",\n )\n\n\nexcept ImportError:\n pytest = None # type: ignore\n\n\nclass JavascriptException(Exception):\n def __init__(self, msg, stack):\n self.msg = msg\n self.stack = stack\n # In chrome the stack contains the message\n if self.stack and self.stack.startswith(self.msg):\n self.msg = \"\"\n\n def __str__(self):\n return \"\\n\\n\".join(x for x in [self.msg, self.stack] if x)\n\n\nclass SeleniumWrapper:\n JavascriptException = JavascriptException\n\n def __init__(\n self, server_port, server_hostname=\"127.0.0.1\", server_log=None, build_dir=None\n ):\n if build_dir is None:\n build_dir = BUILD_PATH\n\n self.driver = self.get_driver()\n self.server_port = server_port\n self.server_hostname = server_hostname\n self.server_log = server_log\n\n if not (pathlib.Path(build_dir) / \"test.html\").exists():\n # selenium does not expose HTTP response codes\n raise ValueError(\n f\"{(build_dir / 'test.html').resolve()} \" f\"does not exist!\"\n )\n self.driver.get(f\"http://{server_hostname}:{server_port}/test.html\")\n self.run_js(\"Error.stackTraceLimit = Infinity\")\n self.run_js(\"await languagePluginLoader\")\n\n @property\n def logs(self):\n logs = self.driver.execute_script(\"return window.logs\")\n if logs is not None:\n return \"\\n\".join(str(x) for x in logs)\n else:\n return \"\"\n\n def clean_logs(self):\n self.driver.execute_script(\"window.logs = []\")\n\n def run(self, code):\n return self.run_js(\n f\"\"\"\n let result = pyodide.runPython({code!r});\n if(result && result.deepCopyToJavascript){{\n let converted_result = result.deepCopyToJavascript();\n result.destroy();\n return converted_result;\n }}\n return result;\n \"\"\"\n )\n\n def 
run_async(self, code):\n return self.run_js(\n f\"\"\"\n let result = await pyodide.runPythonAsync({code!r});\n if(result && result.deepCopyToJavascript){{\n let converted_result = result.deepCopyToJavascript();\n result.destroy();\n return converted_result;\n }}\n return result;\n \"\"\"\n )\n\n def run_js(self, code):\n if isinstance(code, str) and code.startswith(\"\\n\"):\n # we have a multiline string, fix indentation\n code = textwrap.dedent(code)\n\n wrapper = \"\"\"\n let cb = arguments[arguments.length - 1];\n let run = async () => { %s }\n (async () => {\n try {\n let result = await run();\n if(pyodide && pyodide._module && pyodide._module._PyErr_Occurred()){\n try {\n pyodide._module._pythonexc2js();\n } catch(e){\n console.error(`Python exited with error flag set! Error was:\\n{e.message}`);\n // Don't put original error message in new one: we want\n // \"pytest.raises(xxx, match=msg)\" to fail\n throw new Error(`Python exited with error flag set!`);\n }\n }\n cb([0, result]);\n } catch (e) {\n cb([1, e.toString(), e.stack]);\n }\n })()\n \"\"\"\n\n retval = self.driver.execute_async_script(wrapper % code)\n\n if retval[0] == 0:\n return retval[1]\n else:\n raise JavascriptException(retval[1], retval[2])\n\n def run_webworker(self, code):\n if isinstance(code, str) and code.startswith(\"\\n\"):\n # we have a multiline string, fix indentation\n code = textwrap.dedent(code)\n\n return self.run_js(\n \"\"\"\n let worker = new Worker( '{}' );\n worker.postMessage({{ python: {!r} }});\n return new Promise((res, rej) => {{\n worker.onerror = e => rej(e);\n worker.onmessage = e => {{\n if (e.data.results) {{\n res(e.data.results);\n }} else {{\n rej(e.data.error);\n }}\n }};\n }})\n \"\"\".format(\n f\"http://{self.server_hostname}:{self.server_port}/webworker_dev.js\",\n code,\n )\n )\n\n def load_package(self, packages):\n self.run_js(\"await pyodide.loadPackage({!r})\".format(packages))\n\n @property\n def urls(self):\n for handle in self.driver.window_handles:\n self.driver.switch_to.window(handle)\n yield self.driver.current_url\n\n\nclass FirefoxWrapper(SeleniumWrapper):\n\n browser = \"firefox\"\n\n def get_driver(self):\n from selenium.webdriver import Firefox\n from selenium.webdriver.firefox.options import Options\n\n options = Options()\n options.add_argument(\"-headless\")\n\n return Firefox(executable_path=\"geckodriver\", options=options)\n\n\nclass ChromeWrapper(SeleniumWrapper):\n\n browser = \"chrome\"\n\n def get_driver(self):\n from selenium.webdriver import Chrome\n from selenium.webdriver.chrome.options import Options\n\n options = Options()\n options.add_argument(\"--headless\")\n options.add_argument(\"--no-sandbox\")\n\n return Chrome(options=options)\n\n\nif pytest is not None:\n\n @pytest.fixture(params=[\"firefox\", \"chrome\"])\n def selenium_standalone(request, web_server_main):\n server_hostname, server_port, server_log = web_server_main\n if request.param == \"firefox\":\n cls = FirefoxWrapper\n elif request.param == \"chrome\":\n cls = ChromeWrapper\n selenium = cls(\n build_dir=request.config.option.build_dir,\n server_port=server_port,\n server_hostname=server_hostname,\n server_log=server_log,\n )\n try:\n yield selenium\n finally:\n print(selenium.logs)\n selenium.driver.quit()\n\n @pytest.fixture(params=[\"firefox\", \"chrome\"], scope=\"module\")\n def _selenium_cached(request, web_server_main):\n # Cached selenium instance. 
This is a copy-paste of\n # selenium_standalone to avoid fixture scope issues\n server_hostname, server_port, server_log = web_server_main\n if request.param == \"firefox\":\n cls = FirefoxWrapper\n elif request.param == \"chrome\":\n cls = ChromeWrapper\n selenium = cls(\n build_dir=request.config.option.build_dir,\n server_port=server_port,\n server_hostname=server_hostname,\n server_log=server_log,\n )\n try:\n yield selenium\n finally:\n selenium.driver.quit()\n\n @pytest.fixture\n def selenium(_selenium_cached):\n # selenium instance cached at the module level\n try:\n _selenium_cached.clean_logs()\n yield _selenium_cached\n finally:\n print(_selenium_cached.logs)\n\n\[email protected](scope=\"session\")\ndef web_server_main(request):\n \"\"\"Web server that serves files in the build/ directory\"\"\"\n with spawn_web_server(request.config.option.build_dir) as output:\n yield output\n\n\[email protected](scope=\"session\")\ndef web_server_secondary(request):\n \"\"\"Secondary web server that serves files build/ directory\"\"\"\n with spawn_web_server(request.config.option.build_dir) as output:\n yield output\n\n\[email protected](scope=\"session\")\ndef web_server_tst_data(request):\n \"\"\"Web server that serves files in the src/tests/data/ directory\"\"\"\n with spawn_web_server(TEST_PATH / \"data\") as output:\n yield output\n\n\[email protected]\ndef spawn_web_server(build_dir=None):\n\n if build_dir is None:\n build_dir = BUILD_PATH\n\n tmp_dir = tempfile.mkdtemp()\n log_path = pathlib.Path(tmp_dir) / \"http-server.log\"\n q = multiprocessing.Queue()\n p = multiprocessing.Process(target=run_web_server, args=(q, log_path, build_dir))\n\n try:\n p.start()\n port = q.get()\n hostname = \"127.0.0.1\"\n\n print(\n f\"Spawning webserver at http://{hostname}:{port} \"\n f\"(see logs in {log_path})\"\n )\n yield hostname, port, log_path\n finally:\n q.put(\"TERMINATE\")\n p.join()\n shutil.rmtree(tmp_dir)\n\n\ndef run_web_server(q, log_filepath, build_dir):\n \"\"\"Start the HTTP web server\n\n Parameters\n ----------\n q : Queue\n communication queue\n log_path : pathlib.Path\n path to the file where to store the logs\n \"\"\"\n import http.server\n import socketserver\n\n os.chdir(build_dir)\n\n log_fh = log_filepath.open(\"w\", buffering=1)\n sys.stdout = log_fh\n sys.stderr = log_fh\n\n test_prefix = \"/src/tests/\"\n\n class Handler(http.server.SimpleHTTPRequestHandler):\n def log_message(self, format_, *args):\n print(\n \"[%s] source: %s:%s - %s\"\n % (self.log_date_time_string(), *self.client_address, format_ % args)\n )\n\n def end_headers(self):\n # Enable Cross-Origin Resource Sharing (CORS)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n super().end_headers()\n\n with socketserver.TCPServer((\"\", 0), Handler) as httpd:\n host, port = httpd.server_address\n print(f\"Starting webserver at http://{host}:{port}\")\n httpd.server_name = \"test-server\"\n httpd.server_port = port\n q.put(port)\n\n def service_actions():\n try:\n if q.get(False) == \"TERMINATE\":\n print(\"Stopping server...\")\n sys.exit(0)\n except queue.Empty:\n pass\n\n httpd.service_actions = service_actions\n httpd.serve_forever()\n\n\nif (\n __name__ == \"__main__\"\n and multiprocessing.current_process().name == \"MainProcess\"\n and not hasattr(sys, \"_pytest_session\")\n):\n with spawn_web_server():\n # run forever\n while True:\n time.sleep(1)\n", "path": "conftest.py"}], "after_files": [{"content": "\"\"\"\nVarious common utilities for testing.\n\"\"\"\n\nimport contextlib\nimport 
multiprocessing\nimport textwrap\nimport tempfile\nimport time\nimport os\nimport pathlib\nimport queue\nimport sys\nimport shutil\n\nROOT_PATH = pathlib.Path(__file__).parents[0].resolve()\nTEST_PATH = ROOT_PATH / \"src\" / \"tests\"\nBUILD_PATH = ROOT_PATH / \"build\"\n\nsys.path.append(str(ROOT_PATH))\n\nfrom pyodide_build._fixes import _selenium_is_connectable # noqa: E402\nimport selenium.webdriver.common.utils # noqa: E402\n\n# XXX: Temporary fix for ConnectionError in selenium\n\nselenium.webdriver.common.utils.is_connectable = _selenium_is_connectable\n\ntry:\n import pytest\n\n def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--build-dir\",\n action=\"store\",\n default=BUILD_PATH,\n help=\"Path to the build directory\",\n )\n group.addoption(\n \"--run-xfail\",\n action=\"store_true\",\n help=\"If provided, tests marked as xfail will be run\",\n )\n\n\nexcept ImportError:\n pytest = None # type: ignore\n\n\nclass JavascriptException(Exception):\n def __init__(self, msg, stack):\n self.msg = msg\n self.stack = stack\n # In chrome the stack contains the message\n if self.stack and self.stack.startswith(self.msg):\n self.msg = \"\"\n\n def __str__(self):\n return \"\\n\\n\".join(x for x in [self.msg, self.stack] if x)\n\n\nclass SeleniumWrapper:\n JavascriptException = JavascriptException\n\n def __init__(\n self, server_port, server_hostname=\"127.0.0.1\", server_log=None, build_dir=None\n ):\n if build_dir is None:\n build_dir = BUILD_PATH\n\n self.driver = self.get_driver()\n self.server_port = server_port\n self.server_hostname = server_hostname\n self.server_log = server_log\n\n if not (pathlib.Path(build_dir) / \"test.html\").exists():\n # selenium does not expose HTTP response codes\n raise ValueError(\n f\"{(build_dir / 'test.html').resolve()} \" f\"does not exist!\"\n )\n self.driver.get(f\"http://{server_hostname}:{server_port}/test.html\")\n self.run_js(\"Error.stackTraceLimit = Infinity\")\n self.run_js(\"await languagePluginLoader\")\n\n @property\n def logs(self):\n logs = self.driver.execute_script(\"return window.logs\")\n if logs is not None:\n return \"\\n\".join(str(x) for x in logs)\n else:\n return \"\"\n\n def clean_logs(self):\n self.driver.execute_script(\"window.logs = []\")\n\n def run(self, code):\n return self.run_js(\n f\"\"\"\n let result = pyodide.runPython({code!r});\n if(result && result.toJs){{\n let converted_result = result.toJs();\n result.destroy();\n return converted_result;\n }}\n return result;\n \"\"\"\n )\n\n def run_async(self, code):\n return self.run_js(\n f\"\"\"\n let result = await pyodide.runPythonAsync({code!r});\n if(result && result.toJs){{\n let converted_result = result.toJs();\n result.destroy();\n return converted_result;\n }}\n return result;\n \"\"\"\n )\n\n def run_js(self, code):\n if isinstance(code, str) and code.startswith(\"\\n\"):\n # we have a multiline string, fix indentation\n code = textwrap.dedent(code)\n\n wrapper = \"\"\"\n let cb = arguments[arguments.length - 1];\n let run = async () => { %s }\n (async () => {\n try {\n let result = await run();\n if(pyodide && pyodide._module && pyodide._module._PyErr_Occurred()){\n try {\n pyodide._module._pythonexc2js();\n } catch(e){\n console.error(`Python exited with error flag set! 
Error was:\\n{e.message}`);\n // Don't put original error message in new one: we want\n // \"pytest.raises(xxx, match=msg)\" to fail\n throw new Error(`Python exited with error flag set!`);\n }\n }\n cb([0, result]);\n } catch (e) {\n cb([1, e.toString(), e.stack]);\n }\n })()\n \"\"\"\n\n retval = self.driver.execute_async_script(wrapper % code)\n\n if retval[0] == 0:\n return retval[1]\n else:\n raise JavascriptException(retval[1], retval[2])\n\n def run_webworker(self, code):\n if isinstance(code, str) and code.startswith(\"\\n\"):\n # we have a multiline string, fix indentation\n code = textwrap.dedent(code)\n\n return self.run_js(\n \"\"\"\n let worker = new Worker( '{}' );\n worker.postMessage({{ python: {!r} }});\n return new Promise((res, rej) => {{\n worker.onerror = e => rej(e);\n worker.onmessage = e => {{\n if (e.data.results) {{\n res(e.data.results);\n }} else {{\n rej(e.data.error);\n }}\n }};\n }})\n \"\"\".format(\n f\"http://{self.server_hostname}:{self.server_port}/webworker_dev.js\",\n code,\n )\n )\n\n def load_package(self, packages):\n self.run_js(\"await pyodide.loadPackage({!r})\".format(packages))\n\n @property\n def urls(self):\n for handle in self.driver.window_handles:\n self.driver.switch_to.window(handle)\n yield self.driver.current_url\n\n\nclass FirefoxWrapper(SeleniumWrapper):\n\n browser = \"firefox\"\n\n def get_driver(self):\n from selenium.webdriver import Firefox\n from selenium.webdriver.firefox.options import Options\n\n options = Options()\n options.add_argument(\"-headless\")\n\n return Firefox(executable_path=\"geckodriver\", options=options)\n\n\nclass ChromeWrapper(SeleniumWrapper):\n\n browser = \"chrome\"\n\n def get_driver(self):\n from selenium.webdriver import Chrome\n from selenium.webdriver.chrome.options import Options\n\n options = Options()\n options.add_argument(\"--headless\")\n options.add_argument(\"--no-sandbox\")\n\n return Chrome(options=options)\n\n\nif pytest is not None:\n\n @pytest.fixture(params=[\"firefox\", \"chrome\"])\n def selenium_standalone(request, web_server_main):\n server_hostname, server_port, server_log = web_server_main\n if request.param == \"firefox\":\n cls = FirefoxWrapper\n elif request.param == \"chrome\":\n cls = ChromeWrapper\n selenium = cls(\n build_dir=request.config.option.build_dir,\n server_port=server_port,\n server_hostname=server_hostname,\n server_log=server_log,\n )\n try:\n yield selenium\n finally:\n print(selenium.logs)\n selenium.driver.quit()\n\n @pytest.fixture(params=[\"firefox\", \"chrome\"], scope=\"module\")\n def _selenium_cached(request, web_server_main):\n # Cached selenium instance. 
This is a copy-paste of\n # selenium_standalone to avoid fixture scope issues\n server_hostname, server_port, server_log = web_server_main\n if request.param == \"firefox\":\n cls = FirefoxWrapper\n elif request.param == \"chrome\":\n cls = ChromeWrapper\n selenium = cls(\n build_dir=request.config.option.build_dir,\n server_port=server_port,\n server_hostname=server_hostname,\n server_log=server_log,\n )\n try:\n yield selenium\n finally:\n selenium.driver.quit()\n\n @pytest.fixture\n def selenium(_selenium_cached):\n # selenium instance cached at the module level\n try:\n _selenium_cached.clean_logs()\n yield _selenium_cached\n finally:\n print(_selenium_cached.logs)\n\n\[email protected](scope=\"session\")\ndef web_server_main(request):\n \"\"\"Web server that serves files in the build/ directory\"\"\"\n with spawn_web_server(request.config.option.build_dir) as output:\n yield output\n\n\[email protected](scope=\"session\")\ndef web_server_secondary(request):\n \"\"\"Secondary web server that serves files build/ directory\"\"\"\n with spawn_web_server(request.config.option.build_dir) as output:\n yield output\n\n\[email protected](scope=\"session\")\ndef web_server_tst_data(request):\n \"\"\"Web server that serves files in the src/tests/data/ directory\"\"\"\n with spawn_web_server(TEST_PATH / \"data\") as output:\n yield output\n\n\[email protected]\ndef spawn_web_server(build_dir=None):\n\n if build_dir is None:\n build_dir = BUILD_PATH\n\n tmp_dir = tempfile.mkdtemp()\n log_path = pathlib.Path(tmp_dir) / \"http-server.log\"\n q = multiprocessing.Queue()\n p = multiprocessing.Process(target=run_web_server, args=(q, log_path, build_dir))\n\n try:\n p.start()\n port = q.get()\n hostname = \"127.0.0.1\"\n\n print(\n f\"Spawning webserver at http://{hostname}:{port} \"\n f\"(see logs in {log_path})\"\n )\n yield hostname, port, log_path\n finally:\n q.put(\"TERMINATE\")\n p.join()\n shutil.rmtree(tmp_dir)\n\n\ndef run_web_server(q, log_filepath, build_dir):\n \"\"\"Start the HTTP web server\n\n Parameters\n ----------\n q : Queue\n communication queue\n log_path : pathlib.Path\n path to the file where to store the logs\n \"\"\"\n import http.server\n import socketserver\n\n os.chdir(build_dir)\n\n log_fh = log_filepath.open(\"w\", buffering=1)\n sys.stdout = log_fh\n sys.stderr = log_fh\n\n test_prefix = \"/src/tests/\"\n\n class Handler(http.server.SimpleHTTPRequestHandler):\n def log_message(self, format_, *args):\n print(\n \"[%s] source: %s:%s - %s\"\n % (self.log_date_time_string(), *self.client_address, format_ % args)\n )\n\n def end_headers(self):\n # Enable Cross-Origin Resource Sharing (CORS)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n super().end_headers()\n\n with socketserver.TCPServer((\"\", 0), Handler) as httpd:\n host, port = httpd.server_address\n print(f\"Starting webserver at http://{host}:{port}\")\n httpd.server_name = \"test-server\"\n httpd.server_port = port\n q.put(port)\n\n def service_actions():\n try:\n if q.get(False) == \"TERMINATE\":\n print(\"Stopping server...\")\n sys.exit(0)\n except queue.Empty:\n pass\n\n httpd.service_actions = service_actions\n httpd.serve_forever()\n\n\nif (\n __name__ == \"__main__\"\n and multiprocessing.current_process().name == \"MainProcess\"\n and not hasattr(sys, \"_pytest_session\")\n):\n with spawn_web_server():\n # run forever\n while True:\n time.sleep(1)\n", "path": "conftest.py"}]}
num_tokens: 3731
num_tokens_diff: 212
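
The before/after pair in the record above tracks a Pyodide API rename: `PyProxy.deepCopyToJavascript()` became `PyProxy.toJs()`. A minimal sketch of the updated conversion wrapper, mirroring `SeleniumWrapper.run` from the quoted conftest.py — the standalone `run_js` parameter is an assumption made here for self-containment; in the original it is a method on the wrapper class:

```python
# Sketch of the post-rename result-conversion wrapper (assumes run_js executes
# the given JavaScript in the loaded Pyodide page and returns its value).
def run(run_js, code):
    return run_js(
        f"""
        let result = pyodide.runPython({code!r});
        if (result && result.toJs) {{
            let converted = result.toJs();  // new API; replaces deepCopyToJavascript()
            result.destroy();               // release the PyProxy to avoid leaks
            return converted;
        }}
        return result;
        """
    )
```

The `destroy()` call matters in both the old and new API: without it, each converted result would leave a live proxy behind on the JavaScript side.
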
problem_id: gh_patches_debug_19727
source: rasdani/github-patches
task_type: git_diff
in_source_id: facebookresearch__hydra-1424
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade to OmegaConf 2.1 OmegaConf 2.1 is adding many important new features. For example: * Powerful interpolation grammar supporting nested interpolations * Relative interpolations * And many many bug fixes Release notes: [omegaconf==2.1.0.rc1](https://github.com/omry/omegaconf/releases/tag/v2.1.0.rc1). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 from dataclasses import dataclass 3 from typing import Optional 4 5 from hydra.core.config_store import ConfigStore 6 from omegaconf import II 7 8 9 @dataclass 10 class RedisConf: 11 # host address via REDIS_HOST environment variable, default: localhost 12 host: str = II("env:REDIS_HOST,localhost") 13 # port via REDIS_PORT environment variable, default: 6379 14 port: int = II("env:REDIS_PORT,6379") 15 # database via REDIS_DB environment variable, default: 0 16 db: Optional[str] = II("env:REDIS_DB,0") 17 # password via REDIS_PASSWORD environment variable, default: no password 18 password: str = II("env:REDIS_PASSWORD,") 19 # switch to run without redis server in single thread, for testing purposes only 20 mock: bool = II("env:REDIS_MOCK,False") 21 22 23 @dataclass 24 class EnqueueConf: 25 # maximum runtime of the job before it's killed (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit 26 job_timeout: Optional[str] = None 27 # maximum queued time before the job before is discarded (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit 28 ttl: Optional[str] = None 29 # how long successful jobs and their results are kept (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit 30 result_ttl: Optional[str] = None 31 # specifies how long failed jobs are kept (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit 32 failure_ttl: Optional[str] = None 33 # place job at the front of the queue, instead of the back 34 at_front: bool = False 35 # job id, will be overidden automatically by a uuid unless specified explicitly 36 job_id: Optional[str] = None 37 # description, will be overidden automatically unless specified explicitly 38 description: Optional[str] = None 39 40 41 @dataclass 42 class RQLauncherConf: 43 _target_: str = "hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher" 44 # enqueue configuration 45 enqueue: EnqueueConf = EnqueueConf() 46 # queue name 47 queue: str = "default" 48 # redis configuration 49 redis: RedisConf = RedisConf() 50 # stop after enqueueing by raising custom exception 51 stop_after_enqueue: bool = False 52 # wait time in seconds when polling results 53 wait_polling: float = 1.0 54 55 56 ConfigStore.instance().store( 57 group="hydra/launcher", name="rq", node=RQLauncherConf, provider="rq_launcher" 58 ) 59 ``` Path: `plugins/hydra_ax_sweeper/setup.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 # type: ignore 3 from setuptools import find_namespace_packages, setup 4 5 with open("README.md", "r") as fh: 6 LONG_DESC = fh.read() 7 setup( 8 name="hydra-ax-sweeper", 9 version="1.1.0rc1", 10 author="Omry Yadan, Shagun Sodhani", 11 author_email="[email protected], [email protected]", 12 description="Hydra Ax Sweeper plugin", 13 long_description=LONG_DESC, 14 long_description_content_type="text/markdown", 15 url="https://github.com/facebookresearch/hydra/", 16 packages=find_namespace_packages(include=["hydra_plugins.*"]), 17 classifiers=[ 18 "License :: OSI Approved :: MIT License", 19 "Programming Language :: Python :: 3.7", 20 "Programming Language :: Python :: 3.8", 21 "Programming Language :: Python :: 3.9", 22 "Operating System :: POSIX :: Linux", 23 "Operating System :: MacOS", 24 "Development Status :: 4 - Beta", 25 ], 26 install_requires=[ 27 "hydra-core>=1.0.0", 28 "ax-platform>=0.1.13", 29 "numpy<1.20.0", # remove once ax is upgraded to support numpy 1.20 30 ], 31 include_package_data=True, 32 ) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py --- a/plugins/hydra_ax_sweeper/setup.py +++ b/plugins/hydra_ax_sweeper/setup.py @@ -25,8 +25,7 @@ ], install_requires=[ "hydra-core>=1.0.0", - "ax-platform>=0.1.13", - "numpy<1.20.0", # remove once ax is upgraded to support numpy 1.20 + "ax-platform>=0.1.20", ], include_package_data=True, ) diff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py --- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py +++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py @@ -15,7 +15,7 @@ # database via REDIS_DB environment variable, default: 0 db: Optional[str] = II("env:REDIS_DB,0") # password via REDIS_PASSWORD environment variable, default: no password - password: str = II("env:REDIS_PASSWORD,") + password: str = II("env:REDIS_PASSWORD") # switch to run without redis server in single thread, for testing purposes only mock: bool = II("env:REDIS_MOCK,False")
{"golden_diff": "diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py\n--- a/plugins/hydra_ax_sweeper/setup.py\n+++ b/plugins/hydra_ax_sweeper/setup.py\n@@ -25,8 +25,7 @@\n ],\n install_requires=[\n \"hydra-core>=1.0.0\",\n- \"ax-platform>=0.1.13\",\n- \"numpy<1.20.0\", # remove once ax is upgraded to support numpy 1.20\n+ \"ax-platform>=0.1.20\",\n ],\n include_package_data=True,\n )\ndiff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n@@ -15,7 +15,7 @@\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"env:REDIS_DB,0\")\n # password via REDIS_PASSWORD environment variable, default: no password\n- password: str = II(\"env:REDIS_PASSWORD,\")\n+ password: str = II(\"env:REDIS_PASSWORD\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"env:REDIS_MOCK,False\")\n", "issue": "Upgrade to OmegaConf 2.1\nOmegaConf 2.1 is adding many important new features.\r\nFor example:\r\n* Powerful interpolation grammar supporting nested interpolations\r\n* Relative interpolations\r\n* And many many bug fixes\r\n\r\nRelease notes: [omegaconf==2.1.0.rc1](https://github.com/omry/omegaconf/releases/tag/v2.1.0.rc1).\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import II\n\n\n@dataclass\nclass RedisConf:\n # host address via REDIS_HOST environment variable, default: localhost\n host: str = II(\"env:REDIS_HOST,localhost\")\n # port via REDIS_PORT environment variable, default: 6379\n port: int = II(\"env:REDIS_PORT,6379\")\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"env:REDIS_DB,0\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: str = II(\"env:REDIS_PASSWORD,\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"env:REDIS_MOCK,False\")\n\n\n@dataclass\nclass EnqueueConf:\n # maximum runtime of the job before it's killed (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n job_timeout: Optional[str] = None\n # maximum queued time before the job before is discarded (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n ttl: Optional[str] = None\n # how long successful jobs and their results are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n result_ttl: Optional[str] = None\n # specifies how long failed jobs are kept (e.g. 
\"1d\" for 1 day, units: d/h/m/s), default: no limit\n failure_ttl: Optional[str] = None\n # place job at the front of the queue, instead of the back\n at_front: bool = False\n # job id, will be overidden automatically by a uuid unless specified explicitly\n job_id: Optional[str] = None\n # description, will be overidden automatically unless specified explicitly\n description: Optional[str] = None\n\n\n@dataclass\nclass RQLauncherConf:\n _target_: str = \"hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher\"\n # enqueue configuration\n enqueue: EnqueueConf = EnqueueConf()\n # queue name\n queue: str = \"default\"\n # redis configuration\n redis: RedisConf = RedisConf()\n # stop after enqueueing by raising custom exception\n stop_after_enqueue: bool = False\n # wait time in seconds when polling results\n wait_polling: float = 1.0\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\", name=\"rq\", node=RQLauncherConf, provider=\"rq_launcher\"\n)\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.0.0\",\n \"ax-platform>=0.1.13\",\n \"numpy<1.20.0\", # remove once ax is upgraded to support numpy 1.20\n ],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import II\n\n\n@dataclass\nclass RedisConf:\n # host address via REDIS_HOST environment variable, default: localhost\n host: str = II(\"env:REDIS_HOST,localhost\")\n # port via REDIS_PORT environment variable, default: 6379\n port: int = II(\"env:REDIS_PORT,6379\")\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"env:REDIS_DB,0\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: str = II(\"env:REDIS_PASSWORD\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"env:REDIS_MOCK,False\")\n\n\n@dataclass\nclass EnqueueConf:\n # maximum runtime of the job before it's killed (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n job_timeout: Optional[str] = None\n # maximum queued time before the job before is discarded (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n ttl: Optional[str] = None\n # how long successful jobs and their results are kept (e.g. 
\"1d\" for 1 day, units: d/h/m/s), default: no limit\n result_ttl: Optional[str] = None\n # specifies how long failed jobs are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n failure_ttl: Optional[str] = None\n # place job at the front of the queue, instead of the back\n at_front: bool = False\n # job id, will be overidden automatically by a uuid unless specified explicitly\n job_id: Optional[str] = None\n # description, will be overidden automatically unless specified explicitly\n description: Optional[str] = None\n\n\n@dataclass\nclass RQLauncherConf:\n _target_: str = \"hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher\"\n # enqueue configuration\n enqueue: EnqueueConf = EnqueueConf()\n # queue name\n queue: str = \"default\"\n # redis configuration\n redis: RedisConf = RedisConf()\n # stop after enqueueing by raising custom exception\n stop_after_enqueue: bool = False\n # wait time in seconds when polling results\n wait_polling: float = 1.0\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\", name=\"rq\", node=RQLauncherConf, provider=\"rq_launcher\"\n)\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.0.0\",\n \"ax-platform>=0.1.20\",\n ],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}]}
num_tokens: 1471
num_tokens_diff: 342
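
The hydra record above reduces to two small changes: dropping the numpy pin once ax-platform supports numpy 1.20, and removing the trailing comma from `II("env:REDIS_PASSWORD,")`. The intent appears to be that OmegaConf 2.1's new interpolation grammar treats `env:VAR` without a default as "no default", so the old empty-default comma is no longer needed. A minimal sketch of the interpolation pattern, assuming OmegaConf is installed (the `print` at the end is illustrative only):

```python
# Sketch of the env-interpolation pattern touched by the OmegaConf 2.1 upgrade.
# II("env:VAR,default") expands to the interpolation string "${env:VAR,default}".
from dataclasses import dataclass

from omegaconf import II, OmegaConf


@dataclass
class RedisConf:
    host: str = II("env:REDIS_HOST,localhost")  # falls back to "localhost"
    password: str = II("env:REDIS_PASSWORD")    # 2.1 style: no trailing comma


conf = OmegaConf.structured(RedisConf)
# to_yaml() does not resolve interpolations, so this works without the env vars set
print(OmegaConf.to_yaml(conf))
```
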
problem_id: gh_patches_debug_31077
source: rasdani/github-patches
task_type: git_diff
in_source_id: sopel-irc__sopel-1441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- imdb module not working anymore I just noticed that anytime you make a call to imdb now the bot responds: > [MOVIE] No API key provided. I know it used to work, not sure how recently. Maybe it can be switched to a different database that doesn't require an API key? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/movie.py` Content: ``` 1 # coding=utf-8 2 """ 3 imdb.py - Sopel Movie Information Module 4 Copyright © 2012-2013, Elad Alfassa, <[email protected]> 5 Licensed under the Eiffel Forum License 2. 6 7 This module relies on omdbapi.com 8 """ 9 from __future__ import unicode_literals, absolute_import, print_function, division 10 11 import requests 12 import sopel.module 13 from sopel.logger import get_logger 14 15 LOGGER = get_logger(__name__) 16 17 18 @sopel.module.commands('movie', 'imdb') 19 @sopel.module.example('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!') 20 @sopel.module.example('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467') 21 def movie(bot, trigger): 22 """ 23 Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link. 24 """ 25 if not trigger.group(2): 26 return 27 word = trigger.group(2).rstrip() 28 uri = "http://www.omdbapi.com/" 29 data = requests.get(uri, params={'t': word}, timeout=30, 30 verify=bot.config.core.verify_ssl).json() 31 if data['Response'] == 'False': 32 if 'Error' in data: 33 message = '[MOVIE] %s' % data['Error'] 34 else: 35 LOGGER.warning( 36 'Got an error from the OMDb api, search phrase was %s; data was %s', 37 word, str(data)) 38 message = '[MOVIE] Got an error from OMDbapi' 39 else: 40 message = '[MOVIE] Title: ' + data['Title'] + \ 41 ' | Year: ' + data['Year'] + \ 42 ' | Rating: ' + data['imdbRating'] + \ 43 ' | Genre: ' + data['Genre'] + \ 44 ' | IMDB Link: http://imdb.com/title/' + data['imdbID'] 45 bot.say(message) 46 47 48 if __name__ == "__main__": 49 from sopel.test_tools import run_example_tests 50 run_example_tests(__file__) 51 ``` Path: `conftest.py` Content: ``` 1 # This file lists files which should be ignored by pytest 2 collect_ignore = ["setup.py", "sopel.py", "sopel/modules/ipython.py", "sopel/modules/movie.py"] 3 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conftest.py b/conftest.py --- a/conftest.py +++ b/conftest.py @@ -1,2 +1,2 @@ # This file lists files which should be ignored by pytest -collect_ignore = ["setup.py", "sopel.py", "sopel/modules/ipython.py", "sopel/modules/movie.py"] +collect_ignore = ["setup.py", "sopel.py", "sopel/modules/ipython.py"] diff --git a/sopel/modules/movie.py b/sopel/modules/movie.py deleted file mode 100644 --- a/sopel/modules/movie.py +++ /dev/null @@ -1,50 +0,0 @@ -# coding=utf-8 -""" -imdb.py - Sopel Movie Information Module -Copyright © 2012-2013, Elad Alfassa, <[email protected]> -Licensed under the Eiffel Forum License 2. - -This module relies on omdbapi.com -""" -from __future__ import unicode_literals, absolute_import, print_function, division - -import requests -import sopel.module -from sopel.logger import get_logger - -LOGGER = get_logger(__name__) - - [email protected]('movie', 'imdb') [email protected]('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!') [email protected]('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467') -def movie(bot, trigger): - """ - Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link. - """ - if not trigger.group(2): - return - word = trigger.group(2).rstrip() - uri = "http://www.omdbapi.com/" - data = requests.get(uri, params={'t': word}, timeout=30, - verify=bot.config.core.verify_ssl).json() - if data['Response'] == 'False': - if 'Error' in data: - message = '[MOVIE] %s' % data['Error'] - else: - LOGGER.warning( - 'Got an error from the OMDb api, search phrase was %s; data was %s', - word, str(data)) - message = '[MOVIE] Got an error from OMDbapi' - else: - message = '[MOVIE] Title: ' + data['Title'] + \ - ' | Year: ' + data['Year'] + \ - ' | Rating: ' + data['imdbRating'] + \ - ' | Genre: ' + data['Genre'] + \ - ' | IMDB Link: http://imdb.com/title/' + data['imdbID'] - bot.say(message) - - -if __name__ == "__main__": - from sopel.test_tools import run_example_tests - run_example_tests(__file__)
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -1,2 +1,2 @@\n # This file lists files which should be ignored by pytest\n-collect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\", \"sopel/modules/movie.py\"]\n+collect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\"]\ndiff --git a/sopel/modules/movie.py b/sopel/modules/movie.py\ndeleted file mode 100644\n--- a/sopel/modules/movie.py\n+++ /dev/null\n@@ -1,50 +0,0 @@\n-# coding=utf-8\n-\"\"\"\n-imdb.py - Sopel Movie Information Module\n-Copyright \u00a9 2012-2013, Elad Alfassa, <[email protected]>\n-Licensed under the Eiffel Forum License 2.\n-\n-This module relies on omdbapi.com\n-\"\"\"\n-from __future__ import unicode_literals, absolute_import, print_function, division\n-\n-import requests\n-import sopel.module\n-from sopel.logger import get_logger\n-\n-LOGGER = get_logger(__name__)\n-\n-\[email protected]('movie', 'imdb')\[email protected]('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!')\[email protected]('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467')\n-def movie(bot, trigger):\n- \"\"\"\n- Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.\n- \"\"\"\n- if not trigger.group(2):\n- return\n- word = trigger.group(2).rstrip()\n- uri = \"http://www.omdbapi.com/\"\n- data = requests.get(uri, params={'t': word}, timeout=30,\n- verify=bot.config.core.verify_ssl).json()\n- if data['Response'] == 'False':\n- if 'Error' in data:\n- message = '[MOVIE] %s' % data['Error']\n- else:\n- LOGGER.warning(\n- 'Got an error from the OMDb api, search phrase was %s; data was %s',\n- word, str(data))\n- message = '[MOVIE] Got an error from OMDbapi'\n- else:\n- message = '[MOVIE] Title: ' + data['Title'] + \\\n- ' | Year: ' + data['Year'] + \\\n- ' | Rating: ' + data['imdbRating'] + \\\n- ' | Genre: ' + data['Genre'] + \\\n- ' | IMDB Link: http://imdb.com/title/' + data['imdbID']\n- bot.say(message)\n-\n-\n-if __name__ == \"__main__\":\n- from sopel.test_tools import run_example_tests\n- run_example_tests(__file__)\n", "issue": "imdb module not working anymore\nI just noticed that anytime you make a call to imdb now the bot responds: \r\n\r\n> [MOVIE] No API key provided.\r\n\r\nI know it used to work, not sure how recently. 
Maybe it can be switched to a different database that doesn't require an API key?\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nimdb.py - Sopel Movie Information Module\nCopyright \u00a9 2012-2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nThis module relies on omdbapi.com\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport requests\nimport sopel.module\nfrom sopel.logger import get_logger\n\nLOGGER = get_logger(__name__)\n\n\[email protected]('movie', 'imdb')\[email protected]('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!')\[email protected]('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467')\ndef movie(bot, trigger):\n \"\"\"\n Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.\n \"\"\"\n if not trigger.group(2):\n return\n word = trigger.group(2).rstrip()\n uri = \"http://www.omdbapi.com/\"\n data = requests.get(uri, params={'t': word}, timeout=30,\n verify=bot.config.core.verify_ssl).json()\n if data['Response'] == 'False':\n if 'Error' in data:\n message = '[MOVIE] %s' % data['Error']\n else:\n LOGGER.warning(\n 'Got an error from the OMDb api, search phrase was %s; data was %s',\n word, str(data))\n message = '[MOVIE] Got an error from OMDbapi'\n else:\n message = '[MOVIE] Title: ' + data['Title'] + \\\n ' | Year: ' + data['Year'] + \\\n ' | Rating: ' + data['imdbRating'] + \\\n ' | Genre: ' + data['Genre'] + \\\n ' | IMDB Link: http://imdb.com/title/' + data['imdbID']\n bot.say(message)\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/movie.py"}, {"content": "# This file lists files which should be ignored by pytest\ncollect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\", \"sopel/modules/movie.py\"]\n", "path": "conftest.py"}], "after_files": [{"content": null, "path": "sopel/modules/movie.py"}, {"content": "# This file lists files which should be ignored by pytest\ncollect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\"]\n", "path": "conftest.py"}]}
num_tokens: 975
num_tokens_diff: 695
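
The resolution in the sopel record is removal rather than repair: the whole movie module is deleted and its conftest ignore entry dropped. Had the module been kept instead, the "No API key provided" error points at OMDb's requirement for an API key in the query string; a hypothetical sketch of what the request would have needed — the key name `apikey` matches OMDb's documented parameter, but the key-handling around it is an assumption, not part of the original module:

```python
# Hypothetical OMDb query with an API key (the original module sent no key).
import requests


def query_omdb(title, api_key, verify_ssl=True):
    # Passing apikey is what the "No API key provided" response asks for.
    return requests.get(
        "http://www.omdbapi.com/",
        params={"t": title, "apikey": api_key},
        timeout=30,
        verify=verify_ssl,
    ).json()
```
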
problem_id: gh_patches_debug_19487
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-AI__torchmetrics-1011
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Compatibility with future PT 1.12 ## 🐛 Bug ``` > distance = x @ y.T E RuntimeError: "addmm_impl_cpu_" not implemented for 'Half' ``` ### To Reproduce https://github.com/PyTorchLightning/metrics/runs/6275393755?check_suite_focus=true #### Code sample ```py distance = x @ y.T ``` ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> ### Environment - OS (e.g., Linux): linux - Python & PyTorch Version (e.g., 1.0): py3.8 & pt1.12 ### Additional context context see https://github.com/pytorch/pytorch/pull/75647 slack: https://pytorch.slack.com/archives/C3PDTEV8E/p1651742487294399 tl;dr: We don't have correct and fast linalg algorithms for half in CPU as none of the backend libraries that we use support them so it's better not to support these via half-baked implementations, and simply make the users cast their inputs if they want to use these methods > but for GPU it shall be supported, correct? >> Yes note as well that this just applies to Half on CPU and for linalg functios (think matmul and conv). Pointwise functions on Half on CPU will still be available, and Half on CUDA will still have full support > it was implemented up till 1.11 but there was no real speed-up, correct? Not only it was slower, but it was not numerically stable, so it was pretty much a bug (hence the removal without deprecation) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchmetrics/functional/pairwise/cosine.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Optional 15 16 import torch 17 from torch import Tensor 18 from typing_extensions import Literal 19 20 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix 21 22 23 def _pairwise_cosine_similarity_update( 24 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None 25 ) -> Tensor: 26 """Calculates the pairwise cosine similarity matrix. 27 28 Args: 29 x: tensor of shape ``[N,d]`` 30 y: tensor of shape ``[M,d]`` 31 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero 32 """ 33 x, y, zero_diagonal = _check_input(x, y, zero_diagonal) 34 35 norm = torch.norm(x, p=2, dim=1) 36 x /= norm.unsqueeze(1) 37 norm = torch.norm(y, p=2, dim=1) 38 y /= norm.unsqueeze(1) 39 40 distance = x @ y.T 41 if zero_diagonal: 42 distance.fill_diagonal_(0) 43 return distance 44 45 46 def pairwise_cosine_similarity( 47 x: Tensor, 48 y: Optional[Tensor] = None, 49 reduction: Literal["mean", "sum", "none", None] = None, 50 zero_diagonal: Optional[bool] = None, 51 ) -> Tensor: 52 r"""Calculates pairwise cosine similarity: 53 54 .. 
math:: 55 s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||} 56 = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D x_i^2}} 57 58 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise 59 between the rows of :math:`x` and :math:`y`. 60 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`. 61 62 Args: 63 x: Tensor with shape ``[N, d]`` 64 y: Tensor with shape ``[M, d]``, optional 65 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'` 66 (applied along column dimension) or `'none'`, `None` for no reduction 67 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given 68 this defaults to ``True`` else if :math:`y` is also given it defaults to ``False`` 69 70 Returns: 71 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix 72 73 Example: 74 >>> import torch 75 >>> from torchmetrics.functional import pairwise_cosine_similarity 76 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32) 77 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32) 78 >>> pairwise_cosine_similarity(x, y) 79 tensor([[0.5547, 0.8682], 80 [0.5145, 0.8437], 81 [0.5300, 0.8533]]) 82 >>> pairwise_cosine_similarity(x) 83 tensor([[0.0000, 0.9989, 0.9996], 84 [0.9989, 0.0000, 0.9998], 85 [0.9996, 0.9998, 0.0000]]) 86 87 """ 88 distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal) 89 return _reduce_distance_matrix(distance, reduction) 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchmetrics/functional/pairwise/cosine.py b/torchmetrics/functional/pairwise/cosine.py --- a/torchmetrics/functional/pairwise/cosine.py +++ b/torchmetrics/functional/pairwise/cosine.py @@ -20,6 +20,16 @@ from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix +def _safe_matmul(x: Tensor, y: Tensor) -> Tensor: + """Safe calculation of matrix multiplication. + + If input is float16, will cast to float32 for computation and back again. + """ + if x.dtype == torch.float16 or y.dtype == torch.float16: + return (x.float() @ y.T.float()).half() + return x @ y.T + + def _pairwise_cosine_similarity_update( x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None ) -> Tensor: @@ -37,7 +47,7 @@ norm = torch.norm(y, p=2, dim=1) y /= norm.unsqueeze(1) - distance = x @ y.T + distance = _safe_matmul(x, y) if zero_diagonal: distance.fill_diagonal_(0) return distance
{"golden_diff": "diff --git a/torchmetrics/functional/pairwise/cosine.py b/torchmetrics/functional/pairwise/cosine.py\n--- a/torchmetrics/functional/pairwise/cosine.py\n+++ b/torchmetrics/functional/pairwise/cosine.py\n@@ -20,6 +20,16 @@\n from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n \n \n+def _safe_matmul(x: Tensor, y: Tensor) -> Tensor:\n+ \"\"\"Safe calculation of matrix multiplication.\n+\n+ If input is float16, will cast to float32 for computation and back again.\n+ \"\"\"\n+ if x.dtype == torch.float16 or y.dtype == torch.float16:\n+ return (x.float() @ y.T.float()).half()\n+ return x @ y.T\n+\n+\n def _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n ) -> Tensor:\n@@ -37,7 +47,7 @@\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n \n- distance = x @ y.T\n+ distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n", "issue": "Compatibility with future PT 1.12\n## \ud83d\udc1b Bug\r\n\r\n```\r\n> distance = x @ y.T\r\nE RuntimeError: \"addmm_impl_cpu_\" not implemented for 'Half'\r\n```\r\n\r\n### To Reproduce\r\n\r\nhttps://github.com/PyTorchLightning/metrics/runs/6275393755?check_suite_focus=true\r\n\r\n#### Code sample\r\n\r\n```py\r\ndistance = x @ y.T\r\n```\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- OS (e.g., Linux): linux\r\n- Python & PyTorch Version (e.g., 1.0): py3.8 & pt1.12\r\n\r\n### Additional context\r\n\r\ncontext see https://github.com/pytorch/pytorch/pull/75647\r\nslack: https://pytorch.slack.com/archives/C3PDTEV8E/p1651742487294399\r\n\r\ntl;dr: We don't have correct and fast linalg algorithms for half in CPU as none of the backend libraries that we use support them so it's better not to support these via half-baked implementations, and simply make the users cast their inputs if they want to use these methods\r\n\r\n> but for GPU it shall be supported, correct? >> Yes\r\n\r\nnote as well that this just applies to Half on CPU and for linalg functios (think matmul and conv). 
Pointwise functions on Half on CPU will still be available, and Half on CUDA will still have full support\r\n\r\n> it was implemented up till 1.11 but there was no real speed-up, correct?\r\n\r\nNot only it was slower, but it was not numerically stable, so it was pretty much a bug (hence the removal without deprecation)\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = x @ y.T\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "torchmetrics/functional/pairwise/cosine.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n\n\ndef _safe_matmul(x: Tensor, y: Tensor) -> Tensor:\n \"\"\"Safe calculation of matrix multiplication.\n\n If input is float16, will cast to float32 for computation and back again.\n \"\"\"\n if x.dtype == torch.float16 or y.dtype == torch.float16:\n return (x.float() @ y.T.float()).half()\n return x @ y.T\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. 
Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "torchmetrics/functional/pairwise/cosine.py"}]}
num_tokens: 1811
num_tokens_diff: 298
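
The essence of the torchmetrics patch above is a dtype-guarded matrix product: PyTorch 1.12 dropped the half-precision `addmm` kernels on CPU, so the fix casts to float32 for the multiplication and back to float16 afterwards. A standalone restatement of `_safe_matmul` from the diff (note that, as in the original, the transpose of `y` is baked into the helper):

```python
# Safe x @ y.T that survives PyTorch 1.12's removal of Half matmul on CPU.
import torch
from torch import Tensor


def safe_matmul(x: Tensor, y: Tensor) -> Tensor:
    if x.dtype == torch.float16 or y.dtype == torch.float16:
        # Compute in float32, then cast the result back to half.
        return (x.float() @ y.T.float()).half()
    return x @ y.T
```
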
problem_id: gh_patches_debug_38729
source: rasdani/github-patches
task_type: git_diff
in_source_id: kivy__kivy-1348
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Kivy 1.8.0 dev: touch.is_double_tap is set allways to true on Windows Hello, On MS Windows with Kivy 1.8.0 dev: If you press a button twice, even after a greater time gap between the touches, you always get touch.is_double_tap == True on the second tap. Does not appear with 1.7.1 on Android. Greetings Carsten --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kivy/input/postproc/doubletap.py` Content: ``` 1 ''' 2 Double Tap 3 ========== 4 5 Search touch for a double tap 6 ''' 7 8 __all__ = ('InputPostprocDoubleTap', ) 9 10 from kivy.config import Config 11 from kivy.vector import Vector 12 from kivy.clock import Clock 13 14 15 class InputPostprocDoubleTap(object): 16 ''' 17 InputPostProcDoubleTap is a post-processor to check if 18 a touch is a double tap or not. 19 Double tap can be configured in the Kivy config file:: 20 21 [postproc] 22 double_tap_time = 250 23 double_tap_distance = 20 24 25 Distance parameter is in 0-1000, and time is in millisecond. 26 ''' 27 28 def __init__(self): 29 dist = Config.getint('postproc', 'double_tap_distance') 30 self.double_tap_distance = dist / 1000.0 31 time = Config.getint('postproc', 'double_tap_time') 32 self.double_tap_time = time / 1000.0 33 self.touches = {} 34 35 def find_double_tap(self, ref): 36 '''Find a double tap touch within self.touches. 37 The touch must be not a previous double tap, and the distance 38 must be ok, also, the touch profile must be compared so the kind 39 of touch is the same 40 ''' 41 ref_button = None 42 if 'button' in ref.profile: 43 ref_button = ref.button 44 45 for touchid in self.touches: 46 if ref.uid == touchid: 47 continue 48 etype, touch = self.touches[touchid] 49 if etype != 'end': 50 continue 51 if touch.is_double_tap: 52 continue 53 distance = Vector.distance( 54 Vector(ref.sx, ref.sy), 55 Vector(touch.osx, touch.osy)) 56 if distance > self.double_tap_distance: 57 continue 58 if touch.is_mouse_scrolling or ref.is_mouse_scrolling: 59 continue 60 touch_button = None 61 if 'button' in touch.profile: 62 touch_button = touch.button 63 if touch_button != ref_button: 64 continue 65 touch.double_tap_distance = distance 66 return touch 67 return None 68 69 def process(self, events): 70 if self.double_tap_distance == 0 or self.double_tap_time == 0: 71 return events 72 # first, check if a touch down have a double tap 73 for etype, touch in events: 74 if not touch.is_touch: 75 continue 76 if etype == 'begin': 77 double_tap = self.find_double_tap(touch) 78 if double_tap: 79 touch.is_double_tap = True 80 time = touch.time_start - double_tap.time_start 81 touch.double_tap_time = time 82 distance = double_tap.double_tap_distance 83 touch.double_tap_distance = distance 84 85 # add the touch internaly 86 self.touches[touch.uid] = (etype, touch) 87 88 # second, check if up-touch is timeout for double tap 89 time_current = Clock.get_time() 90 to_delete = [] 91 for touchid in self.touches.keys(): 92 etype, touch = self.touches[touchid] 93 if etype != 'end': 94 continue 95 if time_current - touch.time_start < self.double_tap_time: 96 continue 97 to_delete.append(touchid) 98 99 for touchid in to_delete: 100 del self.touches[touchid] 101 102 return events 103 ``` Path: `kivy/input/postproc/tripletap.py` Content: ``` 1 ''' 2 Triple Tap 3 ========== 4 5 .. 
versionadded:: 1.7.0 6 7 Search touch for a triple tap 8 ''' 9 10 __all__ = ('InputPostprocTripleTap', ) 11 12 from kivy.config import Config 13 from kivy.vector import Vector 14 from kivy.clock import Clock 15 16 17 class InputPostprocTripleTap(object): 18 ''' 19 InputPostProcTripleTap is a post-processor to check if 20 a touch is a triple tap or not. 21 Triple tap can be configured in the Kivy config file:: 22 23 [postproc] 24 triple_tap_time = 250 25 triple_tap_distance = 20 26 27 Distance parameter is in 0-1000, and time is in millisecond. 28 ''' 29 30 def __init__(self): 31 dist = Config.getint('postproc', 'triple_tap_distance') 32 self.triple_tap_distance = dist / 1000.0 33 time = Config.getint('postproc', 'triple_tap_time') 34 self.triple_tap_time = time / 1000.0 35 self.touches = {} 36 37 def find_triple_tap(self, ref): 38 '''Find a triple tap touch within self.touches. 39 The touch must be not a previous triple tap, and the distance 40 must be ok, also, the touch profile must be compared so the kind 41 of touch is the same 42 ''' 43 ref_button = None 44 if 'button' in ref.profile: 45 ref_button = ref.button 46 47 for touchid in self.touches: 48 if ref.uid == touchid: 49 continue 50 etype, touch = self.touches[touchid] 51 if not touch.is_double_tap: 52 continue 53 if etype != 'end': 54 continue 55 if touch.is_triple_tap: 56 continue 57 distance = Vector.distance( 58 Vector(ref.sx, ref.sy), 59 Vector(touch.osx, touch.osy)) 60 if distance > self.triple_tap_distance: 61 continue 62 if touch.is_mouse_scrolling or ref.is_mouse_scrolling: 63 continue 64 touch_button = None 65 if 'button' in touch.profile: 66 touch_button = touch.button 67 if touch_button != ref_button: 68 continue 69 touch.triple_tap_distance = distance 70 return touch 71 return None 72 73 def process(self, events): 74 if self.triple_tap_distance == 0 or self.triple_tap_time == 0: 75 return events 76 # first, check if a touch down have a triple tap 77 for etype, touch in events: 78 if not touch.is_touch: 79 continue 80 if etype == 'begin': 81 triple_tap = self.find_triple_tap(touch) 82 if triple_tap: 83 touch.is_double_tap = False 84 touch.is_triple_tap = True 85 time = touch.time_start - triple_tap.time_start 86 touch.triple_tap_time = time 87 distance = triple_tap.triple_tap_distance 88 touch.triple_tap_distance = distance 89 90 # add the touch internaly 91 self.touches[touch.uid] = (etype, touch) 92 93 # second, check if up-touch is timeout for triple tap 94 time_current = Clock.get_time() 95 to_delete = [] 96 for touchid in self.touches.keys(): 97 etype, touch = self.touches[touchid] 98 if etype != 'end': 99 continue 100 if time_current - touch.time_start < self.triple_tap_time: 101 continue 102 to_delete.append(touchid) 103 104 for touchid in to_delete: 105 del self.touches[touchid] 106 107 return events 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kivy/input/postproc/doubletap.py b/kivy/input/postproc/doubletap.py --- a/kivy/input/postproc/doubletap.py +++ b/kivy/input/postproc/doubletap.py @@ -7,6 +7,7 @@ __all__ = ('InputPostprocDoubleTap', ) +from time import time from kivy.config import Config from kivy.vector import Vector from kivy.clock import Clock @@ -77,8 +78,8 @@ double_tap = self.find_double_tap(touch) if double_tap: touch.is_double_tap = True - time = touch.time_start - double_tap.time_start - touch.double_tap_time = time + tap_time = touch.time_start - double_tap.time_start + touch.double_tap_time = tap_time distance = double_tap.double_tap_distance touch.double_tap_distance = distance @@ -86,7 +87,7 @@ self.touches[touch.uid] = (etype, touch) # second, check if up-touch is timeout for double tap - time_current = Clock.get_time() + time_current = time() to_delete = [] for touchid in self.touches.keys(): etype, touch = self.touches[touchid] diff --git a/kivy/input/postproc/tripletap.py b/kivy/input/postproc/tripletap.py --- a/kivy/input/postproc/tripletap.py +++ b/kivy/input/postproc/tripletap.py @@ -9,6 +9,7 @@ __all__ = ('InputPostprocTripleTap', ) +from time import time from kivy.config import Config from kivy.vector import Vector from kivy.clock import Clock @@ -82,8 +83,8 @@ if triple_tap: touch.is_double_tap = False touch.is_triple_tap = True - time = touch.time_start - triple_tap.time_start - touch.triple_tap_time = time + tap_time = touch.time_start - triple_tap.time_start + touch.triple_tap_time = tap_time distance = triple_tap.triple_tap_distance touch.triple_tap_distance = distance @@ -91,7 +92,7 @@ self.touches[touch.uid] = (etype, touch) # second, check if up-touch is timeout for triple tap - time_current = Clock.get_time() + time_current = time() to_delete = [] for touchid in self.touches.keys(): etype, touch = self.touches[touchid]
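A note on why the one-line swap above works: ended touches are purged by comparing `time_current` against `touch.time_start`, but on Windows `Clock.get_time()` and `time_start` appear to come from different timebases (the former app-relative, the latter epoch-based wall-clock from `time.time()`), so the difference is hugely negative and the purge never fires. A minimal sketch of the failure mode under that assumption; `clock_get_time` is a hypothetical stand-in for `Clock.get_time()`:

```python
# Sketch of the timebase mismatch behind the fix above. Assumption:
# the app clock counts seconds since startup, while touches are
# stamped with epoch seconds from time.time(). Names are hypothetical.
from time import time

_APP_START = time()

def clock_get_time():
    # stand-in for kivy.clock.Clock.get_time(): app-relative seconds
    return time() - _APP_START

double_tap_time = 0.25      # 250 ms, the [postproc] default
touch_time_start = time()   # epoch seconds, a very large number

# Original purge test: app-relative minus epoch is hugely negative,
# hence always < 0.25, so ended touches are never deleted and every
# later tap finds a stale 'end' touch -> is_double_tap is always True.
assert clock_get_time() - touch_time_start < double_tap_time

# Fixed test compares epoch with epoch; True only right after the tap:
print(time() - touch_time_start < double_tap_time)
```

The same reasoning applies verbatim to the triple-tap post-processor, which is why the diff patches both files identically.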
{"golden_diff": "diff --git a/kivy/input/postproc/doubletap.py b/kivy/input/postproc/doubletap.py\n--- a/kivy/input/postproc/doubletap.py\n+++ b/kivy/input/postproc/doubletap.py\n@@ -7,6 +7,7 @@\n \n __all__ = ('InputPostprocDoubleTap', )\n \n+from time import time\n from kivy.config import Config\n from kivy.vector import Vector\n from kivy.clock import Clock\n@@ -77,8 +78,8 @@\n double_tap = self.find_double_tap(touch)\n if double_tap:\n touch.is_double_tap = True\n- time = touch.time_start - double_tap.time_start\n- touch.double_tap_time = time\n+ tap_time = touch.time_start - double_tap.time_start\n+ touch.double_tap_time = tap_time\n distance = double_tap.double_tap_distance\n touch.double_tap_distance = distance\n \n@@ -86,7 +87,7 @@\n self.touches[touch.uid] = (etype, touch)\n \n # second, check if up-touch is timeout for double tap\n- time_current = Clock.get_time()\n+ time_current = time()\n to_delete = []\n for touchid in self.touches.keys():\n etype, touch = self.touches[touchid]\ndiff --git a/kivy/input/postproc/tripletap.py b/kivy/input/postproc/tripletap.py\n--- a/kivy/input/postproc/tripletap.py\n+++ b/kivy/input/postproc/tripletap.py\n@@ -9,6 +9,7 @@\n \n __all__ = ('InputPostprocTripleTap', )\n \n+from time import time\n from kivy.config import Config\n from kivy.vector import Vector\n from kivy.clock import Clock\n@@ -82,8 +83,8 @@\n if triple_tap:\n touch.is_double_tap = False\n touch.is_triple_tap = True\n- time = touch.time_start - triple_tap.time_start\n- touch.triple_tap_time = time\n+ tap_time = touch.time_start - triple_tap.time_start\n+ touch.triple_tap_time = tap_time\n distance = triple_tap.triple_tap_distance\n touch.triple_tap_distance = distance\n \n@@ -91,7 +92,7 @@\n self.touches[touch.uid] = (etype, touch)\n \n # second, check if up-touch is timeout for triple tap\n- time_current = Clock.get_time()\n+ time_current = time()\n to_delete = []\n for touchid in self.touches.keys():\n etype, touch = self.touches[touchid]\n", "issue": "Kivy 1.8.0 dev: touch.is_double_tap is set allways to true on Windows\nHello,\nOn MS Windows with Kivy 1.8.0 dev: If you press a button twice, even after a greater time gap between the touches, you always get touch.is_double_tap == True on the second tap. 
\n\nDoes not appear with 1.7.1 on Android.\n\nGreetings\n\nCarsten\n\n", "before_files": [{"content": "'''\nDouble Tap\n==========\n\nSearch touch for a double tap\n'''\n\n__all__ = ('InputPostprocDoubleTap', )\n\nfrom kivy.config import Config\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock\n\n\nclass InputPostprocDoubleTap(object):\n '''\n InputPostProcDoubleTap is a post-processor to check if\n a touch is a double tap or not.\n Double tap can be configured in the Kivy config file::\n\n [postproc]\n double_tap_time = 250\n double_tap_distance = 20\n\n Distance parameter is in 0-1000, and time is in millisecond.\n '''\n\n def __init__(self):\n dist = Config.getint('postproc', 'double_tap_distance')\n self.double_tap_distance = dist / 1000.0\n time = Config.getint('postproc', 'double_tap_time')\n self.double_tap_time = time / 1000.0\n self.touches = {}\n\n def find_double_tap(self, ref):\n '''Find a double tap touch within self.touches.\n The touch must be not a previous double tap, and the distance\n must be ok, also, the touch profile must be compared so the kind\n of touch is the same\n '''\n ref_button = None\n if 'button' in ref.profile:\n ref_button = ref.button\n\n for touchid in self.touches:\n if ref.uid == touchid:\n continue\n etype, touch = self.touches[touchid]\n if etype != 'end':\n continue\n if touch.is_double_tap:\n continue\n distance = Vector.distance(\n Vector(ref.sx, ref.sy),\n Vector(touch.osx, touch.osy))\n if distance > self.double_tap_distance:\n continue\n if touch.is_mouse_scrolling or ref.is_mouse_scrolling:\n continue\n touch_button = None\n if 'button' in touch.profile:\n touch_button = touch.button\n if touch_button != ref_button:\n continue\n touch.double_tap_distance = distance\n return touch\n return None\n\n def process(self, events):\n if self.double_tap_distance == 0 or self.double_tap_time == 0:\n return events\n # first, check if a touch down have a double tap\n for etype, touch in events:\n if not touch.is_touch:\n continue\n if etype == 'begin':\n double_tap = self.find_double_tap(touch)\n if double_tap:\n touch.is_double_tap = True\n time = touch.time_start - double_tap.time_start\n touch.double_tap_time = time\n distance = double_tap.double_tap_distance\n touch.double_tap_distance = distance\n\n # add the touch internaly\n self.touches[touch.uid] = (etype, touch)\n\n # second, check if up-touch is timeout for double tap\n time_current = Clock.get_time()\n to_delete = []\n for touchid in self.touches.keys():\n etype, touch = self.touches[touchid]\n if etype != 'end':\n continue\n if time_current - touch.time_start < self.double_tap_time:\n continue\n to_delete.append(touchid)\n\n for touchid in to_delete:\n del self.touches[touchid]\n\n return events\n", "path": "kivy/input/postproc/doubletap.py"}, {"content": "'''\nTriple Tap\n==========\n\n.. 
versionadded:: 1.7.0\n\nSearch touch for a triple tap\n'''\n\n__all__ = ('InputPostprocTripleTap', )\n\nfrom kivy.config import Config\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock\n\n\nclass InputPostprocTripleTap(object):\n '''\n InputPostProcTripleTap is a post-processor to check if\n a touch is a triple tap or not.\n Triple tap can be configured in the Kivy config file::\n\n [postproc]\n triple_tap_time = 250\n triple_tap_distance = 20\n\n Distance parameter is in 0-1000, and time is in millisecond.\n '''\n\n def __init__(self):\n dist = Config.getint('postproc', 'triple_tap_distance')\n self.triple_tap_distance = dist / 1000.0\n time = Config.getint('postproc', 'triple_tap_time')\n self.triple_tap_time = time / 1000.0\n self.touches = {}\n\n def find_triple_tap(self, ref):\n '''Find a triple tap touch within self.touches.\n The touch must be not a previous triple tap, and the distance\n must be ok, also, the touch profile must be compared so the kind\n of touch is the same\n '''\n ref_button = None\n if 'button' in ref.profile:\n ref_button = ref.button\n\n for touchid in self.touches:\n if ref.uid == touchid:\n continue\n etype, touch = self.touches[touchid]\n if not touch.is_double_tap:\n continue\n if etype != 'end':\n continue\n if touch.is_triple_tap:\n continue\n distance = Vector.distance(\n Vector(ref.sx, ref.sy),\n Vector(touch.osx, touch.osy))\n if distance > self.triple_tap_distance:\n continue\n if touch.is_mouse_scrolling or ref.is_mouse_scrolling:\n continue\n touch_button = None\n if 'button' in touch.profile:\n touch_button = touch.button\n if touch_button != ref_button:\n continue\n touch.triple_tap_distance = distance\n return touch\n return None\n\n def process(self, events):\n if self.triple_tap_distance == 0 or self.triple_tap_time == 0:\n return events\n # first, check if a touch down have a triple tap\n for etype, touch in events:\n if not touch.is_touch:\n continue\n if etype == 'begin':\n triple_tap = self.find_triple_tap(touch)\n if triple_tap:\n touch.is_double_tap = False\n touch.is_triple_tap = True\n time = touch.time_start - triple_tap.time_start\n touch.triple_tap_time = time\n distance = triple_tap.triple_tap_distance\n touch.triple_tap_distance = distance\n\n # add the touch internaly\n self.touches[touch.uid] = (etype, touch)\n\n # second, check if up-touch is timeout for triple tap\n time_current = Clock.get_time()\n to_delete = []\n for touchid in self.touches.keys():\n etype, touch = self.touches[touchid]\n if etype != 'end':\n continue\n if time_current - touch.time_start < self.triple_tap_time:\n continue\n to_delete.append(touchid)\n\n for touchid in to_delete:\n del self.touches[touchid]\n\n return events\n", "path": "kivy/input/postproc/tripletap.py"}], "after_files": [{"content": "'''\nDouble Tap\n==========\n\nSearch touch for a double tap\n'''\n\n__all__ = ('InputPostprocDoubleTap', )\n\nfrom time import time\nfrom kivy.config import Config\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock\n\n\nclass InputPostprocDoubleTap(object):\n '''\n InputPostProcDoubleTap is a post-processor to check if\n a touch is a double tap or not.\n Double tap can be configured in the Kivy config file::\n\n [postproc]\n double_tap_time = 250\n double_tap_distance = 20\n\n Distance parameter is in 0-1000, and time is in millisecond.\n '''\n\n def __init__(self):\n dist = Config.getint('postproc', 'double_tap_distance')\n self.double_tap_distance = dist / 1000.0\n time = Config.getint('postproc', 'double_tap_time')\n 
self.double_tap_time = time / 1000.0\n self.touches = {}\n\n def find_double_tap(self, ref):\n '''Find a double tap touch within self.touches.\n The touch must be not a previous double tap, and the distance\n must be ok, also, the touch profile must be compared so the kind\n of touch is the same\n '''\n ref_button = None\n if 'button' in ref.profile:\n ref_button = ref.button\n\n for touchid in self.touches:\n if ref.uid == touchid:\n continue\n etype, touch = self.touches[touchid]\n if etype != 'end':\n continue\n if touch.is_double_tap:\n continue\n distance = Vector.distance(\n Vector(ref.sx, ref.sy),\n Vector(touch.osx, touch.osy))\n if distance > self.double_tap_distance:\n continue\n if touch.is_mouse_scrolling or ref.is_mouse_scrolling:\n continue\n touch_button = None\n if 'button' in touch.profile:\n touch_button = touch.button\n if touch_button != ref_button:\n continue\n touch.double_tap_distance = distance\n return touch\n return None\n\n def process(self, events):\n if self.double_tap_distance == 0 or self.double_tap_time == 0:\n return events\n # first, check if a touch down have a double tap\n for etype, touch in events:\n if not touch.is_touch:\n continue\n if etype == 'begin':\n double_tap = self.find_double_tap(touch)\n if double_tap:\n touch.is_double_tap = True\n tap_time = touch.time_start - double_tap.time_start\n touch.double_tap_time = tap_time\n distance = double_tap.double_tap_distance\n touch.double_tap_distance = distance\n\n # add the touch internaly\n self.touches[touch.uid] = (etype, touch)\n\n # second, check if up-touch is timeout for double tap\n time_current = time()\n to_delete = []\n for touchid in self.touches.keys():\n etype, touch = self.touches[touchid]\n if etype != 'end':\n continue\n if time_current - touch.time_start < self.double_tap_time:\n continue\n to_delete.append(touchid)\n\n for touchid in to_delete:\n del self.touches[touchid]\n\n return events\n", "path": "kivy/input/postproc/doubletap.py"}, {"content": "'''\nTriple Tap\n==========\n\n.. 
versionadded:: 1.7.0\n\nSearch touch for a triple tap\n'''\n\n__all__ = ('InputPostprocTripleTap', )\n\nfrom time import time\nfrom kivy.config import Config\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock\n\n\nclass InputPostprocTripleTap(object):\n '''\n InputPostProcTripleTap is a post-processor to check if\n a touch is a triple tap or not.\n Triple tap can be configured in the Kivy config file::\n\n [postproc]\n triple_tap_time = 250\n triple_tap_distance = 20\n\n Distance parameter is in 0-1000, and time is in millisecond.\n '''\n\n def __init__(self):\n dist = Config.getint('postproc', 'triple_tap_distance')\n self.triple_tap_distance = dist / 1000.0\n time = Config.getint('postproc', 'triple_tap_time')\n self.triple_tap_time = time / 1000.0\n self.touches = {}\n\n def find_triple_tap(self, ref):\n '''Find a triple tap touch within self.touches.\n The touch must be not a previous triple tap, and the distance\n must be ok, also, the touch profile must be compared so the kind\n of touch is the same\n '''\n ref_button = None\n if 'button' in ref.profile:\n ref_button = ref.button\n\n for touchid in self.touches:\n if ref.uid == touchid:\n continue\n etype, touch = self.touches[touchid]\n if not touch.is_double_tap:\n continue\n if etype != 'end':\n continue\n if touch.is_triple_tap:\n continue\n distance = Vector.distance(\n Vector(ref.sx, ref.sy),\n Vector(touch.osx, touch.osy))\n if distance > self.triple_tap_distance:\n continue\n if touch.is_mouse_scrolling or ref.is_mouse_scrolling:\n continue\n touch_button = None\n if 'button' in touch.profile:\n touch_button = touch.button\n if touch_button != ref_button:\n continue\n touch.triple_tap_distance = distance\n return touch\n return None\n\n def process(self, events):\n if self.triple_tap_distance == 0 or self.triple_tap_time == 0:\n return events\n # first, check if a touch down have a triple tap\n for etype, touch in events:\n if not touch.is_touch:\n continue\n if etype == 'begin':\n triple_tap = self.find_triple_tap(touch)\n if triple_tap:\n touch.is_double_tap = False\n touch.is_triple_tap = True\n tap_time = touch.time_start - triple_tap.time_start\n touch.triple_tap_time = tap_time\n distance = triple_tap.triple_tap_distance\n touch.triple_tap_distance = distance\n\n # add the touch internaly\n self.touches[touch.uid] = (etype, touch)\n\n # second, check if up-touch is timeout for triple tap\n time_current = time()\n to_delete = []\n for touchid in self.touches.keys():\n etype, touch = self.touches[touchid]\n if etype != 'end':\n continue\n if time_current - touch.time_start < self.triple_tap_time:\n continue\n to_delete.append(touchid)\n\n for touchid in to_delete:\n del self.touches[touchid]\n\n return events\n", "path": "kivy/input/postproc/tripletap.py"}]}
num_tokens: 2,329
num_tokens_diff: 586

problem_id: gh_patches_debug_37251
source: rasdani/github-patches
task_type: git_diff
in_source_id: databricks__koalas-189
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Document all the methods in Metadata There are a bunch of methods like index_info, index_fields. It's pretty difficult to figure out what they do. We should just add some basic docstring comments. @ueshin you are probably the best person to take this since you created the file. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `databricks/koalas/metadata.py` Content: ``` 1 # 2 # Copyright (C) 2019 Databricks, Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 17 """ 18 A metadata to manage indexes. 19 """ 20 import pandas as pd 21 22 from databricks.koalas.dask.compatibility import string_types 23 24 25 class Metadata(object): 26 """ 27 Manages column names and index information 28 """ 29 30 def __init__(self, column_fields, index_info=None): 31 """ Create a new metadata to manage column fields and index fields and names. 32 33 :param column_fields: list of string 34 Field names to appear as columns. 35 :param index_info: list of string pair 36 Each pair holds the index field name which exists in Spark fields, 37 and the index name. 
38 """ 39 assert all(isinstance(col, string_types) for col in column_fields) 40 assert index_info is None \ 41 or all(isinstance(index_field, string_types) 42 and (index_name is None or isinstance(index_name, string_types)) 43 for index_field, index_name in index_info) 44 self._column_fields = column_fields 45 self._index_info = index_info or [] 46 47 @property 48 def column_fields(self): 49 return self._column_fields 50 51 @property 52 def index_info(self): 53 return self._index_info 54 55 @property 56 def index_fields(self): 57 return [index_field for index_field, _ in self._index_info] 58 59 @property 60 def index_names(self): 61 return [name for _, name in self._index_info] 62 63 @property 64 def all_fields(self): 65 index_fields = self.index_fields 66 return index_fields + [field for field in self._column_fields 67 if field not in index_fields] 68 69 def copy(self, column_fields=None, index_info=None): 70 if column_fields is None: 71 column_fields = self._column_fields 72 if index_info is None: 73 index_info = self._index_info 74 return Metadata(column_fields=column_fields.copy(), index_info=index_info.copy()) 75 76 @staticmethod 77 def from_pandas(pdf): 78 column_fields = [str(col) for col in pdf.columns] 79 index = pdf.index 80 if isinstance(index, pd.MultiIndex): 81 if index.names is None: 82 index_info = [('__index_level_{}__'.format(i), None) 83 for i in range(len(index.levels))] 84 else: 85 index_info = [('__index_level_{}__'.format(i) if name is None else name, name) 86 for i, name in enumerate(index.names)] 87 else: 88 index_info = [(index.name 89 if index.name is not None else '__index_level_0__', index.name)] 90 91 return Metadata(column_fields=column_fields, index_info=index_info) 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/databricks/koalas/metadata.py b/databricks/koalas/metadata.py --- a/databricks/koalas/metadata.py +++ b/databricks/koalas/metadata.py @@ -24,7 +24,11 @@ class Metadata(object): """ - Manages column names and index information + Manages column names and index information. + + :ivar _column_fields: list of the Spark field names to be seen as columns in Koalas DataFrame. + :ivar _index_info: list of pair holding the Spark field names for indexes, + and the index name to be seen in Koalas DataFrame. """ def __init__(self, column_fields, index_info=None): @@ -46,27 +50,38 @@ @property def column_fields(self): + """ Returns the managed column field names. """ return self._column_fields @property def index_info(self): + """ Return the managed index information. """ return self._index_info @property def index_fields(self): + """ Returns the managed index field names. """ return [index_field for index_field, _ in self._index_info] @property def index_names(self): + """ Return the managed index names. """ return [name for _, name in self._index_info] @property def all_fields(self): + """ Return all the field names including index field names. """ index_fields = self.index_fields return index_fields + [field for field in self._column_fields if field not in index_fields] def copy(self, column_fields=None, index_info=None): + """ Copy the metadata. + + :param column_fields: the new column field names. If None, then the original ones are used. + :param index_info: the new index information. If None, then the original one is used. + :return: the copied metadata. + """ if column_fields is None: column_fields = self._column_fields if index_info is None: @@ -75,6 +90,11 @@ @staticmethod def from_pandas(pdf): + """ Create a metadata from pandas DataFrame. + + :param pdf: :class:`pd.DataFrame` + :return: the created metadata + """ column_fields = [str(col) for col in pdf.columns] index = pdf.index if isinstance(index, pd.MultiIndex):
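Since the patch is documentation-only, behavior is unchanged; as a quick illustration of what the newly documented properties return, here is a sketch that assumes pandas and koalas are importable, with the commented outputs derived by reading the `from_pandas` logic above rather than claimed from a run:

```python
# Illustrative walk-through of the documented Metadata API. Outputs
# follow from the from_pandas logic shown above.
import pandas as pd

from databricks.koalas.metadata import Metadata

pdf = pd.DataFrame({"a": [1, 2], "b": [3, 4]})  # default unnamed RangeIndex

meta = Metadata.from_pandas(pdf)
print(meta.column_fields)  # ['a', 'b']
print(meta.index_info)     # [('__index_level_0__', None)]
print(meta.index_fields)   # ['__index_level_0__']
print(meta.index_names)    # [None]
print(meta.all_fields)     # ['__index_level_0__', 'a', 'b']

# copy() keeps whichever pieces are not overridden
narrowed = meta.copy(column_fields=["a"])
print(narrowed.column_fields)  # ['a']
print(narrowed.index_info)     # [('__index_level_0__', None)]
```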
{"golden_diff": "diff --git a/databricks/koalas/metadata.py b/databricks/koalas/metadata.py\n--- a/databricks/koalas/metadata.py\n+++ b/databricks/koalas/metadata.py\n@@ -24,7 +24,11 @@\n \n class Metadata(object):\n \"\"\"\n- Manages column names and index information\n+ Manages column names and index information.\n+\n+ :ivar _column_fields: list of the Spark field names to be seen as columns in Koalas DataFrame.\n+ :ivar _index_info: list of pair holding the Spark field names for indexes,\n+ and the index name to be seen in Koalas DataFrame.\n \"\"\"\n \n def __init__(self, column_fields, index_info=None):\n@@ -46,27 +50,38 @@\n \n @property\n def column_fields(self):\n+ \"\"\" Returns the managed column field names. \"\"\"\n return self._column_fields\n \n @property\n def index_info(self):\n+ \"\"\" Return the managed index information. \"\"\"\n return self._index_info\n \n @property\n def index_fields(self):\n+ \"\"\" Returns the managed index field names. \"\"\"\n return [index_field for index_field, _ in self._index_info]\n \n @property\n def index_names(self):\n+ \"\"\" Return the managed index names. \"\"\"\n return [name for _, name in self._index_info]\n \n @property\n def all_fields(self):\n+ \"\"\" Return all the field names including index field names. \"\"\"\n index_fields = self.index_fields\n return index_fields + [field for field in self._column_fields\n if field not in index_fields]\n \n def copy(self, column_fields=None, index_info=None):\n+ \"\"\" Copy the metadata.\n+\n+ :param column_fields: the new column field names. If None, then the original ones are used.\n+ :param index_info: the new index information. If None, then the original one is used.\n+ :return: the copied metadata.\n+ \"\"\"\n if column_fields is None:\n column_fields = self._column_fields\n if index_info is None:\n@@ -75,6 +90,11 @@\n \n @staticmethod\n def from_pandas(pdf):\n+ \"\"\" Create a metadata from pandas DataFrame.\n+\n+ :param pdf: :class:`pd.DataFrame`\n+ :return: the created metadata\n+ \"\"\"\n column_fields = [str(col) for col in pdf.columns]\n index = pdf.index\n if isinstance(index, pd.MultiIndex):\n", "issue": "Document all the methods in Metadata\nThere are a bunch of methods like index_info, index_fields. It's pretty difficult to figure out what they do. 
We should just add some basic docstring comments.\r\n\r\n@ueshin you are probably the best person to take this since you created the file.\r\n\n", "before_files": [{"content": "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA metadata to manage indexes.\n\"\"\"\nimport pandas as pd\n\nfrom databricks.koalas.dask.compatibility import string_types\n\n\nclass Metadata(object):\n \"\"\"\n Manages column names and index information\n \"\"\"\n\n def __init__(self, column_fields, index_info=None):\n \"\"\" Create a new metadata to manage column fields and index fields and names.\n\n :param column_fields: list of string\n Field names to appear as columns.\n :param index_info: list of string pair\n Each pair holds the index field name which exists in Spark fields,\n and the index name.\n \"\"\"\n assert all(isinstance(col, string_types) for col in column_fields)\n assert index_info is None \\\n or all(isinstance(index_field, string_types)\n and (index_name is None or isinstance(index_name, string_types))\n for index_field, index_name in index_info)\n self._column_fields = column_fields\n self._index_info = index_info or []\n\n @property\n def column_fields(self):\n return self._column_fields\n\n @property\n def index_info(self):\n return self._index_info\n\n @property\n def index_fields(self):\n return [index_field for index_field, _ in self._index_info]\n\n @property\n def index_names(self):\n return [name for _, name in self._index_info]\n\n @property\n def all_fields(self):\n index_fields = self.index_fields\n return index_fields + [field for field in self._column_fields\n if field not in index_fields]\n\n def copy(self, column_fields=None, index_info=None):\n if column_fields is None:\n column_fields = self._column_fields\n if index_info is None:\n index_info = self._index_info\n return Metadata(column_fields=column_fields.copy(), index_info=index_info.copy())\n\n @staticmethod\n def from_pandas(pdf):\n column_fields = [str(col) for col in pdf.columns]\n index = pdf.index\n if isinstance(index, pd.MultiIndex):\n if index.names is None:\n index_info = [('__index_level_{}__'.format(i), None)\n for i in range(len(index.levels))]\n else:\n index_info = [('__index_level_{}__'.format(i) if name is None else name, name)\n for i, name in enumerate(index.names)]\n else:\n index_info = [(index.name\n if index.name is not None else '__index_level_0__', index.name)]\n\n return Metadata(column_fields=column_fields, index_info=index_info)\n", "path": "databricks/koalas/metadata.py"}], "after_files": [{"content": "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA metadata to manage indexes.\n\"\"\"\nimport pandas as pd\n\nfrom databricks.koalas.dask.compatibility import string_types\n\n\nclass Metadata(object):\n \"\"\"\n Manages column names and index information.\n\n :ivar _column_fields: list of the Spark field names to be seen as columns in Koalas DataFrame.\n :ivar _index_info: list of pair holding the Spark field names for indexes,\n and the index name to be seen in Koalas DataFrame.\n \"\"\"\n\n def __init__(self, column_fields, index_info=None):\n \"\"\" Create a new metadata to manage column fields and index fields and names.\n\n :param column_fields: list of string\n Field names to appear as columns.\n :param index_info: list of string pair\n Each pair holds the index field name which exists in Spark fields,\n and the index name.\n \"\"\"\n assert all(isinstance(col, string_types) for col in column_fields)\n assert index_info is None \\\n or all(isinstance(index_field, string_types)\n and (index_name is None or isinstance(index_name, string_types))\n for index_field, index_name in index_info)\n self._column_fields = column_fields\n self._index_info = index_info or []\n\n @property\n def column_fields(self):\n \"\"\" Returns the managed column field names. \"\"\"\n return self._column_fields\n\n @property\n def index_info(self):\n \"\"\" Return the managed index information. \"\"\"\n return self._index_info\n\n @property\n def index_fields(self):\n \"\"\" Returns the managed index field names. \"\"\"\n return [index_field for index_field, _ in self._index_info]\n\n @property\n def index_names(self):\n \"\"\" Return the managed index names. \"\"\"\n return [name for _, name in self._index_info]\n\n @property\n def all_fields(self):\n \"\"\" Return all the field names including index field names. \"\"\"\n index_fields = self.index_fields\n return index_fields + [field for field in self._column_fields\n if field not in index_fields]\n\n def copy(self, column_fields=None, index_info=None):\n \"\"\" Copy the metadata.\n\n :param column_fields: the new column field names. If None, then the original ones are used.\n :param index_info: the new index information. If None, then the original one is used.\n :return: the copied metadata.\n \"\"\"\n if column_fields is None:\n column_fields = self._column_fields\n if index_info is None:\n index_info = self._index_info\n return Metadata(column_fields=column_fields.copy(), index_info=index_info.copy())\n\n @staticmethod\n def from_pandas(pdf):\n \"\"\" Create a metadata from pandas DataFrame.\n\n :param pdf: :class:`pd.DataFrame`\n :return: the created metadata\n \"\"\"\n column_fields = [str(col) for col in pdf.columns]\n index = pdf.index\n if isinstance(index, pd.MultiIndex):\n if index.names is None:\n index_info = [('__index_level_{}__'.format(i), None)\n for i in range(len(index.levels))]\n else:\n index_info = [('__index_level_{}__'.format(i) if name is None else name, name)\n for i, name in enumerate(index.names)]\n else:\n index_info = [(index.name\n if index.name is not None else '__index_level_0__', index.name)]\n\n return Metadata(column_fields=column_fields, index_info=index_info)\n", "path": "databricks/koalas/metadata.py"}]}
num_tokens: 1,192
num_tokens_diff: 557

problem_id: gh_patches_debug_5870
source: rasdani/github-patches
task_type: git_diff
in_source_id: mitmproxy__mitmproxy-1513
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wait for new cryptography release For the next mitmproxy release, we want to recommend Python 3.5 by default. This requires a cryptography release which includes https://github.com/pyca/cryptography/pull/3063. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 from codecs import open 3 import os 4 5 from netlib import version 6 7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py 8 # and https://python-packaging-user-guide.readthedocs.org/ 9 10 here = os.path.abspath(os.path.dirname(__file__)) 11 12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: 13 long_description = f.read() 14 15 setup( 16 name="mitmproxy", 17 version=version.VERSION, 18 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.", 19 long_description=long_description, 20 url="http://mitmproxy.org", 21 author="Aldo Cortesi", 22 author_email="[email protected]", 23 license="MIT", 24 classifiers=[ 25 "License :: OSI Approved :: MIT License", 26 "Development Status :: 5 - Production/Stable", 27 "Environment :: Console", 28 "Environment :: Console :: Curses", 29 "Operating System :: MacOS :: MacOS X", 30 "Operating System :: POSIX", 31 "Operating System :: Microsoft :: Windows", 32 "Programming Language :: Python", 33 "Programming Language :: Python :: 2", 34 "Programming Language :: Python :: 2.7", 35 "Programming Language :: Python :: 3", 36 "Programming Language :: Python :: 3.5", 37 "Programming Language :: Python :: Implementation :: CPython", 38 "Programming Language :: Python :: Implementation :: PyPy", 39 "Topic :: Security", 40 "Topic :: Internet", 41 "Topic :: Internet :: WWW/HTTP", 42 "Topic :: Internet :: Proxy Servers", 43 "Topic :: Software Development :: Testing" 44 ], 45 packages=find_packages(include=[ 46 "mitmproxy", "mitmproxy.*", 47 "pathod", "pathod.*", 48 "netlib", "netlib.*" 49 ]), 50 include_package_data=True, 51 entry_points={ 52 'console_scripts': [ 53 "mitmproxy = mitmproxy.main:mitmproxy", 54 "mitmdump = mitmproxy.main:mitmdump", 55 "mitmweb = mitmproxy.main:mitmweb", 56 "pathod = pathod.pathod_cmdline:go_pathod", 57 "pathoc = pathod.pathoc_cmdline:go_pathoc" 58 ] 59 }, 60 # https://packaging.python.org/en/latest/requirements/#install-requires 61 # It is not considered best practice to use install_requires to pin dependencies to specific versions. 62 install_requires=[ 63 "backports.ssl_match_hostname>=3.5.0.1, <3.6", 64 "blinker>=1.4, <1.5", 65 "click>=6.2, <7.0", 66 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release! 67 "configargparse>=0.10, <0.11", 68 "construct>=2.5.2, <2.6", 69 "cryptography>=1.3, <1.5", 70 "cssutils>=1.0.1, <1.1", 71 "Flask>=0.10.1, <0.12", 72 "h2>=2.4.1, <3", 73 "html2text>=2016.1.8, <=2016.5.29", 74 "hyperframe>=4.0.1, <5", 75 "jsbeautifier>=1.6.3, <1.7", 76 "lxml>=3.5.0, <=3.6.0", # no wheels for 3.6.1 yet. 
77 "Pillow>=3.2, <3.4", 78 "passlib>=1.6.5, <1.7", 79 "pyasn1>=0.1.9, <0.2", 80 "pyOpenSSL>=16.0, <17.0", 81 "pyparsing>=2.1.3, <2.2", 82 "pyperclip>=1.5.22, <1.6", 83 "requests>=2.9.1, <2.12", 84 "six>=1.10, <1.11", 85 "tornado>=4.3, <4.5", 86 "urwid>=1.3.1, <1.4", 87 "watchdog>=0.8.3, <0.9", 88 "brotlipy>=0.3.0, <0.5", 89 ], 90 extras_require={ 91 ':sys_platform == "win32"': [ 92 "pydivert>=0.0.7, <0.1", 93 ], 94 ':sys_platform != "win32"': [ 95 ], 96 # Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380 97 # Ubuntu Trusty and other still ship with setuptools < 17.1 98 ':python_version == "2.7"': [ 99 "enum34>=1.0.4, <2", 100 "ipaddress>=1.0.15, <1.1", 101 "typing==3.5.2.2", 102 ], 103 'dev': [ 104 "tox>=2.3, <3", 105 "mock>=2.0, <2.1", 106 "pytest>=2.8.7, <3", 107 "pytest-cov>=2.2.1, <3", 108 "pytest-timeout>=1.0.0, <2", 109 "pytest-xdist>=1.14, <2", 110 "sphinx>=1.3.5, <1.5", 111 "sphinx-autobuild>=0.5.2, <0.7", 112 "sphinxcontrib-documentedlist>=0.4.0, <0.5", 113 "sphinx_rtd_theme>=0.1.9, <0.2", 114 ], 115 'contentviews': [ 116 # TODO: Find Python 3 replacements 117 # "protobuf>=2.6.1, <2.7", 118 # "pyamf>=0.8.0, <0.9", 119 ], 120 'examples': [ 121 "beautifulsoup4>=4.4.1, <4.6", 122 "pytz>=2015.07.0, <=2016.6.1", 123 ] 124 } 125 ) 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -66,7 +66,7 @@ "certifi>=2015.11.20.1", # no semver here - this should always be on the last release! "configargparse>=0.10, <0.11", "construct>=2.5.2, <2.6", - "cryptography>=1.3, <1.5", + "cryptography>=1.3, <1.6", "cssutils>=1.0.1, <1.1", "Flask>=0.10.1, <0.12", "h2>=2.4.1, <3",
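The fix is simply a widening of the upper bound so that the cryptography 1.5 series, the first to carry the upstream fix the issue waits on, satisfies the pin. A quick way to see what each range admits, sketched with the `packaging` library (an assumption; the release list below is illustrative, not exhaustive):

```python
# Sketch: which releases the old and new pins admit. Requires the
# `packaging` package; the release list is illustrative.
from packaging.specifiers import SpecifierSet

releases = ["1.3.4", "1.4", "1.5", "1.5.2"]
old_pin = SpecifierSet(">=1.3,<1.5")
new_pin = SpecifierSet(">=1.3,<1.6")

print([v for v in releases if v in old_pin])  # ['1.3.4', '1.4']
print([v for v in releases if v in new_pin])  # ['1.3.4', '1.4', '1.5', '1.5.2']
```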
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"configargparse>=0.10, <0.11\",\n \"construct>=2.5.2, <2.6\",\n- \"cryptography>=1.3, <1.5\",\n+ \"cryptography>=1.3, <1.6\",\n \"cssutils>=1.0.1, <1.1\",\n \"Flask>=0.10.1, <0.12\",\n \"h2>=2.4.1, <3\",\n", "issue": "Wait for new cryptography release\nFor the next mitmproxy release, we want to recommend Python 3.5 by default. This requires a cryptography release which includes https://github.com/pyca/cryptography/pull/3063.\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom codecs import open\nimport os\n\nfrom netlib import version\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"mitmproxy\",\n version=version.VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n \"netlib\", \"netlib.*\"\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.main:mitmproxy\",\n \"mitmdump = mitmproxy.main:mitmdump\",\n \"mitmweb = mitmproxy.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"backports.ssl_match_hostname>=3.5.0.1, <3.6\",\n \"blinker>=1.4, <1.5\",\n \"click>=6.2, <7.0\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"configargparse>=0.10, <0.11\",\n \"construct>=2.5.2, <2.6\",\n \"cryptography>=1.3, <1.5\",\n \"cssutils>=1.0.1, <1.1\",\n \"Flask>=0.10.1, <0.12\",\n \"h2>=2.4.1, <3\",\n \"html2text>=2016.1.8, <=2016.5.29\",\n \"hyperframe>=4.0.1, <5\",\n \"jsbeautifier>=1.6.3, <1.7\",\n \"lxml>=3.5.0, <=3.6.0\", # no wheels for 3.6.1 yet.\n \"Pillow>=3.2, <3.4\",\n \"passlib>=1.6.5, <1.7\",\n \"pyasn1>=0.1.9, <0.2\",\n \"pyOpenSSL>=16.0, <17.0\",\n \"pyparsing>=2.1.3, <2.2\",\n \"pyperclip>=1.5.22, <1.6\",\n 
\"requests>=2.9.1, <2.12\",\n \"six>=1.10, <1.11\",\n \"tornado>=4.3, <4.5\",\n \"urwid>=1.3.1, <1.4\",\n \"watchdog>=0.8.3, <0.9\",\n \"brotlipy>=0.3.0, <0.5\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=0.0.7, <0.1\",\n ],\n ':sys_platform != \"win32\"': [\n ],\n # Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380\n # Ubuntu Trusty and other still ship with setuptools < 17.1\n ':python_version == \"2.7\"': [\n \"enum34>=1.0.4, <2\",\n \"ipaddress>=1.0.15, <1.1\",\n \"typing==3.5.2.2\",\n ],\n 'dev': [\n \"tox>=2.3, <3\",\n \"mock>=2.0, <2.1\",\n \"pytest>=2.8.7, <3\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"sphinx>=1.3.5, <1.5\",\n \"sphinx-autobuild>=0.5.2, <0.7\",\n \"sphinxcontrib-documentedlist>=0.4.0, <0.5\",\n \"sphinx_rtd_theme>=0.1.9, <0.2\",\n ],\n 'contentviews': [\n # TODO: Find Python 3 replacements\n # \"protobuf>=2.6.1, <2.7\",\n # \"pyamf>=0.8.0, <0.9\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.6\",\n \"pytz>=2015.07.0, <=2016.6.1\",\n ]\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom codecs import open\nimport os\n\nfrom netlib import version\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"mitmproxy\",\n version=version.VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n \"netlib\", \"netlib.*\"\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.main:mitmproxy\",\n \"mitmdump = mitmproxy.main:mitmdump\",\n \"mitmweb = mitmproxy.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"backports.ssl_match_hostname>=3.5.0.1, <3.6\",\n \"blinker>=1.4, <1.5\",\n \"click>=6.2, <7.0\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n 
\"configargparse>=0.10, <0.11\",\n \"construct>=2.5.2, <2.6\",\n \"cryptography>=1.3, <1.6\",\n \"cssutils>=1.0.1, <1.1\",\n \"Flask>=0.10.1, <0.12\",\n \"h2>=2.4.1, <3\",\n \"html2text>=2016.1.8, <=2016.5.29\",\n \"hyperframe>=4.0.1, <5\",\n \"jsbeautifier>=1.6.3, <1.7\",\n \"lxml>=3.5.0, <=3.6.0\", # no wheels for 3.6.1 yet.\n \"Pillow>=3.2, <3.4\",\n \"passlib>=1.6.5, <1.7\",\n \"pyasn1>=0.1.9, <0.2\",\n \"pyOpenSSL>=16.0, <17.0\",\n \"pyparsing>=2.1.3, <2.2\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <2.12\",\n \"six>=1.10, <1.11\",\n \"tornado>=4.3, <4.5\",\n \"urwid>=1.3.1, <1.4\",\n \"watchdog>=0.8.3, <0.9\",\n \"brotlipy>=0.3.0, <0.5\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=0.0.7, <0.1\",\n ],\n ':sys_platform != \"win32\"': [\n ],\n # Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380\n # Ubuntu Trusty and other still ship with setuptools < 17.1\n ':python_version == \"2.7\"': [\n \"enum34>=1.0.4, <2\",\n \"ipaddress>=1.0.15, <1.1\",\n \"typing==3.5.2.2\",\n ],\n 'dev': [\n \"tox>=2.3, <3\",\n \"mock>=2.0, <2.1\",\n \"pytest>=2.8.7, <3\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"sphinx>=1.3.5, <1.5\",\n \"sphinx-autobuild>=0.5.2, <0.7\",\n \"sphinxcontrib-documentedlist>=0.4.0, <0.5\",\n \"sphinx_rtd_theme>=0.1.9, <0.2\",\n ],\n 'contentviews': [\n # TODO: Find Python 3 replacements\n # \"protobuf>=2.6.1, <2.7\",\n # \"pyamf>=0.8.0, <0.9\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.6\",\n \"pytz>=2015.07.0, <=2016.6.1\",\n ]\n }\n)\n", "path": "setup.py"}]}
num_tokens: 1,990
num_tokens_diff: 176

problem_id: gh_patches_debug_63301
source: rasdani/github-patches
task_type: git_diff
in_source_id: scikit-hep__pyhf-372
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update tensorflow-probability to the next release that includes continuous approximations # Description This is a follow up to #302. As the bug is fixed in upstream tensorflow-probability, we just need to wait for a new release to be shipped. This bug was because of a change in the API to get rid of the continuous approximation to the Poisson pmf which broke our tests. ### Describe the solution you'd like Unfix tensorflow-probability to `0.3.0` and bump to the next available release post-0.4.0. Update Tensorflow to TensorFlow 1.12.0 release # Description [TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably > Remove `tf.contrib.linalg`. `tf.linalg` should be used instead. Once there is a new release of TensorFlow probability (`v0.5.0` &mdash; c.f. Issue #360 and #330) that upgrades to `v1.12.0` then we can follow them in upgrading. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 from os import path 5 import sys 6 7 this_directory = path.abspath(path.dirname(__file__)) 8 if sys.version_info.major < 3: 9 from io import open 10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md: 11 long_description = readme_md.read() 12 13 extras_require = { 14 'tensorflow': [ 15 'tensorflow<1.12.0,>=1.10.0', 16 'tensorflow-probability==0.3.0', 17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 18 'setuptools<=39.1.0', 19 ], 20 'torch': ['torch>=0.4.0'], 21 'mxnet': [ 22 'mxnet>=1.0.0', 23 'requests<2.19.0,>=2.18.4', 24 'numpy<1.15.0,>=1.8.2', 25 'requests<2.19.0,>=2.18.4', 26 ], 27 # 'dask': [ 28 # 'dask[array]' 29 # ], 30 'xmlimport': ['uproot'], 31 'minuit': ['iminuit'], 32 'develop': [ 33 'pyflakes', 34 'pytest<4.0.0,>=3.5.1', 35 'pytest-cov>=2.5.1', 36 'pytest-mock', 37 'pytest-benchmark[histogram]', 38 'pytest-console-scripts', 39 'python-coveralls', 40 'coverage>=4.0', # coveralls 41 'matplotlib', 42 'jupyter', 43 'nbdime', 44 'uproot>=3.0.0', 45 'papermill>=0.16.0', 46 'graphviz', 47 'bumpversion', 48 'sphinx', 49 'sphinxcontrib-bibtex', 50 'sphinxcontrib-napoleon', 51 'sphinx_rtd_theme', 52 'nbsphinx', 53 'sphinx-issues', 54 'm2r', 55 'jsonpatch', 56 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 57 'pre-commit', 58 'black;python_version>="3.6"', # Black is Python3 only 59 'twine', 60 ], 61 } 62 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 63 64 setup( 65 name='pyhf', 66 version='0.0.15', 67 description='(partial) pure python histfactory implementation', 68 long_description=long_description, 69 long_description_content_type='text/markdown', 70 url='https://github.com/diana-hep/pyhf', 71 author='Lukas Heinrich', 72 author_email='[email protected]', 73 license='Apache', 74 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 75 classifiers=[ 76 "Programming Language :: Python :: 2", 77 "Programming Language :: Python :: 2.7", 78 "Programming Language :: Python :: 3", 79 "Programming Language :: Python :: 3.6", 80 ], 81 packages=find_packages(), 82 include_package_data=True, 83 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, 
!=3.5.*", 84 install_requires=[ 85 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 86 'click>=6.0', # for console scripts, 87 'tqdm', # for readxml 88 'six', # for modifiers 89 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 90 'jsonpatch', 91 ], 92 extras_require=extras_require, 93 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 94 dependency_links=[], 95 ) 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -12,8 +12,8 @@ extras_require = { 'tensorflow': [ - 'tensorflow<1.12.0,>=1.10.0', - 'tensorflow-probability==0.3.0', + 'tensorflow>=1.12.0', + 'tensorflow-probability>=0.5.0', 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 'setuptools<=39.1.0', ],
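The relaxed pins matter because of the breaking change quoted in the issue: `tf.contrib.linalg` is removed in TensorFlow 1.12 and `tf.linalg` takes its place. A minimal before/after sketch, assuming a graph-mode TensorFlow 1.12 install:

```python
# Sketch of the rename called out in the TF 1.12 release notes.
# Assumes TensorFlow 1.12; in 1.x graph mode, print() shows a symbolic
# tensor and a Session run is needed to see the numbers.
import tensorflow as tf

diag = tf.constant([1.0, 2.0])

# Pre-1.12 spelling, removed in 1.12:
#   op = tf.contrib.linalg.LinearOperatorDiag(diag)

# 1.12+ spelling:
op = tf.linalg.LinearOperatorDiag(diag)
print(op.to_dense())  # symbolic Tensor, shape=(2, 2), dtype=float32

with tf.Session() as sess:
    print(sess.run(op.to_dense()))  # [[1. 0.] [0. 2.]]
```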
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,8 +12,8 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow<1.12.0,>=1.10.0',\n- 'tensorflow-probability==0.3.0',\n+ 'tensorflow>=1.12.0',\n+ 'tensorflow-probability>=0.5.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n", "issue": "Update tensorflow-probability to the next release that includes continuous approximations\n# Description\r\n\r\nThis is a follow up to #302. As the bug is fixed in upstream tensorflow-probability, we just need to wait for a new release to be shipped.\r\n\r\nThis bug was because of a change in the API to get rid of the continuous approximation to the Poisson pmf which broke our tests.\r\n\r\n### Describe the solution you'd like\r\n\r\nUnfix tensorflow-probability to `0.3.0` and bump to the next available release post-0.4.0.\nUpdate Tensorflow to TensorFlow 1.12.0 release\n# Description\r\n\r\n[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably\r\n\r\n> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead. \r\n\r\nOnce there is a new release of TensorFlow probability (`v0.5.0` &mdash; c.f. Issue #360 and #330) that upgrades to `v1.12.0` then we can follow them in upgrading.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow<1.12.0,>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest<4.0.0,>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill>=0.16.0',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 
3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.12.0',\n 'tensorflow-probability>=0.5.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest<4.0.0,>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill>=0.16.0',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
1587
161
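The record above closes with its verification payload: a JSON object bundling `golden_diff`, `issue`, `before_files`, and `after_files`, where each file entry carries a `content` string and a `path`. Below is a minimal sketch of how such a record could be checked end to end (materialize `before_files`, apply the diff, compare against `after_files`). It is an illustration, not part of the dataset: the `verify_record` name and the use of the POSIX `patch` tool with `-p1` to strip the `a/` and `b/` prefixes are my own choices, and the check assumes every file the diff touches is listed in `after_files`.

```python
import json
import subprocess
import tempfile
from pathlib import Path

def verify_record(verification_info: str) -> bool:
    """Apply a record's golden_diff to before_files and compare the
    result with after_files; True means the patch reproduces them."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        # Materialize the pre-patch snapshot.
        for entry in info["before_files"]:
            target = root / entry["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(entry["content"])
        # The diffs use a/ and b/ path prefixes, hence -p1.
        subprocess.run(
            ["patch", "-p1"],
            input=info["golden_diff"],
            cwd=root,
            text=True,
            check=True,
        )
        # Compare against the expected post-patch snapshot.
        return all(
            (root / entry["path"]).read_text() == entry["content"]
            for entry in info["after_files"]
        )
```

Feeding the JSON object shown above into `verify_record` should return `True` if the golden diff cleanly transforms the before snapshot into the after snapshot.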
gh_patches_debug_33301
rasdani/github-patches
git_diff
ansible__ansible-lint-1747
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- command-instead-of-module "yum clean" revisited <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and main branch are affected too --> ##### Summary <!--- Explain the problem briefly below --> The lint error `command-instead-of-module` is raised for command `yum clean`. As the `yum` module documentation states there is no support for doing `yum clean` operation in the module. So the only option is to run a command. This issue has been raised in the past as #369 and `warn: false` was recommended. But that is now deprecated in 2.11. So shouldn't ansible-lint detect if `yum clean` is being run and not raise `command-instead-of-module`? If there is positive feedback and nobody else wants to do it then I can volunteer to make the patch - it should be a simple change, no? ##### Issue Type - Bug Report ##### Ansible and Ansible Lint details <!--- Paste verbatim output between triple backticks --> ```console (paste below) ansible --version ansible [core 2.11.5] config file = /etc/ansible/ansible.cfg configured module search path = ['/usr/share/ansible/plugins/modules'] ansible python module location = /opt/ansible/lib64/python3.6/site-packages/ansible ansible collection location = /usr/share/ansible/collections executable location = /opt/ansible/bin/ansible python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)] jinja version = 3.0.2 libyaml = True ansible-lint --version ansible-lint 5.2.0 using ansible 2.11.5 ``` - ansible installation method: pip - ansible-lint installation method: pip ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> EL7 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Have a command that runs `yum clean`. <!--- Paste example playbooks or commands between triple backticks below --> ```yaml command: cmd: yum clean all ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### Desired Behaviour <!--- Describe what you expected to happen when running the steps above --> `command-instead-of-module` should not be raised for `yum clean` which cannot be done using the `yum` module. Possible security bugs should be reported via email to `[email protected]` ##### Actual Behaviour <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> `command-instead-of-module` is raised even though the module cannot do this action. <!--- Paste verbatim command output between triple backticks --> ```paste below ``` [minimum complete verifiable example]: http://stackoverflow.com/help/mcve --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `src/ansiblelint/rules/CommandsInsteadOfModulesRule.py` Content: ``` 1 # Copyright (c) 2013-2014 Will Thames <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 20 21 import os 22 import sys 23 from typing import TYPE_CHECKING, Any, Dict, Union 24 25 from ansiblelint.rules import AnsibleLintRule 26 from ansiblelint.utils import convert_to_boolean, get_first_cmd_arg, get_second_cmd_arg 27 28 if TYPE_CHECKING: 29 from typing import Optional 30 31 from ansiblelint.file_utils import Lintable 32 33 34 class CommandsInsteadOfModulesRule(AnsibleLintRule): 35 id = 'command-instead-of-module' 36 shortdesc = 'Using command rather than module' 37 description = ( 38 'Executing a command when there is an Ansible module is generally a bad idea' 39 ) 40 severity = 'HIGH' 41 tags = ['command-shell', 'idiom'] 42 version_added = 'historic' 43 44 _commands = ['command', 'shell'] 45 _modules = { 46 'apt-get': 'apt-get', 47 'chkconfig': 'service', 48 'curl': 'get_url or uri', 49 'git': 'git', 50 'hg': 'hg', 51 'letsencrypt': 'acme_certificate', 52 'mktemp': 'tempfile', 53 'mount': 'mount', 54 'patch': 'patch', 55 'rpm': 'yum or rpm_key', 56 'rsync': 'synchronize', 57 'sed': 'template, replace or lineinfile', 58 'service': 'service', 59 'supervisorctl': 'supervisorctl', 60 'svn': 'subversion', 61 'systemctl': 'systemd', 62 'tar': 'unarchive', 63 'unzip': 'unarchive', 64 'wget': 'get_url or uri', 65 'yum': 'yum', 66 } 67 68 _executable_options = { 69 'git': ['branch', 'log'], 70 'systemctl': ['set-default', 'show-environment', 'status'], 71 } 72 73 def matchtask( 74 self, task: Dict[str, Any], file: 'Optional[Lintable]' = None 75 ) -> Union[bool, str]: 76 77 if task['action']['__ansible_module__'] not in self._commands: 78 return False 79 80 first_cmd_arg = get_first_cmd_arg(task) 81 second_cmd_arg = get_second_cmd_arg(task) 82 83 if not first_cmd_arg: 84 return False 85 86 executable = os.path.basename(first_cmd_arg) 87 88 if ( 89 second_cmd_arg 90 and executable in self._executable_options 91 and second_cmd_arg in self._executable_options[executable] 92 ): 93 return False 94 95 if executable in self._modules and convert_to_boolean( 96 task['action'].get('warn', True) 97 ): 98 message = '{0} used in place of {1} module' 99 return message.format(executable, self._modules[executable]) 100 return False 101 102 103 if "pytest" in sys.modules: 104 import pytest 105 106 from ansiblelint.testing import RunFromText # pylint: 
disable=ungrouped-imports 107 108 APT_GET = ''' 109 - hosts: all 110 tasks: 111 - name: run apt-get update 112 command: apt-get update 113 ''' 114 115 GIT_BRANCH = ''' 116 - hosts: all 117 tasks: 118 - name: print current git branch 119 command: git branch 120 ''' 121 122 GIT_LOG = ''' 123 - hosts: all 124 tasks: 125 - name: print git log 126 command: git log 127 ''' 128 129 RESTART_SSHD = ''' 130 - hosts: all 131 tasks: 132 - name: restart sshd 133 command: systemctl restart sshd 134 ''' 135 136 SYSTEMCTL_STATUS = ''' 137 - hosts: all 138 tasks: 139 - name: show systemctl service status 140 command: systemctl status systemd-timesyncd 141 ''' 142 143 SYSTEMD_ENVIRONMENT = ''' 144 - hosts: all 145 tasks: 146 - name: show systemd environment 147 command: systemctl show-environment 148 ''' 149 150 SYSTEMD_RUNLEVEL = ''' 151 - hosts: all 152 tasks: 153 - name: set systemd runlevel 154 command: systemctl set-default multi-user.target 155 ''' 156 157 @pytest.mark.parametrize( 158 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 159 ) 160 def test_apt_get(rule_runner: RunFromText) -> None: 161 """The apt module supports update.""" 162 results = rule_runner.run_playbook(APT_GET) 163 assert len(results) == 1 164 165 @pytest.mark.parametrize( 166 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 167 ) 168 def test_restart_sshd(rule_runner: RunFromText) -> None: 169 """Restarting services is supported by the systemd module.""" 170 results = rule_runner.run_playbook(RESTART_SSHD) 171 assert len(results) == 1 172 173 @pytest.mark.parametrize( 174 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 175 ) 176 def test_git_log(rule_runner: RunFromText) -> None: 177 """The git log command is not supported by the git module.""" 178 results = rule_runner.run_playbook(GIT_LOG) 179 assert len(results) == 0 180 181 @pytest.mark.parametrize( 182 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 183 ) 184 def test_git_branch(rule_runner: RunFromText) -> None: 185 """The git branch command is not supported by the git module.""" 186 results = rule_runner.run_playbook(GIT_BRANCH) 187 assert len(results) == 0 188 189 @pytest.mark.parametrize( 190 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 191 ) 192 def test_systemd_status(rule_runner: RunFromText) -> None: 193 """Set-default is not supported by the systemd module.""" 194 results = rule_runner.run_playbook(SYSTEMCTL_STATUS) 195 assert len(results) == 0 196 197 @pytest.mark.parametrize( 198 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 199 ) 200 def test_systemd_environment(rule_runner: RunFromText) -> None: 201 """Showing the environment is not supported by the systemd module.""" 202 results = rule_runner.run_playbook(SYSTEMD_ENVIRONMENT) 203 assert len(results) == 0 204 205 @pytest.mark.parametrize( 206 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] 207 ) 208 def test_systemd_runlevel(rule_runner: RunFromText) -> None: 209 """Set-default is not supported by the systemd module.""" 210 results = rule_runner.run_playbook(SYSTEMD_RUNLEVEL) 211 assert len(results) == 0 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py b/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py --- a/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py +++ b/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py @@ -68,6 +68,7 @@ _executable_options = { 'git': ['branch', 'log'], 'systemctl': ['set-default', 'show-environment', 'status'], + 'yum': ['clean'], } def matchtask( @@ -100,7 +101,7 @@ return False -if "pytest" in sys.modules: +if "pytest" in sys.modules: # noqa: C901 import pytest from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports @@ -154,6 +155,20 @@ command: systemctl set-default multi-user.target ''' + YUM_UPDATE = ''' +- hosts: all + tasks: + - name: run yum update + command: yum update +''' + + YUM_CLEAN = ''' +- hosts: all + tasks: + - name: clear yum cache + command: yum clean all +''' + @pytest.mark.parametrize( 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] ) @@ -209,3 +224,19 @@ """Set-default is not supported by the systemd module.""" results = rule_runner.run_playbook(SYSTEMD_RUNLEVEL) assert len(results) == 0 + + @pytest.mark.parametrize( + 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] + ) + def test_yum_update(rule_runner: RunFromText) -> None: + """Using yum update should fail.""" + results = rule_runner.run_playbook(YUM_UPDATE) + assert len(results) == 1 + + @pytest.mark.parametrize( + 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner'] + ) + def test_yum_clean(rule_runner: RunFromText) -> None: + """The yum module does not support clearing yum cache.""" + results = rule_runner.run_playbook(YUM_CLEAN) + assert len(results) == 0
{"golden_diff": "diff --git a/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py b/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py\n--- a/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py\n+++ b/src/ansiblelint/rules/CommandsInsteadOfModulesRule.py\n@@ -68,6 +68,7 @@\n _executable_options = {\n 'git': ['branch', 'log'],\n 'systemctl': ['set-default', 'show-environment', 'status'],\n+ 'yum': ['clean'],\n }\n \n def matchtask(\n@@ -100,7 +101,7 @@\n return False\n \n \n-if \"pytest\" in sys.modules:\n+if \"pytest\" in sys.modules: # noqa: C901\n import pytest\n \n from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports\n@@ -154,6 +155,20 @@\n command: systemctl set-default multi-user.target\n '''\n \n+ YUM_UPDATE = '''\n+- hosts: all\n+ tasks:\n+ - name: run yum update\n+ command: yum update\n+'''\n+\n+ YUM_CLEAN = '''\n+- hosts: all\n+ tasks:\n+ - name: clear yum cache\n+ command: yum clean all\n+'''\n+\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n@@ -209,3 +224,19 @@\n \"\"\"Set-default is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMD_RUNLEVEL)\n assert len(results) == 0\n+\n+ @pytest.mark.parametrize(\n+ 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n+ )\n+ def test_yum_update(rule_runner: RunFromText) -> None:\n+ \"\"\"Using yum update should fail.\"\"\"\n+ results = rule_runner.run_playbook(YUM_UPDATE)\n+ assert len(results) == 1\n+\n+ @pytest.mark.parametrize(\n+ 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n+ )\n+ def test_yum_clean(rule_runner: RunFromText) -> None:\n+ \"\"\"The yum module does not support clearing yum cache.\"\"\"\n+ results = rule_runner.run_playbook(YUM_CLEAN)\n+ assert len(results) == 0\n", "issue": "command-instead-of-module \"yum clean\" revisited\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and main branch are affected too -->\r\n\r\n##### Summary\r\n<!--- Explain the problem briefly below -->\r\nThe lint error `command-instead-of-module` is raised for command `yum clean`. As the `yum` module documentation states there is no support for doing `yum clean` operation in the module. So the only option is to run a command.\r\n\r\nThis issue has been raised in the past as #369 and `warn: false` was recommended. 
But that is now deprecated in 2.11.\r\n\r\nSo shouldn't ansible-lint detect if `yum clean` is being run and not raise `command-instead-of-module`?\r\n\r\nIf there is positive feedback and nobody else wants to do it then I can volunteer to make the patch - it should be a simple change, no?\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between triple backticks -->\r\n```console (paste below)\r\nansible --version\r\nansible [core 2.11.5]\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/ansible/lib64/python3.6/site-packages/ansible\r\n ansible collection location = /usr/share/ansible/collections\r\n executable location = /opt/ansible/bin/ansible\r\n python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]\r\n jinja version = 3.0.2\r\n libyaml = True\r\n\r\nansible-lint --version\r\nansible-lint 5.2.0 using ansible 2.11.5\r\n\r\n```\r\n\r\n- ansible installation method: pip\r\n- ansible-lint installation method: pip\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nEL7\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\nHave a command that runs `yum clean`.\r\n\r\n<!--- Paste example playbooks or commands between triple backticks below -->\r\n```yaml\r\n command:\r\n cmd: yum clean all\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### Desired Behaviour\r\n<!--- Describe what you expected to happen when running the steps above -->\r\n`command-instead-of-module` should not be raised for `yum clean` which cannot be done using the `yum` module.\r\n\r\nPossible security bugs should be reported via email to `[email protected]`\r\n\r\n##### Actual Behaviour\r\n<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->\r\n`command-instead-of-module` is raised even though the module cannot do this action.\r\n\r\n<!--- Paste verbatim command output between triple backticks -->\r\n```paste below\r\n\r\n```\r\n\r\n\r\n[minimum complete verifiable example]: http://stackoverflow.com/help/mcve\r\n\n", "before_files": [{"content": "# Copyright (c) 2013-2014 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport os\nimport sys\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\nfrom ansiblelint.utils import convert_to_boolean, get_first_cmd_arg, get_second_cmd_arg\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandsInsteadOfModulesRule(AnsibleLintRule):\n id = 'command-instead-of-module'\n shortdesc = 'Using command rather than module'\n description = (\n 'Executing a command when there is an Ansible module is generally a bad idea'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idiom']\n version_added = 'historic'\n\n _commands = ['command', 'shell']\n _modules = {\n 'apt-get': 'apt-get',\n 'chkconfig': 'service',\n 'curl': 'get_url or uri',\n 'git': 'git',\n 'hg': 'hg',\n 'letsencrypt': 'acme_certificate',\n 'mktemp': 'tempfile',\n 'mount': 'mount',\n 'patch': 'patch',\n 'rpm': 'yum or rpm_key',\n 'rsync': 'synchronize',\n 'sed': 'template, replace or lineinfile',\n 'service': 'service',\n 'supervisorctl': 'supervisorctl',\n 'svn': 'subversion',\n 'systemctl': 'systemd',\n 'tar': 'unarchive',\n 'unzip': 'unarchive',\n 'wget': 'get_url or uri',\n 'yum': 'yum',\n }\n\n _executable_options = {\n 'git': ['branch', 'log'],\n 'systemctl': ['set-default', 'show-environment', 'status'],\n }\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n\n if task['action']['__ansible_module__'] not in self._commands:\n return False\n\n first_cmd_arg = get_first_cmd_arg(task)\n second_cmd_arg = get_second_cmd_arg(task)\n\n if not first_cmd_arg:\n return False\n\n executable = os.path.basename(first_cmd_arg)\n\n if (\n second_cmd_arg\n and executable in self._executable_options\n and second_cmd_arg in self._executable_options[executable]\n ):\n return False\n\n if executable in self._modules and convert_to_boolean(\n task['action'].get('warn', True)\n ):\n message = '{0} used in place of {1} module'\n return message.format(executable, self._modules[executable])\n return False\n\n\nif \"pytest\" in sys.modules:\n import pytest\n\n from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports\n\n APT_GET = '''\n- hosts: all\n tasks:\n - name: run apt-get update\n command: apt-get update\n'''\n\n GIT_BRANCH = '''\n- hosts: all\n tasks:\n - name: print current git branch\n command: git branch\n'''\n\n GIT_LOG = '''\n- hosts: all\n tasks:\n - name: print git log\n command: git log\n'''\n\n RESTART_SSHD = '''\n- hosts: all\n tasks:\n - name: restart sshd\n command: systemctl restart sshd\n'''\n\n SYSTEMCTL_STATUS = '''\n- hosts: all\n tasks:\n - name: show systemctl service status\n command: systemctl status systemd-timesyncd\n'''\n\n SYSTEMD_ENVIRONMENT = '''\n- hosts: all\n tasks:\n - name: show systemd environment\n command: systemctl show-environment\n'''\n\n SYSTEMD_RUNLEVEL = '''\n- hosts: all\n tasks:\n - name: set systemd runlevel\n command: systemctl set-default multi-user.target\n'''\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_apt_get(rule_runner: RunFromText) -> None:\n \"\"\"The apt module supports update.\"\"\"\n results = rule_runner.run_playbook(APT_GET)\n assert 
len(results) == 1\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_restart_sshd(rule_runner: RunFromText) -> None:\n \"\"\"Restarting services is supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(RESTART_SSHD)\n assert len(results) == 1\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_git_log(rule_runner: RunFromText) -> None:\n \"\"\"The git log command is not supported by the git module.\"\"\"\n results = rule_runner.run_playbook(GIT_LOG)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_git_branch(rule_runner: RunFromText) -> None:\n \"\"\"The git branch command is not supported by the git module.\"\"\"\n results = rule_runner.run_playbook(GIT_BRANCH)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_systemd_status(rule_runner: RunFromText) -> None:\n \"\"\"Set-default is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMCTL_STATUS)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_systemd_environment(rule_runner: RunFromText) -> None:\n \"\"\"Showing the environment is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMD_ENVIRONMENT)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_systemd_runlevel(rule_runner: RunFromText) -> None:\n \"\"\"Set-default is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMD_RUNLEVEL)\n assert len(results) == 0\n", "path": "src/ansiblelint/rules/CommandsInsteadOfModulesRule.py"}], "after_files": [{"content": "# Copyright (c) 2013-2014 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport os\nimport sys\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\nfrom ansiblelint.utils import convert_to_boolean, get_first_cmd_arg, get_second_cmd_arg\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandsInsteadOfModulesRule(AnsibleLintRule):\n id = 'command-instead-of-module'\n shortdesc = 'Using command rather than module'\n description = (\n 'Executing a command when there is an Ansible module is generally a bad idea'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idiom']\n version_added = 'historic'\n\n _commands = ['command', 'shell']\n _modules = {\n 'apt-get': 'apt-get',\n 'chkconfig': 'service',\n 'curl': 'get_url or uri',\n 'git': 'git',\n 'hg': 'hg',\n 'letsencrypt': 'acme_certificate',\n 'mktemp': 'tempfile',\n 'mount': 'mount',\n 'patch': 'patch',\n 'rpm': 'yum or rpm_key',\n 'rsync': 'synchronize',\n 'sed': 'template, replace or lineinfile',\n 'service': 'service',\n 'supervisorctl': 'supervisorctl',\n 'svn': 'subversion',\n 'systemctl': 'systemd',\n 'tar': 'unarchive',\n 'unzip': 'unarchive',\n 'wget': 'get_url or uri',\n 'yum': 'yum',\n }\n\n _executable_options = {\n 'git': ['branch', 'log'],\n 'systemctl': ['set-default', 'show-environment', 'status'],\n 'yum': ['clean'],\n }\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n\n if task['action']['__ansible_module__'] not in self._commands:\n return False\n\n first_cmd_arg = get_first_cmd_arg(task)\n second_cmd_arg = get_second_cmd_arg(task)\n\n if not first_cmd_arg:\n return False\n\n executable = os.path.basename(first_cmd_arg)\n\n if (\n second_cmd_arg\n and executable in self._executable_options\n and second_cmd_arg in self._executable_options[executable]\n ):\n return False\n\n if executable in self._modules and convert_to_boolean(\n task['action'].get('warn', True)\n ):\n message = '{0} used in place of {1} module'\n return message.format(executable, self._modules[executable])\n return False\n\n\nif \"pytest\" in sys.modules: # noqa: C901\n import pytest\n\n from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports\n\n APT_GET = '''\n- hosts: all\n tasks:\n - name: run apt-get update\n command: apt-get update\n'''\n\n GIT_BRANCH = '''\n- hosts: all\n tasks:\n - name: print current git branch\n command: git branch\n'''\n\n GIT_LOG = '''\n- hosts: all\n tasks:\n - name: print git log\n command: git log\n'''\n\n RESTART_SSHD = '''\n- hosts: all\n tasks:\n - name: restart sshd\n command: systemctl restart sshd\n'''\n\n SYSTEMCTL_STATUS = '''\n- hosts: all\n tasks:\n - name: show systemctl service status\n command: systemctl status systemd-timesyncd\n'''\n\n SYSTEMD_ENVIRONMENT = '''\n- hosts: all\n tasks:\n - name: show systemd environment\n command: systemctl show-environment\n'''\n\n SYSTEMD_RUNLEVEL = '''\n- hosts: all\n tasks:\n - name: set systemd runlevel\n command: systemctl set-default multi-user.target\n'''\n\n YUM_UPDATE = '''\n- hosts: all\n tasks:\n - name: run yum update\n command: yum update\n'''\n\n YUM_CLEAN = '''\n- hosts: all\n tasks:\n - name: clear yum cache\n command: yum clean all\n'''\n\n @pytest.mark.parametrize(\n 
'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_apt_get(rule_runner: RunFromText) -> None:\n \"\"\"The apt module supports update.\"\"\"\n results = rule_runner.run_playbook(APT_GET)\n assert len(results) == 1\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_restart_sshd(rule_runner: RunFromText) -> None:\n \"\"\"Restarting services is supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(RESTART_SSHD)\n assert len(results) == 1\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_git_log(rule_runner: RunFromText) -> None:\n \"\"\"The git log command is not supported by the git module.\"\"\"\n results = rule_runner.run_playbook(GIT_LOG)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_git_branch(rule_runner: RunFromText) -> None:\n \"\"\"The git branch command is not supported by the git module.\"\"\"\n results = rule_runner.run_playbook(GIT_BRANCH)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_systemd_status(rule_runner: RunFromText) -> None:\n \"\"\"Set-default is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMCTL_STATUS)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_systemd_environment(rule_runner: RunFromText) -> None:\n \"\"\"Showing the environment is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMD_ENVIRONMENT)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_systemd_runlevel(rule_runner: RunFromText) -> None:\n \"\"\"Set-default is not supported by the systemd module.\"\"\"\n results = rule_runner.run_playbook(SYSTEMD_RUNLEVEL)\n assert len(results) == 0\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_yum_update(rule_runner: RunFromText) -> None:\n \"\"\"Using yum update should fail.\"\"\"\n results = rule_runner.run_playbook(YUM_UPDATE)\n assert len(results) == 1\n\n @pytest.mark.parametrize(\n 'rule_runner', (CommandsInsteadOfModulesRule,), indirect=['rule_runner']\n )\n def test_yum_clean(rule_runner: RunFromText) -> None:\n \"\"\"The yum module does not support clearing yum cache.\"\"\"\n results = rule_runner.run_playbook(YUM_CLEAN)\n assert len(results) == 0\n", "path": "src/ansiblelint/rules/CommandsInsteadOfModulesRule.py"}]}
3109
524
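The golden diff in the record above adds a `clean` entry for `yum` in `_executable_options`, so `yum clean all` stops being flagged while `yum update` still is. The decision order is easy to distill into a standalone sketch; note this is a reduced model, not the real ansible-lint plugin API, and it uses only a subset of the `_modules` table:

```python
# Reduced model of the rule's matchtask() decision, not the real plugin API.
MODULES = {"git": "git", "systemctl": "systemd", "yum": "yum"}
EXECUTABLE_OPTIONS = {
    "git": ["branch", "log"],
    "systemctl": ["set-default", "show-environment", "status"],
    "yum": ["clean"],  # the entry the golden diff adds
}

def should_flag(cmd: str) -> bool:
    parts = cmd.split()
    if not parts:
        return False
    executable = parts[0]
    subcommand = parts[1] if len(parts) > 1 else None
    # Escape hatch first: sub-commands the module cannot express.
    if subcommand in EXECUTABLE_OPTIONS.get(executable, []):
        return False
    return executable in MODULES

assert should_flag("yum update")         # module covers it -> flag
assert not should_flag("yum clean all")  # no module equivalent -> allow
```

The escape-hatch lookup runs before the module lookup, which is exactly why adding a single list entry is enough to remove the false positive without touching the rest of the rule.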
gh_patches_debug_2778
rasdani/github-patches
git_diff
pytorch__ignite-1324
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create a logo for Ignite ### 🚀 Feature @vfdev-5 asked me to work on a logo to represent Ignite and its relation to main PyTorch, so I’m opening this issue to post iterations and provide a place for discussion. All feedback is welcome! I'm looking forward to hearing the community's thoughts. These first iterations use the same orange as PyTorch (`#ee4c2c`)and maintain similar line weight (and sometimes the small circle) in order to show a visual relationship between the projects. The potential directions for Ignite’s logomark are meant to highlight Ignite’s strengths of simplicity and speed as well as the kick starting nature of some of its “out-of-the-box” features. ### Option 1-1 Compared to the current PyTorch logo, Option 1 is meant to be more a energetic spark than a steady, symmetrical flame. <img width="502" alt="1-1" src="https://user-images.githubusercontent.com/50221806/88605197-19e91480-d02e-11ea-9d97-5fc07286dc95.png"> <img width="502" alt="1-1color" src="https://user-images.githubusercontent.com/50221806/88605198-1a81ab00-d02e-11ea-8771-2f2713077df9.png"> ### Option 2-1 Option 2 is the furthest from the current PyTorch logo and revolves around the idea of a matchstick. The flame shape is closest to the current PyTorch logo of the different options. <img width="502" alt="2-1" src="https://user-images.githubusercontent.com/50221806/88605212-22d9e600-d02e-11ea-8678-eb0785fcd140.png"> <img width="502" alt="2-1color" src="https://user-images.githubusercontent.com/50221806/88605215-23727c80-d02e-11ea-89cf-b5ac451839ac.png"> ### Option 3-1 Another idea around a spark originating from a single point. Different flame shape and direction. <img width="502" alt="3-1" src="https://user-images.githubusercontent.com/50221806/88605221-2a998a80-d02e-11ea-85d0-8e9ac7b711c3.png"> <img width="502" alt="3-1color" src="https://user-images.githubusercontent.com/50221806/88605222-2a998a80-d02e-11ea-8372-87e1aa4eaca7.png"> ### Option 4-1 This developed as a variation of Option 3. While it is my least favorite, I wanted to provide an option that feels sharper and more explosive. <img width="502" alt="4-1" src="https://user-images.githubusercontent.com/50221806/88605232-2ff6d500-d02e-11ea-8078-e4d25c48d10b.png"> <img width="502" alt="4-1color" src="https://user-images.githubusercontent.com/50221806/88605234-308f6b80-d02e-11ea-99c9-8ba077afe688.png"> Update Circle CI docker image to pytorch 1.6 ## 🚀 Feature Following the release of 1.6 version of PyTorch, let's update Circle CI docker image : - https://github.com/pytorch/ignite/blob/master/.circleci/config.yml and add 1.5.1 version to pytorch compatibilty versions checking : - https://github.com/pytorch/ignite/blob/master/.github/workflows/pytorch-version-tests.yml --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Configuration file for the Sphinx documentation builder. 4 # 5 # This file does only contain a selection of the most common options. For a 6 # full list see the documentation: 7 # http://www.sphinx-doc.org/en/stable/config 8 9 # -- Path setup -------------------------------------------------------------- 10 11 # If extensions (or modules to document with autodoc) are in another directory, 12 # add these directories to sys.path here. 
If the directory is relative to the 13 # documentation root, use os.path.abspath to make it absolute, like shown here. 14 # 15 import os 16 import sys 17 18 sys.path.insert(0, os.path.abspath("../..")) 19 import ignite 20 import pytorch_sphinx_theme 21 22 # -- Project information ----------------------------------------------------- 23 24 project = "ignite" 25 copyright = "2020, PyTorch-Ignite Contributors" 26 author = "PyTorch-Ignite Contributors" 27 28 # The short X.Y version 29 try: 30 version = os.environ["code_version"] 31 if "master" in version: 32 version = "master (" + ignite.__version__ + ")" 33 else: 34 version = version.replace("v", "") 35 except KeyError: 36 version = ignite.__version__ 37 38 # The full version, including alpha/beta/rc tags 39 release = "master" 40 41 42 # -- General configuration --------------------------------------------------- 43 44 # If your documentation needs a minimal Sphinx version, state it here. 45 # 46 # needs_sphinx = '1.0' 47 48 # Add any Sphinx extension module names here, as strings. They can be 49 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 50 # ones. 51 extensions = [ 52 "sphinx.ext.autosummary", 53 "sphinx.ext.doctest", 54 "sphinx.ext.intersphinx", 55 "sphinx.ext.todo", 56 "sphinx.ext.coverage", 57 "sphinx.ext.mathjax", 58 "sphinx.ext.napoleon", 59 "sphinx.ext.viewcode", 60 "sphinx.ext.autosectionlabel", 61 ] 62 63 # Add any paths that contain templates here, relative to this directory. 64 templates_path = ["_templates"] 65 66 # The suffix(es) of source filenames. 67 # You can specify multiple suffix as a list of string: 68 # 69 # source_suffix = ['.rst', '.md'] 70 source_suffix = ".rst" 71 72 # The master toctree document. 73 master_doc = "index" 74 75 # The language for content autogenerated by Sphinx. Refer to documentation 76 # for a list of supported languages. 77 # 78 # This is also used if you do content translation via gettext catalogs. 79 # Usually you set "language" from the command line for these cases. 80 language = None 81 82 # List of patterns, relative to source directory, that match files and 83 # directories to ignore when looking for source files. 84 # This pattern also affects html_static_path and html_extra_path . 85 exclude_patterns = [] 86 87 # The name of the Pygments (syntax highlighting) style to use. 88 pygments_style = "sphinx" 89 90 91 # -- Options for HTML output ------------------------------------------------- 92 93 # The theme to use for HTML and HTML Help pages. See the documentation for 94 # a list of builtin themes. 95 # 96 html_theme = "pytorch_sphinx_theme" 97 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] 98 99 html_theme_options = { 100 "canonical_url": "https://pytorch.org/ignite/index.html", 101 "collapse_navigation": False, 102 "display_version": True, 103 "logo_only": True, 104 } 105 106 html_logo = "_static/img/ignite-logo-dark.svg" 107 108 # Theme options are theme-specific and customize the look and feel of a theme 109 # further. For a list of options available for each theme, see the 110 # documentation. 111 # 112 # html_theme_options = {} 113 114 # Add any paths that contain custom static files (such as style sheets) here, 115 # relative to this directory. They are copied after the builtin static files, 116 # so a file named "default.css" will overwrite the builtin "default.css". 
117 html_static_path = ["_static", "_templates/_static"] 118 119 html_context = { 120 "css_files": [ 121 # 'https://fonts.googleapis.com/css?family=Lato', 122 # '_static/css/pytorch_theme.css' 123 "_static/css/ignite_theme.css" 124 ], 125 } 126 127 128 # -- Options for HTMLHelp output --------------------------------------------- 129 130 # Output file base name for HTML help builder. 131 htmlhelp_basename = "ignitedoc" 132 133 134 # -- Options for LaTeX output ------------------------------------------------ 135 136 latex_elements = { 137 # The paper size ('letterpaper' or 'a4paper'). 138 # 139 # 'papersize': 'letterpaper', 140 # The font size ('10pt', '11pt' or '12pt'). 141 # 142 # 'pointsize': '10pt', 143 # Additional stuff for the LaTeX preamble. 144 # 145 # 'preamble': '', 146 # Latex figure (float) alignment 147 # 148 # 'figure_align': 'htbp', 149 } 150 151 # Grouping the document tree into LaTeX files. List of tuples 152 # (source start file, target name, title, 153 # author, documentclass [howto, manual, or own class]). 154 latex_documents = [ 155 (master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"), 156 ] 157 158 159 # -- Options for manual page output ------------------------------------------ 160 161 # One entry per manual page. List of tuples 162 # (source start file, name, description, authors, manual section). 163 man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)] 164 165 166 # -- Options for Texinfo output ---------------------------------------------- 167 168 # Grouping the document tree into Texinfo files. List of tuples 169 # (source start file, target name, title, author, 170 # dir menu entry, description, category) 171 texinfo_documents = [ 172 ( 173 master_doc, 174 "ignite", 175 "ignite Documentation", 176 author, 177 "ignite", 178 "One line description of project.", 179 "Miscellaneous", 180 ), 181 ] 182 183 184 # -- Extension configuration ------------------------------------------------- 185 186 # -- Options for intersphinx extension --------------------------------------- 187 188 # Example configuration for intersphinx: refer to the Python standard library. 189 intersphinx_mapping = {"https://docs.python.org/": None} 190 191 # -- Options for todo extension ---------------------------------------------- 192 193 # If true, `todo` and `todoList` produce output, else they produce nothing. 194 todo_include_todos = True 195 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -103,7 +103,7 @@ "logo_only": True, } -html_logo = "_static/img/ignite-logo-dark.svg" +html_logo = "_static/img/ignite_logo.svg" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -103,7 +103,7 @@\n \"logo_only\": True,\n }\n \n-html_logo = \"_static/img/ignite-logo-dark.svg\"\n+html_logo = \"_static/img/ignite_logo.svg\"\n \n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n", "issue": "Create a logo for Ignite\n### \ud83d\ude80 Feature\r\n\r\n@vfdev-5 asked me to work on a logo to represent Ignite and its relation to main PyTorch, so I\u2019m opening this issue to post iterations and provide a place for discussion. \r\n\r\nAll feedback is welcome! I'm looking forward to hearing the community's thoughts.\r\n\r\nThese first iterations use the same orange as PyTorch (`#ee4c2c`)and maintain similar line weight (and sometimes the small circle) in order to show a visual relationship between the projects. The potential directions for Ignite\u2019s logomark are meant to highlight Ignite\u2019s strengths of simplicity and speed as well as the kick starting nature of some of its \u201cout-of-the-box\u201d features.\r\n\r\n### Option 1-1\r\nCompared to the current PyTorch logo, Option 1 is meant to be more a energetic spark than a steady, symmetrical flame.\r\n<img width=\"502\" alt=\"1-1\" src=\"https://user-images.githubusercontent.com/50221806/88605197-19e91480-d02e-11ea-9d97-5fc07286dc95.png\">\r\n<img width=\"502\" alt=\"1-1color\" src=\"https://user-images.githubusercontent.com/50221806/88605198-1a81ab00-d02e-11ea-8771-2f2713077df9.png\">\r\n\r\n### Option 2-1\r\nOption 2 is the furthest from the current PyTorch logo and revolves around the idea of a matchstick. The flame shape is closest to the current PyTorch logo of the different options.\r\n<img width=\"502\" alt=\"2-1\" src=\"https://user-images.githubusercontent.com/50221806/88605212-22d9e600-d02e-11ea-8678-eb0785fcd140.png\">\r\n<img width=\"502\" alt=\"2-1color\" src=\"https://user-images.githubusercontent.com/50221806/88605215-23727c80-d02e-11ea-89cf-b5ac451839ac.png\">\r\n\r\n### Option 3-1\r\nAnother idea around a spark originating from a single point. Different flame shape and direction.\r\n<img width=\"502\" alt=\"3-1\" src=\"https://user-images.githubusercontent.com/50221806/88605221-2a998a80-d02e-11ea-85d0-8e9ac7b711c3.png\">\r\n<img width=\"502\" alt=\"3-1color\" src=\"https://user-images.githubusercontent.com/50221806/88605222-2a998a80-d02e-11ea-8372-87e1aa4eaca7.png\">\r\n\r\n### Option 4-1\r\nThis developed as a variation of Option 3. 
While it is my least favorite, I wanted to provide an option that feels sharper and more explosive.\r\n<img width=\"502\" alt=\"4-1\" src=\"https://user-images.githubusercontent.com/50221806/88605232-2ff6d500-d02e-11ea-8078-e4d25c48d10b.png\">\r\n<img width=\"502\" alt=\"4-1color\" src=\"https://user-images.githubusercontent.com/50221806/88605234-308f6b80-d02e-11ea-99c9-8ba077afe688.png\">\nUpdate Circle CI docker image to pytorch 1.6\n## \ud83d\ude80 Feature\r\n\r\nFollowing the release of 1.6 version of PyTorch, let's update Circle CI docker image :\r\n- https://github.com/pytorch/ignite/blob/master/.circleci/config.yml\r\nand add 1.5.1 version to pytorch compatibilty versions checking :\r\n- https://github.com/pytorch/ignite/blob/master/.github/workflows/pytorch-version-tests.yml\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\ncopyright = \"2020, PyTorch-Ignite Contributors\"\nauthor = \"PyTorch-Ignite Contributors\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/img/ignite-logo-dark.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\ncopyright = \"2020, PyTorch-Ignite Contributors\"\nauthor = \"PyTorch-Ignite Contributors\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/img/ignite_logo.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/source/conf.py"}]}
3065
109
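Every record in this dump repeats the same source string, `rasdani/github-patches`. If that value is a Hugging Face dataset id (an assumption on my part; nothing in the dump confirms it), rows like the ones above could be pulled programmatically. The split name `train` is likewise a guess:

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical loading sketch: treats the repeated source value as a dataset id.
ds = load_dataset("rasdani/github-patches", split="train")
print(ds.column_names)  # read the schema from the data instead of guessing it
row = ds[0]
print({key: str(value)[:80] for key, value in row.items()})  # one-record peek
```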
gh_patches_debug_947
rasdani/github-patches
git_diff
databricks__koalas-105
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- How to call dev/_make_missing_functions.py? ``` rxin @ C02XT0W6JGH5 : ~/workspace/spark-pandas (master) > dev/_make_missing_functions.py Traceback (most recent call last): File "dev/_make_missing_functions.py", line 22, in <module> from databricks.koala.frame import PandasLikeDataFrame ImportError: No module named databricks.koala.frame ``` Do I need to install koala first? We should add documentation to CONTRIBUTING.md. It'd also be best if this runs against the existing code base, rather than a system-wide installed Koala. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dev/_make_missing_functions.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # Copyright (C) 2019 Databricks, Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 # 17 18 import inspect 19 20 import pandas as pd 21 22 from databricks.koalas.frame import PandasLikeDataFrame 23 from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame 24 from databricks.koalas.missing.series import _MissingPandasLikeSeries 25 from databricks.koalas.series import PandasLikeSeries 26 27 28 INDENT_LEN = 4 29 LINE_LEN_LIMIT = 100 30 31 32 def inspect_missing_functions(original_type, target_type, missing_type): 33 """ 34 Find functions which exist in original_type but not in target_type, 35 or the signature is modified. 36 37 :return: the tuple of the missing function name and its signature, 38 and the name of the functions the signature of which is different 39 and its original and modified signature. 40 """ 41 missing = [] 42 modified = [] 43 44 already_in_missing = set([(name, inspect.signature(func)) for name, func 45 in inspect.getmembers(missing_type, inspect.isfunction)]) 46 for name, func in inspect.getmembers(original_type, inspect.isfunction): 47 # Skip the private attributes 48 if name.startswith('_'): 49 continue 50 51 original_signature = inspect.signature(func, follow_wrapped=True) 52 53 if hasattr(target_type, name): 54 f = getattr(target_type, name) 55 if inspect.isfunction(f): 56 target_signature = inspect.signature(f) 57 if (name, target_signature) in already_in_missing: 58 missing.append((name, original_signature)) 59 elif str(original_signature) != str(target_signature): 60 modified.append((name, original_signature, target_signature)) 61 continue 62 63 missing.append((name, original_signature)) 64 65 return missing, modified 66 67 68 def format_arguments(arguments, prefix_len, suffix_len): 69 """Format arguments not to break pydocstyle. 
70 71 :param arguments: the argument list 72 :param prefix_len: the prefix length when the argument string needs line break 73 :param suffix_len: the suffix length to check the line length exceeds the limit 74 :return: the formatted argument string 75 """ 76 lines = [''] 77 78 def append_arg(arg): 79 if prefix_len + len(lines[-1]) + len(', ') + len(arg) + suffix_len > LINE_LEN_LIMIT: 80 lines.append('') 81 append_arg(arg) 82 else: 83 if len(lines[-1]) > 0: 84 arg = ', {}'.format(arg) 85 lines[-1] += arg 86 87 for arg in arguments: 88 append_arg(arg) 89 90 return (',\n' + (' ' * prefix_len)).join(lines) 91 92 93 def format_method_arguments(name, signature): 94 """Format the method arguments from its name and signature. 95 96 :return: the formatted argument string 97 """ 98 arguments = [] 99 100 for param in signature.parameters.values(): 101 if param.default is not inspect.Signature.empty and isinstance(param.default, type): 102 arguments.append('{}={}'.format(param.name, param.default.__name__)) 103 elif param.default is not inspect.Signature.empty and repr(param.default) == 'nan': 104 arguments.append('{}={}'.format(param.name, 'np.nan')) 105 else: 106 arguments.append(str(param)) 107 108 prefix_len = INDENT_LEN + len('def {}('.format(name)) 109 suffix_len = len('):') 110 return format_arguments(arguments, prefix_len, suffix_len) 111 112 113 def format_derived_from(original_type, unavailable_arguments, signature): 114 """Format `@derived_from` decorator. 115 116 :param original_type: the original type to be derived 117 :param unavailable_arguments: the arguments Koalas does not support yet 118 :param signature: the method signature 119 :return: the formatted `@derived_from` decorator 120 """ 121 if len(unavailable_arguments) == 0: 122 return '@derived_from(pd.{})'.format(original_type.__name__) 123 124 arguments = [] 125 126 for arg in unavailable_arguments: 127 param = signature.parameters[arg] 128 if param.default == inspect.Parameter.empty or \ 129 param.kind == inspect.Parameter.VAR_POSITIONAL or \ 130 param.kind == inspect.Parameter.VAR_KEYWORD: 131 continue 132 arguments.append(repr(arg)) 133 134 prefix = '@derived_from(pd.{}, ua_args=['.format(original_type.__name__) 135 suffix = '])' 136 prefix_len = INDENT_LEN + len(prefix) 137 suffix_len = len(suffix) 138 return '{}{}{}'.format(prefix, format_arguments(arguments, prefix_len, suffix_len), suffix) 139 140 141 def format_raise_errors(original_type, name, unavailable_arguments, signature): 142 """ 143 Format raise error statements for unavailable arguments when specified the different value 144 from the default value. 
145 146 :return: the formatted raise error statements 147 """ 148 raise_errors = '' 149 150 for arg in unavailable_arguments: 151 param = signature.parameters[arg] 152 if param.default == inspect.Parameter.empty or \ 153 param.kind == inspect.Parameter.VAR_POSITIONAL or \ 154 param.kind == inspect.Parameter.VAR_KEYWORD: 155 continue 156 if repr(param.default) == 'nan': 157 not_equal = 'not np.isnan({})'.format(arg) 158 elif isinstance(param.default, type): 159 not_equal = '{} is not {}'.format(arg, param.default.__name__) 160 elif param.default is None or \ 161 param.default is True or param.default is False: 162 not_equal = '{} is not {}'.format(arg, repr(param.default)) 163 else: 164 not_equal = '{} != {}'.format(arg, repr(param.default)) 165 166 raise_error_prefix = 'raise PandasNotImplementedError(' 167 raise_error_suffix = ')' 168 arguments = format_arguments( 169 arguments=["class_name='pd.{}'".format(original_type.__name__), 170 "method_name='{}'".format(name), 171 "arg_name='{}'".format(arg)], 172 prefix_len=(INDENT_LEN * 3 + len(raise_error_prefix)), 173 suffix_len=len(raise_error_suffix)) 174 raise_errors += (""" 175 if {0}: 176 {1}{2}{3}""".format(not_equal, raise_error_prefix, arguments, raise_error_suffix)) 177 178 return raise_errors 179 180 181 def make_misssing_function(original_type, name, signature): 182 """Make a missing functions stub. 183 184 :return: the stub definition for the missing function 185 """ 186 arguments = format_method_arguments(name, signature) 187 error_argument = format_arguments( 188 arguments=["class_name='pd.{}'".format(original_type.__name__), 189 "method_name='{}'".format(name)], 190 prefix_len=(8 + len('raise PandasNotImplementedError(')), 191 suffix_len=len(')')) 192 193 return (""" 194 def {0}({1}): 195 \"""A stub for the equivalent method to `pd.{2}.{0}()`. 196 197 The method `pd.{2}.{0}()` is not implemented yet. 198 \""" 199 raise PandasNotImplementedError({3})""" 200 .format(name, arguments, original_type.__name__, error_argument)) 201 202 203 def make_modified_function_def(original_type, name, original, target): 204 """Make the modified function definition. 
205 206 :return: the definition for the modified function 207 """ 208 arguments = format_method_arguments(name, original) 209 argument_names = set(target.parameters) 210 unavailable_arguments = [p for p in original.parameters if p not in argument_names] 211 derived_from = format_derived_from(original_type, unavailable_arguments, original) 212 raise_error = format_raise_errors(original_type, name, unavailable_arguments, original) 213 return (""" 214 {0} 215 def {1}({2}):{3}""".format(derived_from, name, arguments, raise_error)) 216 217 218 def _main(): 219 for original_type, target_type, missing_type in \ 220 [(pd.DataFrame, PandasLikeDataFrame, _MissingPandasLikeDataFrame), 221 (pd.Series, PandasLikeSeries, _MissingPandasLikeSeries)]: 222 missing, modified = inspect_missing_functions(original_type, target_type, missing_type) 223 224 print('MISSING functions for {}'.format(original_type.__name__)) 225 for name, signature in missing: 226 # print(make_misssing_function(original_type, name, signature)) 227 print(""" {0} = unsupported_function('{0}')""".format(name)) 228 229 print() 230 print('MODIFIED functions for {}'.format(original_type.__name__)) 231 for name, original, target in modified: 232 print(make_modified_function_def(original_type, name, original, target)) 233 234 235 if __name__ == '__main__': 236 _main() 237 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dev/_make_missing_functions.py b/dev/_make_missing_functions.py --- a/dev/_make_missing_functions.py +++ b/dev/_make_missing_functions.py @@ -15,6 +15,12 @@ # limitations under the License. # +""" +A script to generate the missing function stubs. Before running this, +make sure you install koalas from the current checkout by running: +pip install -e . +""" + import inspect import pandas as pd
{"golden_diff": "diff --git a/dev/_make_missing_functions.py b/dev/_make_missing_functions.py\n--- a/dev/_make_missing_functions.py\n+++ b/dev/_make_missing_functions.py\n@@ -15,6 +15,12 @@\n # limitations under the License.\n #\n \n+\"\"\"\n+A script to generate the missing function stubs. Before running this,\n+make sure you install koalas from the current checkout by running:\n+pip install -e .\n+\"\"\"\n+\n import inspect\n \n import pandas as pd\n", "issue": "How to call dev/_make_missing_functions.py?\n```\r\nrxin @ C02XT0W6JGH5 : ~/workspace/spark-pandas (master) \r\n> dev/_make_missing_functions.py \r\nTraceback (most recent call last):\r\n File \"dev/_make_missing_functions.py\", line 22, in <module>\r\n from databricks.koala.frame import PandasLikeDataFrame\r\nImportError: No module named databricks.koala.frame\r\n```\r\n\r\nDo I need to install koala first? We should add documentation to CONTRIBUTING.md. It'd also be best if this runs against the existing code base, rather than a system-wide installed Koala.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport inspect\n\nimport pandas as pd\n\nfrom databricks.koalas.frame import PandasLikeDataFrame\nfrom databricks.koalas.missing.frame import _MissingPandasLikeDataFrame\nfrom databricks.koalas.missing.series import _MissingPandasLikeSeries\nfrom databricks.koalas.series import PandasLikeSeries\n\n\nINDENT_LEN = 4\nLINE_LEN_LIMIT = 100\n\n\ndef inspect_missing_functions(original_type, target_type, missing_type):\n \"\"\"\n Find functions which exist in original_type but not in target_type,\n or the signature is modified.\n\n :return: the tuple of the missing function name and its signature,\n and the name of the functions the signature of which is different\n and its original and modified signature.\n \"\"\"\n missing = []\n modified = []\n\n already_in_missing = set([(name, inspect.signature(func)) for name, func\n in inspect.getmembers(missing_type, inspect.isfunction)])\n for name, func in inspect.getmembers(original_type, inspect.isfunction):\n # Skip the private attributes\n if name.startswith('_'):\n continue\n\n original_signature = inspect.signature(func, follow_wrapped=True)\n\n if hasattr(target_type, name):\n f = getattr(target_type, name)\n if inspect.isfunction(f):\n target_signature = inspect.signature(f)\n if (name, target_signature) in already_in_missing:\n missing.append((name, original_signature))\n elif str(original_signature) != str(target_signature):\n modified.append((name, original_signature, target_signature))\n continue\n\n missing.append((name, original_signature))\n\n return missing, modified\n\n\ndef format_arguments(arguments, prefix_len, suffix_len):\n \"\"\"Format arguments not to break pydocstyle.\n\n :param arguments: the argument list\n :param prefix_len: the prefix length when the argument string needs line break\n :param suffix_len: the suffix length to check the line length exceeds the 
limit\n :return: the formatted argument string\n \"\"\"\n lines = ['']\n\n def append_arg(arg):\n if prefix_len + len(lines[-1]) + len(', ') + len(arg) + suffix_len > LINE_LEN_LIMIT:\n lines.append('')\n append_arg(arg)\n else:\n if len(lines[-1]) > 0:\n arg = ', {}'.format(arg)\n lines[-1] += arg\n\n for arg in arguments:\n append_arg(arg)\n\n return (',\\n' + (' ' * prefix_len)).join(lines)\n\n\ndef format_method_arguments(name, signature):\n \"\"\"Format the method arguments from its name and signature.\n\n :return: the formatted argument string\n \"\"\"\n arguments = []\n\n for param in signature.parameters.values():\n if param.default is not inspect.Signature.empty and isinstance(param.default, type):\n arguments.append('{}={}'.format(param.name, param.default.__name__))\n elif param.default is not inspect.Signature.empty and repr(param.default) == 'nan':\n arguments.append('{}={}'.format(param.name, 'np.nan'))\n else:\n arguments.append(str(param))\n\n prefix_len = INDENT_LEN + len('def {}('.format(name))\n suffix_len = len('):')\n return format_arguments(arguments, prefix_len, suffix_len)\n\n\ndef format_derived_from(original_type, unavailable_arguments, signature):\n \"\"\"Format `@derived_from` decorator.\n\n :param original_type: the original type to be derived\n :param unavailable_arguments: the arguments Koalas does not support yet\n :param signature: the method signature\n :return: the formatted `@derived_from` decorator\n \"\"\"\n if len(unavailable_arguments) == 0:\n return '@derived_from(pd.{})'.format(original_type.__name__)\n\n arguments = []\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n arguments.append(repr(arg))\n\n prefix = '@derived_from(pd.{}, ua_args=['.format(original_type.__name__)\n suffix = '])'\n prefix_len = INDENT_LEN + len(prefix)\n suffix_len = len(suffix)\n return '{}{}{}'.format(prefix, format_arguments(arguments, prefix_len, suffix_len), suffix)\n\n\ndef format_raise_errors(original_type, name, unavailable_arguments, signature):\n \"\"\"\n Format raise error statements for unavailable arguments when specified the different value\n from the default value.\n\n :return: the formatted raise error statements\n \"\"\"\n raise_errors = ''\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n if repr(param.default) == 'nan':\n not_equal = 'not np.isnan({})'.format(arg)\n elif isinstance(param.default, type):\n not_equal = '{} is not {}'.format(arg, param.default.__name__)\n elif param.default is None or \\\n param.default is True or param.default is False:\n not_equal = '{} is not {}'.format(arg, repr(param.default))\n else:\n not_equal = '{} != {}'.format(arg, repr(param.default))\n\n raise_error_prefix = 'raise PandasNotImplementedError('\n raise_error_suffix = ')'\n arguments = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name),\n \"arg_name='{}'\".format(arg)],\n prefix_len=(INDENT_LEN * 3 + len(raise_error_prefix)),\n suffix_len=len(raise_error_suffix))\n raise_errors += (\"\"\"\n if {0}:\n {1}{2}{3}\"\"\".format(not_equal, raise_error_prefix, arguments, raise_error_suffix))\n\n return raise_errors\n\n\ndef 
make_misssing_function(original_type, name, signature):\n \"\"\"Make a missing functions stub.\n\n :return: the stub definition for the missing function\n \"\"\"\n arguments = format_method_arguments(name, signature)\n error_argument = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name)],\n prefix_len=(8 + len('raise PandasNotImplementedError(')),\n suffix_len=len(')'))\n\n return (\"\"\"\n def {0}({1}):\n \\\"\"\"A stub for the equivalent method to `pd.{2}.{0}()`.\n\n The method `pd.{2}.{0}()` is not implemented yet.\n \\\"\"\"\n raise PandasNotImplementedError({3})\"\"\"\n .format(name, arguments, original_type.__name__, error_argument))\n\n\ndef make_modified_function_def(original_type, name, original, target):\n \"\"\"Make the modified function definition.\n\n :return: the definition for the modified function\n \"\"\"\n arguments = format_method_arguments(name, original)\n argument_names = set(target.parameters)\n unavailable_arguments = [p for p in original.parameters if p not in argument_names]\n derived_from = format_derived_from(original_type, unavailable_arguments, original)\n raise_error = format_raise_errors(original_type, name, unavailable_arguments, original)\n return (\"\"\"\n {0}\n def {1}({2}):{3}\"\"\".format(derived_from, name, arguments, raise_error))\n\n\ndef _main():\n for original_type, target_type, missing_type in \\\n [(pd.DataFrame, PandasLikeDataFrame, _MissingPandasLikeDataFrame),\n (pd.Series, PandasLikeSeries, _MissingPandasLikeSeries)]:\n missing, modified = inspect_missing_functions(original_type, target_type, missing_type)\n\n print('MISSING functions for {}'.format(original_type.__name__))\n for name, signature in missing:\n # print(make_misssing_function(original_type, name, signature))\n print(\"\"\" {0} = unsupported_function('{0}')\"\"\".format(name))\n\n print()\n print('MODIFIED functions for {}'.format(original_type.__name__))\n for name, original, target in modified:\n print(make_modified_function_def(original_type, name, original, target))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "dev/_make_missing_functions.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA script to generate the missing function stubs. 
Before running this,\nmake sure you install koalas from the current checkout by running:\npip install -e .\n\"\"\"\n\nimport inspect\n\nimport pandas as pd\n\nfrom databricks.koalas.frame import PandasLikeDataFrame\nfrom databricks.koalas.missing.frame import _MissingPandasLikeDataFrame\nfrom databricks.koalas.missing.series import _MissingPandasLikeSeries\nfrom databricks.koalas.series import PandasLikeSeries\n\n\nINDENT_LEN = 4\nLINE_LEN_LIMIT = 100\n\n\ndef inspect_missing_functions(original_type, target_type, missing_type):\n \"\"\"\n Find functions which exist in original_type but not in target_type,\n or the signature is modified.\n\n :return: the tuple of the missing function name and its signature,\n and the name of the functions the signature of which is different\n and its original and modified signature.\n \"\"\"\n missing = []\n modified = []\n\n already_in_missing = set([(name, inspect.signature(func)) for name, func\n in inspect.getmembers(missing_type, inspect.isfunction)])\n for name, func in inspect.getmembers(original_type, inspect.isfunction):\n # Skip the private attributes\n if name.startswith('_'):\n continue\n\n original_signature = inspect.signature(func, follow_wrapped=True)\n\n if hasattr(target_type, name):\n f = getattr(target_type, name)\n if inspect.isfunction(f):\n target_signature = inspect.signature(f)\n if (name, target_signature) in already_in_missing:\n missing.append((name, original_signature))\n elif str(original_signature) != str(target_signature):\n modified.append((name, original_signature, target_signature))\n continue\n\n missing.append((name, original_signature))\n\n return missing, modified\n\n\ndef format_arguments(arguments, prefix_len, suffix_len):\n \"\"\"Format arguments not to break pydocstyle.\n\n :param arguments: the argument list\n :param prefix_len: the prefix length when the argument string needs line break\n :param suffix_len: the suffix length to check the line length exceeds the limit\n :return: the formatted argument string\n \"\"\"\n lines = ['']\n\n def append_arg(arg):\n if prefix_len + len(lines[-1]) + len(', ') + len(arg) + suffix_len > LINE_LEN_LIMIT:\n lines.append('')\n append_arg(arg)\n else:\n if len(lines[-1]) > 0:\n arg = ', {}'.format(arg)\n lines[-1] += arg\n\n for arg in arguments:\n append_arg(arg)\n\n return (',\\n' + (' ' * prefix_len)).join(lines)\n\n\ndef format_method_arguments(name, signature):\n \"\"\"Format the method arguments from its name and signature.\n\n :return: the formatted argument string\n \"\"\"\n arguments = []\n\n for param in signature.parameters.values():\n if param.default is not inspect.Signature.empty and isinstance(param.default, type):\n arguments.append('{}={}'.format(param.name, param.default.__name__))\n elif param.default is not inspect.Signature.empty and repr(param.default) == 'nan':\n arguments.append('{}={}'.format(param.name, 'np.nan'))\n else:\n arguments.append(str(param))\n\n prefix_len = INDENT_LEN + len('def {}('.format(name))\n suffix_len = len('):')\n return format_arguments(arguments, prefix_len, suffix_len)\n\n\ndef format_derived_from(original_type, unavailable_arguments, signature):\n \"\"\"Format `@derived_from` decorator.\n\n :param original_type: the original type to be derived\n :param unavailable_arguments: the arguments Koalas does not support yet\n :param signature: the method signature\n :return: the formatted `@derived_from` decorator\n \"\"\"\n if len(unavailable_arguments) == 0:\n return '@derived_from(pd.{})'.format(original_type.__name__)\n\n arguments 
= []\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n arguments.append(repr(arg))\n\n prefix = '@derived_from(pd.{}, ua_args=['.format(original_type.__name__)\n suffix = '])'\n prefix_len = INDENT_LEN + len(prefix)\n suffix_len = len(suffix)\n return '{}{}{}'.format(prefix, format_arguments(arguments, prefix_len, suffix_len), suffix)\n\n\ndef format_raise_errors(original_type, name, unavailable_arguments, signature):\n \"\"\"\n Format raise error statements for unavailable arguments when specified the different value\n from the default value.\n\n :return: the formatted raise error statements\n \"\"\"\n raise_errors = ''\n\n for arg in unavailable_arguments:\n param = signature.parameters[arg]\n if param.default == inspect.Parameter.empty or \\\n param.kind == inspect.Parameter.VAR_POSITIONAL or \\\n param.kind == inspect.Parameter.VAR_KEYWORD:\n continue\n if repr(param.default) == 'nan':\n not_equal = 'not np.isnan({})'.format(arg)\n elif isinstance(param.default, type):\n not_equal = '{} is not {}'.format(arg, param.default.__name__)\n elif param.default is None or \\\n param.default is True or param.default is False:\n not_equal = '{} is not {}'.format(arg, repr(param.default))\n else:\n not_equal = '{} != {}'.format(arg, repr(param.default))\n\n raise_error_prefix = 'raise PandasNotImplementedError('\n raise_error_suffix = ')'\n arguments = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name),\n \"arg_name='{}'\".format(arg)],\n prefix_len=(INDENT_LEN * 3 + len(raise_error_prefix)),\n suffix_len=len(raise_error_suffix))\n raise_errors += (\"\"\"\n if {0}:\n {1}{2}{3}\"\"\".format(not_equal, raise_error_prefix, arguments, raise_error_suffix))\n\n return raise_errors\n\n\ndef make_misssing_function(original_type, name, signature):\n \"\"\"Make a missing functions stub.\n\n :return: the stub definition for the missing function\n \"\"\"\n arguments = format_method_arguments(name, signature)\n error_argument = format_arguments(\n arguments=[\"class_name='pd.{}'\".format(original_type.__name__),\n \"method_name='{}'\".format(name)],\n prefix_len=(8 + len('raise PandasNotImplementedError(')),\n suffix_len=len(')'))\n\n return (\"\"\"\n def {0}({1}):\n \\\"\"\"A stub for the equivalent method to `pd.{2}.{0}()`.\n\n The method `pd.{2}.{0}()` is not implemented yet.\n \\\"\"\"\n raise PandasNotImplementedError({3})\"\"\"\n .format(name, arguments, original_type.__name__, error_argument))\n\n\ndef make_modified_function_def(original_type, name, original, target):\n \"\"\"Make the modified function definition.\n\n :return: the definition for the modified function\n \"\"\"\n arguments = format_method_arguments(name, original)\n argument_names = set(target.parameters)\n unavailable_arguments = [p for p in original.parameters if p not in argument_names]\n derived_from = format_derived_from(original_type, unavailable_arguments, original)\n raise_error = format_raise_errors(original_type, name, unavailable_arguments, original)\n return (\"\"\"\n {0}\n def {1}({2}):{3}\"\"\".format(derived_from, name, arguments, raise_error))\n\n\ndef _main():\n for original_type, target_type, missing_type in \\\n [(pd.DataFrame, PandasLikeDataFrame, _MissingPandasLikeDataFrame),\n (pd.Series, PandasLikeSeries, _MissingPandasLikeSeries)]:\n missing, modified = 
inspect_missing_functions(original_type, target_type, missing_type)\n\n print('MISSING functions for {}'.format(original_type.__name__))\n for name, signature in missing:\n # print(make_misssing_function(original_type, name, signature))\n print(\"\"\" {0} = unsupported_function('{0}')\"\"\".format(name))\n\n print()\n print('MODIFIED functions for {}'.format(original_type.__name__))\n for name, original, target in modified:\n print(make_modified_function_def(original_type, name, original, target))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "dev/_make_missing_functions.py"}]}
2,931
105
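For context on the koalas record above: the merged patch simply documents `pip install -e .`, which makes the checkout importable as `databricks.koalas`. A hypothetical alternative (not the merged change) that would let `dev/_make_missing_functions.py` run straight from the checkout, with no install at all, is to put the repository root on `sys.path` before importing the package:

```python
# Hypothetical header for dev/_make_missing_functions.py: resolve the
# repository root (one level above dev/) and prefer it over any
# system-wide koalas install for the imports that follow.
import sys
from pathlib import Path

repo_root = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(repo_root))

from databricks.koalas.frame import PandasLikeDataFrame  # noqa: E402
```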
gh_patches_debug_14775
rasdani/github-patches
git_diff
hylang__hy-1122
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Hy sets are really broken. ``` Hy (env-hy) C:\Users\ME\Code>hy hy 0.11.0 using CPython(v3.4.3:9b73f1c3e601) 3.4.3 on Windows => #{:a 'a} Traceback (most recent call last): File "C:\Python34\Scripts\env-hy\Scripts\hy-script.py", line 9, in <module> load_entry_point('hy==0.11.0', 'console_scripts', 'hy')() File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 341, in hy_main sys.exit(cmdline_handler("hy", sys.argv)) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 336, in cmdline_handler return run_repl(spy=options.spy) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 234, in run_repl os=platform.system() File "C:\Python34\Lib\code.py", line 234, in interact more = self.push(line) File "C:\Python34\Lib\code.py", line 256, in push more = self.runsource(source, self.filename) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 93, in runsource tokens = tokenize(source) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\lex\__init__.py", line 33, in tokenize return parser.parse(lexer.lex(buf)) File "C:\Python34\Scripts\env-hy\lib\site-packages\rply\parser.py", line 23, in parse t, symstack, statestack, state File "C:\Python34\Scripts\env-hy\lib\site-packages\rply\parser.py", line 80, in _reduce_production value = p.func(targ) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\lex\parser.py", line 69, in wrapped ret = fun(p) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\lex\parser.py", line 214, in t_set return HySet(p[1]) File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\models\set.py", line 31, in __init__ items = sorted(items) TypeError: unorderable types: HyExpression() < HyKeyword() (env-hy) C:\Users\ME\Code> ``` That is NOT supposed to happen. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hy/models/set.py` Content: ``` 1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a 4 # copy of this software and associated documentation files (the "Software"), 5 # to deal in the Software without restriction, including without limitation 6 # the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 # and/or sell copies of the Software, and to permit persons to whom the 8 # Software is furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 
20 21 from hy.models.list import HyList 22 from functools import reduce 23 24 25 class HySet(HyList): 26 """ 27 Hy set (actually a list that pretends to be a set) 28 """ 29 30 def __init__(self, items): 31 items = sorted(items) 32 items = list(reduce(lambda r, v: v in r and r or r+[v], items, [])) 33 super(HySet, self).__init__(items) 34 35 def __repr__(self): 36 return "#{%s}" % (" ".join([repr(x) for x in self])) 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hy/models/set.py b/hy/models/set.py --- a/hy/models/set.py +++ b/hy/models/set.py @@ -18,19 +18,16 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. +from hy.models import _wrappers, wrap_value from hy.models.list import HyList -from functools import reduce class HySet(HyList): """ - Hy set (actually a list that pretends to be a set) + Hy set (just a representation of a set) """ - def __init__(self, items): - items = sorted(items) - items = list(reduce(lambda r, v: v in r and r or r+[v], items, [])) - super(HySet, self).__init__(items) - def __repr__(self): return "#{%s}" % (" ".join([repr(x) for x in self])) + +_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)
{"golden_diff": "diff --git a/hy/models/set.py b/hy/models/set.py\n--- a/hy/models/set.py\n+++ b/hy/models/set.py\n@@ -18,19 +18,16 @@\n # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n # DEALINGS IN THE SOFTWARE.\n \n+from hy.models import _wrappers, wrap_value\n from hy.models.list import HyList\n-from functools import reduce\n \n \n class HySet(HyList):\n \"\"\"\n- Hy set (actually a list that pretends to be a set)\n+ Hy set (just a representation of a set)\n \"\"\"\n \n- def __init__(self, items):\n- items = sorted(items)\n- items = list(reduce(lambda r, v: v in r and r or r+[v], items, []))\n- super(HySet, self).__init__(items)\n-\n def __repr__(self):\n return \"#{%s}\" % (\" \".join([repr(x) for x in self]))\n+\n+_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)\n", "issue": "Hy sets are really broken.\n``` Hy\n(env-hy) C:\\Users\\ME\\Code>hy\nhy 0.11.0 using CPython(v3.4.3:9b73f1c3e601) 3.4.3 on Windows\n=> #{:a 'a}\nTraceback (most recent call last):\n File \"C:\\Python34\\Scripts\\env-hy\\Scripts\\hy-script.py\", line 9, in <module>\n load_entry_point('hy==0.11.0', 'console_scripts', 'hy')()\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 341, in hy_main\n sys.exit(cmdline_handler(\"hy\", sys.argv))\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 336, in cmdline_handler\n return run_repl(spy=options.spy)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 234, in run_repl\n os=platform.system()\n File \"C:\\Python34\\Lib\\code.py\", line 234, in interact\n more = self.push(line)\n File \"C:\\Python34\\Lib\\code.py\", line 256, in push\n more = self.runsource(source, self.filename)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 93, in runsource\n tokens = tokenize(source)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\lex\\__init__.py\", line 33, in tokenize\n return parser.parse(lexer.lex(buf))\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\rply\\parser.py\", line 23, in parse\n t, symstack, statestack, state\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\rply\\parser.py\", line 80, in _reduce_production\n value = p.func(targ)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\lex\\parser.py\", line 69, in wrapped\n ret = fun(p)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\lex\\parser.py\", line 214, in t_set\n return HySet(p[1])\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\models\\set.py\", line 31, in __init__\n items = sorted(items)\nTypeError: unorderable types: HyExpression() < HyKeyword()\n\n(env-hy) C:\\Users\\ME\\Code>\n```\n\nThat is NOT supposed to happen.\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.list import HyList\nfrom functools import reduce\n\n\nclass HySet(HyList):\n \"\"\"\n Hy set (actually a list that pretends to be a set)\n \"\"\"\n\n def __init__(self, items):\n items = sorted(items)\n items = list(reduce(lambda r, v: v in r and r or r+[v], items, []))\n super(HySet, self).__init__(items)\n\n def __repr__(self):\n return \"#{%s}\" % (\" \".join([repr(x) for x in self]))\n", "path": "hy/models/set.py"}], "after_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models import _wrappers, wrap_value\nfrom hy.models.list import HyList\n\n\nclass HySet(HyList):\n \"\"\"\n Hy set (just a representation of a set)\n \"\"\"\n\n def __repr__(self):\n return \"#{%s}\" % (\" \".join([repr(x) for x in self]))\n\n_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)\n", "path": "hy/models/set.py"}]}
1,352
242
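For context on the hy record above: Python 3 removed cross-type ordering, so `sorted()` over mixed Hy model objects (e.g. `HyExpression` vs. `HyKeyword`) raises `TypeError`; the merged patch therefore drops both the sort and the list-based de-duplication from `HySet`. A minimal reproduction of the failure, plus an order-preserving de-duplication that relies only on `==` (illustrative, not the merged code):

```python
# Cross-type ordering is gone in Python 3, so sorting heterogeneous
# items fails exactly like the HyExpression/HyKeyword case above.
try:
    sorted([{"a"}, "kw"])  # set vs. str: no ordering defined between them
except TypeError as exc:
    print("unorderable:", exc)


def dedup(items):
    """Order-preserving de-duplication that uses == only, never <."""
    seen = []
    for item in items:
        if item not in seen:
            seen.append(item)
    return seen


print(dedup(["a", "kw", "a"]))  # -> ['a', 'kw']
```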
gh_patches_debug_28579
rasdani/github-patches
git_diff
davanstrien__flyswot-115
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- update default for ensure_model to get latest model --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/flyswot/inference.py` Content: ``` 1 """Inference functionality""" 2 import csv 3 import mimetypes 4 import time 5 from abc import ABC 6 from abc import abstractmethod 7 from dataclasses import asdict 8 from dataclasses import dataclass 9 from datetime import datetime 10 from datetime import timedelta 11 from pathlib import Path 12 from typing import Iterable 13 from typing import Iterator 14 from typing import List 15 from typing import Union 16 17 import numpy as np 18 import onnxruntime as rt # type: ignore 19 import typer 20 from PIL import Image # type: ignore 21 from rich.table import Table 22 from toolz import itertoolz # type: ignore 23 24 from flyswot import core 25 from flyswot import models 26 from flyswot.console import console 27 28 app = typer.Typer() 29 30 31 @dataclass() 32 class ImagePredictionItem: 33 """Prediction for an image. 34 35 Attributes: 36 path: The Path to the image 37 predicted_label: The predicted label i.e. the argmax value for the prediction tensor 38 condidence: The confidence for `predicted_label` i.e. the max value for prediction tensor 39 """ 40 41 path: Path 42 predicted_label: str 43 confidence: float 44 45 def __post_init__(self) -> Union[Path, None]: 46 """attempt to get absolute path""" 47 try: 48 self.path: Path = self.path.absolute() 49 except AttributeError: 50 pass 51 52 53 @dataclass 54 class PredictionBatch: 55 """Container for ImagePredictionItems""" 56 57 batch: List[ImagePredictionItem] 58 59 def __post_init__(self): 60 """Returns a list of all predicted labels in batch""" 61 self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch) 62 63 64 image_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith("image/")} 65 66 67 @app.command() 68 def predict_image( 69 image: Path = typer.Argument(..., readable=True, resolve_path=True) 70 ) -> None: 71 """Predict a single image""" 72 pass # pragma: no cover 73 74 75 @app.command(name="directory") 76 def predict_directory( 77 directory: Path = typer.Argument( 78 ..., 79 readable=True, 80 resolve_path=True, 81 help="Directory to start searching for images from", 82 ), 83 csv_save_dir: Path = typer.Argument( 84 ..., 85 writable=True, 86 resolve_path=True, 87 help="Directory used to store the csv report", 88 ), 89 pattern: str = typer.Option("fse", help="Pattern used to filter image filenames"), 90 bs: int = typer.Option(16, help="Batch Size"), 91 image_format: str = typer.Option( 92 ".tif", 93 help="Image format for flyswot to use for predictions", 94 ), 95 ): 96 """Predicts against all images stored under DIRECTORY which match PATTERN in the filename. 97 98 By default searches for filenames containing 'fse'. 
99 100 Creates a CSV report saved to `csv_save_dir` 101 """ 102 start_time = time.perf_counter() 103 model_dir = models.ensure_model_dir() 104 # TODO add load learner function that can be passed a model name 105 model_parts = models.ensure_model(model_dir) 106 model = model_parts.model 107 vocab = models.load_vocab(model_parts.vocab) 108 onnxinference = OnnxInferenceSession(model, vocab) 109 files = core.get_image_files_from_pattern(directory, pattern, image_format) 110 files = list(files) 111 typer.echo(f"Found {len(files)} files matching {pattern} in {directory}") 112 csv_fname = create_csv_fname(csv_save_dir) 113 create_csv_header(csv_fname) 114 with typer.progressbar(length=len(files)) as progress: 115 all_preds = [] 116 predictions = [] 117 for batch in itertoolz.partition_all(bs, files): 118 batch_predictions = onnxinference.predict_batch(batch, bs) 119 all_preds.append(batch_predictions.batch_labels) 120 predictions.append(batch_predictions) 121 progress.update(len(batch)) 122 write_batch_preds_to_csv(csv_fname, batch_predictions) 123 all_preds = list(itertoolz.concat(all_preds)) 124 typer.echo(f"CSV report stored in {csv_fname}") 125 delta = timedelta(seconds=time.perf_counter() - start_time) 126 typer.echo(f"Time taken to run: {str(delta)}") 127 print_table(all_preds) 128 129 130 def print_table(decoded) -> None: 131 """Prints table summary of predicted labels""" 132 table = Table(show_header=True, title="Prediction summary") 133 table.add_column( 134 "Class", 135 ) 136 table.add_column("Count") 137 table.add_column("Percentage") 138 total = len(decoded) 139 frequencies = itertoolz.frequencies(decoded) 140 for is_last_element, var in core.signal_last(frequencies.items()): 141 key, value = var 142 count = value 143 percentage = round((count / total) * 100, 2) 144 if is_last_element: 145 table.add_row(key, str(count), f"{percentage}", end_section=True) 146 table.add_row("Total", str(total), "") 147 else: 148 table.add_row(key, str(count), f"{percentage}") 149 console.print(table) 150 151 152 def create_csv_fname(csv_directory: Path) -> Path: 153 """Creates a csv filename""" 154 date_now = datetime.now() 155 date_now = date_now.strftime("%Y_%m_%d_%H_%M") 156 fname = Path(date_now + ".csv") 157 return Path(csv_directory / fname) 158 159 160 def create_csv_header(csv_path: Path) -> None: 161 """Creates a header for csv `csv_path`""" 162 with open(csv_path, mode="w", newline="") as csv_file: 163 field_names = ["path", "directory", "predicted_label", "confidence"] 164 writer = csv.DictWriter(csv_file, fieldnames=field_names) 165 writer.writeheader() 166 167 168 def write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None: 169 """Appends `predictions` batch to `csv_path`""" 170 with open(csv_fpath, mode="a", newline="") as csv_file: 171 field_names = ["path", "directory", "predicted_label", "confidence"] 172 writer = csv.DictWriter(csv_file, fieldnames=field_names) 173 for pred in predictions.batch: 174 row = asdict(pred) 175 row["directory"] = pred.path.parent 176 writer.writerow(row) 177 178 179 class InferenceSession(ABC): 180 """Abstract class for inference sessions""" 181 182 @abstractmethod 183 def __init__(self, model: Path, vocab: List): 184 """Inference Sessions should init from a model file and vocab""" 185 self.model = model 186 self.vocab = vocab 187 188 @abstractmethod 189 def predict_image(self, image: Path): 190 """Predict a single image""" 191 pass 192 193 @abstractmethod 194 def predict_batch(self, model: Path, batch: Iterable[Path], bs: int): 195 
"""Predict a batch""" 196 pass 197 198 199 def softmax(x): 200 """return softmax of `x`""" 201 x = x.reshape(-1) 202 e_x = np.exp(x - np.max(x)) 203 return e_x / e_x.sum(axis=0) 204 205 206 # class FastaiInferenceModel(InferenceSession): 207 # def __init__(self, model): 208 # self.model = model 209 # self.learn = load_learner(model) 210 211 # def predict_image(self, image: Path) -> Any: 212 # return self.learn.predict(image) 213 214 # def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch: 215 # test_dl = self.learn.dls.test_dl(batch, bs=bs) 216 # vocab = dict(enumerate(self.learn.dls.vocab)) 217 # with self.learn.no_bar(): 218 # fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True) 219 # prediction_tensors: Iterable[Any] = fastai_preds[0] 220 # prediction_items = [] 221 # for file, pred in zip(batch, prediction_tensors): 222 # arg_max = int(np.array(pred).argmax()) 223 # predicted_label = vocab[int(arg_max)] 224 # confidence = float(np.array(pred).max()) 225 # prediction_items.append( 226 # ImagePredictionItem(file, predicted_label, confidence) 227 # ) 228 # return PredictionBatch(prediction_items) 229 230 231 class OnnxInferenceSession(InferenceSession): 232 """onnx inference session""" 233 234 def __init__(self, model: Path, vocab: Path): 235 """Create onnx session""" 236 self.model = model 237 self.session = rt.InferenceSession(str(model)) 238 239 self.vocab = vocab 240 self.vocab_mapping = dict(enumerate(self.vocab)) 241 242 def _load_vocab(self, vocab: Path) -> List: 243 with open(vocab, "r") as f: 244 return [item.strip("\n") for item in f.readlines()] 245 246 def predict_image(self, image: Path): 247 """Predict a single image""" 248 img = self._load_image(image) 249 raw_result = self.session.run(["output"], {"image": img}) 250 pred = self._postprocess(raw_result) 251 arg_max = int(np.array(pred).argmax()) 252 predicted_label = self.vocab_mapping[int(arg_max)] 253 confidence = float(np.array(pred).max()) 254 return ImagePredictionItem(image, predicted_label, confidence) 255 256 def _preprocess(self, input_data: np.ndarray) -> np.ndarray: 257 # converts the input data into the float32 input for onnx 258 img_data = input_data.astype("float32") 259 260 # normalize 261 mean_vec = np.array([0.485, 0.456, 0.406]) 262 stddev_vec = np.array([0.229, 0.224, 0.225]) 263 norm_img_data = np.zeros(img_data.shape).astype("float32") 264 for i in range(img_data.shape[0]): 265 norm_img_data[i, :, :] = ( 266 img_data[i, :, :] / 255 - mean_vec[i] 267 ) / stddev_vec[i] 268 269 # add batch channel 270 norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype("float32") 271 return norm_img_data 272 273 def _load_image(self, file: Path) -> np.ndarray: 274 """loads image and carries out preprocessing for inference""" 275 image = Image.open(file, mode="r") 276 image = image.resize((512, 512), Image.BILINEAR) 277 image_data = np.array(image).transpose(2, 0, 1) 278 return self._preprocess(image_data) 279 280 def _postprocess(self, result: List): 281 """process results from onnx session""" 282 return softmax(np.array(result)).tolist() 283 284 def predict_batch(self, batch: Iterable[Path], bs: int): 285 """predicts a batch of images""" 286 prediction_items = [self.predict_image(file) for file in batch] 287 return PredictionBatch(prediction_items) 288 289 290 if __name__ == "__main__": 291 app() 292 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/flyswot/inference.py b/src/flyswot/inference.py --- a/src/flyswot/inference.py +++ b/src/flyswot/inference.py @@ -19,7 +19,7 @@ import typer from PIL import Image # type: ignore from rich.table import Table -from toolz import itertoolz # type: ignore +from toolz import itertoolz from flyswot import core from flyswot import models @@ -89,9 +89,9 @@ pattern: str = typer.Option("fse", help="Pattern used to filter image filenames"), bs: int = typer.Option(16, help="Batch Size"), image_format: str = typer.Option( - ".tif", - help="Image format for flyswot to use for predictions", + ".tif", help="Image format for flyswot to use for predictions" ), + check_latest: bool = typer.Option(True, help="Use latest available model"), ): """Predicts against all images stored under DIRECTORY which match PATTERN in the filename. @@ -102,7 +102,7 @@ start_time = time.perf_counter() model_dir = models.ensure_model_dir() # TODO add load learner function that can be passed a model name - model_parts = models.ensure_model(model_dir) + model_parts = models.ensure_model(model_dir, check_latest) model = model_parts.model vocab = models.load_vocab(model_parts.vocab) onnxinference = OnnxInferenceSession(model, vocab)
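For context on the diff above: it threads a new `check_latest` flag from the CLI down into `models.ensure_model`, defaulting to `True` so the latest available model is used. A self-contained sketch of the same typer boolean-option pattern (hypothetical command body; not flyswot's actual module):

```python
# Sketch of the pattern introduced by the diff: a bool typer.Option
# with default True exposes --check-latest / --no-check-latest.
import typer

app = typer.Typer()


@app.command()
def predict(
    check_latest: bool = typer.Option(True, help="Use latest available model"),
) -> None:
    # Stand-in for models.ensure_model(model_dir, check_latest): the real
    # call would refresh the model when check_latest is True.
    typer.echo(f"check_latest={check_latest}")


if __name__ == "__main__":
    app()
```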
{"golden_diff": "diff --git a/src/flyswot/inference.py b/src/flyswot/inference.py\n--- a/src/flyswot/inference.py\n+++ b/src/flyswot/inference.py\n@@ -19,7 +19,7 @@\n import typer\n from PIL import Image # type: ignore\n from rich.table import Table\n-from toolz import itertoolz # type: ignore\n+from toolz import itertoolz\n \n from flyswot import core\n from flyswot import models\n@@ -89,9 +89,9 @@\n pattern: str = typer.Option(\"fse\", help=\"Pattern used to filter image filenames\"),\n bs: int = typer.Option(16, help=\"Batch Size\"),\n image_format: str = typer.Option(\n- \".tif\",\n- help=\"Image format for flyswot to use for predictions\",\n+ \".tif\", help=\"Image format for flyswot to use for predictions\"\n ),\n+ check_latest: bool = typer.Option(True, help=\"Use latest available model\"),\n ):\n \"\"\"Predicts against all images stored under DIRECTORY which match PATTERN in the filename.\n \n@@ -102,7 +102,7 @@\n start_time = time.perf_counter()\n model_dir = models.ensure_model_dir()\n # TODO add load learner function that can be passed a model name\n- model_parts = models.ensure_model(model_dir)\n+ model_parts = models.ensure_model(model_dir, check_latest)\n model = model_parts.model\n vocab = models.load_vocab(model_parts.vocab)\n onnxinference = OnnxInferenceSession(model, vocab)\n", "issue": "update default for ensure_model to get latest model \n\n", "before_files": [{"content": "\"\"\"Inference functionality\"\"\"\nimport csv\nimport mimetypes\nimport time\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom dataclasses import asdict\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Union\n\nimport numpy as np\nimport onnxruntime as rt # type: ignore\nimport typer\nfrom PIL import Image # type: ignore\nfrom rich.table import Table\nfrom toolz import itertoolz # type: ignore\n\nfrom flyswot import core\nfrom flyswot import models\nfrom flyswot.console import console\n\napp = typer.Typer()\n\n\n@dataclass()\nclass ImagePredictionItem:\n \"\"\"Prediction for an image.\n\n Attributes:\n path: The Path to the image\n predicted_label: The predicted label i.e. the argmax value for the prediction tensor\n condidence: The confidence for `predicted_label` i.e. 
the max value for prediction tensor\n \"\"\"\n\n path: Path\n predicted_label: str\n confidence: float\n\n def __post_init__(self) -> Union[Path, None]:\n \"\"\"attempt to get absolute path\"\"\"\n try:\n self.path: Path = self.path.absolute()\n except AttributeError:\n pass\n\n\n@dataclass\nclass PredictionBatch:\n \"\"\"Container for ImagePredictionItems\"\"\"\n\n batch: List[ImagePredictionItem]\n\n def __post_init__(self):\n \"\"\"Returns a list of all predicted labels in batch\"\"\"\n self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch)\n\n\nimage_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith(\"image/\")}\n\n\[email protected]()\ndef predict_image(\n image: Path = typer.Argument(..., readable=True, resolve_path=True)\n) -> None:\n \"\"\"Predict a single image\"\"\"\n pass # pragma: no cover\n\n\[email protected](name=\"directory\")\ndef predict_directory(\n directory: Path = typer.Argument(\n ...,\n readable=True,\n resolve_path=True,\n help=\"Directory to start searching for images from\",\n ),\n csv_save_dir: Path = typer.Argument(\n ...,\n writable=True,\n resolve_path=True,\n help=\"Directory used to store the csv report\",\n ),\n pattern: str = typer.Option(\"fse\", help=\"Pattern used to filter image filenames\"),\n bs: int = typer.Option(16, help=\"Batch Size\"),\n image_format: str = typer.Option(\n \".tif\",\n help=\"Image format for flyswot to use for predictions\",\n ),\n):\n \"\"\"Predicts against all images stored under DIRECTORY which match PATTERN in the filename.\n\n By default searches for filenames containing 'fse'.\n\n Creates a CSV report saved to `csv_save_dir`\n \"\"\"\n start_time = time.perf_counter()\n model_dir = models.ensure_model_dir()\n # TODO add load learner function that can be passed a model name\n model_parts = models.ensure_model(model_dir)\n model = model_parts.model\n vocab = models.load_vocab(model_parts.vocab)\n onnxinference = OnnxInferenceSession(model, vocab)\n files = core.get_image_files_from_pattern(directory, pattern, image_format)\n files = list(files)\n typer.echo(f\"Found {len(files)} files matching {pattern} in {directory}\")\n csv_fname = create_csv_fname(csv_save_dir)\n create_csv_header(csv_fname)\n with typer.progressbar(length=len(files)) as progress:\n all_preds = []\n predictions = []\n for batch in itertoolz.partition_all(bs, files):\n batch_predictions = onnxinference.predict_batch(batch, bs)\n all_preds.append(batch_predictions.batch_labels)\n predictions.append(batch_predictions)\n progress.update(len(batch))\n write_batch_preds_to_csv(csv_fname, batch_predictions)\n all_preds = list(itertoolz.concat(all_preds))\n typer.echo(f\"CSV report stored in {csv_fname}\")\n delta = timedelta(seconds=time.perf_counter() - start_time)\n typer.echo(f\"Time taken to run: {str(delta)}\")\n print_table(all_preds)\n\n\ndef print_table(decoded) -> None:\n \"\"\"Prints table summary of predicted labels\"\"\"\n table = Table(show_header=True, title=\"Prediction summary\")\n table.add_column(\n \"Class\",\n )\n table.add_column(\"Count\")\n table.add_column(\"Percentage\")\n total = len(decoded)\n frequencies = itertoolz.frequencies(decoded)\n for is_last_element, var in core.signal_last(frequencies.items()):\n key, value = var\n count = value\n percentage = round((count / total) * 100, 2)\n if is_last_element:\n table.add_row(key, str(count), f\"{percentage}\", end_section=True)\n table.add_row(\"Total\", str(total), \"\")\n else:\n table.add_row(key, str(count), f\"{percentage}\")\n 
console.print(table)\n\n\ndef create_csv_fname(csv_directory: Path) -> Path:\n \"\"\"Creates a csv filename\"\"\"\n date_now = datetime.now()\n date_now = date_now.strftime(\"%Y_%m_%d_%H_%M\")\n fname = Path(date_now + \".csv\")\n return Path(csv_directory / fname)\n\n\ndef create_csv_header(csv_path: Path) -> None:\n \"\"\"Creates a header for csv `csv_path`\"\"\"\n with open(csv_path, mode=\"w\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n\n\ndef write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:\n \"\"\"Appends `predictions` batch to `csv_path`\"\"\"\n with open(csv_fpath, mode=\"a\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n for pred in predictions.batch:\n row = asdict(pred)\n row[\"directory\"] = pred.path.parent\n writer.writerow(row)\n\n\nclass InferenceSession(ABC):\n \"\"\"Abstract class for inference sessions\"\"\"\n\n @abstractmethod\n def __init__(self, model: Path, vocab: List):\n \"\"\"Inference Sessions should init from a model file and vocab\"\"\"\n self.model = model\n self.vocab = vocab\n\n @abstractmethod\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n pass\n\n @abstractmethod\n def predict_batch(self, model: Path, batch: Iterable[Path], bs: int):\n \"\"\"Predict a batch\"\"\"\n pass\n\n\ndef softmax(x):\n \"\"\"return softmax of `x`\"\"\"\n x = x.reshape(-1)\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\n# class FastaiInferenceModel(InferenceSession):\n# def __init__(self, model):\n# self.model = model\n# self.learn = load_learner(model)\n\n# def predict_image(self, image: Path) -> Any:\n# return self.learn.predict(image)\n\n# def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch:\n# test_dl = self.learn.dls.test_dl(batch, bs=bs)\n# vocab = dict(enumerate(self.learn.dls.vocab))\n# with self.learn.no_bar():\n# fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True)\n# prediction_tensors: Iterable[Any] = fastai_preds[0]\n# prediction_items = []\n# for file, pred in zip(batch, prediction_tensors):\n# arg_max = int(np.array(pred).argmax())\n# predicted_label = vocab[int(arg_max)]\n# confidence = float(np.array(pred).max())\n# prediction_items.append(\n# ImagePredictionItem(file, predicted_label, confidence)\n# )\n# return PredictionBatch(prediction_items)\n\n\nclass OnnxInferenceSession(InferenceSession):\n \"\"\"onnx inference session\"\"\"\n\n def __init__(self, model: Path, vocab: Path):\n \"\"\"Create onnx session\"\"\"\n self.model = model\n self.session = rt.InferenceSession(str(model))\n\n self.vocab = vocab\n self.vocab_mapping = dict(enumerate(self.vocab))\n\n def _load_vocab(self, vocab: Path) -> List:\n with open(vocab, \"r\") as f:\n return [item.strip(\"\\n\") for item in f.readlines()]\n\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n img = self._load_image(image)\n raw_result = self.session.run([\"output\"], {\"image\": img})\n pred = self._postprocess(raw_result)\n arg_max = int(np.array(pred).argmax())\n predicted_label = self.vocab_mapping[int(arg_max)]\n confidence = float(np.array(pred).max())\n return ImagePredictionItem(image, predicted_label, confidence)\n\n def _preprocess(self, input_data: np.ndarray) -> np.ndarray:\n # converts the 
input data into the float32 input for onnx\n img_data = input_data.astype(\"float32\")\n\n # normalize\n mean_vec = np.array([0.485, 0.456, 0.406])\n stddev_vec = np.array([0.229, 0.224, 0.225])\n norm_img_data = np.zeros(img_data.shape).astype(\"float32\")\n for i in range(img_data.shape[0]):\n norm_img_data[i, :, :] = (\n img_data[i, :, :] / 255 - mean_vec[i]\n ) / stddev_vec[i]\n\n # add batch channel\n norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype(\"float32\")\n return norm_img_data\n\n def _load_image(self, file: Path) -> np.ndarray:\n \"\"\"loads image and carries out preprocessing for inference\"\"\"\n image = Image.open(file, mode=\"r\")\n image = image.resize((512, 512), Image.BILINEAR)\n image_data = np.array(image).transpose(2, 0, 1)\n return self._preprocess(image_data)\n\n def _postprocess(self, result: List):\n \"\"\"process results from onnx session\"\"\"\n return softmax(np.array(result)).tolist()\n\n def predict_batch(self, batch: Iterable[Path], bs: int):\n \"\"\"predicts a batch of images\"\"\"\n prediction_items = [self.predict_image(file) for file in batch]\n return PredictionBatch(prediction_items)\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "src/flyswot/inference.py"}], "after_files": [{"content": "\"\"\"Inference functionality\"\"\"\nimport csv\nimport mimetypes\nimport time\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom dataclasses import asdict\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Union\n\nimport numpy as np\nimport onnxruntime as rt # type: ignore\nimport typer\nfrom PIL import Image # type: ignore\nfrom rich.table import Table\nfrom toolz import itertoolz\n\nfrom flyswot import core\nfrom flyswot import models\nfrom flyswot.console import console\n\napp = typer.Typer()\n\n\n@dataclass()\nclass ImagePredictionItem:\n \"\"\"Prediction for an image.\n\n Attributes:\n path: The Path to the image\n predicted_label: The predicted label i.e. the argmax value for the prediction tensor\n condidence: The confidence for `predicted_label` i.e. 
the max value for prediction tensor\n \"\"\"\n\n path: Path\n predicted_label: str\n confidence: float\n\n def __post_init__(self) -> Union[Path, None]:\n \"\"\"attempt to get absolute path\"\"\"\n try:\n self.path: Path = self.path.absolute()\n except AttributeError:\n pass\n\n\n@dataclass\nclass PredictionBatch:\n \"\"\"Container for ImagePredictionItems\"\"\"\n\n batch: List[ImagePredictionItem]\n\n def __post_init__(self):\n \"\"\"Returns a list of all predicted labels in batch\"\"\"\n self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch)\n\n\nimage_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith(\"image/\")}\n\n\[email protected]()\ndef predict_image(\n image: Path = typer.Argument(..., readable=True, resolve_path=True)\n) -> None:\n \"\"\"Predict a single image\"\"\"\n pass # pragma: no cover\n\n\[email protected](name=\"directory\")\ndef predict_directory(\n directory: Path = typer.Argument(\n ...,\n readable=True,\n resolve_path=True,\n help=\"Directory to start searching for images from\",\n ),\n csv_save_dir: Path = typer.Argument(\n ...,\n writable=True,\n resolve_path=True,\n help=\"Directory used to store the csv report\",\n ),\n pattern: str = typer.Option(\"fse\", help=\"Pattern used to filter image filenames\"),\n bs: int = typer.Option(16, help=\"Batch Size\"),\n image_format: str = typer.Option(\n \".tif\", help=\"Image format for flyswot to use for predictions\"\n ),\n check_latest: bool = typer.Option(True, help=\"Use latest available model\"),\n):\n \"\"\"Predicts against all images stored under DIRECTORY which match PATTERN in the filename.\n\n By default searches for filenames containing 'fse'.\n\n Creates a CSV report saved to `csv_save_dir`\n \"\"\"\n start_time = time.perf_counter()\n model_dir = models.ensure_model_dir()\n # TODO add load learner function that can be passed a model name\n model_parts = models.ensure_model(model_dir, check_latest)\n model = model_parts.model\n vocab = models.load_vocab(model_parts.vocab)\n onnxinference = OnnxInferenceSession(model, vocab)\n files = core.get_image_files_from_pattern(directory, pattern, image_format)\n files = list(files)\n typer.echo(f\"Found {len(files)} files matching {pattern} in {directory}\")\n csv_fname = create_csv_fname(csv_save_dir)\n create_csv_header(csv_fname)\n with typer.progressbar(length=len(files)) as progress:\n all_preds = []\n predictions = []\n for batch in itertoolz.partition_all(bs, files):\n batch_predictions = onnxinference.predict_batch(batch, bs)\n all_preds.append(batch_predictions.batch_labels)\n predictions.append(batch_predictions)\n progress.update(len(batch))\n write_batch_preds_to_csv(csv_fname, batch_predictions)\n all_preds = list(itertoolz.concat(all_preds))\n typer.echo(f\"CSV report stored in {csv_fname}\")\n delta = timedelta(seconds=time.perf_counter() - start_time)\n typer.echo(f\"Time taken to run: {str(delta)}\")\n print_table(all_preds)\n\n\ndef print_table(decoded) -> None:\n \"\"\"Prints table summary of predicted labels\"\"\"\n table = Table(show_header=True, title=\"Prediction summary\")\n table.add_column(\n \"Class\",\n )\n table.add_column(\"Count\")\n table.add_column(\"Percentage\")\n total = len(decoded)\n frequencies = itertoolz.frequencies(decoded)\n for is_last_element, var in core.signal_last(frequencies.items()):\n key, value = var\n count = value\n percentage = round((count / total) * 100, 2)\n if is_last_element:\n table.add_row(key, str(count), f\"{percentage}\", end_section=True)\n 
table.add_row(\"Total\", str(total), \"\")\n else:\n table.add_row(key, str(count), f\"{percentage}\")\n console.print(table)\n\n\ndef create_csv_fname(csv_directory: Path) -> Path:\n \"\"\"Creates a csv filename\"\"\"\n date_now = datetime.now()\n date_now = date_now.strftime(\"%Y_%m_%d_%H_%M\")\n fname = Path(date_now + \".csv\")\n return Path(csv_directory / fname)\n\n\ndef create_csv_header(csv_path: Path) -> None:\n \"\"\"Creates a header for csv `csv_path`\"\"\"\n with open(csv_path, mode=\"w\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n\n\ndef write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:\n \"\"\"Appends `predictions` batch to `csv_path`\"\"\"\n with open(csv_fpath, mode=\"a\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n for pred in predictions.batch:\n row = asdict(pred)\n row[\"directory\"] = pred.path.parent\n writer.writerow(row)\n\n\nclass InferenceSession(ABC):\n \"\"\"Abstract class for inference sessions\"\"\"\n\n @abstractmethod\n def __init__(self, model: Path, vocab: List):\n \"\"\"Inference Sessions should init from a model file and vocab\"\"\"\n self.model = model\n self.vocab = vocab\n\n @abstractmethod\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n pass\n\n @abstractmethod\n def predict_batch(self, model: Path, batch: Iterable[Path], bs: int):\n \"\"\"Predict a batch\"\"\"\n pass\n\n\ndef softmax(x):\n \"\"\"return softmax of `x`\"\"\"\n x = x.reshape(-1)\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\n# class FastaiInferenceModel(InferenceSession):\n# def __init__(self, model):\n# self.model = model\n# self.learn = load_learner(model)\n\n# def predict_image(self, image: Path) -> Any:\n# return self.learn.predict(image)\n\n# def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch:\n# test_dl = self.learn.dls.test_dl(batch, bs=bs)\n# vocab = dict(enumerate(self.learn.dls.vocab))\n# with self.learn.no_bar():\n# fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True)\n# prediction_tensors: Iterable[Any] = fastai_preds[0]\n# prediction_items = []\n# for file, pred in zip(batch, prediction_tensors):\n# arg_max = int(np.array(pred).argmax())\n# predicted_label = vocab[int(arg_max)]\n# confidence = float(np.array(pred).max())\n# prediction_items.append(\n# ImagePredictionItem(file, predicted_label, confidence)\n# )\n# return PredictionBatch(prediction_items)\n\n\nclass OnnxInferenceSession(InferenceSession):\n \"\"\"onnx inference session\"\"\"\n\n def __init__(self, model: Path, vocab: Path):\n \"\"\"Create onnx session\"\"\"\n self.model = model\n self.session = rt.InferenceSession(str(model))\n\n self.vocab = vocab\n self.vocab_mapping = dict(enumerate(self.vocab))\n\n def _load_vocab(self, vocab: Path) -> List:\n with open(vocab, \"r\") as f:\n return [item.strip(\"\\n\") for item in f.readlines()]\n\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n img = self._load_image(image)\n raw_result = self.session.run([\"output\"], {\"image\": img})\n pred = self._postprocess(raw_result)\n arg_max = int(np.array(pred).argmax())\n predicted_label = self.vocab_mapping[int(arg_max)]\n confidence = float(np.array(pred).max())\n return ImagePredictionItem(image, 
predicted_label, confidence)\n\n def _preprocess(self, input_data: np.ndarray) -> np.ndarray:\n # converts the input data into the float32 input for onnx\n img_data = input_data.astype(\"float32\")\n\n # normalize\n mean_vec = np.array([0.485, 0.456, 0.406])\n stddev_vec = np.array([0.229, 0.224, 0.225])\n norm_img_data = np.zeros(img_data.shape).astype(\"float32\")\n for i in range(img_data.shape[0]):\n norm_img_data[i, :, :] = (\n img_data[i, :, :] / 255 - mean_vec[i]\n ) / stddev_vec[i]\n\n # add batch channel\n norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype(\"float32\")\n return norm_img_data\n\n def _load_image(self, file: Path) -> np.ndarray:\n \"\"\"loads image and carries out preprocessing for inference\"\"\"\n image = Image.open(file, mode=\"r\")\n image = image.resize((512, 512), Image.BILINEAR)\n image_data = np.array(image).transpose(2, 0, 1)\n return self._preprocess(image_data)\n\n def _postprocess(self, result: List):\n \"\"\"process results from onnx session\"\"\"\n return softmax(np.array(result)).tolist()\n\n def predict_batch(self, batch: Iterable[Path], bs: int):\n \"\"\"predicts a batch of images\"\"\"\n prediction_items = [self.predict_image(file) for file in batch]\n return PredictionBatch(prediction_items)\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "src/flyswot/inference.py"}]}
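The flyswot diff in the record above makes two changes: a formatting tidy-up of the `.tif` option and a new `check_latest` boolean option that is threaded through to `models.ensure_model`, so the CLI refreshes to the latest model by default. Below is a minimal, self-contained sketch of that typer pattern; `ensure_model` here is a local stand-in, not flyswot's real implementation.

```python
import typer

app = typer.Typer()


def ensure_model(model_dir: str, check_latest: bool) -> str:
    # Stand-in for flyswot's models.ensure_model(model_dir, check_latest).
    return f"model from {model_dir!r} (check_latest={check_latest})"


@app.command()
def predict(
    directory: str = typer.Argument(".", help="Directory of images"),
    check_latest: bool = typer.Option(True, help="Use latest available model"),
):
    """Run predictions, refreshing to the latest model unless disabled."""
    typer.echo(ensure_model(directory, check_latest))


if __name__ == "__main__":
    app()
```

Typer derives paired `--check-latest/--no-check-latest` flags from the boolean, so the patched default of `True` can still be overridden from the command line.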
3,398
357
gh_patches_debug_1429
rasdani/github-patches
git_diff
google__turbinia-785
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- import TurbiniaException to partitions.py ``` Traceback (most recent call last): File "PATH/v2/lib/python3.8/site-packages/turbinia/workers/__init__.py", line 916, in run_wrapper self.result = self.run(evidence, self.result) File "PATH/v2/lib/python3.8/site-packages/turbinia/workers/partitions.py", line 144, in run path_specs = partitions.Enumerate(evidence) File "/PATH/v2/lib/python3.8/site-packages/turbinia/processors/partitions.py", line 49, in Enumerate raise TurbiniaException( NameError: name 'TurbiniaException' is not defined 2021-03-05 18:45:56 [ERROR] PartitionEnumerationTask Task failed with exception: [name 'TurbiniaException' is not defined] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `turbinia/processors/partitions.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2021 Google LLC 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # https://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Evidence processor to enumerate partitions.""" 16 17 import logging 18 19 from dfvfs.helpers import volume_scanner 20 from dfvfs.lib import definitions as dfvfs_definitions 21 from dfvfs.lib import errors as dfvfs_errors 22 23 from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator 24 25 log = logging.getLogger('turbinia') 26 27 28 def Enumerate(evidence): 29 """Uses dfVFS to enumerate partitions in a disk / image. 30 31 Args: 32 evidence: Evidence object to be scanned. 33 34 Raises: 35 TurbiniaException if source evidence can't be scanned. 36 37 Returns: 38 list[dfVFS.path_spec]: path specs for identified partitions 39 """ 40 dfvfs_definitions.PREFERRED_GPT_BACK_END = ( 41 dfvfs_definitions.TYPE_INDICATOR_GPT) 42 mediator = UnattendedVolumeScannerMediator() 43 mediator.credentials = evidence.credentials 44 path_specs = [] 45 try: 46 scanner = volume_scanner.VolumeScanner(mediator=mediator) 47 path_specs = scanner.GetBasePathSpecs(evidence.local_path) 48 except dfvfs_errors.ScannerError as e: 49 raise TurbiniaException( 50 'Could not enumerate partitions [{0!s}]: {1!s}'.format( 51 evidence.local_path, e)) 52 53 return path_specs 54 55 56 def GetPartitionEncryptionType(path_spec): 57 """Checks a partition for encryption. 58 59 Args: 60 path_spec (dfVFS.path_spec): Partition path_spec. 61 62 Returns: 63 String representing the type of encryption, or None. 64 """ 65 encryption_type = None 66 if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE: 67 encryption_type = 'BDE' 68 return encryption_type 69 70 71 def GetPathSpecByLocation(path_specs, location): 72 """Finds a path_spec from a list of path_specs for a given location. 73 74 Args: 75 path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner. 76 location (str): dfVFS location to search for. 77 78 Returns: 79 dfVFS.path_spec for the given location or None if not found. 
80 """ 81 for path_spec in path_specs: 82 child_path_spec = path_spec 83 fs_location = getattr(path_spec, 'location', None) 84 while path_spec.HasParent(): 85 type_indicator = path_spec.type_indicator 86 if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, 87 dfvfs_definitions.TYPE_INDICATOR_GPT): 88 if fs_location in ('\\', '/'): 89 fs_location = getattr(path_spec, 'location', None) 90 break 91 path_spec = path_spec.parent 92 if fs_location == location: 93 return child_path_spec 94 return None 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py --- a/turbinia/processors/partitions.py +++ b/turbinia/processors/partitions.py @@ -21,6 +21,7 @@ from dfvfs.lib import errors as dfvfs_errors from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator +from turbinia import TurbiniaException log = logging.getLogger('turbinia')
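The one-line import above is the whole fix: `Enumerate` raises `TurbiniaException` in its `except dfvfs_errors.ScannerError` branch, but that name was never bound in `partitions.py`, so the raise itself failed with `NameError`. A self-contained sketch of the failure shape, using a local stand-in class so it runs without turbinia installed:

```python
class TurbiniaException(Exception):
    """Local stand-in for turbinia.TurbiniaException."""


def enumerate_partitions(path):
    # Shape of partitions.Enumerate: a scanner error is caught and re-raised
    # as TurbiniaException. Before the patch that name was unbound in the
    # module, so this `raise` line itself produced the NameError seen in the
    # traceback; with the import in scope it raises the intended exception.
    try:
        raise RuntimeError("scanner failed")  # stands in for dfvfs ScannerError
    except RuntimeError as e:
        raise TurbiniaException(
            "Could not enumerate partitions [{0!s}]: {1!s}".format(path, e)
        )


try:
    enumerate_partitions("/evidence/disk.img")
except TurbiniaException as exc:
    print(exc)
```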
{"golden_diff": "diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py\n--- a/turbinia/processors/partitions.py\n+++ b/turbinia/processors/partitions.py\n@@ -21,6 +21,7 @@\n from dfvfs.lib import errors as dfvfs_errors\n \n from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\n+from turbinia import TurbiniaException\n \n log = logging.getLogger('turbinia')\n", "issue": "import TurbiniaException to partitions.py\n```\r\nTraceback (most recent call last):\r\n File \"PATH/v2/lib/python3.8/site-packages/turbinia/workers/__init__.py\", line 916, in run_wrapper\r\n self.result = self.run(evidence, self.result)\r\n File \"PATH/v2/lib/python3.8/site-packages/turbinia/workers/partitions.py\", line 144, in run\r\n path_specs = partitions.Enumerate(evidence)\r\n File \"/PATH/v2/lib/python3.8/site-packages/turbinia/processors/partitions.py\", line 49, in Enumerate\r\n raise TurbiniaException(\r\nNameError: name 'TurbiniaException' is not defined\r\n\r\n2021-03-05 18:45:56 [ERROR] PartitionEnumerationTask Task failed with exception: [name 'TurbiniaException' is not defined]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search 
for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\nfrom turbinia import TurbiniaException\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = 
getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}]}
1,357
108
gh_patches_debug_21968
rasdani/github-patches
git_diff
pypa__pip-4429
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- support git refs in @ syntax We'd like to be able to put paths to gerrit reviews in requirements files. Given a gerrit like https://review.openstack.org/#/c/345601/6, the path given for a git pull looks like: git pull https://git.openstack.org/openstack/oslo.db refs/changes/01/345601/6 pip syntax we'd expect would be: ``` .venv/bin/pip install -e git+https://git.openstack.org/openstack/oslo.db@refs/changes/01/345601/6#egg=oslo.db ``` current output: ``` Obtaining oslo.db from git+https://git.openstack.org/openstack/oslo.db@refs/changes/01/345601/6#egg=oslo.db Cloning https://git.openstack.org/openstack/oslo.db (to refs/changes/01/345601/6) to ./.venv/src/oslo.db Could not find a tag or branch 'refs/changes/01/345601/6', assuming commit. error: pathspec 'refs/changes/01/345601/6' did not match any file(s) known to git. Command "git checkout -q refs/changes/01/345601/6" failed with error code 1 in /home/classic/.venv/src/oslo.db ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/vcs/git.py` Content: ``` 1 from __future__ import absolute_import 2 3 import logging 4 import tempfile 5 import os.path 6 7 from pip.compat import samefile 8 from pip.exceptions import BadCommand 9 from pip._vendor.six.moves.urllib import parse as urllib_parse 10 from pip._vendor.six.moves.urllib import request as urllib_request 11 from pip._vendor.packaging.version import parse as parse_version 12 13 from pip.utils import display_path, rmtree 14 from pip.vcs import vcs, VersionControl 15 16 17 urlsplit = urllib_parse.urlsplit 18 urlunsplit = urllib_parse.urlunsplit 19 20 21 logger = logging.getLogger(__name__) 22 23 24 class Git(VersionControl): 25 name = 'git' 26 dirname = '.git' 27 repo_name = 'clone' 28 schemes = ( 29 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', 30 ) 31 32 def __init__(self, url=None, *args, **kwargs): 33 34 # Works around an apparent Git bug 35 # (see http://article.gmane.org/gmane.comp.version-control.git/146500) 36 if url: 37 scheme, netloc, path, query, fragment = urlsplit(url) 38 if scheme.endswith('file'): 39 initial_slashes = path[:-len(path.lstrip('/'))] 40 newpath = ( 41 initial_slashes + 42 urllib_request.url2pathname(path) 43 .replace('\\', '/').lstrip('/') 44 ) 45 url = urlunsplit((scheme, netloc, newpath, query, fragment)) 46 after_plus = scheme.find('+') + 1 47 url = scheme[:after_plus] + urlunsplit( 48 (scheme[after_plus:], netloc, newpath, query, fragment), 49 ) 50 51 super(Git, self).__init__(url, *args, **kwargs) 52 53 def get_git_version(self): 54 VERSION_PFX = 'git version ' 55 version = self.run_command(['version'], show_stdout=False) 56 if version.startswith(VERSION_PFX): 57 version = version[len(VERSION_PFX):].split()[0] 58 else: 59 version = '' 60 # get first 3 positions of the git version becasue 61 # on windows it is x.y.z.windows.t, and this parses as 62 # LegacyVersion which always smaller than a Version. 
63 version = '.'.join(version.split('.')[:3]) 64 return parse_version(version) 65 66 def export(self, location): 67 """Export the Git repository at the url to the destination location""" 68 temp_dir = tempfile.mkdtemp('-export', 'pip-') 69 self.unpack(temp_dir) 70 try: 71 if not location.endswith('/'): 72 location = location + '/' 73 self.run_command( 74 ['checkout-index', '-a', '-f', '--prefix', location], 75 show_stdout=False, cwd=temp_dir) 76 finally: 77 rmtree(temp_dir) 78 79 def check_rev_options(self, rev, dest, rev_options): 80 """Check the revision options before checkout to compensate that tags 81 and branches may need origin/ as a prefix. 82 Returns the SHA1 of the branch or tag if found. 83 """ 84 revisions = self.get_short_refs(dest) 85 86 origin_rev = 'origin/%s' % rev 87 if origin_rev in revisions: 88 # remote branch 89 return [revisions[origin_rev]] 90 elif rev in revisions: 91 # a local tag or branch name 92 return [revisions[rev]] 93 else: 94 logger.warning( 95 "Could not find a tag or branch '%s', assuming commit.", rev, 96 ) 97 return rev_options 98 99 def check_version(self, dest, rev_options): 100 """ 101 Compare the current sha to the ref. ref may be a branch or tag name, 102 but current rev will always point to a sha. This means that a branch 103 or tag will never compare as True. So this ultimately only matches 104 against exact shas. 105 """ 106 return self.get_revision(dest).startswith(rev_options[0]) 107 108 def switch(self, dest, url, rev_options): 109 self.run_command(['config', 'remote.origin.url', url], cwd=dest) 110 self.run_command(['checkout', '-q'] + rev_options, cwd=dest) 111 112 self.update_submodules(dest) 113 114 def update(self, dest, rev_options): 115 # First fetch changes from the default remote 116 if self.get_git_version() >= parse_version('1.9.0'): 117 # fetch tags in addition to everything else 118 self.run_command(['fetch', '-q', '--tags'], cwd=dest) 119 else: 120 self.run_command(['fetch', '-q'], cwd=dest) 121 # Then reset to wanted revision (maybe even origin/master) 122 if rev_options: 123 rev_options = self.check_rev_options( 124 rev_options[0], dest, rev_options, 125 ) 126 self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest) 127 #: update submodules 128 self.update_submodules(dest) 129 130 def obtain(self, dest): 131 url, rev = self.get_url_rev() 132 if rev: 133 rev_options = [rev] 134 rev_display = ' (to %s)' % rev 135 else: 136 rev_options = ['origin/master'] 137 rev_display = '' 138 if self.check_destination(dest, url, rev_options, rev_display): 139 logger.info( 140 'Cloning %s%s to %s', url, rev_display, display_path(dest), 141 ) 142 self.run_command(['clone', '-q', url, dest]) 143 144 if rev: 145 rev_options = self.check_rev_options(rev, dest, rev_options) 146 # Only do a checkout if rev_options differs from HEAD 147 if not self.check_version(dest, rev_options): 148 self.run_command( 149 ['checkout', '-q'] + rev_options, 150 cwd=dest, 151 ) 152 #: repo may contain submodules 153 self.update_submodules(dest) 154 155 def get_url(self, location): 156 """Return URL of the first remote encountered.""" 157 remotes = self.run_command( 158 ['config', '--get-regexp', r'remote\..*\.url'], 159 show_stdout=False, cwd=location) 160 remotes = remotes.splitlines() 161 found_remote = remotes[0] 162 for remote in remotes: 163 if remote.startswith('remote.origin.url '): 164 found_remote = remote 165 break 166 url = found_remote.split(' ')[1] 167 return url.strip() 168 169 def get_revision(self, location): 170 current_rev = 
self.run_command( 171 ['rev-parse', 'HEAD'], show_stdout=False, cwd=location) 172 return current_rev.strip() 173 174 def get_full_refs(self, location): 175 """Yields tuples of (commit, ref) for branches and tags""" 176 output = self.run_command(['show-ref'], 177 show_stdout=False, cwd=location) 178 for line in output.strip().splitlines(): 179 commit, ref = line.split(' ', 1) 180 yield commit.strip(), ref.strip() 181 182 def is_ref_remote(self, ref): 183 return ref.startswith('refs/remotes/') 184 185 def is_ref_branch(self, ref): 186 return ref.startswith('refs/heads/') 187 188 def is_ref_tag(self, ref): 189 return ref.startswith('refs/tags/') 190 191 def is_ref_commit(self, ref): 192 """A ref is a commit sha if it is not anything else""" 193 return not any(( 194 self.is_ref_remote(ref), 195 self.is_ref_branch(ref), 196 self.is_ref_tag(ref), 197 )) 198 199 # Should deprecate `get_refs` since it's ambiguous 200 def get_refs(self, location): 201 return self.get_short_refs(location) 202 203 def get_short_refs(self, location): 204 """Return map of named refs (branches or tags) to commit hashes.""" 205 rv = {} 206 for commit, ref in self.get_full_refs(location): 207 ref_name = None 208 if self.is_ref_remote(ref): 209 ref_name = ref[len('refs/remotes/'):] 210 elif self.is_ref_branch(ref): 211 ref_name = ref[len('refs/heads/'):] 212 elif self.is_ref_tag(ref): 213 ref_name = ref[len('refs/tags/'):] 214 if ref_name is not None: 215 rv[ref_name] = commit 216 return rv 217 218 def _get_subdirectory(self, location): 219 """Return the relative path of setup.py to the git repo root.""" 220 # find the repo root 221 git_dir = self.run_command(['rev-parse', '--git-dir'], 222 show_stdout=False, cwd=location).strip() 223 if not os.path.isabs(git_dir): 224 git_dir = os.path.join(location, git_dir) 225 root_dir = os.path.join(git_dir, '..') 226 # find setup.py 227 orig_location = location 228 while not os.path.exists(os.path.join(location, 'setup.py')): 229 last_location = location 230 location = os.path.dirname(location) 231 if location == last_location: 232 # We've traversed up to the root of the filesystem without 233 # finding setup.py 234 logger.warning( 235 "Could not find setup.py for directory %s (tried all " 236 "parent directories)", 237 orig_location, 238 ) 239 return None 240 # relative path of setup.py to repo root 241 if samefile(root_dir, location): 242 return None 243 return os.path.relpath(location, root_dir) 244 245 def get_src_requirement(self, dist, location): 246 repo = self.get_url(location) 247 if not repo.lower().startswith('git:'): 248 repo = 'git+' + repo 249 egg_project_name = dist.egg_name().split('-', 1)[0] 250 if not repo: 251 return None 252 current_rev = self.get_revision(location) 253 req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) 254 subdirectory = self._get_subdirectory(location) 255 if subdirectory: 256 req += '&subdirectory=' + subdirectory 257 return req 258 259 def get_url_rev(self): 260 """ 261 Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. 262 That's required because although they use SSH they sometimes doesn't 263 work with a ssh:// scheme (e.g. Github). But we need a scheme for 264 parsing. Hence we remove it again afterwards and return it as a stub. 
265 """ 266 if '://' not in self.url: 267 assert 'file:' not in self.url 268 self.url = self.url.replace('git+', 'git+ssh://') 269 url, rev = super(Git, self).get_url_rev() 270 url = url.replace('ssh://', '') 271 else: 272 url, rev = super(Git, self).get_url_rev() 273 274 return url, rev 275 276 def update_submodules(self, location): 277 if not os.path.exists(os.path.join(location, '.gitmodules')): 278 return 279 self.run_command( 280 ['submodule', 'update', '--init', '--recursive', '-q'], 281 cwd=location, 282 ) 283 284 @classmethod 285 def controls_location(cls, location): 286 if super(Git, cls).controls_location(location): 287 return True 288 try: 289 r = cls().run_command(['rev-parse'], 290 cwd=location, 291 show_stdout=False, 292 on_returncode='ignore') 293 return not r 294 except BadCommand: 295 logger.debug("could not determine if %s is under git control " 296 "because git is not available", location) 297 return False 298 299 300 vcs.register(Git) 301 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/vcs/git.py b/pip/vcs/git.py --- a/pip/vcs/git.py +++ b/pip/vcs/git.py @@ -92,7 +92,8 @@ return [revisions[rev]] else: logger.warning( - "Could not find a tag or branch '%s', assuming commit.", rev, + "Could not find a tag or branch '%s', assuming commit or ref", + rev, ) return rev_options @@ -146,9 +147,14 @@ # Only do a checkout if rev_options differs from HEAD if not self.check_version(dest, rev_options): self.run_command( - ['checkout', '-q'] + rev_options, + ['fetch', '-q', url] + rev_options, cwd=dest, ) + self.run_command( + ['checkout', '-q', 'FETCH_HEAD'], + cwd=dest, + ) + #: repo may contain submodules self.update_submodules(dest)
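The patch replaces the failing `git checkout <rev>` with a fetch of the rev from the remote followed by `git checkout FETCH_HEAD`, which is what makes non-advertised refs such as gerrit's `refs/changes/...` usable in the `@` syntax. A sketch of that two-step sequence outside pip, using the URL and ref from the issue text (the gerrit change may no longer be fetchable, so treat this as illustrative):

```python
import os
import subprocess
import tempfile

url = "https://git.openstack.org/openstack/oslo.db"
ref = "refs/changes/01/345601/6"  # gerrit review ref from the issue
dest = os.path.join(tempfile.mkdtemp(prefix="pip-ref-demo-"), "oslo.db")

subprocess.check_call(["git", "clone", "-q", url, dest])
# `git checkout refs/changes/...` fails because change refs are not cloned;
# fetching the ref first binds it to FETCH_HEAD, which can be checked out.
subprocess.check_call(["git", "fetch", "-q", url, ref], cwd=dest)
subprocess.check_call(["git", "checkout", "-q", "FETCH_HEAD"], cwd=dest)
print(subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=dest).decode().strip())
```

Note this leaves the clone on a detached HEAD, which matches pip's existing checkout-by-sha behavior.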
{"golden_diff": "diff --git a/pip/vcs/git.py b/pip/vcs/git.py\n--- a/pip/vcs/git.py\n+++ b/pip/vcs/git.py\n@@ -92,7 +92,8 @@\n return [revisions[rev]]\n else:\n logger.warning(\n- \"Could not find a tag or branch '%s', assuming commit.\", rev,\n+ \"Could not find a tag or branch '%s', assuming commit or ref\",\n+ rev,\n )\n return rev_options\n \n@@ -146,9 +147,14 @@\n # Only do a checkout if rev_options differs from HEAD\n if not self.check_version(dest, rev_options):\n self.run_command(\n- ['checkout', '-q'] + rev_options,\n+ ['fetch', '-q', url] + rev_options,\n cwd=dest,\n )\n+ self.run_command(\n+ ['checkout', '-q', 'FETCH_HEAD'],\n+ cwd=dest,\n+ )\n+\n #: repo may contain submodules\n self.update_submodules(dest)\n", "issue": "support git refs in @ syntax\nWe'd like to be able to put paths to gerrit reviews in requirements files.\n\nGiven a gerrit like https://review.openstack.org/#/c/345601/6, the path given for a git pull looks like:\n\n git pull https://git.openstack.org/openstack/oslo.db refs/changes/01/345601/6\n\npip syntax we'd expect would be:\n\n```\n.venv/bin/pip install -e git+https://git.openstack.org/openstack/oslo.db@refs/changes/01/345601/6#egg=oslo.db\n```\n\ncurrent output:\n\n```\n\nObtaining oslo.db from git+https://git.openstack.org/openstack/oslo.db@refs/changes/01/345601/6#egg=oslo.db\n Cloning https://git.openstack.org/openstack/oslo.db (to refs/changes/01/345601/6) to ./.venv/src/oslo.db\n Could not find a tag or branch 'refs/changes/01/345601/6', assuming commit.\nerror: pathspec 'refs/changes/01/345601/6' did not match any file(s) known to git.\nCommand \"git checkout -q refs/changes/01/345601/6\" failed with error code 1 in /home/classic/.venv/src/oslo.db\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport tempfile\nimport os.path\n\nfrom pip.compat import samefile\nfrom pip.exceptions import BadCommand\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves.urllib import request as urllib_request\nfrom pip._vendor.packaging.version import parse as parse_version\n\nfrom pip.utils import display_path, rmtree\nfrom pip.vcs import vcs, VersionControl\n\n\nurlsplit = urllib_parse.urlsplit\nurlunsplit = urllib_parse.urlunsplit\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Git(VersionControl):\n name = 'git'\n dirname = '.git'\n repo_name = 'clone'\n schemes = (\n 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',\n )\n\n def __init__(self, url=None, *args, **kwargs):\n\n # Works around an apparent Git bug\n # (see http://article.gmane.org/gmane.comp.version-control.git/146500)\n if url:\n scheme, netloc, path, query, fragment = urlsplit(url)\n if scheme.endswith('file'):\n initial_slashes = path[:-len(path.lstrip('/'))]\n newpath = (\n initial_slashes +\n urllib_request.url2pathname(path)\n .replace('\\\\', '/').lstrip('/')\n )\n url = urlunsplit((scheme, netloc, newpath, query, fragment))\n after_plus = scheme.find('+') + 1\n url = scheme[:after_plus] + urlunsplit(\n (scheme[after_plus:], netloc, newpath, query, fragment),\n )\n\n super(Git, self).__init__(url, *args, **kwargs)\n\n def get_git_version(self):\n VERSION_PFX = 'git version '\n version = self.run_command(['version'], show_stdout=False)\n if version.startswith(VERSION_PFX):\n version = version[len(VERSION_PFX):].split()[0]\n else:\n version = ''\n # get first 3 positions of the git version becasue\n # on windows it is x.y.z.windows.t, and this parses as\n # LegacyVersion which always 
smaller than a Version.\n version = '.'.join(version.split('.')[:3])\n return parse_version(version)\n\n def export(self, location):\n \"\"\"Export the Git repository at the url to the destination location\"\"\"\n temp_dir = tempfile.mkdtemp('-export', 'pip-')\n self.unpack(temp_dir)\n try:\n if not location.endswith('/'):\n location = location + '/'\n self.run_command(\n ['checkout-index', '-a', '-f', '--prefix', location],\n show_stdout=False, cwd=temp_dir)\n finally:\n rmtree(temp_dir)\n\n def check_rev_options(self, rev, dest, rev_options):\n \"\"\"Check the revision options before checkout to compensate that tags\n and branches may need origin/ as a prefix.\n Returns the SHA1 of the branch or tag if found.\n \"\"\"\n revisions = self.get_short_refs(dest)\n\n origin_rev = 'origin/%s' % rev\n if origin_rev in revisions:\n # remote branch\n return [revisions[origin_rev]]\n elif rev in revisions:\n # a local tag or branch name\n return [revisions[rev]]\n else:\n logger.warning(\n \"Could not find a tag or branch '%s', assuming commit.\", rev,\n )\n return rev_options\n\n def check_version(self, dest, rev_options):\n \"\"\"\n Compare the current sha to the ref. ref may be a branch or tag name,\n but current rev will always point to a sha. This means that a branch\n or tag will never compare as True. So this ultimately only matches\n against exact shas.\n \"\"\"\n return self.get_revision(dest).startswith(rev_options[0])\n\n def switch(self, dest, url, rev_options):\n self.run_command(['config', 'remote.origin.url', url], cwd=dest)\n self.run_command(['checkout', '-q'] + rev_options, cwd=dest)\n\n self.update_submodules(dest)\n\n def update(self, dest, rev_options):\n # First fetch changes from the default remote\n if self.get_git_version() >= parse_version('1.9.0'):\n # fetch tags in addition to everything else\n self.run_command(['fetch', '-q', '--tags'], cwd=dest)\n else:\n self.run_command(['fetch', '-q'], cwd=dest)\n # Then reset to wanted revision (maybe even origin/master)\n if rev_options:\n rev_options = self.check_rev_options(\n rev_options[0], dest, rev_options,\n )\n self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)\n #: update submodules\n self.update_submodules(dest)\n\n def obtain(self, dest):\n url, rev = self.get_url_rev()\n if rev:\n rev_options = [rev]\n rev_display = ' (to %s)' % rev\n else:\n rev_options = ['origin/master']\n rev_display = ''\n if self.check_destination(dest, url, rev_options, rev_display):\n logger.info(\n 'Cloning %s%s to %s', url, rev_display, display_path(dest),\n )\n self.run_command(['clone', '-q', url, dest])\n\n if rev:\n rev_options = self.check_rev_options(rev, dest, rev_options)\n # Only do a checkout if rev_options differs from HEAD\n if not self.check_version(dest, rev_options):\n self.run_command(\n ['checkout', '-q'] + rev_options,\n cwd=dest,\n )\n #: repo may contain submodules\n self.update_submodules(dest)\n\n def get_url(self, location):\n \"\"\"Return URL of the first remote encountered.\"\"\"\n remotes = self.run_command(\n ['config', '--get-regexp', r'remote\\..*\\.url'],\n show_stdout=False, cwd=location)\n remotes = remotes.splitlines()\n found_remote = remotes[0]\n for remote in remotes:\n if remote.startswith('remote.origin.url '):\n found_remote = remote\n break\n url = found_remote.split(' ')[1]\n return url.strip()\n\n def get_revision(self, location):\n current_rev = self.run_command(\n ['rev-parse', 'HEAD'], show_stdout=False, cwd=location)\n return current_rev.strip()\n\n def get_full_refs(self, 
location):\n \"\"\"Yields tuples of (commit, ref) for branches and tags\"\"\"\n output = self.run_command(['show-ref'],\n show_stdout=False, cwd=location)\n for line in output.strip().splitlines():\n commit, ref = line.split(' ', 1)\n yield commit.strip(), ref.strip()\n\n def is_ref_remote(self, ref):\n return ref.startswith('refs/remotes/')\n\n def is_ref_branch(self, ref):\n return ref.startswith('refs/heads/')\n\n def is_ref_tag(self, ref):\n return ref.startswith('refs/tags/')\n\n def is_ref_commit(self, ref):\n \"\"\"A ref is a commit sha if it is not anything else\"\"\"\n return not any((\n self.is_ref_remote(ref),\n self.is_ref_branch(ref),\n self.is_ref_tag(ref),\n ))\n\n # Should deprecate `get_refs` since it's ambiguous\n def get_refs(self, location):\n return self.get_short_refs(location)\n\n def get_short_refs(self, location):\n \"\"\"Return map of named refs (branches or tags) to commit hashes.\"\"\"\n rv = {}\n for commit, ref in self.get_full_refs(location):\n ref_name = None\n if self.is_ref_remote(ref):\n ref_name = ref[len('refs/remotes/'):]\n elif self.is_ref_branch(ref):\n ref_name = ref[len('refs/heads/'):]\n elif self.is_ref_tag(ref):\n ref_name = ref[len('refs/tags/'):]\n if ref_name is not None:\n rv[ref_name] = commit\n return rv\n\n def _get_subdirectory(self, location):\n \"\"\"Return the relative path of setup.py to the git repo root.\"\"\"\n # find the repo root\n git_dir = self.run_command(['rev-parse', '--git-dir'],\n show_stdout=False, cwd=location).strip()\n if not os.path.isabs(git_dir):\n git_dir = os.path.join(location, git_dir)\n root_dir = os.path.join(git_dir, '..')\n # find setup.py\n orig_location = location\n while not os.path.exists(os.path.join(location, 'setup.py')):\n last_location = location\n location = os.path.dirname(location)\n if location == last_location:\n # We've traversed up to the root of the filesystem without\n # finding setup.py\n logger.warning(\n \"Could not find setup.py for directory %s (tried all \"\n \"parent directories)\",\n orig_location,\n )\n return None\n # relative path of setup.py to repo root\n if samefile(root_dir, location):\n return None\n return os.path.relpath(location, root_dir)\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('git:'):\n repo = 'git+' + repo\n egg_project_name = dist.egg_name().split('-', 1)[0]\n if not repo:\n return None\n current_rev = self.get_revision(location)\n req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)\n subdirectory = self._get_subdirectory(location)\n if subdirectory:\n req += '&subdirectory=' + subdirectory\n return req\n\n def get_url_rev(self):\n \"\"\"\n Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.\n That's required because although they use SSH they sometimes doesn't\n work with a ssh:// scheme (e.g. Github). But we need a scheme for\n parsing. 
Hence we remove it again afterwards and return it as a stub.\n \"\"\"\n if '://' not in self.url:\n assert 'file:' not in self.url\n self.url = self.url.replace('git+', 'git+ssh://')\n url, rev = super(Git, self).get_url_rev()\n url = url.replace('ssh://', '')\n else:\n url, rev = super(Git, self).get_url_rev()\n\n return url, rev\n\n def update_submodules(self, location):\n if not os.path.exists(os.path.join(location, '.gitmodules')):\n return\n self.run_command(\n ['submodule', 'update', '--init', '--recursive', '-q'],\n cwd=location,\n )\n\n @classmethod\n def controls_location(cls, location):\n if super(Git, cls).controls_location(location):\n return True\n try:\n r = cls().run_command(['rev-parse'],\n cwd=location,\n show_stdout=False,\n on_returncode='ignore')\n return not r\n except BadCommand:\n logger.debug(\"could not determine if %s is under git control \"\n \"because git is not available\", location)\n return False\n\n\nvcs.register(Git)\n", "path": "pip/vcs/git.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport tempfile\nimport os.path\n\nfrom pip.compat import samefile\nfrom pip.exceptions import BadCommand\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves.urllib import request as urllib_request\nfrom pip._vendor.packaging.version import parse as parse_version\n\nfrom pip.utils import display_path, rmtree\nfrom pip.vcs import vcs, VersionControl\n\n\nurlsplit = urllib_parse.urlsplit\nurlunsplit = urllib_parse.urlunsplit\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Git(VersionControl):\n name = 'git'\n dirname = '.git'\n repo_name = 'clone'\n schemes = (\n 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',\n )\n\n def __init__(self, url=None, *args, **kwargs):\n\n # Works around an apparent Git bug\n # (see http://article.gmane.org/gmane.comp.version-control.git/146500)\n if url:\n scheme, netloc, path, query, fragment = urlsplit(url)\n if scheme.endswith('file'):\n initial_slashes = path[:-len(path.lstrip('/'))]\n newpath = (\n initial_slashes +\n urllib_request.url2pathname(path)\n .replace('\\\\', '/').lstrip('/')\n )\n url = urlunsplit((scheme, netloc, newpath, query, fragment))\n after_plus = scheme.find('+') + 1\n url = scheme[:after_plus] + urlunsplit(\n (scheme[after_plus:], netloc, newpath, query, fragment),\n )\n\n super(Git, self).__init__(url, *args, **kwargs)\n\n def get_git_version(self):\n VERSION_PFX = 'git version '\n version = self.run_command(['version'], show_stdout=False)\n if version.startswith(VERSION_PFX):\n version = version[len(VERSION_PFX):].split()[0]\n else:\n version = ''\n # get first 3 positions of the git version becasue\n # on windows it is x.y.z.windows.t, and this parses as\n # LegacyVersion which always smaller than a Version.\n version = '.'.join(version.split('.')[:3])\n return parse_version(version)\n\n def export(self, location):\n \"\"\"Export the Git repository at the url to the destination location\"\"\"\n temp_dir = tempfile.mkdtemp('-export', 'pip-')\n self.unpack(temp_dir)\n try:\n if not location.endswith('/'):\n location = location + '/'\n self.run_command(\n ['checkout-index', '-a', '-f', '--prefix', location],\n show_stdout=False, cwd=temp_dir)\n finally:\n rmtree(temp_dir)\n\n def check_rev_options(self, rev, dest, rev_options):\n \"\"\"Check the revision options before checkout to compensate that tags\n and branches may need origin/ as a prefix.\n Returns the SHA1 of the branch or tag if found.\n \"\"\"\n 
revisions = self.get_short_refs(dest)\n\n origin_rev = 'origin/%s' % rev\n if origin_rev in revisions:\n # remote branch\n return [revisions[origin_rev]]\n elif rev in revisions:\n # a local tag or branch name\n return [revisions[rev]]\n else:\n logger.warning(\n \"Could not find a tag or branch '%s', assuming commit or ref\",\n rev,\n )\n return rev_options\n\n def check_version(self, dest, rev_options):\n \"\"\"\n Compare the current sha to the ref. ref may be a branch or tag name,\n but current rev will always point to a sha. This means that a branch\n or tag will never compare as True. So this ultimately only matches\n against exact shas.\n \"\"\"\n return self.get_revision(dest).startswith(rev_options[0])\n\n def switch(self, dest, url, rev_options):\n self.run_command(['config', 'remote.origin.url', url], cwd=dest)\n self.run_command(['checkout', '-q'] + rev_options, cwd=dest)\n\n self.update_submodules(dest)\n\n def update(self, dest, rev_options):\n # First fetch changes from the default remote\n if self.get_git_version() >= parse_version('1.9.0'):\n # fetch tags in addition to everything else\n self.run_command(['fetch', '-q', '--tags'], cwd=dest)\n else:\n self.run_command(['fetch', '-q'], cwd=dest)\n # Then reset to wanted revision (maybe even origin/master)\n if rev_options:\n rev_options = self.check_rev_options(\n rev_options[0], dest, rev_options,\n )\n self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)\n #: update submodules\n self.update_submodules(dest)\n\n def obtain(self, dest):\n url, rev = self.get_url_rev()\n if rev:\n rev_options = [rev]\n rev_display = ' (to %s)' % rev\n else:\n rev_options = ['origin/master']\n rev_display = ''\n if self.check_destination(dest, url, rev_options, rev_display):\n logger.info(\n 'Cloning %s%s to %s', url, rev_display, display_path(dest),\n )\n self.run_command(['clone', '-q', url, dest])\n\n if rev:\n rev_options = self.check_rev_options(rev, dest, rev_options)\n # Only do a checkout if rev_options differs from HEAD\n if not self.check_version(dest, rev_options):\n self.run_command(\n ['fetch', '-q', url] + rev_options,\n cwd=dest,\n )\n self.run_command(\n ['checkout', '-q', 'FETCH_HEAD'],\n cwd=dest,\n )\n\n #: repo may contain submodules\n self.update_submodules(dest)\n\n def get_url(self, location):\n \"\"\"Return URL of the first remote encountered.\"\"\"\n remotes = self.run_command(\n ['config', '--get-regexp', r'remote\\..*\\.url'],\n show_stdout=False, cwd=location)\n remotes = remotes.splitlines()\n found_remote = remotes[0]\n for remote in remotes:\n if remote.startswith('remote.origin.url '):\n found_remote = remote\n break\n url = found_remote.split(' ')[1]\n return url.strip()\n\n def get_revision(self, location):\n current_rev = self.run_command(\n ['rev-parse', 'HEAD'], show_stdout=False, cwd=location)\n return current_rev.strip()\n\n def get_full_refs(self, location):\n \"\"\"Yields tuples of (commit, ref) for branches and tags\"\"\"\n output = self.run_command(['show-ref'],\n show_stdout=False, cwd=location)\n for line in output.strip().splitlines():\n commit, ref = line.split(' ', 1)\n yield commit.strip(), ref.strip()\n\n def is_ref_remote(self, ref):\n return ref.startswith('refs/remotes/')\n\n def is_ref_branch(self, ref):\n return ref.startswith('refs/heads/')\n\n def is_ref_tag(self, ref):\n return ref.startswith('refs/tags/')\n\n def is_ref_commit(self, ref):\n \"\"\"A ref is a commit sha if it is not anything else\"\"\"\n return not any((\n self.is_ref_remote(ref),\n 
self.is_ref_branch(ref),\n self.is_ref_tag(ref),\n ))\n\n # Should deprecate `get_refs` since it's ambiguous\n def get_refs(self, location):\n return self.get_short_refs(location)\n\n def get_short_refs(self, location):\n \"\"\"Return map of named refs (branches or tags) to commit hashes.\"\"\"\n rv = {}\n for commit, ref in self.get_full_refs(location):\n ref_name = None\n if self.is_ref_remote(ref):\n ref_name = ref[len('refs/remotes/'):]\n elif self.is_ref_branch(ref):\n ref_name = ref[len('refs/heads/'):]\n elif self.is_ref_tag(ref):\n ref_name = ref[len('refs/tags/'):]\n if ref_name is not None:\n rv[ref_name] = commit\n return rv\n\n def _get_subdirectory(self, location):\n \"\"\"Return the relative path of setup.py to the git repo root.\"\"\"\n # find the repo root\n git_dir = self.run_command(['rev-parse', '--git-dir'],\n show_stdout=False, cwd=location).strip()\n if not os.path.isabs(git_dir):\n git_dir = os.path.join(location, git_dir)\n root_dir = os.path.join(git_dir, '..')\n # find setup.py\n orig_location = location\n while not os.path.exists(os.path.join(location, 'setup.py')):\n last_location = location\n location = os.path.dirname(location)\n if location == last_location:\n # We've traversed up to the root of the filesystem without\n # finding setup.py\n logger.warning(\n \"Could not find setup.py for directory %s (tried all \"\n \"parent directories)\",\n orig_location,\n )\n return None\n # relative path of setup.py to repo root\n if samefile(root_dir, location):\n return None\n return os.path.relpath(location, root_dir)\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('git:'):\n repo = 'git+' + repo\n egg_project_name = dist.egg_name().split('-', 1)[0]\n if not repo:\n return None\n current_rev = self.get_revision(location)\n req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)\n subdirectory = self._get_subdirectory(location)\n if subdirectory:\n req += '&subdirectory=' + subdirectory\n return req\n\n def get_url_rev(self):\n \"\"\"\n Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.\n That's required because although they use SSH they sometimes doesn't\n work with a ssh:// scheme (e.g. Github). But we need a scheme for\n parsing. Hence we remove it again afterwards and return it as a stub.\n \"\"\"\n if '://' not in self.url:\n assert 'file:' not in self.url\n self.url = self.url.replace('git+', 'git+ssh://')\n url, rev = super(Git, self).get_url_rev()\n url = url.replace('ssh://', '')\n else:\n url, rev = super(Git, self).get_url_rev()\n\n return url, rev\n\n def update_submodules(self, location):\n if not os.path.exists(os.path.join(location, '.gitmodules')):\n return\n self.run_command(\n ['submodule', 'update', '--init', '--recursive', '-q'],\n cwd=location,\n )\n\n @classmethod\n def controls_location(cls, location):\n if super(Git, cls).controls_location(location):\n return True\n try:\n r = cls().run_command(['rev-parse'],\n cwd=location,\n show_stdout=False,\n on_returncode='ignore')\n return not r\n except BadCommand:\n logger.debug(\"could not determine if %s is under git control \"\n \"because git is not available\", location)\n return False\n\n\nvcs.register(Git)\n", "path": "pip/vcs/git.py"}]}
3,844
230
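The pip record above centers on mapping `git show-ref` output to usable branch and tag names (`get_full_refs` / `get_short_refs`). A minimal standalone sketch of that mapping, with illustrative sample output standing in for pip's command-runner plumbing:

```python
# Minimal sketch of pip's short-ref mapping. SHOW_REF_OUTPUT is an
# illustrative stand-in for the stdout of `git show-ref`.
SHOW_REF_OUTPUT = """\
1f2e3d4c5b6a79881726354453627181920aabb0 refs/heads/master
0a1b2c3d4e5f60718293a4b5c6d7e8f901234567 refs/remotes/origin/master
deadbeefdeadbeefdeadbeefdeadbeefdeadbeef refs/tags/v1.0
"""

PREFIXES = ('refs/remotes/', 'refs/heads/', 'refs/tags/')


def short_refs(output):
    """Map branch/tag/remote names to commits, mirroring get_short_refs."""
    rv = {}
    for line in output.strip().splitlines():
        commit, ref = line.split(' ', 1)
        for prefix in PREFIXES:
            if ref.startswith(prefix):
                rv[ref[len(prefix):]] = commit
                break
        # refs matching no prefix are bare commit shas and are skipped,
        # which is the rule is_ref_commit encodes in the record above
    return rv


print(short_refs(SHOW_REF_OUTPUT))
# {'master': '1f2e...', 'origin/master': '0a1b...', 'v1.0': 'dead...'}
```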
gh_patches_debug_16024
rasdani/github-patches
git_diff
mindsdb__mindsdb-2390
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error when csv header has empty space ### Discussed in https://github.com/mindsdb/mindsdb/discussions/2343 <div type='discussions-op-text'> <sup>Originally posted by **Ashok0** June 13, 2022</sup> Hi, I have a question regarding usage of imported data with MindsDB. I registered a MindsDB account and selected "Add Data" > "Import File" and uploaded some sample CSV data to a table. I can view the data just fine as shown in the following screencap: https://imgur.com/a/X6mrzHt If I attempt to select an individual column i.e. 'sensitivity' with the following statement: **SELECT sensitivity FROM files.bedcheck**, I receive the following error message: **Binder Error: Referenced column "sensitivity" not found in FROM clause! Candidate bindings: "df_table. sensitivity" LINE 1: SELECT sensitivity AS sensitivity** Any assistance in how I can resolve this error would be greatly appreciated. Thank you!</div> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mindsdb/integrations/handlers/file_handler/file_handler.py` Content: ``` 1 from io import BytesIO, StringIO 2 import os 3 import csv 4 import json 5 import codecs 6 import traceback 7 import tempfile 8 from urllib.parse import urlparse 9 10 import requests 11 import pandas as pd 12 13 from mindsdb_sql import parse_sql 14 from mindsdb_sql.parser.ast.base import ASTNode 15 from mindsdb_sql.parser.ast import DropTables, Select 16 17 from mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df 18 from mindsdb.integrations.libs.base_handler import DatabaseHandler 19 from mindsdb.integrations.libs.response import ( 20 HandlerStatusResponse as StatusResponse, 21 HandlerResponse as Response, 22 RESPONSE_TYPE 23 ) 24 25 26 def clean_row(row): 27 n_row = [] 28 for cell in row: 29 if str(cell) in ['', ' ', ' ', 'NaN', 'nan', 'NA']: 30 n_row.append(None) 31 else: 32 n_row.append(cell) 33 34 return n_row 35 36 37 class FileHandler(DatabaseHandler): 38 """ 39 Handler for files 40 """ 41 type = 'files' 42 43 def __init__(self, name=None, db_store=None, fs_store=None, connection_data=None, file_controller=None): 44 super().__init__(name) 45 self.parser = parse_sql 46 self.fs_store = fs_store 47 self.custom_parser = connection_data.get('custom_parser') 48 self.clean_rows = connection_data.get('clean_rows', True) 49 self.file_controller = file_controller 50 51 def connect(self, **kwargs): 52 return 53 54 def disconnect(self, **kwargs): 55 return 56 57 def check_connection(self) -> StatusResponse: 58 return StatusResponse(True) 59 60 def query(self, query: ASTNode) -> Response: 61 if type(query) == DropTables: 62 for table_identifier in query.tables: 63 if len(table_identifier.parts) == 2 and table_identifier.parts[0] != self.name: 64 return Response( 65 RESPONSE_TYPE.ERROR, 66 error_message=f"Can't delete table from database '{table_identifier.parts[0]}'" 67 ) 68 table_name = table_identifier.parts[-1] 69 try: 70 self.file_controller.delete_file(table_name) 71 except Exception as e: 72 return Response( 73 RESPONSE_TYPE.ERROR, 74 error_message=f"Can't delete table '{table_name}': {e}" 75 ) 76 return Response(RESPONSE_TYPE.OK) 77 elif type(query) == Select: 78 table_name = query.from_table.parts[-1] 79 file_path = self.file_controller.get_file_path(table_name, company_id=None) 80 df, _columns = self._handle_source(file_path, self.clean_rows, 
self.custom_parser) 81 result_df = query_df(df, query) 82 return Response( 83 RESPONSE_TYPE.TABLE, 84 data_frame=result_df 85 ) 86 else: 87 return Response( 88 RESPONSE_TYPE.ERROR, 89 error_message="Only 'select' and 'drop' queries allowed for files" 90 ) 91 92 def native_query(self, query: str) -> Response: 93 ast = self.parser(query, dialect='mindsdb') 94 return self.query(ast) 95 96 @staticmethod 97 def _handle_source(file_path, clean_rows=True, custom_parser=None): 98 # get file data io, format and dialect 99 data, fmt, dialect = FileHandler._get_data_io(file_path) 100 data.seek(0) # make sure we are at 0 in file pointer 101 102 if custom_parser: 103 header, file_data = custom_parser(data, fmt) 104 105 elif fmt == 'csv': 106 csv_reader = list(csv.reader(data, dialect)) 107 header = csv_reader[0] 108 file_data = csv_reader[1:] 109 110 elif fmt in ['xlsx', 'xls']: 111 data.seek(0) 112 df = pd.read_excel(data) 113 header = df.columns.values.tolist() 114 file_data = df.values.tolist() 115 116 elif fmt == 'json': 117 data.seek(0) 118 json_doc = json.loads(data.read()) 119 df = pd.json_normalize(json_doc, max_level=0) 120 header = df.columns.values.tolist() 121 file_data = df.values.tolist() 122 123 else: 124 raise ValueError('Could not load file into any format, supported formats are csv, json, xls, xlsx') 125 126 if clean_rows: 127 file_list_data = [clean_row(row) for row in file_data] 128 else: 129 file_list_data = file_data 130 131 col_map = dict((col, col) for col in header) 132 return pd.DataFrame(file_list_data, columns=header), col_map 133 134 @staticmethod 135 def _get_data_io(file_path): 136 """ 137 This gets a file either url or local file and defiens what the format is as well as dialect 138 :param file: file path or url 139 :return: data_io, format, dialect 140 """ 141 142 ############ 143 # get file as io object 144 ############ 145 146 # file_path = self._get_file_path() 147 148 data = BytesIO() 149 150 try: 151 with open(file_path, 'rb') as fp: 152 data = BytesIO(fp.read()) 153 except Exception as e: 154 error = 'Could not load file, possible exception : {exception}'.format(exception=e) 155 print(error) 156 raise ValueError(error) 157 158 dialect = None 159 160 ############ 161 # check for file type 162 ############ 163 164 # try to guess if its an excel file 165 xlsx_sig = b'\x50\x4B\x05\06' 166 # xlsx_sig2 = b'\x50\x4B\x03\x04' 167 xls_sig = b'\x09\x08\x10\x00\x00\x06\x05\x00' 168 169 # different whence, offset, size for different types 170 excel_meta = [('xls', 0, 512, 8), ('xlsx', 2, -22, 4)] 171 172 for filename, whence, offset, size in excel_meta: 173 174 try: 175 data.seek(offset, whence) # Seek to the offset. 176 bytes = data.read(size) # Capture the specified number of bytes. 
177 data.seek(0) 178 codecs.getencoder('hex')(bytes) 179 180 if bytes == xls_sig: 181 return data, 'xls', dialect 182 elif bytes == xlsx_sig: 183 return data, 'xlsx', dialect 184 185 except Exception: 186 data.seek(0) 187 188 # if not excel it can be a json file or a CSV, convert from binary to stringio 189 190 byte_str = data.read() 191 # Move it to StringIO 192 try: 193 # Handle Microsoft's BOM "special" UTF-8 encoding 194 if byte_str.startswith(codecs.BOM_UTF8): 195 data = StringIO(byte_str.decode('utf-8-sig')) 196 else: 197 data = StringIO(byte_str.decode('utf-8')) 198 199 except Exception: 200 print(traceback.format_exc()) 201 print('Could not load into string') 202 203 # see if its JSON 204 buffer = data.read(100) 205 data.seek(0) 206 text = buffer.strip() 207 # analyze first n characters 208 if len(text) > 0: 209 text = text.strip() 210 # it it looks like a json, then try to parse it 211 if text.startswith('{') or text.startswith('['): 212 try: 213 json.loads(data.read()) 214 data.seek(0) 215 return data, 'json', dialect 216 except Exception: 217 data.seek(0) 218 return data, None, dialect 219 220 # lets try to figure out if its a csv 221 try: 222 dialect = FileHandler._get_csv_dialect(file_path) 223 if dialect: 224 return data, 'csv', dialect 225 return data, None, dialect 226 except Exception: 227 data.seek(0) 228 print('Could not detect format for this file') 229 print(traceback.format_exc()) 230 # No file type identified 231 return data, None, dialect 232 233 @staticmethod 234 def _get_file_path(path) -> str: 235 try: 236 is_url = urlparse(path).scheme in ('http', 'https') 237 except Exception: 238 is_url = False 239 if is_url: 240 path = FileHandler._fetch_url(path) 241 return path 242 243 @staticmethod 244 def _get_csv_dialect(file_path) -> csv.Dialect: 245 with open(file_path, 'rt') as f: 246 try: 247 accepted_csv_delimiters = [',', '\t', ';'] 248 dialect = csv.Sniffer().sniff(f.read(128 * 1024), delimiters=accepted_csv_delimiters) 249 except csv.Error: 250 dialect = None 251 return dialect 252 253 @staticmethod 254 def _fetch_url(url: str) -> str: 255 temp_dir = tempfile.mkdtemp(prefix='mindsdb_file_url_') 256 try: 257 r = requests.get(url, stream=True) 258 if r.status_code == 200: 259 with open(os.path.join(temp_dir, 'file'), 'wb') as f: 260 for chunk in r: 261 f.write(chunk) 262 else: 263 raise Exception(f'Responce status code is {r.status_code}') 264 except Exception as e: 265 print(f'Error during getting {url}') 266 print(e) 267 raise 268 return os.path.join(temp_dir, 'file') 269 270 def get_tables(self) -> Response: 271 """ 272 List all files 273 """ 274 files_meta = self.file_controller.get_files() 275 data = [{ 276 'TABLE_NAME': x['name'], 277 'TABLE_ROWS': x['row_count'] 278 } for x in files_meta] 279 return Response( 280 RESPONSE_TYPE.TABLE, 281 data_frame=pd.DataFrame(data) 282 ) 283 284 def get_columns(self, table_name) -> Response: 285 file_meta = self.file_controller.get_file_meta(table_name) 286 result = Response( 287 RESPONSE_TYPE.TABLE, 288 data_frame=pd.DataFrame([ 289 { 290 'Field': x, 291 'Type': 'str' 292 } for x in file_meta['columns'] 293 ]) 294 ) 295 return result 296 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mindsdb/integrations/handlers/file_handler/file_handler.py b/mindsdb/integrations/handlers/file_handler/file_handler.py --- a/mindsdb/integrations/handlers/file_handler/file_handler.py +++ b/mindsdb/integrations/handlers/file_handler/file_handler.py @@ -128,6 +128,7 @@ else: file_list_data = file_data + header = [x.strip() for x in header] col_map = dict((col, col) for col in header) return pd.DataFrame(file_list_data, columns=header), col_map @@ -287,7 +288,7 @@ RESPONSE_TYPE.TABLE, data_frame=pd.DataFrame([ { - 'Field': x, + 'Field': x.strip(), 'Type': 'str' } for x in file_meta['columns'] ])
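The patch resolves the binder error by stripping header names when the file is loaded. A self-contained pandas reproduction of the failure mode and of the fix (the CSV sample is made up for illustration):

```python
import io

import pandas as pd

# A CSV whose header cells carry leading spaces, as in the reported file.
RAW = "id, sensitivity\n1, 0.7\n2, 0.9\n"

df = pd.read_csv(io.StringIO(RAW))
print(list(df.columns))  # ['id', ' sensitivity'] -- note the space

# df['sensitivity'] raises KeyError here; the SQL layer surfaces the same
# mismatch as an unresolved column binding.

# The patched handler strips each header before building the DataFrame:
df.columns = [c.strip() for c in df.columns]
print(list(df.columns))  # ['id', 'sensitivity'] -- selectable again
print(df['sensitivity'].tolist())
```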
{"golden_diff": "diff --git a/mindsdb/integrations/handlers/file_handler/file_handler.py b/mindsdb/integrations/handlers/file_handler/file_handler.py\n--- a/mindsdb/integrations/handlers/file_handler/file_handler.py\n+++ b/mindsdb/integrations/handlers/file_handler/file_handler.py\n@@ -128,6 +128,7 @@\n else:\n file_list_data = file_data\n \n+ header = [x.strip() for x in header]\n col_map = dict((col, col) for col in header)\n return pd.DataFrame(file_list_data, columns=header), col_map\n \n@@ -287,7 +288,7 @@\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame([\n {\n- 'Field': x,\n+ 'Field': x.strip(),\n 'Type': 'str'\n } for x in file_meta['columns']\n ])\n", "issue": "Error when csv header has empty space\n### Discussed in https://github.com/mindsdb/mindsdb/discussions/2343\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **Ashok0** June 13, 2022</sup>\r\nHi,\r\n\r\nI have a question regarding usage of imported data with MindsDB. I registered a MindsDB account and selected \"Add Data\" > \"Import File\" and uploaded some sample CSV data to a table. I can view the data just fine as shown in the following screencap: https://imgur.com/a/X6mrzHt\r\n\r\nIf I attempt to select an individual column i.e. 'sensitivity' with the following statement: **SELECT sensitivity FROM files.bedcheck**, \r\nI receive the following error message: **Binder Error: Referenced column \"sensitivity\" not found in FROM clause! Candidate bindings: \"df_table. sensitivity\" LINE 1: SELECT sensitivity AS sensitivity**\r\n\r\nAny assistance in how I can resolve this error would be greatly appreciated. \r\n\r\nThank you!</div>\n", "before_files": [{"content": "from io import BytesIO, StringIO\nimport os\nimport csv\nimport json\nimport codecs\nimport traceback\nimport tempfile\nfrom urllib.parse import urlparse\n\nimport requests\nimport pandas as pd\n\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast.base import ASTNode\nfrom mindsdb_sql.parser.ast import DropTables, Select\n\nfrom mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\n\n\ndef clean_row(row):\n n_row = []\n for cell in row:\n if str(cell) in ['', ' ', ' ', 'NaN', 'nan', 'NA']:\n n_row.append(None)\n else:\n n_row.append(cell)\n\n return n_row\n\n\nclass FileHandler(DatabaseHandler):\n \"\"\"\n Handler for files\n \"\"\"\n type = 'files'\n\n def __init__(self, name=None, db_store=None, fs_store=None, connection_data=None, file_controller=None):\n super().__init__(name)\n self.parser = parse_sql\n self.fs_store = fs_store\n self.custom_parser = connection_data.get('custom_parser')\n self.clean_rows = connection_data.get('clean_rows', True)\n self.file_controller = file_controller\n\n def connect(self, **kwargs):\n return\n\n def disconnect(self, **kwargs):\n return\n\n def check_connection(self) -> StatusResponse:\n return StatusResponse(True)\n\n def query(self, query: ASTNode) -> Response:\n if type(query) == DropTables:\n for table_identifier in query.tables:\n if len(table_identifier.parts) == 2 and table_identifier.parts[0] != self.name:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Can't delete table from database '{table_identifier.parts[0]}'\"\n )\n table_name = table_identifier.parts[-1]\n try:\n self.file_controller.delete_file(table_name)\n except Exception as e:\n return 
Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Can't delete table '{table_name}': {e}\"\n )\n return Response(RESPONSE_TYPE.OK)\n elif type(query) == Select:\n table_name = query.from_table.parts[-1]\n file_path = self.file_controller.get_file_path(table_name, company_id=None)\n df, _columns = self._handle_source(file_path, self.clean_rows, self.custom_parser)\n result_df = query_df(df, query)\n return Response(\n RESPONSE_TYPE.TABLE,\n data_frame=result_df\n )\n else:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=\"Only 'select' and 'drop' queries allowed for files\"\n )\n\n def native_query(self, query: str) -> Response:\n ast = self.parser(query, dialect='mindsdb')\n return self.query(ast)\n\n @staticmethod\n def _handle_source(file_path, clean_rows=True, custom_parser=None):\n # get file data io, format and dialect\n data, fmt, dialect = FileHandler._get_data_io(file_path)\n data.seek(0) # make sure we are at 0 in file pointer\n\n if custom_parser:\n header, file_data = custom_parser(data, fmt)\n\n elif fmt == 'csv':\n csv_reader = list(csv.reader(data, dialect))\n header = csv_reader[0]\n file_data = csv_reader[1:]\n\n elif fmt in ['xlsx', 'xls']:\n data.seek(0)\n df = pd.read_excel(data)\n header = df.columns.values.tolist()\n file_data = df.values.tolist()\n\n elif fmt == 'json':\n data.seek(0)\n json_doc = json.loads(data.read())\n df = pd.json_normalize(json_doc, max_level=0)\n header = df.columns.values.tolist()\n file_data = df.values.tolist()\n\n else:\n raise ValueError('Could not load file into any format, supported formats are csv, json, xls, xlsx')\n\n if clean_rows:\n file_list_data = [clean_row(row) for row in file_data]\n else:\n file_list_data = file_data\n\n col_map = dict((col, col) for col in header)\n return pd.DataFrame(file_list_data, columns=header), col_map\n\n @staticmethod\n def _get_data_io(file_path):\n \"\"\"\n This gets a file either url or local file and defiens what the format is as well as dialect\n :param file: file path or url\n :return: data_io, format, dialect\n \"\"\"\n\n ############\n # get file as io object\n ############\n\n # file_path = self._get_file_path()\n\n data = BytesIO()\n\n try:\n with open(file_path, 'rb') as fp:\n data = BytesIO(fp.read())\n except Exception as e:\n error = 'Could not load file, possible exception : {exception}'.format(exception=e)\n print(error)\n raise ValueError(error)\n\n dialect = None\n\n ############\n # check for file type\n ############\n\n # try to guess if its an excel file\n xlsx_sig = b'\\x50\\x4B\\x05\\06'\n # xlsx_sig2 = b'\\x50\\x4B\\x03\\x04'\n xls_sig = b'\\x09\\x08\\x10\\x00\\x00\\x06\\x05\\x00'\n\n # different whence, offset, size for different types\n excel_meta = [('xls', 0, 512, 8), ('xlsx', 2, -22, 4)]\n\n for filename, whence, offset, size in excel_meta:\n\n try:\n data.seek(offset, whence) # Seek to the offset.\n bytes = data.read(size) # Capture the specified number of bytes.\n data.seek(0)\n codecs.getencoder('hex')(bytes)\n\n if bytes == xls_sig:\n return data, 'xls', dialect\n elif bytes == xlsx_sig:\n return data, 'xlsx', dialect\n\n except Exception:\n data.seek(0)\n\n # if not excel it can be a json file or a CSV, convert from binary to stringio\n\n byte_str = data.read()\n # Move it to StringIO\n try:\n # Handle Microsoft's BOM \"special\" UTF-8 encoding\n if byte_str.startswith(codecs.BOM_UTF8):\n data = StringIO(byte_str.decode('utf-8-sig'))\n else:\n data = StringIO(byte_str.decode('utf-8'))\n\n except Exception:\n print(traceback.format_exc())\n print('Could 
not load into string')\n\n # see if its JSON\n buffer = data.read(100)\n data.seek(0)\n text = buffer.strip()\n # analyze first n characters\n if len(text) > 0:\n text = text.strip()\n # it it looks like a json, then try to parse it\n if text.startswith('{') or text.startswith('['):\n try:\n json.loads(data.read())\n data.seek(0)\n return data, 'json', dialect\n except Exception:\n data.seek(0)\n return data, None, dialect\n\n # lets try to figure out if its a csv\n try:\n dialect = FileHandler._get_csv_dialect(file_path)\n if dialect:\n return data, 'csv', dialect\n return data, None, dialect\n except Exception:\n data.seek(0)\n print('Could not detect format for this file')\n print(traceback.format_exc())\n # No file type identified\n return data, None, dialect\n\n @staticmethod\n def _get_file_path(path) -> str:\n try:\n is_url = urlparse(path).scheme in ('http', 'https')\n except Exception:\n is_url = False\n if is_url:\n path = FileHandler._fetch_url(path)\n return path\n\n @staticmethod\n def _get_csv_dialect(file_path) -> csv.Dialect:\n with open(file_path, 'rt') as f:\n try:\n accepted_csv_delimiters = [',', '\\t', ';']\n dialect = csv.Sniffer().sniff(f.read(128 * 1024), delimiters=accepted_csv_delimiters)\n except csv.Error:\n dialect = None\n return dialect\n\n @staticmethod\n def _fetch_url(url: str) -> str:\n temp_dir = tempfile.mkdtemp(prefix='mindsdb_file_url_')\n try:\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(os.path.join(temp_dir, 'file'), 'wb') as f:\n for chunk in r:\n f.write(chunk)\n else:\n raise Exception(f'Responce status code is {r.status_code}')\n except Exception as e:\n print(f'Error during getting {url}')\n print(e)\n raise\n return os.path.join(temp_dir, 'file')\n\n def get_tables(self) -> Response:\n \"\"\"\n List all files\n \"\"\"\n files_meta = self.file_controller.get_files()\n data = [{\n 'TABLE_NAME': x['name'],\n 'TABLE_ROWS': x['row_count']\n } for x in files_meta]\n return Response(\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame(data)\n )\n\n def get_columns(self, table_name) -> Response:\n file_meta = self.file_controller.get_file_meta(table_name)\n result = Response(\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame([\n {\n 'Field': x,\n 'Type': 'str'\n } for x in file_meta['columns']\n ])\n )\n return result\n", "path": "mindsdb/integrations/handlers/file_handler/file_handler.py"}], "after_files": [{"content": "from io import BytesIO, StringIO\nimport os\nimport csv\nimport json\nimport codecs\nimport traceback\nimport tempfile\nfrom urllib.parse import urlparse\n\nimport requests\nimport pandas as pd\n\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast.base import ASTNode\nfrom mindsdb_sql.parser.ast import DropTables, Select\n\nfrom mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\n\n\ndef clean_row(row):\n n_row = []\n for cell in row:\n if str(cell) in ['', ' ', ' ', 'NaN', 'nan', 'NA']:\n n_row.append(None)\n else:\n n_row.append(cell)\n\n return n_row\n\n\nclass FileHandler(DatabaseHandler):\n \"\"\"\n Handler for files\n \"\"\"\n type = 'files'\n\n def __init__(self, name=None, db_store=None, fs_store=None, connection_data=None, file_controller=None):\n super().__init__(name)\n self.parser = parse_sql\n self.fs_store = fs_store\n self.custom_parser = 
connection_data.get('custom_parser')\n self.clean_rows = connection_data.get('clean_rows', True)\n self.file_controller = file_controller\n\n def connect(self, **kwargs):\n return\n\n def disconnect(self, **kwargs):\n return\n\n def check_connection(self) -> StatusResponse:\n return StatusResponse(True)\n\n def query(self, query: ASTNode) -> Response:\n if type(query) == DropTables:\n for table_identifier in query.tables:\n if len(table_identifier.parts) == 2 and table_identifier.parts[0] != self.name:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Can't delete table from database '{table_identifier.parts[0]}'\"\n )\n table_name = table_identifier.parts[-1]\n try:\n self.file_controller.delete_file(table_name)\n except Exception as e:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Can't delete table '{table_name}': {e}\"\n )\n return Response(RESPONSE_TYPE.OK)\n elif type(query) == Select:\n table_name = query.from_table.parts[-1]\n file_path = self.file_controller.get_file_path(table_name, company_id=None)\n df, _columns = self._handle_source(file_path, self.clean_rows, self.custom_parser)\n result_df = query_df(df, query)\n return Response(\n RESPONSE_TYPE.TABLE,\n data_frame=result_df\n )\n else:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=\"Only 'select' and 'drop' queries allowed for files\"\n )\n\n def native_query(self, query: str) -> Response:\n ast = self.parser(query, dialect='mindsdb')\n return self.query(ast)\n\n @staticmethod\n def _handle_source(file_path, clean_rows=True, custom_parser=None):\n # get file data io, format and dialect\n data, fmt, dialect = FileHandler._get_data_io(file_path)\n data.seek(0) # make sure we are at 0 in file pointer\n\n if custom_parser:\n header, file_data = custom_parser(data, fmt)\n\n elif fmt == 'csv':\n csv_reader = list(csv.reader(data, dialect))\n header = csv_reader[0]\n file_data = csv_reader[1:]\n\n elif fmt in ['xlsx', 'xls']:\n data.seek(0)\n df = pd.read_excel(data)\n header = df.columns.values.tolist()\n file_data = df.values.tolist()\n\n elif fmt == 'json':\n data.seek(0)\n json_doc = json.loads(data.read())\n df = pd.json_normalize(json_doc, max_level=0)\n header = df.columns.values.tolist()\n file_data = df.values.tolist()\n\n else:\n raise ValueError('Could not load file into any format, supported formats are csv, json, xls, xlsx')\n\n if clean_rows:\n file_list_data = [clean_row(row) for row in file_data]\n else:\n file_list_data = file_data\n\n header = [x.strip() for x in header]\n col_map = dict((col, col) for col in header)\n return pd.DataFrame(file_list_data, columns=header), col_map\n\n @staticmethod\n def _get_data_io(file_path):\n \"\"\"\n This gets a file either url or local file and defiens what the format is as well as dialect\n :param file: file path or url\n :return: data_io, format, dialect\n \"\"\"\n\n ############\n # get file as io object\n ############\n\n # file_path = self._get_file_path()\n\n data = BytesIO()\n\n try:\n with open(file_path, 'rb') as fp:\n data = BytesIO(fp.read())\n except Exception as e:\n error = 'Could not load file, possible exception : {exception}'.format(exception=e)\n print(error)\n raise ValueError(error)\n\n dialect = None\n\n ############\n # check for file type\n ############\n\n # try to guess if its an excel file\n xlsx_sig = b'\\x50\\x4B\\x05\\06'\n # xlsx_sig2 = b'\\x50\\x4B\\x03\\x04'\n xls_sig = b'\\x09\\x08\\x10\\x00\\x00\\x06\\x05\\x00'\n\n # different whence, offset, size for different types\n excel_meta = [('xls', 0, 512, 8), 
('xlsx', 2, -22, 4)]\n\n for filename, whence, offset, size in excel_meta:\n\n try:\n data.seek(offset, whence) # Seek to the offset.\n bytes = data.read(size) # Capture the specified number of bytes.\n data.seek(0)\n codecs.getencoder('hex')(bytes)\n\n if bytes == xls_sig:\n return data, 'xls', dialect\n elif bytes == xlsx_sig:\n return data, 'xlsx', dialect\n\n except Exception:\n data.seek(0)\n\n # if not excel it can be a json file or a CSV, convert from binary to stringio\n\n byte_str = data.read()\n # Move it to StringIO\n try:\n # Handle Microsoft's BOM \"special\" UTF-8 encoding\n if byte_str.startswith(codecs.BOM_UTF8):\n data = StringIO(byte_str.decode('utf-8-sig'))\n else:\n data = StringIO(byte_str.decode('utf-8'))\n\n except Exception:\n print(traceback.format_exc())\n print('Could not load into string')\n\n # see if its JSON\n buffer = data.read(100)\n data.seek(0)\n text = buffer.strip()\n # analyze first n characters\n if len(text) > 0:\n text = text.strip()\n # it it looks like a json, then try to parse it\n if text.startswith('{') or text.startswith('['):\n try:\n json.loads(data.read())\n data.seek(0)\n return data, 'json', dialect\n except Exception:\n data.seek(0)\n return data, None, dialect\n\n # lets try to figure out if its a csv\n try:\n dialect = FileHandler._get_csv_dialect(file_path)\n if dialect:\n return data, 'csv', dialect\n return data, None, dialect\n except Exception:\n data.seek(0)\n print('Could not detect format for this file')\n print(traceback.format_exc())\n # No file type identified\n return data, None, dialect\n\n @staticmethod\n def _get_file_path(path) -> str:\n try:\n is_url = urlparse(path).scheme in ('http', 'https')\n except Exception:\n is_url = False\n if is_url:\n path = FileHandler._fetch_url(path)\n return path\n\n @staticmethod\n def _get_csv_dialect(file_path) -> csv.Dialect:\n with open(file_path, 'rt') as f:\n try:\n accepted_csv_delimiters = [',', '\\t', ';']\n dialect = csv.Sniffer().sniff(f.read(128 * 1024), delimiters=accepted_csv_delimiters)\n except csv.Error:\n dialect = None\n return dialect\n\n @staticmethod\n def _fetch_url(url: str) -> str:\n temp_dir = tempfile.mkdtemp(prefix='mindsdb_file_url_')\n try:\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(os.path.join(temp_dir, 'file'), 'wb') as f:\n for chunk in r:\n f.write(chunk)\n else:\n raise Exception(f'Responce status code is {r.status_code}')\n except Exception as e:\n print(f'Error during getting {url}')\n print(e)\n raise\n return os.path.join(temp_dir, 'file')\n\n def get_tables(self) -> Response:\n \"\"\"\n List all files\n \"\"\"\n files_meta = self.file_controller.get_files()\n data = [{\n 'TABLE_NAME': x['name'],\n 'TABLE_ROWS': x['row_count']\n } for x in files_meta]\n return Response(\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame(data)\n )\n\n def get_columns(self, table_name) -> Response:\n file_meta = self.file_controller.get_file_meta(table_name)\n result = Response(\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame([\n {\n 'Field': x.strip(),\n 'Type': 'str'\n } for x in file_meta['columns']\n ])\n )\n return result\n", "path": "mindsdb/integrations/handlers/file_handler/file_handler.py"}]}
3,408
197
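The record's `_get_data_io` pipeline falls back to `csv.Sniffer` for dialect detection, which is easy to exercise in isolation. A sketch assuming the same delimiter whitelist as `_get_csv_dialect` (the sample text is illustrative):

```python
import csv
import io

ACCEPTED_DELIMITERS = [',', '\t', ';']  # same whitelist as _get_csv_dialect


def sniff_dialect(text, sample_limit=128 * 1024):
    """Return a csv.Dialect for `text`, or None when sniffing fails."""
    try:
        return csv.Sniffer().sniff(text[:sample_limit],
                                   delimiters=''.join(ACCEPTED_DELIMITERS))
    except csv.Error:
        return None


SAMPLE = "a;b;c\n1;2;3\n4;5;6\n"
dialect = sniff_dialect(SAMPLE)
print(dialect.delimiter if dialect else 'not csv')  # ';'

rows = list(csv.reader(io.StringIO(SAMPLE), dialect))
print(rows)  # [['a', 'b', 'c'], ['1', '2', '3'], ['4', '5', '6']]
```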
gh_patches_debug_2174
rasdani/github-patches
git_diff
fedora-infra__bodhi-1042
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GET on /masher/ errors with a 404 response GET on `/masher` works, but GET on `/masher/` returns an HTTP 404 error code. We should add a route to allow the trailing slash on this URL. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi/server/__init__.py` Content: ``` 1 # This program is free software; you can redistribute it and/or 2 # modify it under the terms of the GNU General Public License 3 # as published by the Free Software Foundation; either version 2 4 # of the License, or (at your option) any later version. 5 # 6 # This program is distributed in the hope that it will be useful, 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 # GNU General Public License for more details. 10 # 11 # You should have received a copy of the GNU General Public License 12 # along with this program; if not, write to the Free Software 13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 15 from collections import defaultdict 16 from dogpile.cache import make_region 17 from munch import munchify 18 from sqlalchemy import engine_from_config 19 from sqlalchemy.orm import scoped_session, sessionmaker 20 from zope.sqlalchemy import ZopeTransactionExtension 21 22 from pyramid.settings import asbool 23 from pyramid.security import unauthenticated_userid 24 from pyramid.config import Configurator 25 from pyramid.authentication import AuthTktAuthenticationPolicy 26 from pyramid.authorization import ACLAuthorizationPolicy 27 from pyramid.renderers import JSONP 28 from pyramid.exceptions import HTTPForbidden 29 30 from . import buildsys 31 32 import logging 33 34 log = logging.getLogger(__name__) 35 36 37 # TODO -- someday move this externally to "fedora_flavored_markdown" 38 from bodhi.server import ffmarkdown 39 ffmarkdown.inject() 40 41 # 42 # Request methods 43 # 44 45 def get_dbsession(request): 46 engine = engine_from_config(request.registry.settings, 'sqlalchemy.') 47 Sess = scoped_session(sessionmaker(extension=ZopeTransactionExtension())) 48 Sess.configure(bind=engine) 49 session = Sess() 50 51 def cleanup(request): 52 ## No need to do rollback/commit ourselves. the zope transaction 53 ## manager takes care of that for us... 54 #if request.exception is not None: 55 # session.rollback() 56 #else: 57 # session.commit() 58 ## However, we may still want to explicitly close the session we opened 59 #log.debug("Closing session at the end of a request.") 60 session.close() 61 62 request.add_finished_callback(cleanup) 63 64 return session 65 66 67 def get_cacheregion(request): 68 region = make_region() 69 region.configure_from_config(request.registry.settings, "dogpile.cache.") 70 return region 71 72 73 def get_user(request): 74 from bodhi.server.models import User 75 userid = unauthenticated_userid(request) 76 if userid is not None: 77 user = request.db.query(User).filter_by(name=unicode(userid)).first() 78 # Why munch? 
https://github.com/fedora-infra/bodhi/issues/473 79 return munchify(user.__json__(request=request)) 80 81 82 def groupfinder(userid, request): 83 from bodhi.server.models import User 84 if request.user: 85 user = User.get(request.user.name, request.db) 86 return ['group:' + group.name for group in user.groups] 87 88 89 def get_koji(request): 90 return buildsys.get_session() 91 92 93 def get_buildinfo(request): 94 """ 95 A per-request cache populated by the validators and shared with the views 96 to store frequently used package-specific data, like build tags and ACLs. 97 """ 98 return defaultdict(dict) 99 100 101 def get_releases(request): 102 from bodhi.server.models import Release 103 return Release.all_releases(request.db) 104 105 # 106 # Cornice filters 107 # 108 109 def exception_filter(response, request): 110 """Log exceptions that get thrown up to cornice""" 111 if isinstance(response, Exception): 112 log.exception('Unhandled exception raised: %r' % response) 113 return response 114 115 from cornice.validators import DEFAULT_FILTERS 116 DEFAULT_FILTERS.insert(0, exception_filter) 117 118 119 # 120 # Bodhi initialization 121 # 122 123 def main(global_config, testing=None, session=None, **settings): 124 """ This function returns a WSGI application """ 125 # Setup our buildsystem 126 buildsys.setup_buildsystem(settings) 127 128 # Sessions & Caching 129 from pyramid.session import SignedCookieSessionFactory 130 session_factory = SignedCookieSessionFactory(settings['session.secret']) 131 132 # Construct a list of all groups we're interested in 133 default = ' '.join([settings.get(key, '') for key in [ 134 'important_groups', 135 'admin_packager_groups', 136 'mandatory_packager_groups', 137 'admin_groups', 138 ]]) 139 # pyramid_fas_openid looks for this setting 140 settings['openid.groups'] = settings.get('openid.groups', default).split() 141 142 config = Configurator(settings=settings, session_factory=session_factory) 143 144 # Plugins 145 config.include('pyramid_mako') 146 config.include('cornice') 147 148 # Lazy-loaded memoized request properties 149 if session: 150 config.add_request_method(lambda _: session, 'db', reify=True) 151 else: 152 config.add_request_method(get_dbsession, 'db', reify=True) 153 154 config.add_request_method(get_user, 'user', reify=True) 155 config.add_request_method(get_koji, 'koji', reify=True) 156 config.add_request_method(get_cacheregion, 'cache', reify=True) 157 config.add_request_method(get_buildinfo, 'buildinfo', reify=True) 158 config.add_request_method(get_releases, 'releases', reify=True) 159 160 # Templating 161 config.add_mako_renderer('.html', settings_prefix='mako.') 162 config.add_static_view('static', 'bodhi:server/static') 163 164 from bodhi.server.renderers import rss, jpeg 165 config.add_renderer('rss', rss) 166 config.add_renderer('jpeg', jpeg) 167 config.add_renderer('jsonp', JSONP(param_name='callback')) 168 169 # i18n 170 config.add_translation_dirs('bodhi:server/locale/') 171 172 # Authentication & Authorization 173 if testing: 174 # use a permissive security policy while running unit tests 175 config.testing_securitypolicy(userid=testing, permissive=True) 176 else: 177 config.set_authentication_policy(AuthTktAuthenticationPolicy( 178 settings['authtkt.secret'], 179 callback=groupfinder, 180 secure=asbool(settings['authtkt.secure']), 181 hashalg='sha512')) 182 config.set_authorization_policy(ACLAuthorizationPolicy()) 183 184 # Frontpage 185 config.add_route('home', '/') 186 187 # Views for creating new objects 188 
config.add_route('new_update', '/updates/new') 189 config.add_route('new_override', '/overrides/new') 190 config.add_route('new_stack', '/stacks/new') 191 192 # Metrics 193 config.add_route('metrics', '/metrics') 194 config.add_route('masher_status', '/masher') 195 196 # Auto-completion search 197 config.add_route('search_packages', '/search/packages') 198 config.add_route('latest_candidates', '/latest_candidates') 199 config.add_route('latest_builds', '/latest_builds') 200 201 config.add_route('captcha_image', '/captcha/{cipherkey}/') 202 203 # pyramid.openid 204 config.add_route('login', '/login') 205 config.add_view('bodhi.server.security.login', route_name='login') 206 config.add_view('bodhi.server.security.login', context=HTTPForbidden) 207 config.add_route('logout', '/logout') 208 config.add_view('bodhi.server.security.logout', route_name='logout') 209 config.add_route('verify_openid', pattern='/dologin.html') 210 config.add_view('pyramid_fas_openid.verify_openid', route_name='verify_openid') 211 212 config.add_route('api_version', '/api_version') 213 214 # The only user preference we have. 215 config.add_route('popup_toggle', '/popup_toggle') 216 217 config.scan('bodhi.server.views') 218 config.scan('bodhi.server.services') 219 config.scan('bodhi.server.captcha') 220 config.scan('bodhi.server.events') 221 222 return config.make_wsgi_app() 223 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi/server/__init__.py b/bodhi/server/__init__.py --- a/bodhi/server/__init__.py +++ b/bodhi/server/__init__.py @@ -191,7 +191,7 @@ # Metrics config.add_route('metrics', '/metrics') - config.add_route('masher_status', '/masher') + config.add_route('masher_status', '/masher/') # Auto-completion search config.add_route('search_packages', '/search/packages')
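Pyramid route patterns match the request path literally, so `/masher` and `/masher/` are distinct URLs; the one-character diff above registers the slash-terminated form. A minimal, hypothetical Pyramid app (not bodhi's real config) showing the fixed pattern, plus Pyramid's `append_slash` not-found view as an optional complement:

```python
# Hypothetical Pyramid app, not bodhi's real config: it only demonstrates
# that the route pattern must carry the trailing slash to match '/masher/'.
from wsgiref.simple_server import make_server

from pyramid.config import Configurator
from pyramid.response import Response


def masher_status(request):
    return Response('masher ok')


def make_app():
    config = Configurator()
    config.add_route('masher_status', '/masher/')  # the fixed pattern
    config.add_view(masher_status, route_name='masher_status')
    # Optional complement: redirect '/masher' to '/masher/' instead of 404ing.
    config.add_notfound_view(append_slash=True)
    return config.make_wsgi_app()


if __name__ == '__main__':
    make_server('127.0.0.1', 6543, make_app()).serve_forever()
```

With `append_slash=True`, a request for the bare path is redirected to the slash-terminated URL whenever such a route exists, which covers clients that hit either form.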
{"golden_diff": "diff --git a/bodhi/server/__init__.py b/bodhi/server/__init__.py\n--- a/bodhi/server/__init__.py\n+++ b/bodhi/server/__init__.py\n@@ -191,7 +191,7 @@\n \n # Metrics\n config.add_route('metrics', '/metrics')\n- config.add_route('masher_status', '/masher')\n+ config.add_route('masher_status', '/masher/')\n \n # Auto-completion search\n config.add_route('search_packages', '/search/packages')\n", "issue": "GET on /masher/ errors with a 404 response\nGET on `/masher` works, but GET on `/masher/` returns an HTTP 404 error code. We should add a route to allow the trailing slash on this URL.\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nfrom collections import defaultdict\nfrom dogpile.cache import make_region\nfrom munch import munchify\nfrom sqlalchemy import engine_from_config\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom zope.sqlalchemy import ZopeTransactionExtension\n\nfrom pyramid.settings import asbool\nfrom pyramid.security import unauthenticated_userid\nfrom pyramid.config import Configurator\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.renderers import JSONP\nfrom pyramid.exceptions import HTTPForbidden\n\nfrom . import buildsys\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n# TODO -- someday move this externally to \"fedora_flavored_markdown\"\nfrom bodhi.server import ffmarkdown\nffmarkdown.inject()\n\n#\n# Request methods\n#\n\ndef get_dbsession(request):\n engine = engine_from_config(request.registry.settings, 'sqlalchemy.')\n Sess = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))\n Sess.configure(bind=engine)\n session = Sess()\n\n def cleanup(request):\n ## No need to do rollback/commit ourselves. the zope transaction\n ## manager takes care of that for us...\n #if request.exception is not None:\n # session.rollback()\n #else:\n # session.commit()\n ## However, we may still want to explicitly close the session we opened\n #log.debug(\"Closing session at the end of a request.\")\n session.close()\n\n request.add_finished_callback(cleanup)\n\n return session\n\n\ndef get_cacheregion(request):\n region = make_region()\n region.configure_from_config(request.registry.settings, \"dogpile.cache.\")\n return region\n\n\ndef get_user(request):\n from bodhi.server.models import User\n userid = unauthenticated_userid(request)\n if userid is not None:\n user = request.db.query(User).filter_by(name=unicode(userid)).first()\n # Why munch? 
https://github.com/fedora-infra/bodhi/issues/473\n return munchify(user.__json__(request=request))\n\n\ndef groupfinder(userid, request):\n from bodhi.server.models import User\n if request.user:\n user = User.get(request.user.name, request.db)\n return ['group:' + group.name for group in user.groups]\n\n\ndef get_koji(request):\n return buildsys.get_session()\n\n\ndef get_buildinfo(request):\n \"\"\"\n A per-request cache populated by the validators and shared with the views\n to store frequently used package-specific data, like build tags and ACLs.\n \"\"\"\n return defaultdict(dict)\n\n\ndef get_releases(request):\n from bodhi.server.models import Release\n return Release.all_releases(request.db)\n\n#\n# Cornice filters\n#\n\ndef exception_filter(response, request):\n \"\"\"Log exceptions that get thrown up to cornice\"\"\"\n if isinstance(response, Exception):\n log.exception('Unhandled exception raised: %r' % response)\n return response\n\nfrom cornice.validators import DEFAULT_FILTERS\nDEFAULT_FILTERS.insert(0, exception_filter)\n\n\n#\n# Bodhi initialization\n#\n\ndef main(global_config, testing=None, session=None, **settings):\n \"\"\" This function returns a WSGI application \"\"\"\n # Setup our buildsystem\n buildsys.setup_buildsystem(settings)\n\n # Sessions & Caching\n from pyramid.session import SignedCookieSessionFactory\n session_factory = SignedCookieSessionFactory(settings['session.secret'])\n\n # Construct a list of all groups we're interested in\n default = ' '.join([settings.get(key, '') for key in [\n 'important_groups',\n 'admin_packager_groups',\n 'mandatory_packager_groups',\n 'admin_groups',\n ]])\n # pyramid_fas_openid looks for this setting\n settings['openid.groups'] = settings.get('openid.groups', default).split()\n\n config = Configurator(settings=settings, session_factory=session_factory)\n\n # Plugins\n config.include('pyramid_mako')\n config.include('cornice')\n\n # Lazy-loaded memoized request properties\n if session:\n config.add_request_method(lambda _: session, 'db', reify=True)\n else:\n config.add_request_method(get_dbsession, 'db', reify=True)\n\n config.add_request_method(get_user, 'user', reify=True)\n config.add_request_method(get_koji, 'koji', reify=True)\n config.add_request_method(get_cacheregion, 'cache', reify=True)\n config.add_request_method(get_buildinfo, 'buildinfo', reify=True)\n config.add_request_method(get_releases, 'releases', reify=True)\n\n # Templating\n config.add_mako_renderer('.html', settings_prefix='mako.')\n config.add_static_view('static', 'bodhi:server/static')\n\n from bodhi.server.renderers import rss, jpeg\n config.add_renderer('rss', rss)\n config.add_renderer('jpeg', jpeg)\n config.add_renderer('jsonp', JSONP(param_name='callback'))\n\n # i18n\n config.add_translation_dirs('bodhi:server/locale/')\n\n # Authentication & Authorization\n if testing:\n # use a permissive security policy while running unit tests\n config.testing_securitypolicy(userid=testing, permissive=True)\n else:\n config.set_authentication_policy(AuthTktAuthenticationPolicy(\n settings['authtkt.secret'],\n callback=groupfinder,\n secure=asbool(settings['authtkt.secure']),\n hashalg='sha512'))\n config.set_authorization_policy(ACLAuthorizationPolicy())\n\n # Frontpage\n config.add_route('home', '/')\n\n # Views for creating new objects\n config.add_route('new_update', '/updates/new')\n config.add_route('new_override', '/overrides/new')\n config.add_route('new_stack', '/stacks/new')\n\n # Metrics\n config.add_route('metrics', '/metrics')\n 
config.add_route('masher_status', '/masher')\n\n # Auto-completion search\n config.add_route('search_packages', '/search/packages')\n config.add_route('latest_candidates', '/latest_candidates')\n config.add_route('latest_builds', '/latest_builds')\n\n config.add_route('captcha_image', '/captcha/{cipherkey}/')\n\n # pyramid.openid\n config.add_route('login', '/login')\n config.add_view('bodhi.server.security.login', route_name='login')\n config.add_view('bodhi.server.security.login', context=HTTPForbidden)\n config.add_route('logout', '/logout')\n config.add_view('bodhi.server.security.logout', route_name='logout')\n config.add_route('verify_openid', pattern='/dologin.html')\n config.add_view('pyramid_fas_openid.verify_openid', route_name='verify_openid')\n\n config.add_route('api_version', '/api_version')\n\n # The only user preference we have.\n config.add_route('popup_toggle', '/popup_toggle')\n\n config.scan('bodhi.server.views')\n config.scan('bodhi.server.services')\n config.scan('bodhi.server.captcha')\n config.scan('bodhi.server.events')\n\n return config.make_wsgi_app()\n", "path": "bodhi/server/__init__.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nfrom collections import defaultdict\nfrom dogpile.cache import make_region\nfrom munch import munchify\nfrom sqlalchemy import engine_from_config\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom zope.sqlalchemy import ZopeTransactionExtension\n\nfrom pyramid.settings import asbool\nfrom pyramid.security import unauthenticated_userid\nfrom pyramid.config import Configurator\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.renderers import JSONP\nfrom pyramid.exceptions import HTTPForbidden\n\nfrom . import buildsys\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n# TODO -- someday move this externally to \"fedora_flavored_markdown\"\nfrom bodhi.server import ffmarkdown\nffmarkdown.inject()\n\n#\n# Request methods\n#\n\ndef get_dbsession(request):\n engine = engine_from_config(request.registry.settings, 'sqlalchemy.')\n Sess = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))\n Sess.configure(bind=engine)\n session = Sess()\n\n def cleanup(request):\n ## No need to do rollback/commit ourselves. 
the zope transaction\n ## manager takes care of that for us...\n #if request.exception is not None:\n # session.rollback()\n #else:\n # session.commit()\n ## However, we may still want to explicitly close the session we opened\n #log.debug(\"Closing session at the end of a request.\")\n session.close()\n\n request.add_finished_callback(cleanup)\n\n return session\n\n\ndef get_cacheregion(request):\n region = make_region()\n region.configure_from_config(request.registry.settings, \"dogpile.cache.\")\n return region\n\n\ndef get_user(request):\n from bodhi.server.models import User\n userid = unauthenticated_userid(request)\n if userid is not None:\n user = request.db.query(User).filter_by(name=unicode(userid)).first()\n # Why munch? https://github.com/fedora-infra/bodhi/issues/473\n return munchify(user.__json__(request=request))\n\n\ndef groupfinder(userid, request):\n from bodhi.server.models import User\n if request.user:\n user = User.get(request.user.name, request.db)\n return ['group:' + group.name for group in user.groups]\n\n\ndef get_koji(request):\n return buildsys.get_session()\n\n\ndef get_buildinfo(request):\n \"\"\"\n A per-request cache populated by the validators and shared with the views\n to store frequently used package-specific data, like build tags and ACLs.\n \"\"\"\n return defaultdict(dict)\n\n\ndef get_releases(request):\n from bodhi.server.models import Release\n return Release.all_releases(request.db)\n\n#\n# Cornice filters\n#\n\ndef exception_filter(response, request):\n \"\"\"Log exceptions that get thrown up to cornice\"\"\"\n if isinstance(response, Exception):\n log.exception('Unhandled exception raised: %r' % response)\n return response\n\nfrom cornice.validators import DEFAULT_FILTERS\nDEFAULT_FILTERS.insert(0, exception_filter)\n\n\n#\n# Bodhi initialization\n#\n\ndef main(global_config, testing=None, session=None, **settings):\n \"\"\" This function returns a WSGI application \"\"\"\n # Setup our buildsystem\n buildsys.setup_buildsystem(settings)\n\n # Sessions & Caching\n from pyramid.session import SignedCookieSessionFactory\n session_factory = SignedCookieSessionFactory(settings['session.secret'])\n\n # Construct a list of all groups we're interested in\n default = ' '.join([settings.get(key, '') for key in [\n 'important_groups',\n 'admin_packager_groups',\n 'mandatory_packager_groups',\n 'admin_groups',\n ]])\n # pyramid_fas_openid looks for this setting\n settings['openid.groups'] = settings.get('openid.groups', default).split()\n\n config = Configurator(settings=settings, session_factory=session_factory)\n\n # Plugins\n config.include('pyramid_mako')\n config.include('cornice')\n\n # Lazy-loaded memoized request properties\n if session:\n config.add_request_method(lambda _: session, 'db', reify=True)\n else:\n config.add_request_method(get_dbsession, 'db', reify=True)\n\n config.add_request_method(get_user, 'user', reify=True)\n config.add_request_method(get_koji, 'koji', reify=True)\n config.add_request_method(get_cacheregion, 'cache', reify=True)\n config.add_request_method(get_buildinfo, 'buildinfo', reify=True)\n config.add_request_method(get_releases, 'releases', reify=True)\n\n # Templating\n config.add_mako_renderer('.html', settings_prefix='mako.')\n config.add_static_view('static', 'bodhi:server/static')\n\n from bodhi.server.renderers import rss, jpeg\n config.add_renderer('rss', rss)\n config.add_renderer('jpeg', jpeg)\n config.add_renderer('jsonp', JSONP(param_name='callback'))\n\n # i18n\n 
config.add_translation_dirs('bodhi:server/locale/')\n\n # Authentication & Authorization\n if testing:\n # use a permissive security policy while running unit tests\n config.testing_securitypolicy(userid=testing, permissive=True)\n else:\n config.set_authentication_policy(AuthTktAuthenticationPolicy(\n settings['authtkt.secret'],\n callback=groupfinder,\n secure=asbool(settings['authtkt.secure']),\n hashalg='sha512'))\n config.set_authorization_policy(ACLAuthorizationPolicy())\n\n # Frontpage\n config.add_route('home', '/')\n\n # Views for creating new objects\n config.add_route('new_update', '/updates/new')\n config.add_route('new_override', '/overrides/new')\n config.add_route('new_stack', '/stacks/new')\n\n # Metrics\n config.add_route('metrics', '/metrics')\n config.add_route('masher_status', '/masher/')\n\n # Auto-completion search\n config.add_route('search_packages', '/search/packages')\n config.add_route('latest_candidates', '/latest_candidates')\n config.add_route('latest_builds', '/latest_builds')\n\n config.add_route('captcha_image', '/captcha/{cipherkey}/')\n\n # pyramid.openid\n config.add_route('login', '/login')\n config.add_view('bodhi.server.security.login', route_name='login')\n config.add_view('bodhi.server.security.login', context=HTTPForbidden)\n config.add_route('logout', '/logout')\n config.add_view('bodhi.server.security.logout', route_name='logout')\n config.add_route('verify_openid', pattern='/dologin.html')\n config.add_view('pyramid_fas_openid.verify_openid', route_name='verify_openid')\n\n config.add_route('api_version', '/api_version')\n\n # The only user preference we have.\n config.add_route('popup_toggle', '/popup_toggle')\n\n config.scan('bodhi.server.views')\n config.scan('bodhi.server.services')\n config.scan('bodhi.server.captcha')\n config.scan('bodhi.server.events')\n\n return config.make_wsgi_app()\n", "path": "bodhi/server/__init__.py"}]}
2,580
121
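A regression test for the route fix is short with WebTest; this is a hypothetical test module in which `make_app()` is a throwaway stand-in for the WSGI app bodhi's `main()` would build:

```python
import unittest

from pyramid.config import Configurator
from pyramid.response import Response
from webtest import TestApp


def make_app():
    config = Configurator()
    config.add_route('masher_status', '/masher/')
    config.add_view(lambda request: Response('ok'),
                    route_name='masher_status')
    return config.make_wsgi_app()


class MasherRouteTests(unittest.TestCase):
    def test_trailing_slash_resolves(self):
        TestApp(make_app()).get('/masher/', status=200)

    def test_bare_path_misses_the_route(self):
        # No append_slash view is registered here, so '/masher' stays a 404.
        TestApp(make_app()).get('/masher', status=404)


if __name__ == '__main__':
    unittest.main()
```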
gh_patches_debug_25484
rasdani/github-patches
git_diff
dj-stripe__dj-stripe-1259
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DJStripeSubscriptionPermission issue returning bool This permission is not returning properly the bool. **Current behaviour** ```python class DJStripeSubscriptionPermission(BasePermission): """ A permission to be used when wanting to permit users with active subscriptions. """ def has_permission(self, request, view): """ Check if the subscriber has an active subscription. Returns false if: * a subscriber isn't passed through the request See ``utils.subscriber_has_active_subscription`` for more rules. """ try: subscriber_has_active_subscription(subscriber_request_callback(request)) except AttributeError: return False ``` Here is not returning True or False except if it falls in the exception. **Expected Behaviour** ```python class DJStripeSubscriptionPermission(BasePermission): """ A permission to be used when wanting to permit users with active subscriptions. """ def has_permission(self, request, view): """ Check if the subscriber has an active subscription. Returns false if: * a subscriber isn't passed through the request See ``utils.subscriber_has_active_subscription`` for more rules. """ try: return bool(subscriber_has_active_subscription(subscriber_request_callback(request))) except AttributeError: return False ``` Just missing a return and it solves the problem. We don't need a bool directly there, I just added just to follow the same patterns as the DRF (also being added to the other project :-)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `djstripe/contrib/rest_framework/serializers.py` Content: ``` 1 """ 2 .. module:: dj-stripe.contrib.rest_framework.serializers. 3 4 :synopsis: dj-stripe - Serializers to be used with the dj-stripe REST API. 5 6 .. moduleauthor:: Philippe Luickx (@philippeluickx) 7 8 """ 9 10 from rest_framework import serializers 11 from rest_framework.serializers import ModelSerializer 12 13 from djstripe.models import Subscription 14 15 16 class SubscriptionSerializer(ModelSerializer): 17 """A serializer used for the Subscription model.""" 18 19 class Meta: 20 """Model class options.""" 21 22 model = Subscription 23 exclude = ["default_tax_rates"] 24 25 26 class CreateSubscriptionSerializer(serializers.Serializer): 27 """A serializer used to create a Subscription.""" 28 29 stripe_token = serializers.CharField(max_length=200) 30 plan = serializers.CharField(max_length=50) 31 charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None) 32 tax_percent = serializers.DecimalField( 33 required=False, max_digits=5, decimal_places=2 34 ) 35 ``` Path: `djstripe/contrib/rest_framework/permissions.py` Content: ``` 1 """ 2 .. module:: dj-stripe.contrib.rest_framework.permissions. 3 4 :synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API. 5 6 .. moduleauthor:: @kavdev, @pydanny 7 8 """ 9 from rest_framework.permissions import BasePermission 10 11 from ...settings import subscriber_request_callback 12 from ...utils import subscriber_has_active_subscription 13 14 15 class DJStripeSubscriptionPermission(BasePermission): 16 """ 17 A permission to be used when wanting to permit users with active subscriptions. 18 """ 19 20 def has_permission(self, request, view): 21 """ 22 Check if the subscriber has an active subscription. 
23 24 Returns false if: 25 * a subscriber isn't passed through the request 26 27 See ``utils.subscriber_has_active_subscription`` for more rules. 28 29 """ 30 try: 31 subscriber_has_active_subscription(subscriber_request_callback(request)) 32 except AttributeError: 33 return False 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/djstripe/contrib/rest_framework/permissions.py b/djstripe/contrib/rest_framework/permissions.py --- a/djstripe/contrib/rest_framework/permissions.py +++ b/djstripe/contrib/rest_framework/permissions.py @@ -17,7 +17,7 @@ A permission to be used when wanting to permit users with active subscriptions. """ - def has_permission(self, request, view): + def has_permission(self, request, view) -> bool: """ Check if the subscriber has an active subscription. @@ -28,6 +28,8 @@ """ try: - subscriber_has_active_subscription(subscriber_request_callback(request)) + return subscriber_has_active_subscription( + subscriber_request_callback(request) + ) except AttributeError: return False diff --git a/djstripe/contrib/rest_framework/serializers.py b/djstripe/contrib/rest_framework/serializers.py --- a/djstripe/contrib/rest_framework/serializers.py +++ b/djstripe/contrib/rest_framework/serializers.py @@ -28,7 +28,9 @@ stripe_token = serializers.CharField(max_length=200) plan = serializers.CharField(max_length=50) - charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None) + charge_immediately = serializers.BooleanField( + required=False, allow_null=True, default=None + ) tax_percent = serializers.DecimalField( required=False, max_digits=5, decimal_places=2 )
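For context on the patch above: a Python method that falls through without an explicit `return` yields `None`, and rest_framework treats any falsy result from `has_permission` as a denial, so the unfixed permission rejected every request even when the subscription check passed. A minimal, self-contained sketch of that mechanism (the class names here are hypothetical, not part of the patch):

```python
# Falsy-return behaviour behind the bug: no `return` means an implicit None.
class FallsThrough:
    def has_permission(self, request, view):
        active = True   # pretend subscriber_has_active_subscription(...) succeeded
        bool(active)    # result computed but never returned -> implicit None

class Fixed:
    def has_permission(self, request, view):
        active = True
        return active   # the one-line fix from the diff

for perm in (FallsThrough(), Fixed()):
    # DRF effectively does `if not permission.has_permission(...)` to deny access.
    print(type(perm).__name__, bool(perm.has_permission(None, None)))
# FallsThrough False
# Fixed True
```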
{"golden_diff": "diff --git a/djstripe/contrib/rest_framework/permissions.py b/djstripe/contrib/rest_framework/permissions.py\n--- a/djstripe/contrib/rest_framework/permissions.py\n+++ b/djstripe/contrib/rest_framework/permissions.py\n@@ -17,7 +17,7 @@\n A permission to be used when wanting to permit users with active subscriptions.\n \"\"\"\n \n- def has_permission(self, request, view):\n+ def has_permission(self, request, view) -> bool:\n \"\"\"\n Check if the subscriber has an active subscription.\n \n@@ -28,6 +28,8 @@\n \n \"\"\"\n try:\n- subscriber_has_active_subscription(subscriber_request_callback(request))\n+ return subscriber_has_active_subscription(\n+ subscriber_request_callback(request)\n+ )\n except AttributeError:\n return False\ndiff --git a/djstripe/contrib/rest_framework/serializers.py b/djstripe/contrib/rest_framework/serializers.py\n--- a/djstripe/contrib/rest_framework/serializers.py\n+++ b/djstripe/contrib/rest_framework/serializers.py\n@@ -28,7 +28,9 @@\n \n stripe_token = serializers.CharField(max_length=200)\n plan = serializers.CharField(max_length=50)\n- charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None)\n+ charge_immediately = serializers.BooleanField(\n+ required=False, allow_null=True, default=None\n+ )\n tax_percent = serializers.DecimalField(\n required=False, max_digits=5, decimal_places=2\n )\n", "issue": "DJStripeSubscriptionPermission issue returning bool\nThis permission is not returning properly the bool.\r\n\r\n**Current behaviour**\r\n\r\n```python\r\nclass DJStripeSubscriptionPermission(BasePermission):\r\n \"\"\"\r\n A permission to be used when wanting to permit users with active subscriptions.\r\n \"\"\"\r\n\r\n def has_permission(self, request, view):\r\n \"\"\"\r\n Check if the subscriber has an active subscription.\r\n\r\n Returns false if:\r\n * a subscriber isn't passed through the request\r\n\r\n See ``utils.subscriber_has_active_subscription`` for more rules.\r\n\r\n \"\"\"\r\n try:\r\n subscriber_has_active_subscription(subscriber_request_callback(request))\r\n except AttributeError:\r\n return False\r\n```\r\n\r\nHere is not returning True or False except if it falls in the exception.\r\n\r\n\r\n**Expected Behaviour**\r\n\r\n\r\n```python\r\nclass DJStripeSubscriptionPermission(BasePermission):\r\n \"\"\"\r\n A permission to be used when wanting to permit users with active subscriptions.\r\n \"\"\"\r\n\r\n def has_permission(self, request, view):\r\n \"\"\"\r\n Check if the subscriber has an active subscription.\r\n\r\n Returns false if:\r\n * a subscriber isn't passed through the request\r\n\r\n See ``utils.subscriber_has_active_subscription`` for more rules.\r\n\r\n \"\"\"\r\n try:\r\n return bool(subscriber_has_active_subscription(subscriber_request_callback(request)))\r\n except AttributeError:\r\n return False\r\n```\r\n\r\nJust missing a return and it solves the problem. We don't need a bool directly there, I just added just to follow the same patterns as the DRF (also being added to the other project :-))\n", "before_files": [{"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.serializers.\n\n :synopsis: dj-stripe - Serializers to be used with the dj-stripe REST API.\n\n.. 
moduleauthor:: Philippe Luickx (@philippeluickx)\n\n\"\"\"\n\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom djstripe.models import Subscription\n\n\nclass SubscriptionSerializer(ModelSerializer):\n \"\"\"A serializer used for the Subscription model.\"\"\"\n\n class Meta:\n \"\"\"Model class options.\"\"\"\n\n model = Subscription\n exclude = [\"default_tax_rates\"]\n\n\nclass CreateSubscriptionSerializer(serializers.Serializer):\n \"\"\"A serializer used to create a Subscription.\"\"\"\n\n stripe_token = serializers.CharField(max_length=200)\n plan = serializers.CharField(max_length=50)\n charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None)\n tax_percent = serializers.DecimalField(\n required=False, max_digits=5, decimal_places=2\n )\n", "path": "djstripe/contrib/rest_framework/serializers.py"}, {"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.permissions.\n\n :synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API.\n\n.. moduleauthor:: @kavdev, @pydanny\n\n\"\"\"\nfrom rest_framework.permissions import BasePermission\n\nfrom ...settings import subscriber_request_callback\nfrom ...utils import subscriber_has_active_subscription\n\n\nclass DJStripeSubscriptionPermission(BasePermission):\n \"\"\"\n A permission to be used when wanting to permit users with active subscriptions.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Check if the subscriber has an active subscription.\n\n Returns false if:\n * a subscriber isn't passed through the request\n\n See ``utils.subscriber_has_active_subscription`` for more rules.\n\n \"\"\"\n try:\n subscriber_has_active_subscription(subscriber_request_callback(request))\n except AttributeError:\n return False\n", "path": "djstripe/contrib/rest_framework/permissions.py"}], "after_files": [{"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.serializers.\n\n :synopsis: dj-stripe - Serializers to be used with the dj-stripe REST API.\n\n.. moduleauthor:: Philippe Luickx (@philippeluickx)\n\n\"\"\"\n\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom djstripe.models import Subscription\n\n\nclass SubscriptionSerializer(ModelSerializer):\n \"\"\"A serializer used for the Subscription model.\"\"\"\n\n class Meta:\n \"\"\"Model class options.\"\"\"\n\n model = Subscription\n exclude = [\"default_tax_rates\"]\n\n\nclass CreateSubscriptionSerializer(serializers.Serializer):\n \"\"\"A serializer used to create a Subscription.\"\"\"\n\n stripe_token = serializers.CharField(max_length=200)\n plan = serializers.CharField(max_length=50)\n charge_immediately = serializers.BooleanField(\n required=False, allow_null=True, default=None\n )\n tax_percent = serializers.DecimalField(\n required=False, max_digits=5, decimal_places=2\n )\n", "path": "djstripe/contrib/rest_framework/serializers.py"}, {"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.permissions.\n\n :synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API.\n\n.. 
moduleauthor:: @kavdev, @pydanny\n\n\"\"\"\nfrom rest_framework.permissions import BasePermission\n\nfrom ...settings import subscriber_request_callback\nfrom ...utils import subscriber_has_active_subscription\n\n\nclass DJStripeSubscriptionPermission(BasePermission):\n \"\"\"\n A permission to be used when wanting to permit users with active subscriptions.\n \"\"\"\n\n def has_permission(self, request, view) -> bool:\n \"\"\"\n Check if the subscriber has an active subscription.\n\n Returns false if:\n * a subscriber isn't passed through the request\n\n See ``utils.subscriber_has_active_subscription`` for more rules.\n\n \"\"\"\n try:\n return subscriber_has_active_subscription(\n subscriber_request_callback(request)\n )\n except AttributeError:\n return False\n", "path": "djstripe/contrib/rest_framework/permissions.py"}]}
1,099
334
gh_patches_debug_9096
rasdani/github-patches
git_diff
ivy-llc__ivy-26785
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove duplicate-keys in `ivy\functional\backends\numpy\__init__.py` In the file: `ivy\functional\backends\numpy\__init__.py` duplicate keys are present in the following lines https://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L25 https://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L40 https://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L31 https://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L37 They can be removed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/backends/numpy/__init__.py` Content: ``` 1 # global 2 import sys 3 import numpy as np 4 5 # local 6 import ivy 7 from ivy.func_wrapper import _dtype_from_version 8 9 backend_version = {"version": np.__version__} 10 11 # noinspection PyUnresolvedReferences 12 if not ivy.is_local(): 13 _module_in_memory = sys.modules[__name__] 14 else: 15 _module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__] 16 17 use = ivy.utils.backend.ContextManager(_module_in_memory) 18 19 # wrap __array_ufunc__ method of ivy.Array to prioritize Ivy array methods when using numpu backend 20 21 22 def wrap__array_ufunc__(func): 23 def rep_method(self, ufunc, method, *inputs, **kwargs): 24 methods = { 25 "not_equal": "not_equal", 26 "greater": "greater", 27 "less": "less", 28 "greater_equal": "greater_equal", 29 "less_equal": "less_equal", 30 "multiply": "multiply", 31 "divide": "divide", 32 "remainder": "remainder", 33 "equal": "equal", 34 "bitwise_and": "bitwise_and", 35 "matmul": "matmul", 36 "power": "pow", 37 "divide": "divide", 38 "subtract": "subtract", 39 "add": "add", 40 "not_equal": "not_equal", 41 } 42 if ufunc.__name__ in methods.keys(): 43 return eval("ivy." + methods[ufunc.__name__] + "(*inputs, **kwargs)") 44 return func(self, ufunc, method, *inputs, **kwargs) 45 46 return rep_method 47 48 49 ivy.Array.__array_ufunc__ = wrap__array_ufunc__(ivy.Array.__array_ufunc__) 50 51 NativeArray = np.ndarray 52 NativeDevice = str 53 NativeDtype = np.dtype 54 NativeShape = tuple 55 56 NativeSparseArray = None 57 58 59 # devices 60 valid_devices = ("cpu",) 61 62 invalid_devices = ("gpu", "tpu") 63 64 # native data types 65 native_int8 = np.dtype("int8") 66 native_int16 = np.dtype("int16") 67 native_int32 = np.dtype("int32") 68 native_int64 = np.dtype("int64") 69 native_uint8 = np.dtype("uint8") 70 native_uint16 = np.dtype("uint16") 71 native_uint32 = np.dtype("uint32") 72 native_uint64 = np.dtype("uint64") 73 native_float16 = np.dtype("float16") 74 native_float32 = np.dtype("float32") 75 native_float64 = np.dtype("float64") 76 native_complex64 = np.dtype("complex64") 77 native_complex128 = np.dtype("complex128") 78 native_double = native_float64 79 native_bool = np.dtype("bool") 80 81 # valid data types 82 # ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures. 
83 84 # update these to add new dtypes 85 valid_dtypes = { 86 "1.26.0 and below": ( 87 ivy.int8, 88 ivy.int16, 89 ivy.int32, 90 ivy.int64, 91 ivy.uint8, 92 ivy.uint16, 93 ivy.uint32, 94 ivy.uint64, 95 ivy.float16, 96 ivy.float32, 97 ivy.float64, 98 ivy.complex64, 99 ivy.complex128, 100 ivy.bool, 101 ) 102 } 103 valid_numeric_dtypes = { 104 "1.26.0 and below": ( 105 ivy.int8, 106 ivy.int16, 107 ivy.int32, 108 ivy.int64, 109 ivy.uint8, 110 ivy.uint16, 111 ivy.uint32, 112 ivy.uint64, 113 ivy.float16, 114 ivy.float32, 115 ivy.float64, 116 ivy.complex64, 117 ivy.complex128, 118 ) 119 } 120 valid_int_dtypes = { 121 "1.26.0 and below": ( 122 ivy.int8, 123 ivy.int16, 124 ivy.int32, 125 ivy.int64, 126 ivy.uint8, 127 ivy.uint16, 128 ivy.uint32, 129 ivy.uint64, 130 ) 131 } 132 valid_float_dtypes = {"1.26.0 and below": (ivy.float16, ivy.float32, ivy.float64)} 133 valid_uint_dtypes = { 134 "1.26.0 and below": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64) 135 } 136 valid_complex_dtypes = {"1.26.0 and below": (ivy.complex64, ivy.complex128)} 137 138 # leave these untouched 139 valid_dtypes = _dtype_from_version(valid_dtypes, backend_version) 140 valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version) 141 valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version) 142 valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version) 143 valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version) 144 valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version) 145 146 # invalid data types 147 # update these to add new dtypes 148 invalid_dtypes = {"1.26.0 and below": (ivy.bfloat16,)} 149 invalid_numeric_dtypes = {"1.26.0 and below": (ivy.bfloat16,)} 150 invalid_int_dtypes = {"1.26.0 and below": ()} 151 invalid_float_dtypes = {"1.26.0 and below": (ivy.bfloat16,)} 152 invalid_uint_dtypes = {"1.26.0 and below": ()} 153 invalid_complex_dtypes = {"1.26.0 and below": ()} 154 155 156 # leave these untouched 157 invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version) 158 invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version) 159 invalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version) 160 invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version) 161 invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version) 162 invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version) 163 164 165 native_inplace_support = True 166 167 supports_gradients = False 168 169 170 def closest_valid_dtype(type=None, /, as_native=False): 171 if type is None: 172 type = ivy.default_dtype() 173 elif isinstance(type, str) and type in invalid_dtypes: 174 type = {"bfloat16": ivy.float16}[type] 175 return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type) 176 177 178 backend = "numpy" 179 180 181 # local sub-modules 182 from . import activations 183 from .activations import * 184 from . import creation 185 from .creation import * 186 from . import data_type 187 from .data_type import * 188 from . import device 189 from .device import * 190 from . import elementwise 191 from .elementwise import * 192 from . import general 193 from .general import * 194 from . import gradients 195 from .gradients import * 196 from . import layers 197 from .layers import * 198 from . import linear_algebra as linalg 199 from .linear_algebra import * 200 from . import manipulation 201 from .manipulation import * 202 from . 
import random 203 from .random import * 204 from . import searching 205 from .searching import * 206 from . import set 207 from .set import * 208 from . import sorting 209 from .sorting import * 210 from . import statistical 211 from .statistical import * 212 from . import utility 213 from .utility import * 214 from . import experimental 215 from .experimental import * 216 from . import control_flow_ops 217 from .control_flow_ops import * 218 219 220 # sub-backends 221 222 from . import sub_backends 223 from .sub_backends import * 224 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/backends/numpy/__init__.py b/ivy/functional/backends/numpy/__init__.py --- a/ivy/functional/backends/numpy/__init__.py +++ b/ivy/functional/backends/numpy/__init__.py @@ -34,10 +34,8 @@ "bitwise_and": "bitwise_and", "matmul": "matmul", "power": "pow", - "divide": "divide", "subtract": "subtract", "add": "add", - "not_equal": "not_equal", } if ufunc.__name__ in methods.keys(): return eval("ivy." + methods[ufunc.__name__] + "(*inputs, **kwargs)")
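Why the removal above is safe: a Python dict literal accepts repeated keys at parse time but keeps only the last occurrence, so the duplicate entries were dead code with no effect on the ufunc dispatch (linters such as pylint's duplicate-key check flag them for exactly this reason). A quick runnable illustration, independent of the ivy codebase:

```python
# Repeated keys in a dict literal silently collapse to the last occurrence.
methods = {
    "not_equal": "not_equal",
    "divide": "divide",
    "divide": "divide",        # duplicate -- overwrites the identical first entry
    "not_equal": "not_equal",  # duplicate -- same story
}
print(len(methods), sorted(methods))  # 2 ['divide', 'not_equal']
```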
{"golden_diff": "diff --git a/ivy/functional/backends/numpy/__init__.py b/ivy/functional/backends/numpy/__init__.py\n--- a/ivy/functional/backends/numpy/__init__.py\n+++ b/ivy/functional/backends/numpy/__init__.py\n@@ -34,10 +34,8 @@\n \"bitwise_and\": \"bitwise_and\",\n \"matmul\": \"matmul\",\n \"power\": \"pow\",\n- \"divide\": \"divide\",\n \"subtract\": \"subtract\",\n \"add\": \"add\",\n- \"not_equal\": \"not_equal\",\n }\n if ufunc.__name__ in methods.keys():\n return eval(\"ivy.\" + methods[ufunc.__name__] + \"(*inputs, **kwargs)\")\n", "issue": "Remove duplicate-keys in `ivy\\functional\\backends\\numpy\\__init__.py`\nIn the file: `ivy\\functional\\backends\\numpy\\__init__.py` duplicate keys are present in the following lines\r\nhttps://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L25\r\nhttps://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L40\r\nhttps://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L31\r\nhttps://github.com/unifyai/ivy/blob/1be0f8c2c8d916b10f06ab538b43fc506a1992a6/ivy/functional/backends/numpy/__init__.py#L37\r\nThey can be removed.\n", "before_files": [{"content": "# global\nimport sys\nimport numpy as np\n\n# local\nimport ivy\nfrom ivy.func_wrapper import _dtype_from_version\n\nbackend_version = {\"version\": np.__version__}\n\n# noinspection PyUnresolvedReferences\nif not ivy.is_local():\n _module_in_memory = sys.modules[__name__]\nelse:\n _module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]\n\nuse = ivy.utils.backend.ContextManager(_module_in_memory)\n\n# wrap __array_ufunc__ method of ivy.Array to prioritize Ivy array methods when using numpu backend\n\n\ndef wrap__array_ufunc__(func):\n def rep_method(self, ufunc, method, *inputs, **kwargs):\n methods = {\n \"not_equal\": \"not_equal\",\n \"greater\": \"greater\",\n \"less\": \"less\",\n \"greater_equal\": \"greater_equal\",\n \"less_equal\": \"less_equal\",\n \"multiply\": \"multiply\",\n \"divide\": \"divide\",\n \"remainder\": \"remainder\",\n \"equal\": \"equal\",\n \"bitwise_and\": \"bitwise_and\",\n \"matmul\": \"matmul\",\n \"power\": \"pow\",\n \"divide\": \"divide\",\n \"subtract\": \"subtract\",\n \"add\": \"add\",\n \"not_equal\": \"not_equal\",\n }\n if ufunc.__name__ in methods.keys():\n return eval(\"ivy.\" + methods[ufunc.__name__] + \"(*inputs, **kwargs)\")\n return func(self, ufunc, method, *inputs, **kwargs)\n\n return rep_method\n\n\nivy.Array.__array_ufunc__ = wrap__array_ufunc__(ivy.Array.__array_ufunc__)\n\nNativeArray = np.ndarray\nNativeDevice = str\nNativeDtype = np.dtype\nNativeShape = tuple\n\nNativeSparseArray = None\n\n\n# devices\nvalid_devices = (\"cpu\",)\n\ninvalid_devices = (\"gpu\", \"tpu\")\n\n# native data types\nnative_int8 = np.dtype(\"int8\")\nnative_int16 = np.dtype(\"int16\")\nnative_int32 = np.dtype(\"int32\")\nnative_int64 = np.dtype(\"int64\")\nnative_uint8 = np.dtype(\"uint8\")\nnative_uint16 = np.dtype(\"uint16\")\nnative_uint32 = np.dtype(\"uint32\")\nnative_uint64 = np.dtype(\"uint64\")\nnative_float16 = np.dtype(\"float16\")\nnative_float32 = np.dtype(\"float32\")\nnative_float64 = np.dtype(\"float64\")\nnative_complex64 = np.dtype(\"complex64\")\nnative_complex128 = np.dtype(\"complex128\")\nnative_double = native_float64\nnative_bool = np.dtype(\"bool\")\n\n# valid data types\n# ToDo: Add complex dtypes to valid_dtypes and fix all 
resulting failures.\n\n# update these to add new dtypes\nvalid_dtypes = {\n \"1.26.0 and below\": (\n ivy.int8,\n ivy.int16,\n ivy.int32,\n ivy.int64,\n ivy.uint8,\n ivy.uint16,\n ivy.uint32,\n ivy.uint64,\n ivy.float16,\n ivy.float32,\n ivy.float64,\n ivy.complex64,\n ivy.complex128,\n ivy.bool,\n )\n}\nvalid_numeric_dtypes = {\n \"1.26.0 and below\": (\n ivy.int8,\n ivy.int16,\n ivy.int32,\n ivy.int64,\n ivy.uint8,\n ivy.uint16,\n ivy.uint32,\n ivy.uint64,\n ivy.float16,\n ivy.float32,\n ivy.float64,\n ivy.complex64,\n ivy.complex128,\n )\n}\nvalid_int_dtypes = {\n \"1.26.0 and below\": (\n ivy.int8,\n ivy.int16,\n ivy.int32,\n ivy.int64,\n ivy.uint8,\n ivy.uint16,\n ivy.uint32,\n ivy.uint64,\n )\n}\nvalid_float_dtypes = {\"1.26.0 and below\": (ivy.float16, ivy.float32, ivy.float64)}\nvalid_uint_dtypes = {\n \"1.26.0 and below\": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)\n}\nvalid_complex_dtypes = {\"1.26.0 and below\": (ivy.complex64, ivy.complex128)}\n\n# leave these untouched\nvalid_dtypes = _dtype_from_version(valid_dtypes, backend_version)\nvalid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)\nvalid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)\nvalid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)\nvalid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)\nvalid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)\n\n# invalid data types\n# update these to add new dtypes\ninvalid_dtypes = {\"1.26.0 and below\": (ivy.bfloat16,)}\ninvalid_numeric_dtypes = {\"1.26.0 and below\": (ivy.bfloat16,)}\ninvalid_int_dtypes = {\"1.26.0 and below\": ()}\ninvalid_float_dtypes = {\"1.26.0 and below\": (ivy.bfloat16,)}\ninvalid_uint_dtypes = {\"1.26.0 and below\": ()}\ninvalid_complex_dtypes = {\"1.26.0 and below\": ()}\n\n\n# leave these untouched\ninvalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)\ninvalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)\ninvalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)\ninvalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)\ninvalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)\ninvalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)\n\n\nnative_inplace_support = True\n\nsupports_gradients = False\n\n\ndef closest_valid_dtype(type=None, /, as_native=False):\n if type is None:\n type = ivy.default_dtype()\n elif isinstance(type, str) and type in invalid_dtypes:\n type = {\"bfloat16\": ivy.float16}[type]\n return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)\n\n\nbackend = \"numpy\"\n\n\n# local sub-modules\nfrom . import activations\nfrom .activations import *\nfrom . import creation\nfrom .creation import *\nfrom . import data_type\nfrom .data_type import *\nfrom . import device\nfrom .device import *\nfrom . import elementwise\nfrom .elementwise import *\nfrom . import general\nfrom .general import *\nfrom . import gradients\nfrom .gradients import *\nfrom . import layers\nfrom .layers import *\nfrom . import linear_algebra as linalg\nfrom .linear_algebra import *\nfrom . import manipulation\nfrom .manipulation import *\nfrom . import random\nfrom .random import *\nfrom . import searching\nfrom .searching import *\nfrom . import set\nfrom .set import *\nfrom . import sorting\nfrom .sorting import *\nfrom . 
import statistical\nfrom .statistical import *\nfrom . import utility\nfrom .utility import *\nfrom . import experimental\nfrom .experimental import *\nfrom . import control_flow_ops\nfrom .control_flow_ops import *\n\n\n# sub-backends\n\nfrom . import sub_backends\nfrom .sub_backends import *\n", "path": "ivy/functional/backends/numpy/__init__.py"}], "after_files": [{"content": "# global\nimport sys\nimport numpy as np\n\n# local\nimport ivy\nfrom ivy.func_wrapper import _dtype_from_version\n\nbackend_version = {\"version\": np.__version__}\n\n# noinspection PyUnresolvedReferences\nif not ivy.is_local():\n _module_in_memory = sys.modules[__name__]\nelse:\n _module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]\n\nuse = ivy.utils.backend.ContextManager(_module_in_memory)\n\n# wrap __array_ufunc__ method of ivy.Array to prioritize Ivy array methods when using numpu backend\n\n\ndef wrap__array_ufunc__(func):\n def rep_method(self, ufunc, method, *inputs, **kwargs):\n methods = {\n \"not_equal\": \"not_equal\",\n \"greater\": \"greater\",\n \"less\": \"less\",\n \"greater_equal\": \"greater_equal\",\n \"less_equal\": \"less_equal\",\n \"multiply\": \"multiply\",\n \"divide\": \"divide\",\n \"remainder\": \"remainder\",\n \"equal\": \"equal\",\n \"bitwise_and\": \"bitwise_and\",\n \"matmul\": \"matmul\",\n \"power\": \"pow\",\n \"subtract\": \"subtract\",\n \"add\": \"add\",\n }\n if ufunc.__name__ in methods.keys():\n return eval(\"ivy.\" + methods[ufunc.__name__] + \"(*inputs, **kwargs)\")\n return func(self, ufunc, method, *inputs, **kwargs)\n\n return rep_method\n\n\nivy.Array.__array_ufunc__ = wrap__array_ufunc__(ivy.Array.__array_ufunc__)\n\nNativeArray = np.ndarray\nNativeDevice = str\nNativeDtype = np.dtype\nNativeShape = tuple\n\nNativeSparseArray = None\n\n\n# devices\nvalid_devices = (\"cpu\",)\n\ninvalid_devices = (\"gpu\", \"tpu\")\n\n# native data types\nnative_int8 = np.dtype(\"int8\")\nnative_int16 = np.dtype(\"int16\")\nnative_int32 = np.dtype(\"int32\")\nnative_int64 = np.dtype(\"int64\")\nnative_uint8 = np.dtype(\"uint8\")\nnative_uint16 = np.dtype(\"uint16\")\nnative_uint32 = np.dtype(\"uint32\")\nnative_uint64 = np.dtype(\"uint64\")\nnative_float16 = np.dtype(\"float16\")\nnative_float32 = np.dtype(\"float32\")\nnative_float64 = np.dtype(\"float64\")\nnative_complex64 = np.dtype(\"complex64\")\nnative_complex128 = np.dtype(\"complex128\")\nnative_double = native_float64\nnative_bool = np.dtype(\"bool\")\n\n# valid data types\n# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.\n\n# update these to add new dtypes\nvalid_dtypes = {\n \"1.26.0 and below\": (\n ivy.int8,\n ivy.int16,\n ivy.int32,\n ivy.int64,\n ivy.uint8,\n ivy.uint16,\n ivy.uint32,\n ivy.uint64,\n ivy.float16,\n ivy.float32,\n ivy.float64,\n ivy.complex64,\n ivy.complex128,\n ivy.bool,\n )\n}\nvalid_numeric_dtypes = {\n \"1.26.0 and below\": (\n ivy.int8,\n ivy.int16,\n ivy.int32,\n ivy.int64,\n ivy.uint8,\n ivy.uint16,\n ivy.uint32,\n ivy.uint64,\n ivy.float16,\n ivy.float32,\n ivy.float64,\n ivy.complex64,\n ivy.complex128,\n )\n}\nvalid_int_dtypes = {\n \"1.26.0 and below\": (\n ivy.int8,\n ivy.int16,\n ivy.int32,\n ivy.int64,\n ivy.uint8,\n ivy.uint16,\n ivy.uint32,\n ivy.uint64,\n )\n}\nvalid_float_dtypes = {\"1.26.0 and below\": (ivy.float16, ivy.float32, ivy.float64)}\nvalid_uint_dtypes = {\n \"1.26.0 and below\": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)\n}\nvalid_complex_dtypes = {\"1.26.0 and below\": (ivy.complex64, ivy.complex128)}\n\n# leave 
these untouched\nvalid_dtypes = _dtype_from_version(valid_dtypes, backend_version)\nvalid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)\nvalid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)\nvalid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)\nvalid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)\nvalid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)\n\n# invalid data types\n# update these to add new dtypes\ninvalid_dtypes = {\"1.26.0 and below\": (ivy.bfloat16,)}\ninvalid_numeric_dtypes = {\"1.26.0 and below\": (ivy.bfloat16,)}\ninvalid_int_dtypes = {\"1.26.0 and below\": ()}\ninvalid_float_dtypes = {\"1.26.0 and below\": (ivy.bfloat16,)}\ninvalid_uint_dtypes = {\"1.26.0 and below\": ()}\ninvalid_complex_dtypes = {\"1.26.0 and below\": ()}\n\n\n# leave these untouched\ninvalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)\ninvalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)\ninvalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)\ninvalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)\ninvalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)\ninvalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)\n\n\nnative_inplace_support = True\n\nsupports_gradients = False\n\n\ndef closest_valid_dtype(type=None, /, as_native=False):\n if type is None:\n type = ivy.default_dtype()\n elif isinstance(type, str) and type in invalid_dtypes:\n type = {\"bfloat16\": ivy.float16}[type]\n return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)\n\n\nbackend = \"numpy\"\n\n\n# local sub-modules\nfrom . import activations\nfrom .activations import *\nfrom . import creation\nfrom .creation import *\nfrom . import data_type\nfrom .data_type import *\nfrom . import device\nfrom .device import *\nfrom . import elementwise\nfrom .elementwise import *\nfrom . import general\nfrom .general import *\nfrom . import gradients\nfrom .gradients import *\nfrom . import layers\nfrom .layers import *\nfrom . import linear_algebra as linalg\nfrom .linear_algebra import *\nfrom . import manipulation\nfrom .manipulation import *\nfrom . import random\nfrom .random import *\nfrom . import searching\nfrom .searching import *\nfrom . import set\nfrom .set import *\nfrom . import sorting\nfrom .sorting import *\nfrom . import statistical\nfrom .statistical import *\nfrom . import utility\nfrom .utility import *\nfrom . import experimental\nfrom .experimental import *\nfrom . import control_flow_ops\nfrom .control_flow_ops import *\n\n\n# sub-backends\n\nfrom . import sub_backends\nfrom .sub_backends import *\n", "path": "ivy/functional/backends/numpy/__init__.py"}]}
2,876
168
gh_patches_debug_32253
rasdani/github-patches
git_diff
aws__aws-cli-2044
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [aws deploy push] Destination bundle type does not match extension When specifying a destination with a suffix of ".zip" or ".tar" or ".tar.gz" for use with the `aws deploy push` command the resulting bundle is always only zipped. ``` $ aws deploy push --application-name test --s3-location s3://$AWS_DEPLOY_BUCKET/test/latest.tar --no-ignore-hidden-files --source . To deploy with this revision, run: aws deploy create-deployment --application-name test --s3-location bucket=BUCKET_NAME,key=test/latest.tar,bundleType=zip,eTag=... --deployment-group-name <deployment-group-name> --deployment-config-name <deployment-config-name> --description <description> ``` I downloaded and check the file it come back as a zip file. ``` $ file latest.tar latest.tar: Zip archive data, at least v2.0 to extract ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awscli/customizations/codedeploy/push.py` Content: ``` 1 # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 14 import os 15 import sys 16 import zipfile 17 import tempfile 18 import contextlib 19 from datetime import datetime 20 21 from botocore.exceptions import ClientError 22 23 from awscli.compat import six 24 from awscli.customizations.codedeploy.utils import validate_s3_location 25 from awscli.customizations.commands import BasicCommand 26 from awscli.compat import ZIP_COMPRESSION_MODE 27 28 29 ONE_MB = 1 << 20 30 MULTIPART_LIMIT = 6 * ONE_MB 31 32 33 class Push(BasicCommand): 34 NAME = 'push' 35 36 DESCRIPTION = ( 37 'Bundles and uploads to Amazon Simple Storage Service (Amazon S3) an ' 38 'application revision, which is an archive file that contains ' 39 'deployable content and an accompanying Application Specification ' 40 'file (AppSpec file). If the upload is successful, a message is ' 41 'returned that describes how to call the create-deployment command to ' 42 'deploy the application revision from Amazon S3 to target Amazon ' 43 'Elastic Compute Cloud (Amazon EC2) instances.' 44 ) 45 46 ARG_TABLE = [ 47 { 48 'name': 'application-name', 49 'synopsis': '--application-name <app-name>', 50 'required': True, 51 'help_text': ( 52 'Required. The name of the AWS CodeDeploy application to be ' 53 'associated with the application revision.' 54 ) 55 }, 56 { 57 'name': 's3-location', 58 'synopsis': '--s3-location s3://<bucket>/<key>', 59 'required': True, 60 'help_text': ( 61 'Required. Information about the location of the application ' 62 'revision to be uploaded to Amazon S3. You must specify both ' 63 'a bucket and a key that represent the Amazon S3 bucket name ' 64 'and the object key name. Use the format ' 65 's3://\<bucket\>/\<key\>' 66 ) 67 }, 68 { 69 'name': 'ignore-hidden-files', 70 'action': 'store_true', 71 'default': False, 72 'group_name': 'ignore-hidden-files', 73 'help_text': ( 74 'Optional. 
Set the --ignore-hidden-files flag to not bundle ' 75 'and upload hidden files to Amazon S3; otherwise, set the ' 76 '--no-ignore-hidden-files flag (the default) to bundle and ' 77 'upload hidden files to Amazon S3.' 78 ) 79 }, 80 { 81 'name': 'no-ignore-hidden-files', 82 'action': 'store_true', 83 'default': False, 84 'group_name': 'ignore-hidden-files' 85 }, 86 { 87 'name': 'source', 88 'synopsis': '--source <path>', 89 'default': '.', 90 'help_text': ( 91 'Optional. The location of the deployable content and the ' 92 'accompanying AppSpec file on the development machine to be ' 93 'bundled and uploaded to Amazon S3. If not specified, the ' 94 'current directory is used.' 95 ) 96 }, 97 { 98 'name': 'description', 99 'synopsis': '--description <description>', 100 'help_text': ( 101 'Optional. A comment that summarizes the application ' 102 'revision. If not specified, the default string "Uploaded by ' 103 'AWS CLI \'time\' UTC" is used, where \'time\' is the current ' 104 'system time in Coordinated Universal Time (UTC).' 105 ) 106 } 107 ] 108 109 def _run_main(self, parsed_args, parsed_globals): 110 self._validate_args(parsed_args) 111 self.codedeploy = self._session.create_client( 112 'codedeploy', 113 region_name=parsed_globals.region, 114 endpoint_url=parsed_globals.endpoint_url, 115 verify=parsed_globals.verify_ssl 116 ) 117 self.s3 = self._session.create_client( 118 's3', 119 region_name=parsed_globals.region 120 ) 121 self._push(parsed_args) 122 123 def _validate_args(self, parsed_args): 124 validate_s3_location(parsed_args, 's3_location') 125 if parsed_args.ignore_hidden_files \ 126 and parsed_args.no_ignore_hidden_files: 127 raise RuntimeError( 128 'You cannot specify both --ignore-hidden-files and ' 129 '--no-ignore-hidden-files.' 130 ) 131 if not parsed_args.description: 132 parsed_args.description = ( 133 'Uploaded by AWS CLI {0} UTC'.format( 134 datetime.utcnow().isoformat() 135 ) 136 ) 137 138 def _push(self, params): 139 with self._compress( 140 params.source, 141 params.ignore_hidden_files 142 ) as bundle: 143 try: 144 upload_response = self._upload_to_s3(params, bundle) 145 params.eTag = upload_response['ETag'].replace('"', "") 146 if 'VersionId' in upload_response: 147 params.version = upload_response['VersionId'] 148 except Exception as e: 149 raise RuntimeError( 150 'Failed to upload \'%s\' to \'%s\': %s' % 151 (params.source, 152 params.s3_location, 153 str(e)) 154 ) 155 self._register_revision(params) 156 157 if 'version' in params: 158 version_string = ',version={0}'.format(params.version) 159 else: 160 version_string = '' 161 s3location_string = ( 162 '--s3-location bucket={0},key={1},' 163 'bundleType=zip,eTag={2}{3}'.format( 164 params.bucket, 165 params.key, 166 params.eTag, 167 version_string 168 ) 169 ) 170 sys.stdout.write( 171 'To deploy with this revision, run:\n' 172 'aws deploy create-deployment ' 173 '--application-name {0} {1} ' 174 '--deployment-group-name <deployment-group-name> ' 175 '--deployment-config-name <deployment-config-name> ' 176 '--description <description>\n'.format( 177 params.application_name, 178 s3location_string 179 ) 180 ) 181 182 @contextlib.contextmanager 183 def _compress(self, source, ignore_hidden_files=False): 184 source_path = os.path.abspath(source) 185 appspec_path = os.path.sep.join([source_path, 'appspec.yml']) 186 with tempfile.TemporaryFile('w+b') as tf: 187 zf = zipfile.ZipFile(tf, 'w', allowZip64=True) 188 # Using 'try'/'finally' instead of 'with' statement since ZipFile 189 # does not have support context manager in 
Python 2.6. 190 try: 191 contains_appspec = False 192 for root, dirs, files in os.walk(source, topdown=True): 193 if ignore_hidden_files: 194 files = [fn for fn in files if not fn.startswith('.')] 195 dirs[:] = [dn for dn in dirs if not dn.startswith('.')] 196 for fn in files: 197 filename = os.path.join(root, fn) 198 filename = os.path.abspath(filename) 199 arcname = filename[len(source_path) + 1:] 200 if filename == appspec_path: 201 contains_appspec = True 202 zf.write(filename, arcname, ZIP_COMPRESSION_MODE) 203 if not contains_appspec: 204 raise RuntimeError( 205 '{0} was not found'.format(appspec_path) 206 ) 207 finally: 208 zf.close() 209 yield tf 210 211 def _upload_to_s3(self, params, bundle): 212 size_remaining = self._bundle_size(bundle) 213 if size_remaining < MULTIPART_LIMIT: 214 return self.s3.put_object( 215 Bucket=params.bucket, 216 Key=params.key, 217 Body=bundle 218 ) 219 else: 220 return self._multipart_upload_to_s3( 221 params, 222 bundle, 223 size_remaining 224 ) 225 226 def _bundle_size(self, bundle): 227 bundle.seek(0, 2) 228 size = bundle.tell() 229 bundle.seek(0) 230 return size 231 232 def _multipart_upload_to_s3(self, params, bundle, size_remaining): 233 create_response = self.s3.create_multipart_upload( 234 Bucket=params.bucket, 235 Key=params.key 236 ) 237 upload_id = create_response['UploadId'] 238 try: 239 part_num = 1 240 multipart_list = [] 241 bundle.seek(0) 242 while size_remaining > 0: 243 data = bundle.read(MULTIPART_LIMIT) 244 upload_response = self.s3.upload_part( 245 Bucket=params.bucket, 246 Key=params.key, 247 UploadId=upload_id, 248 PartNumber=part_num, 249 Body=six.BytesIO(data) 250 ) 251 multipart_list.append({ 252 'PartNumber': part_num, 253 'ETag': upload_response['ETag'] 254 }) 255 part_num += 1 256 size_remaining -= len(data) 257 return self.s3.complete_multipart_upload( 258 Bucket=params.bucket, 259 Key=params.key, 260 UploadId=upload_id, 261 MultipartUpload={'Parts': multipart_list} 262 ) 263 except ClientError as e: 264 self.s3.abort_multipart_upload( 265 Bucket=params.bucket, 266 Key=params.key, 267 UploadId=upload_id 268 ) 269 raise e 270 271 def _register_revision(self, params): 272 revision = { 273 'revisionType': 'S3', 274 's3Location': { 275 'bucket': params.bucket, 276 'key': params.key, 277 'bundleType': 'zip', 278 'eTag': params.eTag 279 } 280 } 281 if 'version' in params: 282 revision['s3Location']['version'] = params.version 283 self.codedeploy.register_application_revision( 284 applicationName=params.application_name, 285 revision=revision, 286 description=params.description 287 ) 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awscli/customizations/codedeploy/push.py b/awscli/customizations/codedeploy/push.py --- a/awscli/customizations/codedeploy/push.py +++ b/awscli/customizations/codedeploy/push.py @@ -35,7 +35,7 @@ DESCRIPTION = ( 'Bundles and uploads to Amazon Simple Storage Service (Amazon S3) an ' - 'application revision, which is an archive file that contains ' + 'application revision, which is a zip archive file that contains ' 'deployable content and an accompanying Application Specification ' 'file (AppSpec file). If the upload is successful, a message is ' 'returned that describes how to call the create-deployment command to ' @@ -61,8 +61,8 @@ 'Required. Information about the location of the application ' 'revision to be uploaded to Amazon S3. You must specify both ' 'a bucket and a key that represent the Amazon S3 bucket name ' - 'and the object key name. Use the format ' - 's3://\<bucket\>/\<key\>' + 'and the object key name. Content will be zipped before ' + 'uploading. Use the format s3://\<bucket\>/\<key\>' ) }, { @@ -90,7 +90,7 @@ 'help_text': ( 'Optional. The location of the deployable content and the ' 'accompanying AppSpec file on the development machine to be ' - 'bundled and uploaded to Amazon S3. If not specified, the ' + 'zipped and uploaded to Amazon S3. If not specified, the ' 'current directory is used.' ) },
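Note on the resolution above: the accepted change is documentation-only — `_compress` always builds a `zipfile.ZipFile` and the revision's `bundleType` stays hard-coded to `zip`, so the help text now states that content is zipped rather than pretending to honor the key's extension. A small sketch, independent of the CLI code, of why `file` reports a zip archive even when the object key ends in `.tar`:

```python
import io
import zipfile

# Build an in-memory bundle the way `aws deploy push` does (always a zip).
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("appspec.yml", "version: 0.0")

# The first four bytes are the zip magic number, whatever the key is named.
print(buf.getvalue()[:4])  # b'PK\x03\x04'
```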
{"golden_diff": "diff --git a/awscli/customizations/codedeploy/push.py b/awscli/customizations/codedeploy/push.py\n--- a/awscli/customizations/codedeploy/push.py\n+++ b/awscli/customizations/codedeploy/push.py\n@@ -35,7 +35,7 @@\n \n DESCRIPTION = (\n 'Bundles and uploads to Amazon Simple Storage Service (Amazon S3) an '\n- 'application revision, which is an archive file that contains '\n+ 'application revision, which is a zip archive file that contains '\n 'deployable content and an accompanying Application Specification '\n 'file (AppSpec file). If the upload is successful, a message is '\n 'returned that describes how to call the create-deployment command to '\n@@ -61,8 +61,8 @@\n 'Required. Information about the location of the application '\n 'revision to be uploaded to Amazon S3. You must specify both '\n 'a bucket and a key that represent the Amazon S3 bucket name '\n- 'and the object key name. Use the format '\n- 's3://\\<bucket\\>/\\<key\\>'\n+ 'and the object key name. Content will be zipped before '\n+ 'uploading. Use the format s3://\\<bucket\\>/\\<key\\>'\n )\n },\n {\n@@ -90,7 +90,7 @@\n 'help_text': (\n 'Optional. The location of the deployable content and the '\n 'accompanying AppSpec file on the development machine to be '\n- 'bundled and uploaded to Amazon S3. If not specified, the '\n+ 'zipped and uploaded to Amazon S3. If not specified, the '\n 'current directory is used.'\n )\n },\n", "issue": "[aws deploy push] Destination bundle type does not match extension\nWhen specifying a destination with a suffix of \".zip\" or \".tar\" or \".tar.gz\" for use with the `aws deploy push` command the resulting bundle is always only zipped.\n\n```\n$ aws deploy push --application-name test --s3-location s3://$AWS_DEPLOY_BUCKET/test/latest.tar --no-ignore-hidden-files --source .\nTo deploy with this revision, run:\naws deploy create-deployment --application-name test --s3-location bucket=BUCKET_NAME,key=test/latest.tar,bundleType=zip,eTag=... --deployment-group-name <deployment-group-name> --deployment-config-name <deployment-config-name> --description <description>\n```\n\nI downloaded and check the file it come back as a zip file.\n\n```\n$ file latest.tar \nlatest.tar: Zip archive data, at least v2.0 to extract\n```\n\n", "before_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport sys\nimport zipfile\nimport tempfile\nimport contextlib\nfrom datetime import datetime\n\nfrom botocore.exceptions import ClientError\n\nfrom awscli.compat import six\nfrom awscli.customizations.codedeploy.utils import validate_s3_location\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.compat import ZIP_COMPRESSION_MODE\n\n\nONE_MB = 1 << 20\nMULTIPART_LIMIT = 6 * ONE_MB\n\n\nclass Push(BasicCommand):\n NAME = 'push'\n\n DESCRIPTION = (\n 'Bundles and uploads to Amazon Simple Storage Service (Amazon S3) an '\n 'application revision, which is an archive file that contains '\n 'deployable content and an accompanying Application Specification '\n 'file (AppSpec file). If the upload is successful, a message is '\n 'returned that describes how to call the create-deployment command to '\n 'deploy the application revision from Amazon S3 to target Amazon '\n 'Elastic Compute Cloud (Amazon EC2) instances.'\n )\n\n ARG_TABLE = [\n {\n 'name': 'application-name',\n 'synopsis': '--application-name <app-name>',\n 'required': True,\n 'help_text': (\n 'Required. The name of the AWS CodeDeploy application to be '\n 'associated with the application revision.'\n )\n },\n {\n 'name': 's3-location',\n 'synopsis': '--s3-location s3://<bucket>/<key>',\n 'required': True,\n 'help_text': (\n 'Required. Information about the location of the application '\n 'revision to be uploaded to Amazon S3. You must specify both '\n 'a bucket and a key that represent the Amazon S3 bucket name '\n 'and the object key name. Use the format '\n 's3://\\<bucket\\>/\\<key\\>'\n )\n },\n {\n 'name': 'ignore-hidden-files',\n 'action': 'store_true',\n 'default': False,\n 'group_name': 'ignore-hidden-files',\n 'help_text': (\n 'Optional. Set the --ignore-hidden-files flag to not bundle '\n 'and upload hidden files to Amazon S3; otherwise, set the '\n '--no-ignore-hidden-files flag (the default) to bundle and '\n 'upload hidden files to Amazon S3.'\n )\n },\n {\n 'name': 'no-ignore-hidden-files',\n 'action': 'store_true',\n 'default': False,\n 'group_name': 'ignore-hidden-files'\n },\n {\n 'name': 'source',\n 'synopsis': '--source <path>',\n 'default': '.',\n 'help_text': (\n 'Optional. The location of the deployable content and the '\n 'accompanying AppSpec file on the development machine to be '\n 'bundled and uploaded to Amazon S3. If not specified, the '\n 'current directory is used.'\n )\n },\n {\n 'name': 'description',\n 'synopsis': '--description <description>',\n 'help_text': (\n 'Optional. A comment that summarizes the application '\n 'revision. 
If not specified, the default string \"Uploaded by '\n 'AWS CLI \\'time\\' UTC\" is used, where \\'time\\' is the current '\n 'system time in Coordinated Universal Time (UTC).'\n )\n }\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n self._validate_args(parsed_args)\n self.codedeploy = self._session.create_client(\n 'codedeploy',\n region_name=parsed_globals.region,\n endpoint_url=parsed_globals.endpoint_url,\n verify=parsed_globals.verify_ssl\n )\n self.s3 = self._session.create_client(\n 's3',\n region_name=parsed_globals.region\n )\n self._push(parsed_args)\n\n def _validate_args(self, parsed_args):\n validate_s3_location(parsed_args, 's3_location')\n if parsed_args.ignore_hidden_files \\\n and parsed_args.no_ignore_hidden_files:\n raise RuntimeError(\n 'You cannot specify both --ignore-hidden-files and '\n '--no-ignore-hidden-files.'\n )\n if not parsed_args.description:\n parsed_args.description = (\n 'Uploaded by AWS CLI {0} UTC'.format(\n datetime.utcnow().isoformat()\n )\n )\n\n def _push(self, params):\n with self._compress(\n params.source,\n params.ignore_hidden_files\n ) as bundle:\n try:\n upload_response = self._upload_to_s3(params, bundle)\n params.eTag = upload_response['ETag'].replace('\"', \"\")\n if 'VersionId' in upload_response:\n params.version = upload_response['VersionId']\n except Exception as e:\n raise RuntimeError(\n 'Failed to upload \\'%s\\' to \\'%s\\': %s' %\n (params.source,\n params.s3_location,\n str(e))\n )\n self._register_revision(params)\n\n if 'version' in params:\n version_string = ',version={0}'.format(params.version)\n else:\n version_string = ''\n s3location_string = (\n '--s3-location bucket={0},key={1},'\n 'bundleType=zip,eTag={2}{3}'.format(\n params.bucket,\n params.key,\n params.eTag,\n version_string\n )\n )\n sys.stdout.write(\n 'To deploy with this revision, run:\\n'\n 'aws deploy create-deployment '\n '--application-name {0} {1} '\n '--deployment-group-name <deployment-group-name> '\n '--deployment-config-name <deployment-config-name> '\n '--description <description>\\n'.format(\n params.application_name,\n s3location_string\n )\n )\n\n @contextlib.contextmanager\n def _compress(self, source, ignore_hidden_files=False):\n source_path = os.path.abspath(source)\n appspec_path = os.path.sep.join([source_path, 'appspec.yml'])\n with tempfile.TemporaryFile('w+b') as tf:\n zf = zipfile.ZipFile(tf, 'w', allowZip64=True)\n # Using 'try'/'finally' instead of 'with' statement since ZipFile\n # does not have support context manager in Python 2.6.\n try:\n contains_appspec = False\n for root, dirs, files in os.walk(source, topdown=True):\n if ignore_hidden_files:\n files = [fn for fn in files if not fn.startswith('.')]\n dirs[:] = [dn for dn in dirs if not dn.startswith('.')]\n for fn in files:\n filename = os.path.join(root, fn)\n filename = os.path.abspath(filename)\n arcname = filename[len(source_path) + 1:]\n if filename == appspec_path:\n contains_appspec = True\n zf.write(filename, arcname, ZIP_COMPRESSION_MODE)\n if not contains_appspec:\n raise RuntimeError(\n '{0} was not found'.format(appspec_path)\n )\n finally:\n zf.close()\n yield tf\n\n def _upload_to_s3(self, params, bundle):\n size_remaining = self._bundle_size(bundle)\n if size_remaining < MULTIPART_LIMIT:\n return self.s3.put_object(\n Bucket=params.bucket,\n Key=params.key,\n Body=bundle\n )\n else:\n return self._multipart_upload_to_s3(\n params,\n bundle,\n size_remaining\n )\n\n def _bundle_size(self, bundle):\n bundle.seek(0, 2)\n size = bundle.tell()\n 
bundle.seek(0)\n return size\n\n def _multipart_upload_to_s3(self, params, bundle, size_remaining):\n create_response = self.s3.create_multipart_upload(\n Bucket=params.bucket,\n Key=params.key\n )\n upload_id = create_response['UploadId']\n try:\n part_num = 1\n multipart_list = []\n bundle.seek(0)\n while size_remaining > 0:\n data = bundle.read(MULTIPART_LIMIT)\n upload_response = self.s3.upload_part(\n Bucket=params.bucket,\n Key=params.key,\n UploadId=upload_id,\n PartNumber=part_num,\n Body=six.BytesIO(data)\n )\n multipart_list.append({\n 'PartNumber': part_num,\n 'ETag': upload_response['ETag']\n })\n part_num += 1\n size_remaining -= len(data)\n return self.s3.complete_multipart_upload(\n Bucket=params.bucket,\n Key=params.key,\n UploadId=upload_id,\n MultipartUpload={'Parts': multipart_list}\n )\n except ClientError as e:\n self.s3.abort_multipart_upload(\n Bucket=params.bucket,\n Key=params.key,\n UploadId=upload_id\n )\n raise e\n\n def _register_revision(self, params):\n revision = {\n 'revisionType': 'S3',\n 's3Location': {\n 'bucket': params.bucket,\n 'key': params.key,\n 'bundleType': 'zip',\n 'eTag': params.eTag\n }\n }\n if 'version' in params:\n revision['s3Location']['version'] = params.version\n self.codedeploy.register_application_revision(\n applicationName=params.application_name,\n revision=revision,\n description=params.description\n )\n", "path": "awscli/customizations/codedeploy/push.py"}], "after_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport sys\nimport zipfile\nimport tempfile\nimport contextlib\nfrom datetime import datetime\n\nfrom botocore.exceptions import ClientError\n\nfrom awscli.compat import six\nfrom awscli.customizations.codedeploy.utils import validate_s3_location\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.compat import ZIP_COMPRESSION_MODE\n\n\nONE_MB = 1 << 20\nMULTIPART_LIMIT = 6 * ONE_MB\n\n\nclass Push(BasicCommand):\n NAME = 'push'\n\n DESCRIPTION = (\n 'Bundles and uploads to Amazon Simple Storage Service (Amazon S3) an '\n 'application revision, which is a zip archive file that contains '\n 'deployable content and an accompanying Application Specification '\n 'file (AppSpec file). If the upload is successful, a message is '\n 'returned that describes how to call the create-deployment command to '\n 'deploy the application revision from Amazon S3 to target Amazon '\n 'Elastic Compute Cloud (Amazon EC2) instances.'\n )\n\n ARG_TABLE = [\n {\n 'name': 'application-name',\n 'synopsis': '--application-name <app-name>',\n 'required': True,\n 'help_text': (\n 'Required. The name of the AWS CodeDeploy application to be '\n 'associated with the application revision.'\n )\n },\n {\n 'name': 's3-location',\n 'synopsis': '--s3-location s3://<bucket>/<key>',\n 'required': True,\n 'help_text': (\n 'Required. Information about the location of the application '\n 'revision to be uploaded to Amazon S3. 
You must specify both '\n 'a bucket and a key that represent the Amazon S3 bucket name '\n 'and the object key name. Content will be zipped before '\n 'uploading. Use the format s3://\\<bucket\\>/\\<key\\>'\n )\n },\n {\n 'name': 'ignore-hidden-files',\n 'action': 'store_true',\n 'default': False,\n 'group_name': 'ignore-hidden-files',\n 'help_text': (\n 'Optional. Set the --ignore-hidden-files flag to not bundle '\n 'and upload hidden files to Amazon S3; otherwise, set the '\n '--no-ignore-hidden-files flag (the default) to bundle and '\n 'upload hidden files to Amazon S3.'\n )\n },\n {\n 'name': 'no-ignore-hidden-files',\n 'action': 'store_true',\n 'default': False,\n 'group_name': 'ignore-hidden-files'\n },\n {\n 'name': 'source',\n 'synopsis': '--source <path>',\n 'default': '.',\n 'help_text': (\n 'Optional. The location of the deployable content and the '\n 'accompanying AppSpec file on the development machine to be '\n 'zipped and uploaded to Amazon S3. If not specified, the '\n 'current directory is used.'\n )\n },\n {\n 'name': 'description',\n 'synopsis': '--description <description>',\n 'help_text': (\n 'Optional. A comment that summarizes the application '\n 'revision. If not specified, the default string \"Uploaded by '\n 'AWS CLI \\'time\\' UTC\" is used, where \\'time\\' is the current '\n 'system time in Coordinated Universal Time (UTC).'\n )\n }\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n self._validate_args(parsed_args)\n self.codedeploy = self._session.create_client(\n 'codedeploy',\n region_name=parsed_globals.region,\n endpoint_url=parsed_globals.endpoint_url,\n verify=parsed_globals.verify_ssl\n )\n self.s3 = self._session.create_client(\n 's3',\n region_name=parsed_globals.region\n )\n self._push(parsed_args)\n\n def _validate_args(self, parsed_args):\n validate_s3_location(parsed_args, 's3_location')\n if parsed_args.ignore_hidden_files \\\n and parsed_args.no_ignore_hidden_files:\n raise RuntimeError(\n 'You cannot specify both --ignore-hidden-files and '\n '--no-ignore-hidden-files.'\n )\n if not parsed_args.description:\n parsed_args.description = (\n 'Uploaded by AWS CLI {0} UTC'.format(\n datetime.utcnow().isoformat()\n )\n )\n\n def _push(self, params):\n with self._compress(\n params.source,\n params.ignore_hidden_files\n ) as bundle:\n try:\n upload_response = self._upload_to_s3(params, bundle)\n params.eTag = upload_response['ETag'].replace('\"', \"\")\n if 'VersionId' in upload_response:\n params.version = upload_response['VersionId']\n except Exception as e:\n raise RuntimeError(\n 'Failed to upload \\'%s\\' to \\'%s\\': %s' %\n (params.source,\n params.s3_location,\n str(e))\n )\n self._register_revision(params)\n\n if 'version' in params:\n version_string = ',version={0}'.format(params.version)\n else:\n version_string = ''\n s3location_string = (\n '--s3-location bucket={0},key={1},'\n 'bundleType=zip,eTag={2}{3}'.format(\n params.bucket,\n params.key,\n params.eTag,\n version_string\n )\n )\n sys.stdout.write(\n 'To deploy with this revision, run:\\n'\n 'aws deploy create-deployment '\n '--application-name {0} {1} '\n '--deployment-group-name <deployment-group-name> '\n '--deployment-config-name <deployment-config-name> '\n '--description <description>\\n'.format(\n params.application_name,\n s3location_string\n )\n )\n\n @contextlib.contextmanager\n def _compress(self, source, ignore_hidden_files=False):\n source_path = os.path.abspath(source)\n appspec_path = os.path.sep.join([source_path, 'appspec.yml'])\n with 
tempfile.TemporaryFile('w+b') as tf:\n zf = zipfile.ZipFile(tf, 'w', allowZip64=True)\n # Using 'try'/'finally' instead of 'with' statement since ZipFile\n # does not have support context manager in Python 2.6.\n try:\n contains_appspec = False\n for root, dirs, files in os.walk(source, topdown=True):\n if ignore_hidden_files:\n files = [fn for fn in files if not fn.startswith('.')]\n dirs[:] = [dn for dn in dirs if not dn.startswith('.')]\n for fn in files:\n filename = os.path.join(root, fn)\n filename = os.path.abspath(filename)\n arcname = filename[len(source_path) + 1:]\n if filename == appspec_path:\n contains_appspec = True\n zf.write(filename, arcname, ZIP_COMPRESSION_MODE)\n if not contains_appspec:\n raise RuntimeError(\n '{0} was not found'.format(appspec_path)\n )\n finally:\n zf.close()\n yield tf\n\n def _upload_to_s3(self, params, bundle):\n size_remaining = self._bundle_size(bundle)\n if size_remaining < MULTIPART_LIMIT:\n return self.s3.put_object(\n Bucket=params.bucket,\n Key=params.key,\n Body=bundle\n )\n else:\n return self._multipart_upload_to_s3(\n params,\n bundle,\n size_remaining\n )\n\n def _bundle_size(self, bundle):\n bundle.seek(0, 2)\n size = bundle.tell()\n bundle.seek(0)\n return size\n\n def _multipart_upload_to_s3(self, params, bundle, size_remaining):\n create_response = self.s3.create_multipart_upload(\n Bucket=params.bucket,\n Key=params.key\n )\n upload_id = create_response['UploadId']\n try:\n part_num = 1\n multipart_list = []\n bundle.seek(0)\n while size_remaining > 0:\n data = bundle.read(MULTIPART_LIMIT)\n upload_response = self.s3.upload_part(\n Bucket=params.bucket,\n Key=params.key,\n UploadId=upload_id,\n PartNumber=part_num,\n Body=six.BytesIO(data)\n )\n multipart_list.append({\n 'PartNumber': part_num,\n 'ETag': upload_response['ETag']\n })\n part_num += 1\n size_remaining -= len(data)\n return self.s3.complete_multipart_upload(\n Bucket=params.bucket,\n Key=params.key,\n UploadId=upload_id,\n MultipartUpload={'Parts': multipart_list}\n )\n except ClientError as e:\n self.s3.abort_multipart_upload(\n Bucket=params.bucket,\n Key=params.key,\n UploadId=upload_id\n )\n raise e\n\n def _register_revision(self, params):\n revision = {\n 'revisionType': 'S3',\n 's3Location': {\n 'bucket': params.bucket,\n 'key': params.key,\n 'bundleType': 'zip',\n 'eTag': params.eTag\n }\n }\n if 'version' in params:\n revision['s3Location']['version'] = params.version\n self.codedeploy.register_application_revision(\n applicationName=params.application_name,\n revision=revision,\n description=params.description\n )\n", "path": "awscli/customizations/codedeploy/push.py"}]}
3338
384
gh_patches_debug_10110
rasdani/github-patches
git_diff
spack__spack-40298
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Installation issue: [email protected] +cuda %[email protected] ### Steps to reproduce the issue I ran into this when trying to build `mpich %[email protected] +cuda netmod=ucx pmi=pmix ^[email protected] +allow-unsupported-compilers` ```console $ spack spec -l mpich%[email protected]+cuda netmod=ucx pmi=pmix ^[email protected] Input spec -------------------------------- - mpich%[email protected]+cuda netmod=ucx pmi=pmix - ^[email protected] Concretized -------------------------------- - vl25zhp [email protected]%[email protected]~argobots+cuda+fortran~hcoll+hwloc~hydra+libxml2+pci~rocm+romio~slurm~vci~verbs+wrapperrpath build_system=autotools cuda_arch=80 datatype-engine=auto device=ch4 netmod=ucx pmi=pmix arch=linux-rhel8-zen2 [e] tatyw2o ^[email protected]%[email protected]+allow-unsupported-compilers~dev build_system=generic arch=linux-rhel8-zen2 [+] oaezgck ^[email protected]%[email protected] build_system=autotools patches=440b954 arch=linux-rhel8-zen2 [+] wda76ac ^[email protected]%[email protected]~cairo+cuda~gl~libudev+libxml2~netloc~nvml~oneapi-level-zero~opencl+pci~rocm build_system=autotools cuda_arch=80 libs=shared,static arch=linux-rhel8-zen2 [+] v3xl3ra ^[email protected]%[email protected]~symlinks+termlib abi=none build_system=autotools arch=linux-rhel8-zen2 [e] jxaxqp2 ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [+] a25ibkd ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [e] ljdxawj ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [-] 6gqawar ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [+] 6tqy7tj ^[email protected]%[email protected]+pic~python+shared build_system=autotools arch=linux-rhel8-zen2 [+] gmdjnou ^[email protected]%[email protected] build_system=autotools libs=shared,static arch=linux-rhel8-zen2 [+] wv6bhnf ^[email protected]%[email protected]+pic build_system=autotools libs=shared,static arch=linux-rhel8-zen2 [+] gl4jfjp ^[email protected]%[email protected]+compat+opt build_system=autotools patches=299b958,ae9077a,b692621 arch=linux-rhel8-zen2 [e] jxaxqp2 ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [+] iiz3ymd ^[email protected]%[email protected]~docs+pmi_backwards_compatibility~python~restful build_system=autotools arch=linux-rhel8-zen2 [+] zpd4eva ^[email protected]%[email protected]+openssl build_system=autotools arch=linux-rhel8-zen2 [+] vfrxdbw ^[email protected]%[email protected]~docs+shared build_system=generic certs=mozilla arch=linux-rhel8-zen2 [-] f7zqyjr ^ca-certificates-mozilla@2023-05-30%[email protected] build_system=generic arch=linux-rhel8-zen2 [e] mbywbad ^[email protected]%[email protected]+cpanm+opcode+open+shared+threads build_system=generic patches=8cf4302 arch=linux-rhel8-zen2 [+] giw5sed ^[email protected]%[email protected]~assertions~backtrace_detail~cma~cuda~dc~debug~dm+examples~gdrcopy~gtest~ib_hw_tm~java~knem~logging~mlx5_dv+numa+openmp+optimizations~parameter_checking+pic~rc~rdmacm~rocm+thread_multiple~ucg~ud~verbs~vfs~xpmem build_system=autotools libs=shared,static opt=3 simd=auto arch=linux-rhel8-zen2 [+] 2hrbodu ^[email protected]%[email protected] build_system=autotools patches=4e1d78c,62fc8a8,ff37630 arch=linux-rhel8-zen2 [e] 34ciftm ^[email protected]%[email protected] build_system=autotools patches=7793209 arch=linux-rhel8-zen2 [e] 4awjffi 
^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [e] v6g426c ^[email protected]%[email protected]+sigsegv build_system=autotools patches=3877ab5,fc9b616 arch=linux-rhel8-zen2 - 3gp6itg ^[email protected]%[email protected]+cuda~rocm build_system=autotools cuda_arch=80 arch=linux-rhel8-zen2 [e] 34ciftm ^[email protected]%[email protected] build_system=autotools patches=7793209 arch=linux-rhel8-zen2 [e] 4awjffi ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [e] ljdxawj ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2 [e] v6g426c ^[email protected]%[email protected]+sigsegv build_system=autotools patches=3877ab5,fc9b616 arch=linux-rhel8-zen2 [e] vij5hij ^[email protected]%[email protected]+bz2+crypt+ctypes+dbm~debug+libxml2+lzma~nis~optimizations+pic+pyexpat+pythoncmd+readline+shared+sqlite3+ssl~tkinter+uuid+zlib build_system=generic patches=13fa8bf,b0615b2,ebdca64,f2fd060 arch=linux-rhel8-zen2 ``` ### Error message <details><summary>Error message</summary> <pre> ==> Error: ProcessError: Command exited with status 1: '/scratch/wp11/spack/stage/spack-stage-yaksa-0.2-3gp6itg5vlljusyvsifmuuceq7q77vp5/spack-src/configure' '--prefix=/projects/deploy/install/linux-rhel8-zen2/gcc-12.3.0/yaksa-0.2-3gp6itg5vlljusyvsifmuuceq7q77vp5' '--with-cuda=/usr/local/cuda-11.8' '--with-cuda-sm=80' '--without-hip' 7 errors found in build log: 28 libtoolize: copying file 'm4/ltsugar.m4' 29 libtoolize: copying file 'm4/ltversion.m4' 30 libtoolize: copying file 'm4/lt~obsolete.m4' 31 autoreconf: running: /projects/pkgs/autotools-ac-2.69-am-1.15.1-lt-2.4.6/bin/autoconf --force 32 autoreconf: running: /projects/pkgs/autotools-ac-2.69-am-1.15.1-lt-2.4.6/bin/autoheader --force 33 autoreconf: running: automake --add-missing --copy --force-missing >> 34 configure.ac:81: installing 'm4/ar-lib' >> 35 configure.ac:78: installing 'm4/compile' >> 36 configure.ac:83: installing 'm4/config.guess' >> 37 configure.ac:83: installing 'm4/config.sub' >> 38 configure.ac:74: installing 'm4/install-sh' >> 39 configure.ac:74: installing 'm4/missing' 40 Makefile.am: installing 'm4/depcomp' 41 autoreconf: Leaving directory `.' 42 === done === 43 44 Generating a helper maint/Version... done 45 ==> yaksa: Executing phase: 'configure' ... 198 checking alignment of long double... 16 199 checking cuda_runtime_api.h usability... yes 200 checking cuda_runtime_api.h presence... yes 201 checking for cuda_runtime_api.h... yes 202 checking for cudaStreamSynchronize in -lcudart... yes 203 checking whether nvcc works... no >> 204 configure: error: CUDA was requested but it is not functional </pre></details> ### Information on your system * **Spack:** 0.21.0.dev0 (b6f08f1d4ef3b0ca56430c1a465d660828b45ed2) * **Python:** 3.6.8 * **Platform:** linux-rhel8-zen2 * **Concretizer:** clingo ### Additional information - [spack-build-out.txt](https://github.com/spack/spack/files/12778111/spack-build-out.txt) - [spack-build-env.txt](https://github.com/spack/spack/files/12778112/spack-build-env.txt) I can confirm that the `nvcc` from CUDA 11.8 in the system works just fine by compiling and running a CUDA dot product test code. 
``` $ nvcc --version nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2022 NVIDIA Corporation Built on Wed_Sep_21_10:33:58_PDT_2022 Cuda compilation tools, release 11.8, V11.8.89 Build cuda_11.8.r11.8/compiler.31833905_0 ``` Summoning `yaksa` maintainers @hzhou @raffenet @yfguo ### General information - [X] I have run `spack debug report` and reported the version of Spack/Python/Platform - [X] I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers - [X] I have uploaded the build log and environment files - [X] I have searched the issues of this repo and believe this is not a duplicate --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/yaksa/package.py` Content: ``` 1 # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack.package import * 7 8 9 class Yaksa(AutotoolsPackage, CudaPackage, ROCmPackage): 10 """Yaksa is a high-performance datatype engine for expressing, 11 managing and manipulating data present in noncontiguous memory 12 regions. It provides portable abstractions for structured 13 noncontiguous data layouts that are much more comprehensive compared 14 with traditional I/O vectors. 15 16 Yaksa imitates parts of the MPI Datatype system, but adds additional 17 functionality that would allow it to be used independent of MPI. It 18 provides routines for packing/unpacking, creating I/O vectors (array 19 of contiguous segments) and flattening/unflattening datatypes into 20 process-portable formats. 21 22 Yaksa's backend includes support for CPUs as well as different 23 GPUs.""" 24 25 homepage = "https://www.yaksa.org" 26 url = "https://github.com/pmodels/yaksa/archive/refs/tags/v0.2.tar.gz" 27 maintainers("raffenet", "yfguo", "hzhou") 28 29 version("0.2", sha256="9401cb6153dc8c34ddb9781bbabd418fd26b0a27b5da3294ecc21af7be9c86f2") 30 31 depends_on("autoconf", type="build") 32 depends_on("automake", type="build") 33 depends_on("libtool", type="build") 34 depends_on("m4", type="build") 35 depends_on("python@3:", type="build") 36 37 def autoreconf(self, spec, prefix): 38 sh = which("sh") 39 sh("autogen.sh") 40 41 def configure_args(self): 42 spec = self.spec 43 config_args = [] 44 45 config_args += self.with_or_without("cuda", activation_value="prefix") 46 if "+cuda" in spec: 47 cuda_archs = spec.variants["cuda_arch"].value 48 if "none" not in cuda_archs: 49 config_args.append("--with-cuda-sm={0}".format(",".join(cuda_archs))) 50 51 if "+rocm" in spec: 52 config_args.append("--with-hip={0}".format(spec["hip"].prefix)) 53 rocm_archs = spec.variants["amdgpu_target"].value 54 if "none" not in rocm_archs: 55 config_args.append("--with-hip-sm={0}".format(",".join(rocm_archs))) 56 else: 57 config_args.append("--without-hip") 58 59 return config_args 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/yaksa/package.py b/var/spack/repos/builtin/packages/yaksa/package.py --- a/var/spack/repos/builtin/packages/yaksa/package.py +++ b/var/spack/repos/builtin/packages/yaksa/package.py @@ -47,6 +47,8 @@ cuda_archs = spec.variants["cuda_arch"].value if "none" not in cuda_archs: config_args.append("--with-cuda-sm={0}".format(",".join(cuda_archs))) + if "^cuda+allow-unsupported-compilers" in self.spec: + config_args.append("NVCC_FLAGS=-allow-unsupported-compiler") if "+rocm" in spec: config_args.append("--with-hip={0}".format(spec["hip"].prefix))
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/yaksa/package.py b/var/spack/repos/builtin/packages/yaksa/package.py\n--- a/var/spack/repos/builtin/packages/yaksa/package.py\n+++ b/var/spack/repos/builtin/packages/yaksa/package.py\n@@ -47,6 +47,8 @@\n cuda_archs = spec.variants[\"cuda_arch\"].value\n if \"none\" not in cuda_archs:\n config_args.append(\"--with-cuda-sm={0}\".format(\",\".join(cuda_archs)))\n+ if \"^cuda+allow-unsupported-compilers\" in self.spec:\n+ config_args.append(\"NVCC_FLAGS=-allow-unsupported-compiler\")\n \n if \"+rocm\" in spec:\n config_args.append(\"--with-hip={0}\".format(spec[\"hip\"].prefix))\n", "issue": "Installation issue: [email protected] +cuda %[email protected]\n### Steps to reproduce the issue\n\nI ran into this when trying to build `mpich %[email protected] +cuda netmod=ucx pmi=pmix ^[email protected] +allow-unsupported-compilers`\r\n\r\n```console\r\n$ spack spec -l mpich%[email protected]+cuda netmod=ucx pmi=pmix ^[email protected]\r\nInput spec\r\n--------------------------------\r\n - mpich%[email protected]+cuda netmod=ucx pmi=pmix\r\n - ^[email protected]\r\n \r\n Concretized\r\n--------------------------------\r\n - vl25zhp [email protected]%[email protected]~argobots+cuda+fortran~hcoll+hwloc~hydra+libxml2+pci~rocm+romio~slurm~vci~verbs+wrapperrpath build_system=autotools cuda_arch=80 datatype-engine=auto device=ch4 netmod=ucx pmi=pmix arch=linux-rhel8-zen2\r\n[e] tatyw2o ^[email protected]%[email protected]+allow-unsupported-compilers~dev build_system=generic arch=linux-rhel8-zen2\r\n[+] oaezgck ^[email protected]%[email protected] build_system=autotools patches=440b954 arch=linux-rhel8-zen2\r\n[+] wda76ac ^[email protected]%[email protected]~cairo+cuda~gl~libudev+libxml2~netloc~nvml~oneapi-level-zero~opencl+pci~rocm build_system=autotools cuda_arch=80 libs=shared,static arch=linux-rhel8-zen2\r\n[+] v3xl3ra ^[email protected]%[email protected]~symlinks+termlib abi=none build_system=autotools arch=linux-rhel8-zen2\r\n[e] jxaxqp2 ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[+] a25ibkd ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[e] ljdxawj ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[-] 6gqawar ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[+] 6tqy7tj ^[email protected]%[email protected]+pic~python+shared build_system=autotools arch=linux-rhel8-zen2\r\n[+] gmdjnou ^[email protected]%[email protected] build_system=autotools libs=shared,static arch=linux-rhel8-zen2\r\n[+] wv6bhnf ^[email protected]%[email protected]+pic build_system=autotools libs=shared,static arch=linux-rhel8-zen2\r\n[+] gl4jfjp ^[email protected]%[email protected]+compat+opt build_system=autotools patches=299b958,ae9077a,b692621 arch=linux-rhel8-zen2\r\n[e] jxaxqp2 ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[+] iiz3ymd ^[email protected]%[email protected]~docs+pmi_backwards_compatibility~python~restful build_system=autotools arch=linux-rhel8-zen2\r\n[+] zpd4eva ^[email protected]%[email protected]+openssl build_system=autotools arch=linux-rhel8-zen2\r\n[+] vfrxdbw ^[email protected]%[email protected]~docs+shared build_system=generic certs=mozilla arch=linux-rhel8-zen2\r\n[-] f7zqyjr ^ca-certificates-mozilla@2023-05-30%[email protected] build_system=generic arch=linux-rhel8-zen2\r\n[e] mbywbad ^[email protected]%[email protected]+cpanm+opcode+open+shared+threads 
build_system=generic patches=8cf4302 arch=linux-rhel8-zen2\r\n[+] giw5sed ^[email protected]%[email protected]~assertions~backtrace_detail~cma~cuda~dc~debug~dm+examples~gdrcopy~gtest~ib_hw_tm~java~knem~logging~mlx5_dv+numa+openmp+optimizations~parameter_checking+pic~rc~rdmacm~rocm+thread_multiple~ucg~ud~verbs~vfs~xpmem build_system=autotools libs=shared,static opt=3 simd=auto arch=linux-rhel8-zen2\r\n[+] 2hrbodu ^[email protected]%[email protected] build_system=autotools patches=4e1d78c,62fc8a8,ff37630 arch=linux-rhel8-zen2\r\n[e] 34ciftm ^[email protected]%[email protected] build_system=autotools patches=7793209 arch=linux-rhel8-zen2\r\n[e] 4awjffi ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[e] v6g426c ^[email protected]%[email protected]+sigsegv build_system=autotools patches=3877ab5,fc9b616 arch=linux-rhel8-zen2\r\n - 3gp6itg ^[email protected]%[email protected]+cuda~rocm build_system=autotools cuda_arch=80 arch=linux-rhel8-zen2\r\n[e] 34ciftm ^[email protected]%[email protected] build_system=autotools patches=7793209 arch=linux-rhel8-zen2\r\n[e] 4awjffi ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[e] ljdxawj ^[email protected]%[email protected] build_system=autotools arch=linux-rhel8-zen2\r\n[e] v6g426c ^[email protected]%[email protected]+sigsegv build_system=autotools patches=3877ab5,fc9b616 arch=linux-rhel8-zen2\r\n[e] vij5hij ^[email protected]%[email protected]+bz2+crypt+ctypes+dbm~debug+libxml2+lzma~nis~optimizations+pic+pyexpat+pythoncmd+readline+shared+sqlite3+ssl~tkinter+uuid+zlib build_system=generic patches=13fa8bf,b0615b2,ebdca64,f2fd060 arch=linux-rhel8-zen2\r\n```\r\n\r\n\n\n### Error message\n\n<details><summary>Error message</summary>\r\n\r\n<pre>\r\n==> Error: ProcessError: Command exited with status 1:\r\n '/scratch/wp11/spack/stage/spack-stage-yaksa-0.2-3gp6itg5vlljusyvsifmuuceq7q77vp5/spack-src/configure' '--prefix=/projects/deploy/install/linux-rhel8-zen2/gcc-12.3.0/yaksa-0.2-3gp6itg5vlljusyvsifmuuceq7q77vp5' '--with-cuda=/usr/local/cuda-11.8' '--with-cuda-sm=80' '--without-hip'\r\n\r\n7 errors found in build log:\r\n 28 libtoolize: copying file 'm4/ltsugar.m4'\r\n 29 libtoolize: copying file 'm4/ltversion.m4'\r\n 30 libtoolize: copying file 'm4/lt~obsolete.m4'\r\n 31 autoreconf: running: /projects/pkgs/autotools-ac-2.69-am-1.15.1-lt-2.4.6/bin/autoconf --force\r\n 32 autoreconf: running: /projects/pkgs/autotools-ac-2.69-am-1.15.1-lt-2.4.6/bin/autoheader --force\r\n 33 autoreconf: running: automake --add-missing --copy --force-missing\r\n >> 34 configure.ac:81: installing 'm4/ar-lib'\r\n >> 35 configure.ac:78: installing 'm4/compile'\r\n >> 36 configure.ac:83: installing 'm4/config.guess'\r\n >> 37 configure.ac:83: installing 'm4/config.sub'\r\n >> 38 configure.ac:74: installing 'm4/install-sh'\r\n >> 39 configure.ac:74: installing 'm4/missing'\r\n 40 Makefile.am: installing 'm4/depcomp'\r\n 41 autoreconf: Leaving directory `.'\r\n 42 === done ===\r\n 43 \r\n 44 Generating a helper maint/Version... done\r\n 45 ==> yaksa: Executing phase: 'configure'\r\n\r\n ...\r\n\r\n 198 checking alignment of long double... 16\r\n 199 checking cuda_runtime_api.h usability... yes\r\n 200 checking cuda_runtime_api.h presence... yes\r\n 201 checking for cuda_runtime_api.h... yes\r\n 202 checking for cudaStreamSynchronize in -lcudart... yes\r\n 203 checking whether nvcc works... 
no\r\n >> 204 configure: error: CUDA was requested but it is not functional\r\n</pre></details>\r\n\n\n### Information on your system\n\n* **Spack:** 0.21.0.dev0 (b6f08f1d4ef3b0ca56430c1a465d660828b45ed2)\r\n* **Python:** 3.6.8\r\n* **Platform:** linux-rhel8-zen2\r\n* **Concretizer:** clingo\n\n### Additional information\n\n- [spack-build-out.txt](https://github.com/spack/spack/files/12778111/spack-build-out.txt)\r\n- [spack-build-env.txt](https://github.com/spack/spack/files/12778112/spack-build-env.txt)\r\n\r\nI can confirm that the `nvcc` from CUDA 11.8 in the system works just fine by compiling and running a CUDA dot product test code.\r\n\r\n```\r\n$ nvcc --version\r\nnvcc: NVIDIA (R) Cuda compiler driver\r\nCopyright (c) 2005-2022 NVIDIA Corporation\r\nBuilt on Wed_Sep_21_10:33:58_PDT_2022\r\nCuda compilation tools, release 11.8, V11.8.89\r\nBuild cuda_11.8.r11.8/compiler.31833905_0\r\n```\r\n\r\nSummoning `yaksa` maintainers @hzhou @raffenet @yfguo\n\n### General information\n\n- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform\n- [X] I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers\n- [X] I have uploaded the build log and environment files\n- [X] I have searched the issues of this repo and believe this is not a duplicate\n", "before_files": [{"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Yaksa(AutotoolsPackage, CudaPackage, ROCmPackage):\n \"\"\"Yaksa is a high-performance datatype engine for expressing,\n managing and manipulating data present in noncontiguous memory\n regions. It provides portable abstractions for structured\n noncontiguous data layouts that are much more comprehensive compared\n with traditional I/O vectors.\n\n Yaksa imitates parts of the MPI Datatype system, but adds additional\n functionality that would allow it to be used independent of MPI. 
It\n provides routines for packing/unpacking, creating I/O vectors (array\n of contiguous segments) and flattening/unflattening datatypes into\n process-portable formats.\n\n Yaksa's backend includes support for CPUs as well as different\n GPUs.\"\"\"\n\n homepage = \"https://www.yaksa.org\"\n url = \"https://github.com/pmodels/yaksa/archive/refs/tags/v0.2.tar.gz\"\n maintainers(\"raffenet\", \"yfguo\", \"hzhou\")\n\n version(\"0.2\", sha256=\"9401cb6153dc8c34ddb9781bbabd418fd26b0a27b5da3294ecc21af7be9c86f2\")\n\n depends_on(\"autoconf\", type=\"build\")\n depends_on(\"automake\", type=\"build\")\n depends_on(\"libtool\", type=\"build\")\n depends_on(\"m4\", type=\"build\")\n depends_on(\"python@3:\", type=\"build\")\n\n def autoreconf(self, spec, prefix):\n sh = which(\"sh\")\n sh(\"autogen.sh\")\n\n def configure_args(self):\n spec = self.spec\n config_args = []\n\n config_args += self.with_or_without(\"cuda\", activation_value=\"prefix\")\n if \"+cuda\" in spec:\n cuda_archs = spec.variants[\"cuda_arch\"].value\n if \"none\" not in cuda_archs:\n config_args.append(\"--with-cuda-sm={0}\".format(\",\".join(cuda_archs)))\n\n if \"+rocm\" in spec:\n config_args.append(\"--with-hip={0}\".format(spec[\"hip\"].prefix))\n rocm_archs = spec.variants[\"amdgpu_target\"].value\n if \"none\" not in rocm_archs:\n config_args.append(\"--with-hip-sm={0}\".format(\",\".join(rocm_archs)))\n else:\n config_args.append(\"--without-hip\")\n\n return config_args\n", "path": "var/spack/repos/builtin/packages/yaksa/package.py"}], "after_files": [{"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Yaksa(AutotoolsPackage, CudaPackage, ROCmPackage):\n \"\"\"Yaksa is a high-performance datatype engine for expressing,\n managing and manipulating data present in noncontiguous memory\n regions. It provides portable abstractions for structured\n noncontiguous data layouts that are much more comprehensive compared\n with traditional I/O vectors.\n\n Yaksa imitates parts of the MPI Datatype system, but adds additional\n functionality that would allow it to be used independent of MPI. 
It\n provides routines for packing/unpacking, creating I/O vectors (array\n of contiguous segments) and flattening/unflattening datatypes into\n process-portable formats.\n\n Yaksa's backend includes support for CPUs as well as different\n GPUs.\"\"\"\n\n homepage = \"https://www.yaksa.org\"\n url = \"https://github.com/pmodels/yaksa/archive/refs/tags/v0.2.tar.gz\"\n maintainers(\"raffenet\", \"yfguo\", \"hzhou\")\n\n version(\"0.2\", sha256=\"9401cb6153dc8c34ddb9781bbabd418fd26b0a27b5da3294ecc21af7be9c86f2\")\n\n depends_on(\"autoconf\", type=\"build\")\n depends_on(\"automake\", type=\"build\")\n depends_on(\"libtool\", type=\"build\")\n depends_on(\"m4\", type=\"build\")\n depends_on(\"python@3:\", type=\"build\")\n\n def autoreconf(self, spec, prefix):\n sh = which(\"sh\")\n sh(\"autogen.sh\")\n\n def configure_args(self):\n spec = self.spec\n config_args = []\n\n config_args += self.with_or_without(\"cuda\", activation_value=\"prefix\")\n if \"+cuda\" in spec:\n cuda_archs = spec.variants[\"cuda_arch\"].value\n if \"none\" not in cuda_archs:\n config_args.append(\"--with-cuda-sm={0}\".format(\",\".join(cuda_archs)))\n if \"^cuda+allow-unsupported-compilers\" in self.spec:\n config_args.append(\"NVCC_FLAGS=-allow-unsupported-compiler\")\n\n if \"+rocm\" in spec:\n config_args.append(\"--with-hip={0}\".format(spec[\"hip\"].prefix))\n rocm_archs = spec.variants[\"amdgpu_target\"].value\n if \"none\" not in rocm_archs:\n config_args.append(\"--with-hip-sm={0}\".format(\",\".join(rocm_archs)))\n else:\n config_args.append(\"--without-hip\")\n\n return config_args\n", "path": "var/spack/repos/builtin/packages/yaksa/package.py"}]}
4005
177
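A minimal sketch of the configure probe behind the yaksa failure in the record above — assuming only that CUDA 11.8's `nvcc` and GCC 12.3 are on `PATH`; `-allow-unsupported-compiler` is the real `nvcc` option that the accepted fix forwards through `NVCC_FLAGS`:

```python
import os
import subprocess
import tempfile

def nvcc_works(extra_flags=()):
    # Compile a trivial CUDA source, roughly what configure's
    # "checking whether nvcc works" probe does.
    with tempfile.TemporaryDirectory() as d:
        src = os.path.join(d, "probe.cu")
        with open(src, "w") as f:
            f.write("int main() { return 0; }\n")
        cmd = ["nvcc", *extra_flags, "-c", src, "-o", os.path.join(d, "probe.o")]
        return subprocess.run(cmd, capture_output=True).returncode == 0

print(nvcc_works())                                   # False with GCC 12.3 as host compiler
print(nvcc_works(("-allow-unsupported-compiler",)))   # True: the probe now passes
```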
gh_patches_debug_35132
rasdani/github-patches
git_diff
CTFd__CTFd-1352
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Submission search Search submissions akin to how users are searched --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/admin/submissions.py` Content: ``` 1 from flask import render_template, request 2 3 from CTFd.admin import admin 4 from CTFd.models import Challenges, Submissions 5 from CTFd.utils.decorators import admins_only 6 from CTFd.utils.modes import get_model 7 8 9 @admin.route("/admin/submissions", defaults={"submission_type": None}) 10 @admin.route("/admin/submissions/<submission_type>") 11 @admins_only 12 def submissions_listing(submission_type): 13 filters = {} 14 if submission_type: 15 filters["type"] = submission_type 16 17 curr_page = abs(int(request.args.get("page", 1, type=int))) 18 results_per_page = 50 19 page_start = results_per_page * (curr_page - 1) 20 page_end = results_per_page * (curr_page - 1) + results_per_page 21 sub_count = Submissions.query.filter_by(**filters).count() 22 page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0) 23 24 Model = get_model() 25 26 submissions = ( 27 Submissions.query.add_columns( 28 Submissions.id, 29 Submissions.type, 30 Submissions.challenge_id, 31 Submissions.provided, 32 Submissions.account_id, 33 Submissions.date, 34 Challenges.name.label("challenge_name"), 35 Model.name.label("team_name"), 36 ) 37 .filter_by(**filters) 38 .join(Challenges) 39 .join(Model) 40 .order_by(Submissions.date.desc()) 41 .slice(page_start, page_end) 42 .all() 43 ) 44 45 return render_template( 46 "admin/submissions.html", 47 submissions=submissions, 48 page_count=page_count, 49 curr_page=curr_page, 50 type=submission_type, 51 ) 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/admin/submissions.py b/CTFd/admin/submissions.py --- a/CTFd/admin/submissions.py +++ b/CTFd/admin/submissions.py @@ -1,4 +1,4 @@ -from flask import render_template, request +from flask import render_template, request, url_for from CTFd.admin import admin from CTFd.models import Challenges, Submissions @@ -10,16 +10,21 @@ @admin.route("/admin/submissions/<submission_type>") @admins_only def submissions_listing(submission_type): - filters = {} + filters_by = {} if submission_type: - filters["type"] = submission_type + filters_by["type"] = submission_type + filters = [] - curr_page = abs(int(request.args.get("page", 1, type=int))) - results_per_page = 50 - page_start = results_per_page * (curr_page - 1) - page_end = results_per_page * (curr_page - 1) + results_per_page - sub_count = Submissions.query.filter_by(**filters).count() - page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0) + q = request.args.get("q") + field = request.args.get("field") + page = abs(request.args.get("page", 1, type=int)) + + if q: + submissions = [] + if Submissions.__mapper__.has_property( + field + ): # The field exists as an exposed column + filters.append(getattr(Submissions, field).like("%{}%".format(q))) Model = get_model() @@ -34,18 +39,27 @@ Challenges.name.label("challenge_name"), Model.name.label("team_name"), ) - .filter_by(**filters) + .filter_by(**filters_by) + .filter(*filters) .join(Challenges) .join(Model) .order_by(Submissions.date.desc()) - .slice(page_start, page_end) - .all() + .paginate(page=page, per_page=50) ) + args = dict(request.args) + args.pop("page", 1) + return render_template( "admin/submissions.html", submissions=submissions, - page_count=page_count, - curr_page=curr_page, + prev_page=url_for( + request.endpoint, type=submission_type, page=submissions.prev_num, **args + ), + next_page=url_for( + request.endpoint, type=submission_type, page=submissions.next_num, **args + ), type=submission_type, + q=q, + field=field, )
{"golden_diff": "diff --git a/CTFd/admin/submissions.py b/CTFd/admin/submissions.py\n--- a/CTFd/admin/submissions.py\n+++ b/CTFd/admin/submissions.py\n@@ -1,4 +1,4 @@\n-from flask import render_template, request\n+from flask import render_template, request, url_for\n \n from CTFd.admin import admin\n from CTFd.models import Challenges, Submissions\n@@ -10,16 +10,21 @@\n @admin.route(\"/admin/submissions/<submission_type>\")\n @admins_only\n def submissions_listing(submission_type):\n- filters = {}\n+ filters_by = {}\n if submission_type:\n- filters[\"type\"] = submission_type\n+ filters_by[\"type\"] = submission_type\n+ filters = []\n \n- curr_page = abs(int(request.args.get(\"page\", 1, type=int)))\n- results_per_page = 50\n- page_start = results_per_page * (curr_page - 1)\n- page_end = results_per_page * (curr_page - 1) + results_per_page\n- sub_count = Submissions.query.filter_by(**filters).count()\n- page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0)\n+ q = request.args.get(\"q\")\n+ field = request.args.get(\"field\")\n+ page = abs(request.args.get(\"page\", 1, type=int))\n+\n+ if q:\n+ submissions = []\n+ if Submissions.__mapper__.has_property(\n+ field\n+ ): # The field exists as an exposed column\n+ filters.append(getattr(Submissions, field).like(\"%{}%\".format(q)))\n \n Model = get_model()\n \n@@ -34,18 +39,27 @@\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"team_name\"),\n )\n- .filter_by(**filters)\n+ .filter_by(**filters_by)\n+ .filter(*filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n- .slice(page_start, page_end)\n- .all()\n+ .paginate(page=page, per_page=50)\n )\n \n+ args = dict(request.args)\n+ args.pop(\"page\", 1)\n+\n return render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n- page_count=page_count,\n- curr_page=curr_page,\n+ prev_page=url_for(\n+ request.endpoint, type=submission_type, page=submissions.prev_num, **args\n+ ),\n+ next_page=url_for(\n+ request.endpoint, type=submission_type, page=submissions.next_num, **args\n+ ),\n type=submission_type,\n+ q=q,\n+ field=field,\n )\n", "issue": "Submission search\nSearch submissions akin to how users are searched\n", "before_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Submissions\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import get_model\n\n\[email protected](\"/admin/submissions\", defaults={\"submission_type\": None})\[email protected](\"/admin/submissions/<submission_type>\")\n@admins_only\ndef submissions_listing(submission_type):\n filters = {}\n if submission_type:\n filters[\"type\"] = submission_type\n\n curr_page = abs(int(request.args.get(\"page\", 1, type=int)))\n results_per_page = 50\n page_start = results_per_page * (curr_page - 1)\n page_end = results_per_page * (curr_page - 1) + results_per_page\n sub_count = Submissions.query.filter_by(**filters).count()\n page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0)\n\n Model = get_model()\n\n submissions = (\n Submissions.query.add_columns(\n Submissions.id,\n Submissions.type,\n Submissions.challenge_id,\n Submissions.provided,\n Submissions.account_id,\n Submissions.date,\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"team_name\"),\n )\n .filter_by(**filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n .slice(page_start, page_end)\n .all()\n )\n\n return 
render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n page_count=page_count,\n curr_page=curr_page,\n type=submission_type,\n )\n", "path": "CTFd/admin/submissions.py"}], "after_files": [{"content": "from flask import render_template, request, url_for\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Submissions\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import get_model\n\n\[email protected](\"/admin/submissions\", defaults={\"submission_type\": None})\[email protected](\"/admin/submissions/<submission_type>\")\n@admins_only\ndef submissions_listing(submission_type):\n filters_by = {}\n if submission_type:\n filters_by[\"type\"] = submission_type\n filters = []\n\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n\n if q:\n submissions = []\n if Submissions.__mapper__.has_property(\n field\n ): # The field exists as an exposed column\n filters.append(getattr(Submissions, field).like(\"%{}%\".format(q)))\n\n Model = get_model()\n\n submissions = (\n Submissions.query.add_columns(\n Submissions.id,\n Submissions.type,\n Submissions.challenge_id,\n Submissions.provided,\n Submissions.account_id,\n Submissions.date,\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"team_name\"),\n )\n .filter_by(**filters_by)\n .filter(*filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n prev_page=url_for(\n request.endpoint, type=submission_type, page=submissions.prev_num, **args\n ),\n next_page=url_for(\n request.endpoint, type=submission_type, page=submissions.next_num, **args\n ),\n type=submission_type,\n q=q,\n field=field,\n )\n", "path": "CTFd/admin/submissions.py"}]}
728
615
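A condensed sketch of the dynamic-field search pattern the accepted fix above introduces, assuming CTFd's Flask-SQLAlchemy `Submissions` model and an active request context (`searched_submissions` is a hypothetical helper name, not part of CTFd):

```python
from flask import request

def searched_submissions(Submissions):
    q = request.args.get("q")
    field = request.args.get("field")
    page = abs(request.args.get("page", 1, type=int))

    filters = []
    # has_property() restricts getattr() to real mapped columns, so an
    # arbitrary ?field= value cannot be used to probe the model.
    if q and Submissions.__mapper__.has_property(field):
        filters.append(getattr(Submissions, field).like("%{}%".format(q)))

    # paginate() replaces the manual slice()/page-count arithmetic and
    # exposes prev_num/next_num for the prev/next page links.
    return (
        Submissions.query.filter(*filters)
        .order_by(Submissions.date.desc())
        .paginate(page=page, per_page=50)
    )
```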
gh_patches_debug_11056
rasdani/github-patches
git_diff
pantsbuild__pants-14584
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- coursier post processing script choking on null dependency file in report json **Describe the bug** One of our 3rdparty jars (`org.geotools:gt-main:9.2` from the `https://repo.osgeo.org/repository/release` repositiory) transitively depends on a `javax.media:jai_core:1.1.3` jar. For whatever reason, Coursier's resolve report json lists `"file": null` for that jar, which the post processing script doesn't currently handle. **Pants version** 2.9.0 **OS** Encountered on MacOS, though expect we would see this on Linux as well **Additional info** Coursier config: ``` [coursier] repos = [ "https://maven-central.storage-download.googleapis.com/maven2", "https://repo1.maven.org/maven2", "https://repo.osgeo.org/repository/release", ] ``` affected jvm_artifact: ``` jvm_artifact( group="org.geotools", artifact="gt-main", version="9.2", ) ``` [slack discussion](https://pantsbuild.slack.com/archives/C046T6T9U/p1645598534013879) with links to full resolve output and json --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/jvm/resolve/coursier_setup.py` Content: ``` 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import annotations 5 6 import os 7 import shlex 8 import textwrap 9 from dataclasses import dataclass 10 from typing import ClassVar, Iterable, Tuple 11 12 from pants.core.util_rules import external_tool 13 from pants.core.util_rules.external_tool import ( 14 DownloadedExternalTool, 15 ExternalToolRequest, 16 TemplatedExternalTool, 17 ) 18 from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests 19 from pants.engine.platform import Platform 20 from pants.engine.process import BashBinary, Process 21 from pants.engine.rules import Get, MultiGet, collect_rules, rule 22 from pants.python.binaries import PythonBinary 23 from pants.util.logging import LogLevel 24 25 COURSIER_POST_PROCESSING_SCRIPT = textwrap.dedent( 26 """\ 27 import json 28 import sys 29 import os 30 from pathlib import PurePath 31 from shutil import copyfile 32 33 report = json.load(open(sys.argv[1])) 34 35 # Mapping from dest path to source path. It is ok to capture the same output filename multiple 36 # times if the source is the same as well. 37 classpath = dict() 38 for dep in report['dependencies']: 39 source = PurePath(dep['file']) 40 dest_name = dep['coord'].replace(":", "_") 41 _, ext = os.path.splitext(source) 42 classpath_dest = f"classpath/{dest_name}{ext}" 43 44 existing_source = classpath.get(classpath_dest) 45 if existing_source: 46 if existing_source == source: 47 # We've already captured this file. 
48 continue 49 raise Exception( 50 f"Duplicate jar name {classpath_dest} with incompatible source:\\n" 51 f" {source}\\n" 52 f" {existing_source}\\n" 53 ) 54 classpath[classpath_dest] = source 55 copyfile(source, classpath_dest) 56 """ 57 ) 58 59 COURSIER_FETCH_WRAPPER_SCRIPT = textwrap.dedent( 60 """\ 61 set -eux 62 63 coursier_exe="$1" 64 shift 65 json_output_file="$1" 66 shift 67 68 working_dir="$(pwd)" 69 "$coursier_exe" fetch {repos_args} \ 70 --json-output-file="$json_output_file" \ 71 "${{@//{coursier_working_directory}/$working_dir}}" 72 /bin/mkdir -p classpath 73 {python_path} {coursier_bin_dir}/coursier_post_processing_script.py "$json_output_file" 74 """ 75 ) 76 77 78 # TODO: Coursier renders setrlimit error line on macOS. 79 # see https://github.com/pantsbuild/pants/issues/13942. 80 POST_PROCESS_COURSIER_STDERR_SCRIPT = textwrap.dedent( 81 """\ 82 #!{python_path} 83 import sys 84 from subprocess import run, PIPE 85 86 proc = run(sys.argv[1:], stdout=PIPE, stderr=PIPE) 87 88 sys.stdout.buffer.write(proc.stdout) 89 sys.stderr.buffer.write(proc.stderr.replace(b"setrlimit to increase file descriptor limit failed, errno 22\\n", b"")) 90 sys.exit(proc.returncode) 91 """ 92 ) 93 94 95 class CoursierSubsystem(TemplatedExternalTool): 96 options_scope = "coursier" 97 name = "coursier" 98 help = "A dependency resolver for the Maven ecosystem." 99 100 default_version = "v2.0.16-169-g194ebc55c" 101 default_known_versions = [ 102 "v2.0.16-169-g194ebc55c|linux_arm64 |da38c97d55967505b8454c20a90370c518044829398b9bce8b637d194d79abb3|18114472", 103 "v2.0.16-169-g194ebc55c|linux_x86_64|4c61a634c4bd2773b4543fe0fc32210afd343692891121cddb447204b48672e8|18486946", 104 "v2.0.16-169-g194ebc55c|macos_arm64 |15bce235d223ef1d022da30b67b4c64e9228d236b876c834b64e029bbe824c6f|17957182", 105 "v2.0.16-169-g194ebc55c|macos_x86_64|15bce235d223ef1d022da30b67b4c64e9228d236b876c834b64e029bbe824c6f|17957182", 106 ] 107 default_url_template = ( 108 "https://github.com/coursier/coursier/releases/download/{version}/cs-{platform}.gz" 109 ) 110 default_url_platform_mapping = { 111 "macos_arm64": "x86_64-apple-darwin", 112 "macos_x86_64": "x86_64-apple-darwin", 113 "linux_arm64": "aarch64-pc-linux", 114 "linux_x86_64": "x86_64-pc-linux", 115 } 116 117 @classmethod 118 def register_options(cls, register) -> None: 119 super().register_options(register) 120 register( 121 "--repos", 122 type=list, 123 member_type=str, 124 default=[ 125 "https://maven-central.storage-download.googleapis.com/maven2", 126 "https://repo1.maven.org/maven2", 127 ], 128 help=("Maven style repositories to resolve artifacts from."), 129 ) 130 131 def generate_exe(self, plat: Platform) -> str: 132 archive_filename = os.path.basename(self.generate_url(plat)) 133 filename = os.path.splitext(archive_filename)[0] 134 return f"./{filename}" 135 136 137 @dataclass(frozen=True) 138 class Coursier: 139 """The Coursier tool and various utilities, prepared for use via `immutable_input_digests`.""" 140 141 coursier: DownloadedExternalTool 142 _digest: Digest 143 144 bin_dir: ClassVar[str] = "__coursier" 145 fetch_wrapper_script: ClassVar[str] = f"{bin_dir}/coursier_fetch_wrapper_script.sh" 146 post_processing_script: ClassVar[str] = f"{bin_dir}/coursier_post_processing_script.py" 147 post_process_stderr: ClassVar[str] = f"{bin_dir}/coursier_post_process_stderr.py" 148 cache_name: ClassVar[str] = "coursier" 149 cache_dir: ClassVar[str] = ".cache" 150 working_directory_placeholder: ClassVar[str] = "___COURSIER_WORKING_DIRECTORY___" 151 152 def args(self, args: 
Iterable[str], *, wrapper: Iterable[str] = ()) -> tuple[str, ...]: 153 return ( 154 self.post_process_stderr, 155 *wrapper, 156 os.path.join(self.bin_dir, self.coursier.exe), 157 *args, 158 ) 159 160 @property 161 def env(self) -> dict[str, str]: 162 # NB: These variables have changed a few times, and they change again on `main`. But as of 163 # `v2.0.16+73-gddc6d9cc9` they are accurate. See: 164 # https://github.com/coursier/coursier/blob/v2.0.16+73-gddc6d9cc9/modules/paths/src/main/java/coursier/paths/CoursierPaths.java#L38-L48 165 return { 166 "COURSIER_CACHE": f"{self.cache_dir}/jdk", 167 "COURSIER_ARCHIVE_CACHE": f"{self.cache_dir}/arc", 168 "COURSIER_JVM_CACHE": f"{self.cache_dir}/v1", 169 } 170 171 @property 172 def append_only_caches(self) -> dict[str, str]: 173 return {self.cache_name: self.cache_dir} 174 175 @property 176 def immutable_input_digests(self) -> dict[str, Digest]: 177 return {self.bin_dir: self._digest} 178 179 180 @dataclass(frozen=True) 181 class CoursierWrapperProcess: 182 183 args: Tuple[str, ...] 184 input_digest: Digest 185 output_directories: Tuple[str, ...] 186 output_files: Tuple[str, ...] 187 description: str 188 189 190 @rule 191 async def invoke_coursier_wrapper( 192 bash: BashBinary, 193 coursier: Coursier, 194 request: CoursierWrapperProcess, 195 ) -> Process: 196 197 return Process( 198 argv=coursier.args( 199 request.args, 200 wrapper=[bash.path, coursier.fetch_wrapper_script], 201 ), 202 input_digest=request.input_digest, 203 immutable_input_digests=coursier.immutable_input_digests, 204 output_directories=request.output_directories, 205 output_files=request.output_files, 206 append_only_caches=coursier.append_only_caches, 207 env=coursier.env, 208 description=request.description, 209 level=LogLevel.DEBUG, 210 ) 211 212 213 @rule 214 async def setup_coursier( 215 coursier_subsystem: CoursierSubsystem, 216 python: PythonBinary, 217 ) -> Coursier: 218 repos_args = " ".join(f"-r={shlex.quote(repo)}" for repo in coursier_subsystem.options.repos) 219 coursier_wrapper_script = COURSIER_FETCH_WRAPPER_SCRIPT.format( 220 repos_args=repos_args, 221 coursier_working_directory=Coursier.working_directory_placeholder, 222 python_path=python.path, 223 coursier_bin_dir=Coursier.bin_dir, 224 ) 225 226 post_process_stderr = POST_PROCESS_COURSIER_STDERR_SCRIPT.format(python_path=python.path) 227 228 downloaded_coursier_get = Get( 229 DownloadedExternalTool, 230 ExternalToolRequest, 231 coursier_subsystem.get_request(Platform.current), 232 ) 233 wrapper_scripts_digest_get = Get( 234 Digest, 235 CreateDigest( 236 [ 237 FileContent( 238 os.path.basename(Coursier.fetch_wrapper_script), 239 coursier_wrapper_script.encode("utf-8"), 240 is_executable=True, 241 ), 242 FileContent( 243 os.path.basename(Coursier.post_processing_script), 244 COURSIER_POST_PROCESSING_SCRIPT.encode("utf-8"), 245 is_executable=True, 246 ), 247 FileContent( 248 os.path.basename(Coursier.post_process_stderr), 249 post_process_stderr.encode("utf-8"), 250 is_executable=True, 251 ), 252 ] 253 ), 254 ) 255 256 downloaded_coursier, wrapper_scripts_digest = await MultiGet( 257 downloaded_coursier_get, wrapper_scripts_digest_get 258 ) 259 260 return Coursier( 261 coursier=downloaded_coursier, 262 _digest=await Get( 263 Digest, 264 MergeDigests( 265 [ 266 downloaded_coursier.digest, 267 wrapper_scripts_digest, 268 ] 269 ), 270 ), 271 ) 272 273 274 def rules(): 275 return [ 276 *collect_rules(), 277 *external_tool.rules(), 278 ] 279 ``` --- END FILES --- Please first localize the bug based on the issue 
statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/jvm/resolve/coursier_setup.py b/src/python/pants/jvm/resolve/coursier_setup.py --- a/src/python/pants/jvm/resolve/coursier_setup.py +++ b/src/python/pants/jvm/resolve/coursier_setup.py @@ -215,7 +215,10 @@ coursier_subsystem: CoursierSubsystem, python: PythonBinary, ) -> Coursier: - repos_args = " ".join(f"-r={shlex.quote(repo)}" for repo in coursier_subsystem.options.repos) + repos_args = ( + " ".join(f"-r={shlex.quote(repo)}" for repo in coursier_subsystem.options.repos) + + " --no-default" + ) coursier_wrapper_script = COURSIER_FETCH_WRAPPER_SCRIPT.format( repos_args=repos_args, coursier_working_directory=Coursier.working_directory_placeholder,
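For context on the crash described in the issue above, a defensive variant of the post-processing loop — a sketch, not the shipped fix; the accepted change instead appends `--no-default` so that only the configured repositories are consulted and the null entry never appears (`classpath_entries` is a hypothetical helper name):

```python
import os
from pathlib import PurePath

def classpath_entries(report):
    # Yield (dest, source) pairs, tolerating entries whose resolved
    # file is null in Coursier's JSON report.
    seen = {}
    for dep in report["dependencies"]:
        if not dep.get("file"):
            continue  # e.g. javax.media:jai_core:1.1.3 reported "file": null
        source = PurePath(dep["file"])
        dest_name = dep["coord"].replace(":", "_")
        _, ext = os.path.splitext(str(source))
        dest = f"classpath/{dest_name}{ext}"
        if seen.setdefault(dest, source) != source:
            raise Exception(f"Duplicate jar name {dest} with incompatible source")
        yield dest, source
```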
{"golden_diff": "diff --git a/src/python/pants/jvm/resolve/coursier_setup.py b/src/python/pants/jvm/resolve/coursier_setup.py\n--- a/src/python/pants/jvm/resolve/coursier_setup.py\n+++ b/src/python/pants/jvm/resolve/coursier_setup.py\n@@ -215,7 +215,10 @@\n coursier_subsystem: CoursierSubsystem,\n python: PythonBinary,\n ) -> Coursier:\n- repos_args = \" \".join(f\"-r={shlex.quote(repo)}\" for repo in coursier_subsystem.options.repos)\n+ repos_args = (\n+ \" \".join(f\"-r={shlex.quote(repo)}\" for repo in coursier_subsystem.options.repos)\n+ + \" --no-default\"\n+ )\n coursier_wrapper_script = COURSIER_FETCH_WRAPPER_SCRIPT.format(\n repos_args=repos_args,\n coursier_working_directory=Coursier.working_directory_placeholder,\n", "issue": "coursier post processing script choking on null dependency file in report json\n**Describe the bug**\r\nOne of our 3rdparty jars (`org.geotools:gt-main:9.2` from the `https://repo.osgeo.org/repository/release` repositiory) transitively depends on a `javax.media:jai_core:1.1.3` jar. For whatever reason, Coursier's resolve report json lists `\"file\": null` for that jar, which the post processing script doesn't currently handle.\r\n\r\n**Pants version**\r\n2.9.0\r\n\r\n**OS**\r\nEncountered on MacOS, though expect we would see this on Linux as well\r\n\r\n**Additional info**\r\nCoursier config:\r\n```\r\n[coursier]\r\nrepos = [\r\n \"https://maven-central.storage-download.googleapis.com/maven2\",\r\n \"https://repo1.maven.org/maven2\",\r\n \"https://repo.osgeo.org/repository/release\",\r\n]\r\n```\r\n\r\naffected jvm_artifact:\r\n```\r\njvm_artifact(\r\n group=\"org.geotools\",\r\n artifact=\"gt-main\",\r\n version=\"9.2\",\r\n)\r\n```\r\n\r\n[slack discussion](https://pantsbuild.slack.com/archives/C046T6T9U/p1645598534013879) with links to full resolve output and json\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport os\nimport shlex\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import ClassVar, Iterable, Tuple\n\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import BashBinary, Process\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.python.binaries import PythonBinary\nfrom pants.util.logging import LogLevel\n\nCOURSIER_POST_PROCESSING_SCRIPT = textwrap.dedent(\n \"\"\"\\\n import json\n import sys\n import os\n from pathlib import PurePath\n from shutil import copyfile\n\n report = json.load(open(sys.argv[1]))\n\n # Mapping from dest path to source path. 
It is ok to capture the same output filename multiple\n # times if the source is the same as well.\n classpath = dict()\n for dep in report['dependencies']:\n source = PurePath(dep['file'])\n dest_name = dep['coord'].replace(\":\", \"_\")\n _, ext = os.path.splitext(source)\n classpath_dest = f\"classpath/{dest_name}{ext}\"\n\n existing_source = classpath.get(classpath_dest)\n if existing_source:\n if existing_source == source:\n # We've already captured this file.\n continue\n raise Exception(\n f\"Duplicate jar name {classpath_dest} with incompatible source:\\\\n\"\n f\" {source}\\\\n\"\n f\" {existing_source}\\\\n\"\n )\n classpath[classpath_dest] = source\n copyfile(source, classpath_dest)\n \"\"\"\n)\n\nCOURSIER_FETCH_WRAPPER_SCRIPT = textwrap.dedent(\n \"\"\"\\\n set -eux\n\n coursier_exe=\"$1\"\n shift\n json_output_file=\"$1\"\n shift\n\n working_dir=\"$(pwd)\"\n \"$coursier_exe\" fetch {repos_args} \\\n --json-output-file=\"$json_output_file\" \\\n \"${{@//{coursier_working_directory}/$working_dir}}\"\n /bin/mkdir -p classpath\n {python_path} {coursier_bin_dir}/coursier_post_processing_script.py \"$json_output_file\"\n \"\"\"\n)\n\n\n# TODO: Coursier renders setrlimit error line on macOS.\n# see https://github.com/pantsbuild/pants/issues/13942.\nPOST_PROCESS_COURSIER_STDERR_SCRIPT = textwrap.dedent(\n \"\"\"\\\n #!{python_path}\n import sys\n from subprocess import run, PIPE\n\n proc = run(sys.argv[1:], stdout=PIPE, stderr=PIPE)\n\n sys.stdout.buffer.write(proc.stdout)\n sys.stderr.buffer.write(proc.stderr.replace(b\"setrlimit to increase file descriptor limit failed, errno 22\\\\n\", b\"\"))\n sys.exit(proc.returncode)\n \"\"\"\n)\n\n\nclass CoursierSubsystem(TemplatedExternalTool):\n options_scope = \"coursier\"\n name = \"coursier\"\n help = \"A dependency resolver for the Maven ecosystem.\"\n\n default_version = \"v2.0.16-169-g194ebc55c\"\n default_known_versions = [\n \"v2.0.16-169-g194ebc55c|linux_arm64 |da38c97d55967505b8454c20a90370c518044829398b9bce8b637d194d79abb3|18114472\",\n \"v2.0.16-169-g194ebc55c|linux_x86_64|4c61a634c4bd2773b4543fe0fc32210afd343692891121cddb447204b48672e8|18486946\",\n \"v2.0.16-169-g194ebc55c|macos_arm64 |15bce235d223ef1d022da30b67b4c64e9228d236b876c834b64e029bbe824c6f|17957182\",\n \"v2.0.16-169-g194ebc55c|macos_x86_64|15bce235d223ef1d022da30b67b4c64e9228d236b876c834b64e029bbe824c6f|17957182\",\n ]\n default_url_template = (\n \"https://github.com/coursier/coursier/releases/download/{version}/cs-{platform}.gz\"\n )\n default_url_platform_mapping = {\n \"macos_arm64\": \"x86_64-apple-darwin\",\n \"macos_x86_64\": \"x86_64-apple-darwin\",\n \"linux_arm64\": \"aarch64-pc-linux\",\n \"linux_x86_64\": \"x86_64-pc-linux\",\n }\n\n @classmethod\n def register_options(cls, register) -> None:\n super().register_options(register)\n register(\n \"--repos\",\n type=list,\n member_type=str,\n default=[\n \"https://maven-central.storage-download.googleapis.com/maven2\",\n \"https://repo1.maven.org/maven2\",\n ],\n help=(\"Maven style repositories to resolve artifacts from.\"),\n )\n\n def generate_exe(self, plat: Platform) -> str:\n archive_filename = os.path.basename(self.generate_url(plat))\n filename = os.path.splitext(archive_filename)[0]\n return f\"./{filename}\"\n\n\n@dataclass(frozen=True)\nclass Coursier:\n \"\"\"The Coursier tool and various utilities, prepared for use via `immutable_input_digests`.\"\"\"\n\n coursier: DownloadedExternalTool\n _digest: Digest\n\n bin_dir: ClassVar[str] = \"__coursier\"\n fetch_wrapper_script: ClassVar[str] = 
f\"{bin_dir}/coursier_fetch_wrapper_script.sh\"\n post_processing_script: ClassVar[str] = f\"{bin_dir}/coursier_post_processing_script.py\"\n post_process_stderr: ClassVar[str] = f\"{bin_dir}/coursier_post_process_stderr.py\"\n cache_name: ClassVar[str] = \"coursier\"\n cache_dir: ClassVar[str] = \".cache\"\n working_directory_placeholder: ClassVar[str] = \"___COURSIER_WORKING_DIRECTORY___\"\n\n def args(self, args: Iterable[str], *, wrapper: Iterable[str] = ()) -> tuple[str, ...]:\n return (\n self.post_process_stderr,\n *wrapper,\n os.path.join(self.bin_dir, self.coursier.exe),\n *args,\n )\n\n @property\n def env(self) -> dict[str, str]:\n # NB: These variables have changed a few times, and they change again on `main`. But as of\n # `v2.0.16+73-gddc6d9cc9` they are accurate. See:\n # https://github.com/coursier/coursier/blob/v2.0.16+73-gddc6d9cc9/modules/paths/src/main/java/coursier/paths/CoursierPaths.java#L38-L48\n return {\n \"COURSIER_CACHE\": f\"{self.cache_dir}/jdk\",\n \"COURSIER_ARCHIVE_CACHE\": f\"{self.cache_dir}/arc\",\n \"COURSIER_JVM_CACHE\": f\"{self.cache_dir}/v1\",\n }\n\n @property\n def append_only_caches(self) -> dict[str, str]:\n return {self.cache_name: self.cache_dir}\n\n @property\n def immutable_input_digests(self) -> dict[str, Digest]:\n return {self.bin_dir: self._digest}\n\n\n@dataclass(frozen=True)\nclass CoursierWrapperProcess:\n\n args: Tuple[str, ...]\n input_digest: Digest\n output_directories: Tuple[str, ...]\n output_files: Tuple[str, ...]\n description: str\n\n\n@rule\nasync def invoke_coursier_wrapper(\n bash: BashBinary,\n coursier: Coursier,\n request: CoursierWrapperProcess,\n) -> Process:\n\n return Process(\n argv=coursier.args(\n request.args,\n wrapper=[bash.path, coursier.fetch_wrapper_script],\n ),\n input_digest=request.input_digest,\n immutable_input_digests=coursier.immutable_input_digests,\n output_directories=request.output_directories,\n output_files=request.output_files,\n append_only_caches=coursier.append_only_caches,\n env=coursier.env,\n description=request.description,\n level=LogLevel.DEBUG,\n )\n\n\n@rule\nasync def setup_coursier(\n coursier_subsystem: CoursierSubsystem,\n python: PythonBinary,\n) -> Coursier:\n repos_args = \" \".join(f\"-r={shlex.quote(repo)}\" for repo in coursier_subsystem.options.repos)\n coursier_wrapper_script = COURSIER_FETCH_WRAPPER_SCRIPT.format(\n repos_args=repos_args,\n coursier_working_directory=Coursier.working_directory_placeholder,\n python_path=python.path,\n coursier_bin_dir=Coursier.bin_dir,\n )\n\n post_process_stderr = POST_PROCESS_COURSIER_STDERR_SCRIPT.format(python_path=python.path)\n\n downloaded_coursier_get = Get(\n DownloadedExternalTool,\n ExternalToolRequest,\n coursier_subsystem.get_request(Platform.current),\n )\n wrapper_scripts_digest_get = Get(\n Digest,\n CreateDigest(\n [\n FileContent(\n os.path.basename(Coursier.fetch_wrapper_script),\n coursier_wrapper_script.encode(\"utf-8\"),\n is_executable=True,\n ),\n FileContent(\n os.path.basename(Coursier.post_processing_script),\n COURSIER_POST_PROCESSING_SCRIPT.encode(\"utf-8\"),\n is_executable=True,\n ),\n FileContent(\n os.path.basename(Coursier.post_process_stderr),\n post_process_stderr.encode(\"utf-8\"),\n is_executable=True,\n ),\n ]\n ),\n )\n\n downloaded_coursier, wrapper_scripts_digest = await MultiGet(\n downloaded_coursier_get, wrapper_scripts_digest_get\n )\n\n return Coursier(\n coursier=downloaded_coursier,\n _digest=await Get(\n Digest,\n MergeDigests(\n [\n downloaded_coursier.digest,\n 
wrapper_scripts_digest,\n ]\n ),\n ),\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n ]\n", "path": "src/python/pants/jvm/resolve/coursier_setup.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport os\nimport shlex\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import ClassVar, Iterable, Tuple\n\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import BashBinary, Process\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.python.binaries import PythonBinary\nfrom pants.util.logging import LogLevel\n\nCOURSIER_POST_PROCESSING_SCRIPT = textwrap.dedent(\n \"\"\"\\\n import json\n import sys\n import os\n from pathlib import PurePath\n from shutil import copyfile\n\n report = json.load(open(sys.argv[1]))\n\n # Mapping from dest path to source path. It is ok to capture the same output filename multiple\n # times if the source is the same as well.\n classpath = dict()\n for dep in report['dependencies']:\n source = PurePath(dep['file'])\n dest_name = dep['coord'].replace(\":\", \"_\")\n _, ext = os.path.splitext(source)\n classpath_dest = f\"classpath/{dest_name}{ext}\"\n\n existing_source = classpath.get(classpath_dest)\n if existing_source:\n if existing_source == source:\n # We've already captured this file.\n continue\n raise Exception(\n f\"Duplicate jar name {classpath_dest} with incompatible source:\\\\n\"\n f\" {source}\\\\n\"\n f\" {existing_source}\\\\n\"\n )\n classpath[classpath_dest] = source\n copyfile(source, classpath_dest)\n \"\"\"\n)\n\nCOURSIER_FETCH_WRAPPER_SCRIPT = textwrap.dedent(\n \"\"\"\\\n set -eux\n\n coursier_exe=\"$1\"\n shift\n json_output_file=\"$1\"\n shift\n\n working_dir=\"$(pwd)\"\n \"$coursier_exe\" fetch {repos_args} \\\n --json-output-file=\"$json_output_file\" \\\n \"${{@//{coursier_working_directory}/$working_dir}}\"\n /bin/mkdir -p classpath\n {python_path} {coursier_bin_dir}/coursier_post_processing_script.py \"$json_output_file\"\n \"\"\"\n)\n\n\n# TODO: Coursier renders setrlimit error line on macOS.\n# see https://github.com/pantsbuild/pants/issues/13942.\nPOST_PROCESS_COURSIER_STDERR_SCRIPT = textwrap.dedent(\n \"\"\"\\\n #!{python_path}\n import sys\n from subprocess import run, PIPE\n\n proc = run(sys.argv[1:], stdout=PIPE, stderr=PIPE)\n\n sys.stdout.buffer.write(proc.stdout)\n sys.stderr.buffer.write(proc.stderr.replace(b\"setrlimit to increase file descriptor limit failed, errno 22\\\\n\", b\"\"))\n sys.exit(proc.returncode)\n \"\"\"\n)\n\n\nclass CoursierSubsystem(TemplatedExternalTool):\n options_scope = \"coursier\"\n name = \"coursier\"\n help = \"A dependency resolver for the Maven ecosystem.\"\n\n default_version = \"v2.0.16-169-g194ebc55c\"\n default_known_versions = [\n \"v2.0.16-169-g194ebc55c|linux_arm64 |da38c97d55967505b8454c20a90370c518044829398b9bce8b637d194d79abb3|18114472\",\n \"v2.0.16-169-g194ebc55c|linux_x86_64|4c61a634c4bd2773b4543fe0fc32210afd343692891121cddb447204b48672e8|18486946\",\n \"v2.0.16-169-g194ebc55c|macos_arm64 |15bce235d223ef1d022da30b67b4c64e9228d236b876c834b64e029bbe824c6f|17957182\",\n 
\"v2.0.16-169-g194ebc55c|macos_x86_64|15bce235d223ef1d022da30b67b4c64e9228d236b876c834b64e029bbe824c6f|17957182\",\n ]\n default_url_template = (\n \"https://github.com/coursier/coursier/releases/download/{version}/cs-{platform}.gz\"\n )\n default_url_platform_mapping = {\n \"macos_arm64\": \"x86_64-apple-darwin\",\n \"macos_x86_64\": \"x86_64-apple-darwin\",\n \"linux_arm64\": \"aarch64-pc-linux\",\n \"linux_x86_64\": \"x86_64-pc-linux\",\n }\n\n @classmethod\n def register_options(cls, register) -> None:\n super().register_options(register)\n register(\n \"--repos\",\n type=list,\n member_type=str,\n default=[\n \"https://maven-central.storage-download.googleapis.com/maven2\",\n \"https://repo1.maven.org/maven2\",\n ],\n help=(\"Maven style repositories to resolve artifacts from.\"),\n )\n\n def generate_exe(self, plat: Platform) -> str:\n archive_filename = os.path.basename(self.generate_url(plat))\n filename = os.path.splitext(archive_filename)[0]\n return f\"./{filename}\"\n\n\n@dataclass(frozen=True)\nclass Coursier:\n \"\"\"The Coursier tool and various utilities, prepared for use via `immutable_input_digests`.\"\"\"\n\n coursier: DownloadedExternalTool\n _digest: Digest\n\n bin_dir: ClassVar[str] = \"__coursier\"\n fetch_wrapper_script: ClassVar[str] = f\"{bin_dir}/coursier_fetch_wrapper_script.sh\"\n post_processing_script: ClassVar[str] = f\"{bin_dir}/coursier_post_processing_script.py\"\n post_process_stderr: ClassVar[str] = f\"{bin_dir}/coursier_post_process_stderr.py\"\n cache_name: ClassVar[str] = \"coursier\"\n cache_dir: ClassVar[str] = \".cache\"\n working_directory_placeholder: ClassVar[str] = \"___COURSIER_WORKING_DIRECTORY___\"\n\n def args(self, args: Iterable[str], *, wrapper: Iterable[str] = ()) -> tuple[str, ...]:\n return (\n self.post_process_stderr,\n *wrapper,\n os.path.join(self.bin_dir, self.coursier.exe),\n *args,\n )\n\n @property\n def env(self) -> dict[str, str]:\n # NB: These variables have changed a few times, and they change again on `main`. But as of\n # `v2.0.16+73-gddc6d9cc9` they are accurate. 
See:\n # https://github.com/coursier/coursier/blob/v2.0.16+73-gddc6d9cc9/modules/paths/src/main/java/coursier/paths/CoursierPaths.java#L38-L48\n return {\n \"COURSIER_CACHE\": f\"{self.cache_dir}/jdk\",\n \"COURSIER_ARCHIVE_CACHE\": f\"{self.cache_dir}/arc\",\n \"COURSIER_JVM_CACHE\": f\"{self.cache_dir}/v1\",\n }\n\n @property\n def append_only_caches(self) -> dict[str, str]:\n return {self.cache_name: self.cache_dir}\n\n @property\n def immutable_input_digests(self) -> dict[str, Digest]:\n return {self.bin_dir: self._digest}\n\n\n@dataclass(frozen=True)\nclass CoursierWrapperProcess:\n\n args: Tuple[str, ...]\n input_digest: Digest\n output_directories: Tuple[str, ...]\n output_files: Tuple[str, ...]\n description: str\n\n\n@rule\nasync def invoke_coursier_wrapper(\n bash: BashBinary,\n coursier: Coursier,\n request: CoursierWrapperProcess,\n) -> Process:\n\n return Process(\n argv=coursier.args(\n request.args,\n wrapper=[bash.path, coursier.fetch_wrapper_script],\n ),\n input_digest=request.input_digest,\n immutable_input_digests=coursier.immutable_input_digests,\n output_directories=request.output_directories,\n output_files=request.output_files,\n append_only_caches=coursier.append_only_caches,\n env=coursier.env,\n description=request.description,\n level=LogLevel.DEBUG,\n )\n\n\n@rule\nasync def setup_coursier(\n coursier_subsystem: CoursierSubsystem,\n python: PythonBinary,\n) -> Coursier:\n repos_args = (\n \" \".join(f\"-r={shlex.quote(repo)}\" for repo in coursier_subsystem.options.repos)\n + \" --no-default\"\n )\n coursier_wrapper_script = COURSIER_FETCH_WRAPPER_SCRIPT.format(\n repos_args=repos_args,\n coursier_working_directory=Coursier.working_directory_placeholder,\n python_path=python.path,\n coursier_bin_dir=Coursier.bin_dir,\n )\n\n post_process_stderr = POST_PROCESS_COURSIER_STDERR_SCRIPT.format(python_path=python.path)\n\n downloaded_coursier_get = Get(\n DownloadedExternalTool,\n ExternalToolRequest,\n coursier_subsystem.get_request(Platform.current),\n )\n wrapper_scripts_digest_get = Get(\n Digest,\n CreateDigest(\n [\n FileContent(\n os.path.basename(Coursier.fetch_wrapper_script),\n coursier_wrapper_script.encode(\"utf-8\"),\n is_executable=True,\n ),\n FileContent(\n os.path.basename(Coursier.post_processing_script),\n COURSIER_POST_PROCESSING_SCRIPT.encode(\"utf-8\"),\n is_executable=True,\n ),\n FileContent(\n os.path.basename(Coursier.post_process_stderr),\n post_process_stderr.encode(\"utf-8\"),\n is_executable=True,\n ),\n ]\n ),\n )\n\n downloaded_coursier, wrapper_scripts_digest = await MultiGet(\n downloaded_coursier_get, wrapper_scripts_digest_get\n )\n\n return Coursier(\n coursier=downloaded_coursier,\n _digest=await Get(\n Digest,\n MergeDigests(\n [\n downloaded_coursier.digest,\n wrapper_scripts_digest,\n ]\n ),\n ),\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n ]\n", "path": "src/python/pants/jvm/resolve/coursier_setup.py"}]}
3,776
208
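The fix recorded above comes down to one change in the `setup_coursier` rule: the `-r=<repo>` flags now end with `--no-default`, which tells `coursier fetch` to use only the configured repositories instead of also falling back to its built-in defaults. A minimal sketch of that argument-building pattern follows; the `build_repo_args` helper name is illustrative and not part of Pants.

```python
import shlex

def build_repo_args(repos):
    # -r=<url> registers each configured repository; --no-default keeps
    # Coursier from also consulting its built-in default repositories, so
    # resolution is limited to exactly the repos listed here.
    return " ".join(f"-r={shlex.quote(r)}" for r in repos) + " --no-default"

print(build_repo_args([
    "https://maven-central.storage-download.googleapis.com/maven2",
    "https://repo1.maven.org/maven2",
]))
```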
gh_patches_debug_51710
rasdani/github-patches
git_diff
getsentry__sentry-python-2069
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot import appengine ### How do you use Sentry? Sentry Saas (sentry.io) ### Version 1.18.0 ### Steps to Reproduce Install the SDK within any project that is not pinning urllib3 < 2.0.0 ### Expected Result ability to import appengine ### Actual Result Cannot import appengine as gaecontrib. As per urllib 2.0.0 release: https://github.com/urllib3/urllib3/tree/2.0.0 Removed urllib3.contrib.appengine.AppEngineManager and support for Google App Engine Standard Environment (https://github.com/urllib3/urllib3/issues/2044). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 """ 4 Sentry-Python - Sentry SDK for Python 5 ===================================== 6 7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub 8 <https://github.com/getsentry/sentry-python>`_ to find out more. 9 """ 10 11 import os 12 from setuptools import setup, find_packages 13 14 here = os.path.abspath(os.path.dirname(__file__)) 15 16 17 def get_file_text(file_name): 18 with open(os.path.join(here, file_name)) as in_file: 19 return in_file.read() 20 21 22 setup( 23 name="sentry-sdk", 24 version="1.21.1", 25 author="Sentry Team and Contributors", 26 author_email="[email protected]", 27 url="https://github.com/getsentry/sentry-python", 28 project_urls={ 29 "Documentation": "https://docs.sentry.io/platforms/python/", 30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md", 31 }, 32 description="Python client for Sentry (https://sentry.io)", 33 long_description=get_file_text("README.md"), 34 long_description_content_type="text/markdown", 35 packages=find_packages(exclude=("tests", "tests.*")), 36 # PEP 561 37 package_data={"sentry_sdk": ["py.typed"]}, 38 zip_safe=False, 39 license="MIT", 40 install_requires=[ 41 'urllib3>=1.25.7; python_version<="3.4"', 42 'urllib3>=1.26.9; python_version=="3.5"', 43 'urllib3>=1.26.11; python_version >="3.6"', 44 "certifi", 45 ], 46 extras_require={ 47 "flask": ["flask>=0.11", "blinker>=1.1"], 48 "quart": ["quart>=0.16.1", "blinker>=1.1"], 49 "bottle": ["bottle>=0.12.13"], 50 "falcon": ["falcon>=1.4"], 51 "django": ["django>=1.8"], 52 "sanic": ["sanic>=0.8"], 53 "celery": ["celery>=3"], 54 "huey": ["huey>=2"], 55 "beam": ["apache-beam>=2.12"], 56 "arq": ["arq>=0.23"], 57 "rq": ["rq>=0.6"], 58 "aiohttp": ["aiohttp>=3.5"], 59 "tornado": ["tornado>=5"], 60 "sqlalchemy": ["sqlalchemy>=1.2"], 61 "pyspark": ["pyspark>=2.4.4"], 62 "pure_eval": ["pure_eval", "executing", "asttokens"], 63 "chalice": ["chalice>=1.16.0"], 64 "httpx": ["httpx>=0.16.0"], 65 "starlette": ["starlette>=0.19.1"], 66 "starlite": ["starlite>=1.48"], 67 "fastapi": ["fastapi>=0.79.0"], 68 "pymongo": ["pymongo>=3.1"], 69 "opentelemetry": ["opentelemetry-distro>=0.35b0"], 70 "grpcio": ["grpcio>=1.21.1"] 71 }, 72 classifiers=[ 73 "Development Status :: 5 - Production/Stable", 74 "Environment :: Web Environment", 75 "Intended Audience :: Developers", 76 "License :: OSI Approved :: BSD License", 77 "Operating System :: OS Independent", 78 "Programming Language :: Python", 79 "Programming Language :: Python :: 2", 80 "Programming Language :: Python :: 2.7", 81 "Programming Language :: Python :: 3", 82 "Programming Language :: Python :: 3.4", 83 "Programming Language :: Python :: 3.5", 84 "Programming Language :: Python :: 3.6", 85 "Programming 
Language :: Python :: 3.7", 86 "Programming Language :: Python :: 3.8", 87 "Programming Language :: Python :: 3.9", 88 "Programming Language :: Python :: 3.10", 89 "Topic :: Software Development :: Libraries :: Python Modules", 90 ], 91 options={"bdist_wheel": {"universal": "1"}}, 92 ) 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -41,6 +41,7 @@ 'urllib3>=1.25.7; python_version<="3.4"', 'urllib3>=1.26.9; python_version=="3.5"', 'urllib3>=1.26.11; python_version >="3.6"', + 'urllib3<2.0.0', "certifi", ], extras_require={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,6 +41,7 @@\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version==\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n+ 'urllib3<2.0.0',\n \"certifi\",\n ],\n extras_require={\n", "issue": "Cannot import appengine\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.18.0\n\n### Steps to Reproduce\n\nInstall the SDK within any project that is not pinning urllib3 < 2.0.0\n\n### Expected Result\n\nability to import appengine\n\n### Actual Result\n\nCannot import appengine as gaecontrib.\r\nAs per urllib 2.0.0 release: https://github.com/urllib3/urllib3/tree/2.0.0\r\n\r\nRemoved urllib3.contrib.appengine.AppEngineManager and support for Google App Engine Standard Environment (https://github.com/urllib3/urllib3/issues/2044).\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.21.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"MIT\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version==\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"huey\": [\"huey>=2\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"arq\": [\"arq>=0.23\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"grpcio\": [\"grpcio>=1.21.1\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n 
\"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.21.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"MIT\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version==\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n 'urllib3<2.0.0',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"huey\": [\"huey>=2\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"arq\": [\"arq>=0.23\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"grpcio\": [\"grpcio>=1.21.1\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 
3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]}
1,514
119
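The golden diff above is a dependency pin rather than a code change: urllib3 2.0 removed `urllib3.contrib.appengine`, so the SDK caps the dependency below that release. Restated on its own, the patched `install_requires` block looks like this, with only the `'urllib3<2.0.0'` line being new:

```python
# Combined with the existing environment-marker lines, the effective range
# on Python >= 3.6 becomes urllib3 >= 1.26.11, < 2.0.0.
install_requires = [
    'urllib3>=1.25.7; python_version<="3.4"',
    'urllib3>=1.26.9; python_version=="3.5"',
    'urllib3>=1.26.11; python_version >="3.6"',
    'urllib3<2.0.0',
    "certifi",
]
```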
gh_patches_debug_3474
rasdani/github-patches
git_diff
pymedusa__Medusa-4725
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- YGGTorrent : Unable to bdecode torrent **Describe the bug** The YggTorrent provider seems to have a bug. The downloads are not correctly handled to rtorrent. The "unable to bdecode torrent" occurs. Thank you for your help. **Medusa (please complete the following information):** Branch: master  Commit: e7c4ff3bc9a48b47c932b0e40a539d62e3a62f64 Version: v0.2.6 Database: 44.11 Python Version: | 2.7.12 (default, Dec 4 2017, 14:50:18) [GCC 5.4.0 20160609] SSL Version: | OpenSSL 1.0.2g 1 Mar 2016 OS: | Linux-4.12.14-041214-generic-x86_64-with-Ubuntu-16.04-xenial **Logs:** <details> ``` 2018-07-23 12:45:32 WARNING SEARCHQUEUE-FORCED-325517 :: [e7c4ff3] Unable to bdecode torrent. Invalid torrent: Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD. Deleting cached result if exists 2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [e7c4ff3] Downloading Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD with 36 seeders and 0 leechers and size 938.52 MB from Yggtorrent 2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Picked Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD as the best result. 2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Quality of Salvation.S02E01.(FASTSUB.VOSTFR.720p.WEBRip.x265.NSP) is 720p WEB-DL 2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Quality of Salvation.S02E01.FASTSUB.VOSTFR.720p.HDTV.x264.-.ZKH is 720p HDTV 2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Quality of Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD is 720p WEB-DL 2018-07-23 12:45:31 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Performing episode search for Salvation``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `medusa/providers/torrent/html/yggtorrent.py` Content: ``` 1 # coding=utf-8 2 3 """Provider code for Yggtorrent.""" 4 5 from __future__ import unicode_literals 6 7 import logging 8 import re 9 10 from medusa import tv 11 from medusa.bs4_parser import BS4Parser 12 from medusa.helper.common import ( 13 convert_size, 14 try_int, 15 ) 16 from medusa.logger.adapters.style import BraceAdapter 17 from medusa.providers.torrent.torrent_provider import TorrentProvider 18 19 from requests.compat import urljoin 20 21 log = BraceAdapter(logging.getLogger(__name__)) 22 log.logger.addHandler(logging.NullHandler()) 23 24 25 class YggtorrentProvider(TorrentProvider): 26 """Yggtorrent Torrent provider.""" 27 28 torrent_id_pattern = re.compile(r'\/(\d+)-') 29 30 def __init__(self): 31 """Initialize the class.""" 32 super(YggtorrentProvider, self).__init__('Yggtorrent') 33 34 # Credentials 35 self.username = None 36 self.password = None 37 38 # URLs 39 self.url = 'https://ww1.yggtorrent.is' 40 self.urls = { 41 'login': urljoin(self.url, 'user/login'), 42 'search': urljoin(self.url, 'engine/search'), 43 'download': urljoin(self.url, 'engine/download_torrent?id={0}') 44 } 45 46 # Proper Strings 47 self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP'] 48 49 # Torrent Stats 50 self.minseed = None 51 self.minleech = None 52 53 # Cache 54 self.cache = tv.Cache(self, min_time=20) 55 56 def search(self, search_strings, age=0, ep_obj=None, **kwargs): 57 """ 58 Search a provider and parse the results. 59 60 :param search_strings: A dict with mode (key) and the search value (value) 61 :param age: Not used 62 :param ep_obj: Not used 63 :returns: A list of search results (structure) 64 """ 65 results = [] 66 if not self.login(): 67 return results 68 69 # Search Params 70 search_params = { 71 'category': 2145, 72 'do': 'search' 73 } 74 75 for mode in search_strings: 76 log.debug('Search mode: {0}', mode) 77 78 for search_string in search_strings[mode]: 79 80 if mode != 'RSS': 81 log.debug('Search string: {search}', 82 {'search': search_string}) 83 84 search_params['name'] = re.sub(r'[()]', '', search_string) 85 86 response = self.session.get(self.urls['search'], params=search_params) 87 if not response or not response.text: 88 log.debug('No data returned from provider') 89 continue 90 91 results += self.parse(response.text, mode) 92 93 return results 94 95 def parse(self, data, mode): 96 """ 97 Parse search results for items. 98 99 :param data: The raw response from a search 100 :param mode: The current mode used to search, e.g. 
RSS 101 102 :return: A list of items found 103 """ 104 # Units 105 units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO'] 106 107 items = [] 108 109 with BS4Parser(data, 'html5lib') as html: 110 torrent_table = html.find(class_='table-responsive results') 111 torrent_rows = torrent_table('tr') if torrent_table else [] 112 113 # Continue only if at least one Release is found 114 if len(torrent_rows) < 2: 115 log.debug('Data returned from provider does not contain any torrents') 116 return items 117 118 # Skip column headers 119 for result in torrent_rows[1:]: 120 cells = result('td') 121 if len(cells) < 9: 122 continue 123 124 try: 125 info = cells[1].find('a') 126 title = info.get_text(strip=True) 127 download_url = info.get('href') 128 if not (title and download_url): 129 continue 130 131 torrent_id = self.torrent_id_pattern.search(download_url) 132 download_url = self.urls['download'].format(torrent_id.group(1)) 133 134 seeders = try_int(cells[7].get_text(strip=True), 0) 135 leechers = try_int(cells[8].get_text(strip=True), 0) 136 137 # Filter unseeded torrent 138 if seeders < min(self.minseed, 1): 139 if mode != 'RSS': 140 log.debug("Discarding torrent because it doesn't meet the" 141 " minimum seeders: {0}. Seeders: {1}", 142 title, seeders) 143 continue 144 145 torrent_size = cells[5].get_text() 146 size = convert_size(torrent_size, sep='', units=units, default=-1) 147 148 pubdate_raw = cells[4].find('div', class_='hidden').get_text(strip=True) 149 pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True) 150 151 item = { 152 'title': title, 153 'link': download_url, 154 'size': size, 155 'seeders': seeders, 156 'leechers': leechers, 157 'pubdate': pubdate, 158 } 159 if mode != 'RSS': 160 log.debug('Found result: {0} with {1} seeders and {2} leechers', 161 title, seeders, leechers) 162 163 items.append(item) 164 except (AttributeError, TypeError, KeyError, ValueError, IndexError): 165 log.exception('Failed parsing provider.') 166 167 return items 168 169 def login(self): 170 """Login method used for logging in before doing search and torrent downloads.""" 171 login_params = { 172 'id': self.username, 173 'pass': self.password 174 } 175 176 login_resp = self.session.post(self.urls['login'], data=login_params) 177 if not login_resp: 178 log.warning('Invalid username or password. Check your settings') 179 return False 180 181 response = self.session.get(self.url) 182 if not response: 183 log.warning('Unable to connect to provider') 184 return False 185 186 if 'Bienvenue' not in response.text: 187 log.warning('Unable to login to provider') 188 return False 189 190 return True 191 192 193 provider = YggtorrentProvider() 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py --- a/medusa/providers/torrent/html/yggtorrent.py +++ b/medusa/providers/torrent/html/yggtorrent.py @@ -36,7 +36,7 @@ self.password = None # URLs - self.url = 'https://ww1.yggtorrent.is' + self.url = 'https://ww2.yggtorrent.is' self.urls = { 'login': urljoin(self.url, 'user/login'), 'search': urljoin(self.url, 'engine/search'),
{"golden_diff": "diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py\n--- a/medusa/providers/torrent/html/yggtorrent.py\n+++ b/medusa/providers/torrent/html/yggtorrent.py\n@@ -36,7 +36,7 @@\n self.password = None\n \n # URLs\n- self.url = 'https://ww1.yggtorrent.is'\n+ self.url = 'https://ww2.yggtorrent.is'\n self.urls = {\n 'login': urljoin(self.url, 'user/login'),\n 'search': urljoin(self.url, 'engine/search'),\n", "issue": "YGGTorrent : Unable to bdecode torrent\n**Describe the bug**\r\nThe YggTorrent provider seems to have a bug. The downloads are not correctly handled to rtorrent. The \"unable to bdecode torrent\" occurs. Thank you for your help. \r\n\r\n\r\n**Medusa (please complete the following information):**\r\n\r\nBranch:\u00a0master\u00a0\r\nCommit:\u00a0e7c4ff3bc9a48b47c932b0e40a539d62e3a62f64\u00a0Version:\u00a0v0.2.6\u00a0Database:\u00a044.11\r\nPython Version: | 2.7.12 (default, Dec 4 2017, 14:50:18) [GCC 5.4.0 20160609]\r\nSSL Version: | OpenSSL 1.0.2g 1 Mar 2016\r\nOS: | Linux-4.12.14-041214-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\r\n**Logs:**\r\n<details>\r\n\r\n```\r\n2018-07-23 12:45:32 WARNING SEARCHQUEUE-FORCED-325517 :: [e7c4ff3] Unable to bdecode torrent. Invalid torrent: Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD. Deleting cached result if exists\r\n2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [e7c4ff3] Downloading Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD with 36 seeders and 0 leechers and size 938.52 MB from Yggtorrent\r\n2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Picked Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD as the best result.\r\n2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Quality of Salvation.S02E01.(FASTSUB.VOSTFR.720p.WEBRip.x265.NSP) is 720p WEB-DL\r\n2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Quality of Salvation.S02E01.FASTSUB.VOSTFR.720p.HDTV.x264.-.ZKH is 720p HDTV\r\n2018-07-23 12:45:32 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Quality of Salvation.S02E01.FASTSUB.VOSTFR.720p.AMZN.WEB-DL.DDP5.1.H.264-GOLD is 720p WEB-DL\r\n2018-07-23 12:45:31 INFO SEARCHQUEUE-FORCED-325517 :: [Yggtorrent] :: [e7c4ff3] Performing episode search for Salvation```\r\n</details>\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Yggtorrent.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass YggtorrentProvider(TorrentProvider):\n \"\"\"Yggtorrent Torrent provider.\"\"\"\n\n torrent_id_pattern = re.compile(r'\\/(\\d+)-')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(YggtorrentProvider, self).__init__('Yggtorrent')\n\n # Credentials\n self.username = None\n self.password = None\n\n # URLs\n self.url = 'https://ww1.yggtorrent.is'\n self.urls = {\n 'login': urljoin(self.url, 'user/login'),\n 'search': urljoin(self.url, 'engine/search'),\n 'download': urljoin(self.url, 'engine/download_torrent?id={0}')\n }\n\n # Proper Strings\n 
self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n # Search Params\n search_params = {\n 'category': 2145,\n 'do': 'search'\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n\n search_params['name'] = re.sub(r'[()]', '', search_string)\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find(class_='table-responsive results')\n torrent_rows = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one Release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for result in torrent_rows[1:]:\n cells = result('td')\n if len(cells) < 9:\n continue\n\n try:\n info = cells[1].find('a')\n title = info.get_text(strip=True)\n download_url = info.get('href')\n if not (title and download_url):\n continue\n\n torrent_id = self.torrent_id_pattern.search(download_url)\n download_url = self.urls['download'].format(torrent_id.group(1))\n\n seeders = try_int(cells[7].get_text(strip=True), 0)\n leechers = try_int(cells[8].get_text(strip=True), 0)\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. Seeders: {1}\",\n title, seeders)\n continue\n\n torrent_size = cells[5].get_text()\n size = convert_size(torrent_size, sep='', units=units, default=-1)\n\n pubdate_raw = cells[4].find('div', class_='hidden').get_text(strip=True)\n pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n login_params = {\n 'id': self.username,\n 'pass': self.password\n }\n\n login_resp = self.session.post(self.urls['login'], data=login_params)\n if not login_resp:\n log.warning('Invalid username or password. 
Check your settings')\n return False\n\n response = self.session.get(self.url)\n if not response:\n log.warning('Unable to connect to provider')\n return False\n\n if 'Bienvenue' not in response.text:\n log.warning('Unable to login to provider')\n return False\n\n return True\n\n\nprovider = YggtorrentProvider()\n", "path": "medusa/providers/torrent/html/yggtorrent.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Yggtorrent.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass YggtorrentProvider(TorrentProvider):\n \"\"\"Yggtorrent Torrent provider.\"\"\"\n\n torrent_id_pattern = re.compile(r'\\/(\\d+)-')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(YggtorrentProvider, self).__init__('Yggtorrent')\n\n # Credentials\n self.username = None\n self.password = None\n\n # URLs\n self.url = 'https://ww2.yggtorrent.is'\n self.urls = {\n 'login': urljoin(self.url, 'user/login'),\n 'search': urljoin(self.url, 'engine/search'),\n 'download': urljoin(self.url, 'engine/download_torrent?id={0}')\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n # Search Params\n search_params = {\n 'category': 2145,\n 'do': 'search'\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n\n search_params['name'] = re.sub(r'[()]', '', search_string)\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find(class_='table-responsive results')\n torrent_rows = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one Release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for result in torrent_rows[1:]:\n cells = result('td')\n if len(cells) < 9:\n continue\n\n try:\n info = cells[1].find('a')\n title = info.get_text(strip=True)\n download_url = info.get('href')\n if not (title and download_url):\n continue\n\n torrent_id = self.torrent_id_pattern.search(download_url)\n download_url = self.urls['download'].format(torrent_id.group(1))\n\n seeders = try_int(cells[7].get_text(strip=True), 0)\n leechers = try_int(cells[8].get_text(strip=True), 0)\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. Seeders: {1}\",\n title, seeders)\n continue\n\n torrent_size = cells[5].get_text()\n size = convert_size(torrent_size, sep='', units=units, default=-1)\n\n pubdate_raw = cells[4].find('div', class_='hidden').get_text(strip=True)\n pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n login_params = {\n 'id': self.username,\n 'pass': self.password\n }\n\n login_resp = self.session.post(self.urls['login'], data=login_params)\n if not login_resp:\n log.warning('Invalid username or password. Check your settings')\n return False\n\n response = self.session.get(self.url)\n if not response:\n log.warning('Unable to connect to provider')\n return False\n\n if 'Bienvenue' not in response.text:\n log.warning('Unable to login to provider')\n return False\n\n return True\n\n\nprovider = YggtorrentProvider()\n", "path": "medusa/providers/torrent/html/yggtorrent.py"}]}
2,961
145
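The fix above is a single base-URL change (`ww1` to `ww2`); the relative endpoint paths are untouched. That is consistent with the reported "unable to bdecode torrent" symptom: if the old domain answered with an HTML page instead of torrent data, the downloaded payload would fail bencode decoding. A small sketch of the resulting URL table, with a print added only for demonstration:

```python
from requests.compat import urljoin  # re-export of urllib.parse.urljoin

url = "https://ww2.yggtorrent.is"  # only the domain changed in the diff
urls = {
    "login": urljoin(url, "user/login"),
    "search": urljoin(url, "engine/search"),
    "download": urljoin(url, "engine/download_torrent?id={0}"),
}
print(urls["search"])  # https://ww2.yggtorrent.is/engine/search
```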
gh_patches_debug_51406
rasdani/github-patches
git_diff
pytorch__ignite-1016
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PyTorch dependency is lacking version constraint ## 🐛 Bug description <!-- A clear and concise description of what the bug is. --> PyTorch is a dependency of Ignite and, thus, is specified in `setup.py` https://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/setup.py#L24-L26 and `conda.recipe/meta.yaml`: https://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/conda.recipe/meta.yaml#L15-L23 The PyTorch dependency is lacking a version constraint which may work fine right now, but there is no guarantee that Ignite will be compatible with any future major PyTorch release (e.g. PyTorch v2.x). I suggest to constrain the PyTorch version that Ignite is compatible with, e.g. `>=1.0,<2` or `<2` if any `0.x` and `1.x` version works. If PyTorch has a new major release, even previous Ignite versions can become compatible with the new major PyTorch release (especially if no changes to the code are necessary) by making new bug fix releases with relaxed version constraints to include the new PyTorch version. In my opinion, it is highly preferable to be conservative about dependency version constraints through a [compatible release constraint](https://www.python.org/dev/peps/pep-0440/#compatible-release) in case the dependency conforms with semantic versioning. It is impossible to guarantee compatibility with a future major release of a dependency as its API can change arbitrarily. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 import io 3 import re 4 from setuptools import setup, find_packages 5 6 7 def read(*names, **kwargs): 8 with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp: 9 return fp.read() 10 11 12 def find_version(*file_paths): 13 version_file = read(*file_paths) 14 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) 15 if version_match: 16 return version_match.group(1) 17 raise RuntimeError("Unable to find version string.") 18 19 20 readme = read("README.md") 21 22 VERSION = find_version("ignite", "__init__.py") 23 24 requirements = [ 25 "torch", 26 ] 27 28 setup( 29 # Metadata 30 name="pytorch-ignite", 31 version=VERSION, 32 author="PyTorch Core Team", 33 author_email="[email protected]", 34 url="https://github.com/pytorch/ignite", 35 description="A lightweight library to help with training neural networks in PyTorch.", 36 long_description_content_type="text/markdown", 37 long_description=readme, 38 license="BSD", 39 # Package info 40 packages=find_packages(exclude=("tests", "tests.*",)), 41 zip_safe=True, 42 install_requires=requirements, 43 ) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ VERSION = find_version("ignite", "__init__.py") requirements = [ - "torch", + "torch>=1.0,<2", ] setup(
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n VERSION = find_version(\"ignite\", \"__init__.py\")\n \n requirements = [\n- \"torch\",\n+ \"torch>=1.0,<2\",\n ]\n \n setup(\n", "issue": "PyTorch dependency is lacking version constraint\n## \ud83d\udc1b Bug description\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nPyTorch is a dependency of Ignite and, thus, is specified in `setup.py`\r\n\r\nhttps://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/setup.py#L24-L26\r\n\r\nand `conda.recipe/meta.yaml`:\r\n\r\nhttps://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/conda.recipe/meta.yaml#L15-L23\r\n\r\nThe PyTorch dependency is lacking a version constraint which may work fine right now, but there is no guarantee that Ignite will be compatible with any future major PyTorch release (e.g. PyTorch v2.x).\r\n\r\nI suggest to constrain the PyTorch version that Ignite is compatible with, e.g. `>=1.0,<2` or `<2` if any `0.x` and `1.x` version works. If PyTorch has a new major release, even previous Ignite versions can become compatible with the new major PyTorch release (especially if no changes to the code are necessary) by making new bug fix releases with relaxed version constraints to include the new PyTorch version.\r\n\r\nIn my opinion, it is highly preferable to be conservative about dependency version constraints through a [compatible release constraint](https://www.python.org/dev/peps/pep-0440/#compatible-release) in case the dependency conforms with semantic versioning. It is impossible to guarantee compatibility with a future major release of a dependency as its API can change arbitrarily.\n", "before_files": [{"content": "import os\nimport io\nimport re\nfrom setuptools import setup, find_packages\n\n\ndef read(*names, **kwargs):\n with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nreadme = read(\"README.md\")\n\nVERSION = find_version(\"ignite\", \"__init__.py\")\n\nrequirements = [\n \"torch\",\n]\n\nsetup(\n # Metadata\n name=\"pytorch-ignite\",\n version=VERSION,\n author=\"PyTorch Core Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pytorch/ignite\",\n description=\"A lightweight library to help with training neural networks in PyTorch.\",\n long_description_content_type=\"text/markdown\",\n long_description=readme,\n license=\"BSD\",\n # Package info\n packages=find_packages(exclude=(\"tests\", \"tests.*\",)),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport io\nimport re\nfrom setuptools import setup, find_packages\n\n\ndef read(*names, **kwargs):\n with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nreadme = read(\"README.md\")\n\nVERSION = 
find_version(\"ignite\", \"__init__.py\")\n\nrequirements = [\n \"torch>=1.0,<2\",\n]\n\nsetup(\n # Metadata\n name=\"pytorch-ignite\",\n version=VERSION,\n author=\"PyTorch Core Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pytorch/ignite\",\n description=\"A lightweight library to help with training neural networks in PyTorch.\",\n long_description_content_type=\"text/markdown\",\n long_description=readme,\n license=\"BSD\",\n # Package info\n packages=find_packages(exclude=(\"tests\", \"tests.*\",)),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]}
1,012
68
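The change above pins `torch>=1.0,<2`, following the compatible-release reasoning laid out in the issue: keep the lower bound the code is known to work with and exclude the next, untested major release. One quick way to sanity-check such a specifier is the third-party `packaging` library, which is an assumption of this sketch and not part of the record:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.0,<2")
print("1.5.0" in spec)  # True: any 1.x release satisfies the pin
print("2.0.0" in spec)  # False: a future major release is excluded
```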
gh_patches_debug_64103
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-2411
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- App breaking when using Enum as field for Generic <!-- Provide a general summary of the bug in the title above. --> When using an Enum as a field on a Generic, the app breaks, throwing a `NotImplementedError`. <!--- This template is entirely optional and can be removed, but is here to help both you and us. --> <!--- Anything on lines wrapped in comments like these will not show up in the final text. --> ## Describe the Bug The below code is an example of the error. ```python from enum import Enum from typing import Generic, Optional, TypeVar import strawberry T = TypeVar("T") @strawberry.enum class EstimatedValueEnum(Enum): test = "test" testtest = "testtest" @strawberry.type class EstimatedValue(Generic[T]): value: T type: EstimatedValueEnum @strawberry.type class Query: id_translations: Optional[EstimatedValue[int]] schema = strawberry.Schema(query=Query) ``` Are we doing something wrong and this is intended or is this a bug? <!-- A clear and concise description of what the bug is. --> ## System Information - Operating system: Docker - Strawberry version (if applicable): Since [0.149.2](https://github.com/strawberry-graphql/strawberry/blob/main/CHANGELOG.md#01492---2022-12-09) <!-- Add any other relevant information about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/enum.py` Content: ``` 1 import dataclasses 2 from enum import EnumMeta 3 from typing import ( 4 Any, 5 Callable, 6 Iterable, 7 List, 8 Mapping, 9 Optional, 10 TypeVar, 11 Union, 12 overload, 13 ) 14 15 from strawberry.type import StrawberryType 16 17 from .exceptions import ObjectIsNotAnEnumError 18 19 20 @dataclasses.dataclass 21 class EnumValue: 22 name: str 23 value: Any 24 deprecation_reason: Optional[str] = None 25 directives: Iterable[object] = () 26 description: Optional[str] = None 27 28 29 @dataclasses.dataclass 30 class EnumDefinition(StrawberryType): 31 wrapped_cls: EnumMeta 32 name: str 33 values: List[EnumValue] 34 description: Optional[str] 35 directives: Iterable[object] = () 36 37 def __hash__(self) -> int: 38 # TODO: Is this enough for unique-ness? 
39 return hash(self.name) 40 41 def copy_with( 42 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] 43 ) -> Union[StrawberryType, type]: 44 return super().copy_with(type_var_map) # type: ignore[safe-super] 45 46 @property 47 def is_generic(self) -> bool: 48 return False 49 50 51 # TODO: remove duplication of EnumValueDefinition and EnumValue 52 @dataclasses.dataclass 53 class EnumValueDefinition: 54 value: Any 55 deprecation_reason: Optional[str] = None 56 directives: Iterable[object] = () 57 description: Optional[str] = None 58 59 60 def enum_value( 61 value: Any, 62 deprecation_reason: Optional[str] = None, 63 directives: Iterable[object] = (), 64 description: Optional[str] = None, 65 ) -> EnumValueDefinition: 66 return EnumValueDefinition( 67 value=value, 68 deprecation_reason=deprecation_reason, 69 directives=directives, 70 description=description, 71 ) 72 73 74 EnumType = TypeVar("EnumType", bound=EnumMeta) 75 76 77 def _process_enum( 78 cls: EnumType, 79 name: Optional[str] = None, 80 description: Optional[str] = None, 81 directives: Iterable[object] = (), 82 ) -> EnumType: 83 if not isinstance(cls, EnumMeta): 84 raise ObjectIsNotAnEnumError(cls) 85 86 if not name: 87 name = cls.__name__ 88 89 description = description 90 91 values = [] 92 for item in cls: # type: ignore 93 item_value = item.value 94 item_name = item.name 95 deprecation_reason = None 96 item_directives: Iterable[object] = () 97 enum_value_description = None 98 99 if isinstance(item_value, EnumValueDefinition): 100 item_directives = item_value.directives 101 enum_value_description = item_value.description 102 deprecation_reason = item_value.deprecation_reason 103 item_value = item_value.value 104 105 # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and 106 # `MyEnum['MY_VALUE']` both work 107 cls._value2member_map_[item_value] = item 108 cls._member_map_[item_name]._value_ = item_value 109 110 value = EnumValue( 111 item_name, 112 item_value, 113 deprecation_reason=deprecation_reason, 114 directives=item_directives, 115 description=enum_value_description, 116 ) 117 values.append(value) 118 119 cls._enum_definition = EnumDefinition( # type: ignore 120 wrapped_cls=cls, 121 name=name, 122 values=values, 123 description=description, 124 directives=directives, 125 ) 126 127 return cls 128 129 130 @overload 131 def enum( 132 _cls: EnumType, 133 *, 134 name: Optional[str] = None, 135 description: Optional[str] = None, 136 directives: Iterable[object] = () 137 ) -> EnumType: 138 ... 139 140 141 @overload 142 def enum( 143 _cls: None = None, 144 *, 145 name: Optional[str] = None, 146 description: Optional[str] = None, 147 directives: Iterable[object] = () 148 ) -> Callable[[EnumType], EnumType]: 149 ... 150 151 152 def enum( 153 _cls: Optional[EnumType] = None, 154 *, 155 name: Optional[str] = None, 156 description: Optional[str] = None, 157 directives: Iterable[object] = () 158 ) -> Union[EnumType, Callable[[EnumType], EnumType]]: 159 """Registers the enum in the GraphQL type system. 160 161 If name is passed, the name of the GraphQL type will be 162 the value passed of name instead of the Enum class name. 163 """ 164 165 def wrap(cls: EnumType) -> EnumType: 166 return _process_enum(cls, name, description, directives=directives) 167 168 if not _cls: 169 return wrap 170 171 return wrap(_cls) 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/enum.py b/strawberry/enum.py --- a/strawberry/enum.py +++ b/strawberry/enum.py @@ -41,7 +41,8 @@ def copy_with( self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] ) -> Union[StrawberryType, type]: - return super().copy_with(type_var_map) # type: ignore[safe-super] + # enum don't support type parameters, so we can safely return self + return self @property def is_generic(self) -> bool:
{"golden_diff": "diff --git a/strawberry/enum.py b/strawberry/enum.py\n--- a/strawberry/enum.py\n+++ b/strawberry/enum.py\n@@ -41,7 +41,8 @@\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n- return super().copy_with(type_var_map) # type: ignore[safe-super]\n+ # enum don't support type parameters, so we can safely return self\n+ return self\n \n @property\n def is_generic(self) -> bool:\n", "issue": "App breaking when using Enum as field for Generic\n<!-- Provide a general summary of the bug in the title above. -->\r\nWhen using an Enum as a field on a Generic, the app breaks, throwing a `NotImplementedError`.\r\n\r\n<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->\r\n<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->\r\n\r\n## Describe the Bug\r\nThe below code is an example of the error.\r\n```python\r\nfrom enum import Enum\r\nfrom typing import Generic, Optional, TypeVar\r\n\r\nimport strawberry\r\n\r\nT = TypeVar(\"T\")\r\n\r\n\r\[email protected]\r\nclass EstimatedValueEnum(Enum):\r\n test = \"test\"\r\n testtest = \"testtest\"\r\n\r\n\r\[email protected]\r\nclass EstimatedValue(Generic[T]):\r\n value: T\r\n type: EstimatedValueEnum\r\n\r\n\r\[email protected]\r\nclass Query:\r\n id_translations: Optional[EstimatedValue[int]]\r\n\r\n\r\nschema = strawberry.Schema(query=Query)\r\n```\r\nAre we doing something wrong and this is intended or is this a bug?\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## System Information\r\n\r\n - Operating system: Docker\r\n - Strawberry version (if applicable): Since [0.149.2](https://github.com/strawberry-graphql/strawberry/blob/main/CHANGELOG.md#01492---2022-12-09)\r\n\r\n<!-- Add any other relevant information about the problem here. 
-->\r\n\n", "before_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map) # type: ignore[safe-super]\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the 
GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}], "after_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n # enum don't support type parameters, so we can safely return self\n return self\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n 
description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}]}
num_tokens: 2,021
num_tokens_diff: 146
problem_id: gh_patches_debug_26348
source: rasdani/github-patches
task_type: git_diff
in_source_id: wagtail__wagtail-4805
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When changing your password from with admin a user is presented with an edit button. I've just reset my password from within admin and I get this horrible large edit button. <img width="1267" alt="screen shot 2018-01-16 at 12 33 46" src="https://user-images.githubusercontent.com/7290588/34989276-e10b3338-fab9-11e7-9e69-05d1c7846af4.png"> Can we remove the edit button and amend the message to say 'Your details have been updated. You've been logged out for security reasons, please login to continue.' or something like this. When changing your password from with admin a user is presented with an edit button. I've just reset my password from within admin and I get this horrible large edit button. <img width="1267" alt="screen shot 2018-01-16 at 12 33 46" src="https://user-images.githubusercontent.com/7290588/34989276-e10b3338-fab9-11e7-9e69-05d1c7846af4.png"> Can we remove the edit button and amend the message to say 'Your details have been updated. You've been logged out for security reasons, please login to continue.' or something like this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/users/views/users.py` Content: ``` 1 from django.conf import settings 2 from django.contrib.auth import get_user_model 3 from django.db.models import Q 4 from django.shortcuts import get_object_or_404, redirect, render 5 from django.urls import reverse 6 from django.utils.translation import ugettext as _ 7 from django.views.decorators.vary import vary_on_headers 8 9 from wagtail.admin import messages 10 from wagtail.admin.forms.search import SearchForm 11 from wagtail.admin.utils import any_permission_required, permission_denied, permission_required 12 from wagtail.core import hooks 13 from wagtail.core.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME 14 from wagtail.users.forms import UserCreationForm, UserEditForm 15 from wagtail.users.utils import user_can_delete_user 16 from wagtail.utils.loading import get_custom_form 17 from wagtail.utils.pagination import paginate 18 19 User = get_user_model() 20 21 # Typically we would check the permission 'auth.change_user' (and 'auth.add_user' / 22 # 'auth.delete_user') for user management actions, but this may vary according to 23 # the AUTH_USER_MODEL setting 24 add_user_perm = "{0}.add_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower()) 25 change_user_perm = "{0}.change_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower()) 26 delete_user_perm = "{0}.delete_{1}".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower()) 27 28 29 def get_user_creation_form(): 30 form_setting = 'WAGTAIL_USER_CREATION_FORM' 31 if hasattr(settings, form_setting): 32 return get_custom_form(form_setting) 33 else: 34 return UserCreationForm 35 36 37 def get_user_edit_form(): 38 form_setting = 'WAGTAIL_USER_EDIT_FORM' 39 if hasattr(settings, form_setting): 40 return get_custom_form(form_setting) 41 else: 42 return UserEditForm 43 44 45 @any_permission_required(add_user_perm, change_user_perm, delete_user_perm) 46 @vary_on_headers('X-Requested-With') 47 def index(request): 48 q = None 49 is_searching = False 50 51 model_fields = [f.name for f in User._meta.get_fields()] 52 53 if 'q' in request.GET: 54 form = SearchForm(request.GET, placeholder=_("Search users")) 55 if form.is_valid(): 56 q = form.cleaned_data['q'] 57 is_searching = True 58 
conditions = Q() 59 60 for term in q.split(): 61 if 'username' in model_fields: 62 conditions |= Q(username__icontains=term) 63 64 if 'first_name' in model_fields: 65 conditions |= Q(first_name__icontains=term) 66 67 if 'last_name' in model_fields: 68 conditions |= Q(last_name__icontains=term) 69 70 if 'email' in model_fields: 71 conditions |= Q(email__icontains=term) 72 73 users = User.objects.filter(conditions) 74 else: 75 form = SearchForm(placeholder=_("Search users")) 76 77 if not is_searching: 78 users = User.objects.all() 79 80 if 'last_name' in model_fields and 'first_name' in model_fields: 81 users = users.order_by('last_name', 'first_name') 82 83 if 'ordering' in request.GET: 84 ordering = request.GET['ordering'] 85 86 if ordering == 'username': 87 users = users.order_by(User.USERNAME_FIELD) 88 else: 89 ordering = 'name' 90 91 paginator, users = paginate(request, users) 92 93 if request.is_ajax(): 94 return render(request, "wagtailusers/users/results.html", { 95 'users': users, 96 'is_searching': is_searching, 97 'query_string': q, 98 'ordering': ordering, 99 }) 100 else: 101 return render(request, "wagtailusers/users/index.html", { 102 'search_form': form, 103 'users': users, 104 'is_searching': is_searching, 105 'ordering': ordering, 106 'query_string': q, 107 }) 108 109 110 @permission_required(add_user_perm) 111 def create(request): 112 for fn in hooks.get_hooks('before_create_user'): 113 result = fn(request) 114 if hasattr(result, 'status_code'): 115 return result 116 if request.method == 'POST': 117 form = get_user_creation_form()(request.POST, request.FILES) 118 if form.is_valid(): 119 user = form.save() 120 messages.success(request, _("User '{0}' created.").format(user), buttons=[ 121 messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit')) 122 ]) 123 for fn in hooks.get_hooks('after_create_user'): 124 result = fn(request, user) 125 if hasattr(result, 'status_code'): 126 return result 127 return redirect('wagtailusers_users:index') 128 else: 129 messages.error(request, _("The user could not be created due to errors.")) 130 else: 131 form = get_user_creation_form()() 132 133 return render(request, 'wagtailusers/users/create.html', { 134 'form': form, 135 }) 136 137 138 @permission_required(change_user_perm) 139 def edit(request, user_id): 140 user = get_object_or_404(User, pk=user_id) 141 can_delete = user_can_delete_user(request.user, user) 142 editing_self = request.user == user 143 144 for fn in hooks.get_hooks('before_edit_user'): 145 result = fn(request, user) 146 if hasattr(result, 'status_code'): 147 return result 148 if request.method == 'POST': 149 form = get_user_edit_form()(request.POST, request.FILES, instance=user, editing_self=editing_self) 150 if form.is_valid(): 151 user = form.save() 152 messages.success(request, _("Your details have been updated. 
You've been logged out for security reasons, " 153 "please login to continue.")) 154 for fn in hooks.get_hooks('after_edit_user'): 155 result = fn(request, user) 156 if hasattr(result, 'status_code'): 157 return result 158 return redirect('wagtailusers_users:index') 159 else: 160 messages.error(request, _("The user could not be saved due to errors.")) 161 else: 162 form = get_user_edit_form()(instance=user, editing_self=editing_self) 163 164 return render(request, 'wagtailusers/users/edit.html', { 165 'user': user, 166 'form': form, 167 'can_delete': can_delete, 168 }) 169 170 171 @permission_required(delete_user_perm) 172 def delete(request, user_id): 173 user = get_object_or_404(User, pk=user_id) 174 175 if not user_can_delete_user(request.user, user): 176 return permission_denied(request) 177 178 for fn in hooks.get_hooks('before_delete_user'): 179 result = fn(request, user) 180 if hasattr(result, 'status_code'): 181 return result 182 if request.method == 'POST': 183 user.delete() 184 messages.success(request, _("User '{0}' deleted.").format(user)) 185 for fn in hooks.get_hooks('after_delete_user'): 186 result = fn(request, user) 187 if hasattr(result, 'status_code'): 188 return result 189 return redirect('wagtailusers_users:index') 190 191 return render(request, "wagtailusers/users/confirm_delete.html", { 192 'user': user, 193 }) 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/users/views/users.py b/wagtail/users/views/users.py --- a/wagtail/users/views/users.py +++ b/wagtail/users/views/users.py @@ -1,5 +1,5 @@ from django.conf import settings -from django.contrib.auth import get_user_model +from django.contrib.auth import get_user_model, update_session_auth_hash from django.db.models import Q from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse @@ -149,8 +149,14 @@ form = get_user_edit_form()(request.POST, request.FILES, instance=user, editing_self=editing_self) if form.is_valid(): user = form.save() - messages.success(request, _("Your details have been updated. You've been logged out for security reasons, " - "please login to continue.")) + + if user == request.user and 'password1' in form.changed_data: + # User is changing their own password; need to update their session hash + update_session_auth_hash(request, user) + + messages.success(request, _("User '{0}' updated.").format(user), buttons=[ + messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit')) + ]) for fn in hooks.get_hooks('after_edit_user'): result = fn(request, user) if hasattr(result, 'status_code'):
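Two things happen in this diff: the misleading "logged out" flash message is replaced with the standard "User '{0}' updated." message, and `django.contrib.auth.update_session_auth_hash` is called when users change their own password, so Django's session-hash rotation no longer signs them out. The sketch below distills that branch in isolation; `save_user_form` is an illustrative helper name, not Wagtail's actual view.

```python
from django.contrib.auth import update_session_auth_hash


def save_user_form(request, form):
    # Hypothetical helper distilling the patched branch of the edit view.
    user = form.save()
    if user == request.user and "password1" in form.changed_data:
        # Changing a password rotates Django's session auth hash, which
        # would sign the current session out; refreshing the hash keeps
        # this session valid while other sessions are still invalidated.
        update_session_auth_hash(request, user)
    return user
```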
{"golden_diff": "diff --git a/wagtail/users/views/users.py b/wagtail/users/views/users.py\n--- a/wagtail/users/views/users.py\n+++ b/wagtail/users/views/users.py\n@@ -1,5 +1,5 @@\n from django.conf import settings\n-from django.contrib.auth import get_user_model\n+from django.contrib.auth import get_user_model, update_session_auth_hash\n from django.db.models import Q\n from django.shortcuts import get_object_or_404, redirect, render\n from django.urls import reverse\n@@ -149,8 +149,14 @@\n form = get_user_edit_form()(request.POST, request.FILES, instance=user, editing_self=editing_self)\n if form.is_valid():\n user = form.save()\n- messages.success(request, _(\"Your details have been updated. You've been logged out for security reasons, \"\n- \"please login to continue.\"))\n+\n+ if user == request.user and 'password1' in form.changed_data:\n+ # User is changing their own password; need to update their session hash\n+ update_session_auth_hash(request, user)\n+\n+ messages.success(request, _(\"User '{0}' updated.\").format(user), buttons=[\n+ messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit'))\n+ ])\n for fn in hooks.get_hooks('after_edit_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n", "issue": "When changing your password from with admin a user is presented with an edit button. \nI've just reset my password from within admin and I get this horrible large edit button. \r\n<img width=\"1267\" alt=\"screen shot 2018-01-16 at 12 33 46\" src=\"https://user-images.githubusercontent.com/7290588/34989276-e10b3338-fab9-11e7-9e69-05d1c7846af4.png\">\r\n\r\nCan we remove the edit button and amend the message to say 'Your details have been updated. You've been logged out for security reasons, please login to continue.' or something like this. \nWhen changing your password from with admin a user is presented with an edit button. \nI've just reset my password from within admin and I get this horrible large edit button. \r\n<img width=\"1267\" alt=\"screen shot 2018-01-16 at 12 33 46\" src=\"https://user-images.githubusercontent.com/7290588/34989276-e10b3338-fab9-11e7-9e69-05d1c7846af4.png\">\r\n\r\nCan we remove the edit button and amend the message to say 'Your details have been updated. You've been logged out for security reasons, please login to continue.' or something like this. 
\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\n\nfrom wagtail.admin import messages\nfrom wagtail.admin.forms.search import SearchForm\nfrom wagtail.admin.utils import any_permission_required, permission_denied, permission_required\nfrom wagtail.core import hooks\nfrom wagtail.core.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME\nfrom wagtail.users.forms import UserCreationForm, UserEditForm\nfrom wagtail.users.utils import user_can_delete_user\nfrom wagtail.utils.loading import get_custom_form\nfrom wagtail.utils.pagination import paginate\n\nUser = get_user_model()\n\n# Typically we would check the permission 'auth.change_user' (and 'auth.add_user' /\n# 'auth.delete_user') for user management actions, but this may vary according to\n# the AUTH_USER_MODEL setting\nadd_user_perm = \"{0}.add_{1}\".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())\nchange_user_perm = \"{0}.change_{1}\".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())\ndelete_user_perm = \"{0}.delete_{1}\".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())\n\n\ndef get_user_creation_form():\n form_setting = 'WAGTAIL_USER_CREATION_FORM'\n if hasattr(settings, form_setting):\n return get_custom_form(form_setting)\n else:\n return UserCreationForm\n\n\ndef get_user_edit_form():\n form_setting = 'WAGTAIL_USER_EDIT_FORM'\n if hasattr(settings, form_setting):\n return get_custom_form(form_setting)\n else:\n return UserEditForm\n\n\n@any_permission_required(add_user_perm, change_user_perm, delete_user_perm)\n@vary_on_headers('X-Requested-With')\ndef index(request):\n q = None\n is_searching = False\n\n model_fields = [f.name for f in User._meta.get_fields()]\n\n if 'q' in request.GET:\n form = SearchForm(request.GET, placeholder=_(\"Search users\"))\n if form.is_valid():\n q = form.cleaned_data['q']\n is_searching = True\n conditions = Q()\n\n for term in q.split():\n if 'username' in model_fields:\n conditions |= Q(username__icontains=term)\n\n if 'first_name' in model_fields:\n conditions |= Q(first_name__icontains=term)\n\n if 'last_name' in model_fields:\n conditions |= Q(last_name__icontains=term)\n\n if 'email' in model_fields:\n conditions |= Q(email__icontains=term)\n\n users = User.objects.filter(conditions)\n else:\n form = SearchForm(placeholder=_(\"Search users\"))\n\n if not is_searching:\n users = User.objects.all()\n\n if 'last_name' in model_fields and 'first_name' in model_fields:\n users = users.order_by('last_name', 'first_name')\n\n if 'ordering' in request.GET:\n ordering = request.GET['ordering']\n\n if ordering == 'username':\n users = users.order_by(User.USERNAME_FIELD)\n else:\n ordering = 'name'\n\n paginator, users = paginate(request, users)\n\n if request.is_ajax():\n return render(request, \"wagtailusers/users/results.html\", {\n 'users': users,\n 'is_searching': is_searching,\n 'query_string': q,\n 'ordering': ordering,\n })\n else:\n return render(request, \"wagtailusers/users/index.html\", {\n 'search_form': form,\n 'users': users,\n 'is_searching': is_searching,\n 'ordering': ordering,\n 'query_string': q,\n })\n\n\n@permission_required(add_user_perm)\ndef create(request):\n for fn in hooks.get_hooks('before_create_user'):\n result = fn(request)\n if 
hasattr(result, 'status_code'):\n return result\n if request.method == 'POST':\n form = get_user_creation_form()(request.POST, request.FILES)\n if form.is_valid():\n user = form.save()\n messages.success(request, _(\"User '{0}' created.\").format(user), buttons=[\n messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit'))\n ])\n for fn in hooks.get_hooks('after_create_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n return redirect('wagtailusers_users:index')\n else:\n messages.error(request, _(\"The user could not be created due to errors.\"))\n else:\n form = get_user_creation_form()()\n\n return render(request, 'wagtailusers/users/create.html', {\n 'form': form,\n })\n\n\n@permission_required(change_user_perm)\ndef edit(request, user_id):\n user = get_object_or_404(User, pk=user_id)\n can_delete = user_can_delete_user(request.user, user)\n editing_self = request.user == user\n\n for fn in hooks.get_hooks('before_edit_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n if request.method == 'POST':\n form = get_user_edit_form()(request.POST, request.FILES, instance=user, editing_self=editing_self)\n if form.is_valid():\n user = form.save()\n messages.success(request, _(\"Your details have been updated. You've been logged out for security reasons, \"\n \"please login to continue.\"))\n for fn in hooks.get_hooks('after_edit_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n return redirect('wagtailusers_users:index')\n else:\n messages.error(request, _(\"The user could not be saved due to errors.\"))\n else:\n form = get_user_edit_form()(instance=user, editing_self=editing_self)\n\n return render(request, 'wagtailusers/users/edit.html', {\n 'user': user,\n 'form': form,\n 'can_delete': can_delete,\n })\n\n\n@permission_required(delete_user_perm)\ndef delete(request, user_id):\n user = get_object_or_404(User, pk=user_id)\n\n if not user_can_delete_user(request.user, user):\n return permission_denied(request)\n\n for fn in hooks.get_hooks('before_delete_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n if request.method == 'POST':\n user.delete()\n messages.success(request, _(\"User '{0}' deleted.\").format(user))\n for fn in hooks.get_hooks('after_delete_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n return redirect('wagtailusers_users:index')\n\n return render(request, \"wagtailusers/users/confirm_delete.html\", {\n 'user': user,\n })\n", "path": "wagtail/users/views/users.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model, update_session_auth_hash\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\n\nfrom wagtail.admin import messages\nfrom wagtail.admin.forms.search import SearchForm\nfrom wagtail.admin.utils import any_permission_required, permission_denied, permission_required\nfrom wagtail.core import hooks\nfrom wagtail.core.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME\nfrom wagtail.users.forms import UserCreationForm, UserEditForm\nfrom wagtail.users.utils import user_can_delete_user\nfrom wagtail.utils.loading import get_custom_form\nfrom wagtail.utils.pagination import paginate\n\nUser = 
get_user_model()\n\n# Typically we would check the permission 'auth.change_user' (and 'auth.add_user' /\n# 'auth.delete_user') for user management actions, but this may vary according to\n# the AUTH_USER_MODEL setting\nadd_user_perm = \"{0}.add_{1}\".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())\nchange_user_perm = \"{0}.change_{1}\".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())\ndelete_user_perm = \"{0}.delete_{1}\".format(AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME.lower())\n\n\ndef get_user_creation_form():\n form_setting = 'WAGTAIL_USER_CREATION_FORM'\n if hasattr(settings, form_setting):\n return get_custom_form(form_setting)\n else:\n return UserCreationForm\n\n\ndef get_user_edit_form():\n form_setting = 'WAGTAIL_USER_EDIT_FORM'\n if hasattr(settings, form_setting):\n return get_custom_form(form_setting)\n else:\n return UserEditForm\n\n\n@any_permission_required(add_user_perm, change_user_perm, delete_user_perm)\n@vary_on_headers('X-Requested-With')\ndef index(request):\n q = None\n is_searching = False\n\n model_fields = [f.name for f in User._meta.get_fields()]\n\n if 'q' in request.GET:\n form = SearchForm(request.GET, placeholder=_(\"Search users\"))\n if form.is_valid():\n q = form.cleaned_data['q']\n is_searching = True\n conditions = Q()\n\n for term in q.split():\n if 'username' in model_fields:\n conditions |= Q(username__icontains=term)\n\n if 'first_name' in model_fields:\n conditions |= Q(first_name__icontains=term)\n\n if 'last_name' in model_fields:\n conditions |= Q(last_name__icontains=term)\n\n if 'email' in model_fields:\n conditions |= Q(email__icontains=term)\n\n users = User.objects.filter(conditions)\n else:\n form = SearchForm(placeholder=_(\"Search users\"))\n\n if not is_searching:\n users = User.objects.all()\n\n if 'last_name' in model_fields and 'first_name' in model_fields:\n users = users.order_by('last_name', 'first_name')\n\n if 'ordering' in request.GET:\n ordering = request.GET['ordering']\n\n if ordering == 'username':\n users = users.order_by(User.USERNAME_FIELD)\n else:\n ordering = 'name'\n\n paginator, users = paginate(request, users)\n\n if request.is_ajax():\n return render(request, \"wagtailusers/users/results.html\", {\n 'users': users,\n 'is_searching': is_searching,\n 'query_string': q,\n 'ordering': ordering,\n })\n else:\n return render(request, \"wagtailusers/users/index.html\", {\n 'search_form': form,\n 'users': users,\n 'is_searching': is_searching,\n 'ordering': ordering,\n 'query_string': q,\n })\n\n\n@permission_required(add_user_perm)\ndef create(request):\n for fn in hooks.get_hooks('before_create_user'):\n result = fn(request)\n if hasattr(result, 'status_code'):\n return result\n if request.method == 'POST':\n form = get_user_creation_form()(request.POST, request.FILES)\n if form.is_valid():\n user = form.save()\n messages.success(request, _(\"User '{0}' created.\").format(user), buttons=[\n messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit'))\n ])\n for fn in hooks.get_hooks('after_create_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n return redirect('wagtailusers_users:index')\n else:\n messages.error(request, _(\"The user could not be created due to errors.\"))\n else:\n form = get_user_creation_form()()\n\n return render(request, 'wagtailusers/users/create.html', {\n 'form': form,\n })\n\n\n@permission_required(change_user_perm)\ndef edit(request, user_id):\n user = get_object_or_404(User, pk=user_id)\n can_delete = 
user_can_delete_user(request.user, user)\n editing_self = request.user == user\n\n for fn in hooks.get_hooks('before_edit_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n if request.method == 'POST':\n form = get_user_edit_form()(request.POST, request.FILES, instance=user, editing_self=editing_self)\n if form.is_valid():\n user = form.save()\n\n if user == request.user and 'password1' in form.changed_data:\n # User is changing their own password; need to update their session hash\n update_session_auth_hash(request, user)\n\n messages.success(request, _(\"User '{0}' updated.\").format(user), buttons=[\n messages.button(reverse('wagtailusers_users:edit', args=(user.pk,)), _('Edit'))\n ])\n for fn in hooks.get_hooks('after_edit_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n return redirect('wagtailusers_users:index')\n else:\n messages.error(request, _(\"The user could not be saved due to errors.\"))\n else:\n form = get_user_edit_form()(instance=user, editing_self=editing_self)\n\n return render(request, 'wagtailusers/users/edit.html', {\n 'user': user,\n 'form': form,\n 'can_delete': can_delete,\n })\n\n\n@permission_required(delete_user_perm)\ndef delete(request, user_id):\n user = get_object_or_404(User, pk=user_id)\n\n if not user_can_delete_user(request.user, user):\n return permission_denied(request)\n\n for fn in hooks.get_hooks('before_delete_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n if request.method == 'POST':\n user.delete()\n messages.success(request, _(\"User '{0}' deleted.\").format(user))\n for fn in hooks.get_hooks('after_delete_user'):\n result = fn(request, user)\n if hasattr(result, 'status_code'):\n return result\n return redirect('wagtailusers_users:index')\n\n return render(request, \"wagtailusers/users/confirm_delete.html\", {\n 'user': user,\n })\n", "path": "wagtail/users/views/users.py"}]}
num_tokens: 2,578
num_tokens_diff: 310
problem_id: gh_patches_debug_9098
source: rasdani/github-patches
task_type: git_diff
in_source_id: mindsdb__mindsdb-1590
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add option to list tables in PostgreSQL integration :bookmark_tabs: When users create a connection to the database it will be useful to show them tips with a list of tables. To be able to do this we need a new method `get_tables_list` implemented in the PostgreSQL integration class. ## Steps :male_detective: :female_detective: - Frok MindsDB repo - Add new implementation in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/postgres/postgres.py#L37 - Make a PR to staging branch ## Additional rewards :1st_place_medal: Each code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mindsdb/integrations/postgres/postgres.py` Content: ``` 1 from contextlib import closing 2 import pg8000 3 4 from lightwood.api import dtype 5 from mindsdb.integrations.base import Integration 6 from mindsdb.utilities.log import log 7 8 9 class PostgreSQLConnectionChecker: 10 def __init__(self, **kwargs): 11 self.host = kwargs.get('host') 12 self.port = kwargs.get('port') 13 self.user = kwargs.get('user') 14 self.password = kwargs.get('password') 15 self.database = kwargs.get('database', 'postgres') 16 17 def _get_connection(self): 18 return pg8000.connect( 19 database=self.database, 20 user=self.user, 21 password=self.password, 22 host=self.host, 23 port=self.port 24 ) 25 26 def check_connection(self): 27 try: 28 con = self._get_connection() 29 with closing(con) as con: 30 con.run('select 1;') 31 connected = True 32 except Exception: 33 connected = False 34 return connected 35 36 37 class PostgreSQL(Integration, PostgreSQLConnectionChecker): 38 def __init__(self, config, name, db_info): 39 super().__init__(config, name) 40 self.user = db_info.get('user') 41 self.password = db_info.get('password') 42 self.host = db_info.get('host') 43 self.port = db_info.get('port') 44 self.database = db_info.get('database', 'postgres') 45 46 def _to_postgres_table(self, dtype_dict, predicted_cols, columns): 47 subtype_map = { 48 dtype.integer: ' int8', 49 dtype.float: 'float8', 50 dtype.binary: 'bool', 51 dtype.date: 'date', 52 dtype.datetime: 'timestamp', 53 dtype.binary: 'text', 54 dtype.categorical: 'text', 55 dtype.tags: 'text', 56 dtype.image: 'text', 57 dtype.video: 'text', 58 dtype.audio: 'text', 59 dtype.short_text: 'text', 60 dtype.rich_text: 'text', 61 dtype.array: 'text' 62 } 63 64 column_declaration = [] 65 for name in columns: 66 try: 67 col_subtype = dtype_dict[name] 68 new_type = subtype_map[col_subtype] 69 column_declaration.append(f' "{name}" {new_type} ') 70 if name in predicted_cols: 71 column_declaration.append(f' "{name}_original" {new_type} ') 72 except Exception as e: 73 log.error(f'Error: can not determine postgres data type for column {name}: {e}') 74 75 return column_declaration 76 77 def _escape_table_name(self, name): 78 return '"' + name.replace('"', '""') + '"' 79 80 def _query(self, query): 81 con = self._get_connection() 82 with closing(con) as con: 83 84 cur = con.cursor() 85 res = True 86 cur.execute(query) 87 88 try: 89 rows = cur.fetchall() 90 keys = [k[0] if isinstance(k[0], str) else k[0].decode('ascii') for k in cur.description] 91 res = [dict(zip(keys, row)) 
for row in rows] 92 except Exception: 93 pass 94 95 con.commit() 96 97 return res 98 99 def setup(self): 100 user = f"{self.config['api']['mysql']['user']}_{self.name}" 101 password = self.config['api']['mysql']['password'] 102 host = self.config['api']['mysql']['host'] 103 port = self.config['api']['mysql']['port'] 104 105 try: 106 self._query(''' 107 DO $$ 108 begin 109 if not exists (SELECT 1 FROM pg_extension where extname = 'mysql_fdw') then 110 CREATE EXTENSION mysql_fdw; 111 end if; 112 END 113 $$; 114 ''') 115 except Exception: 116 print('Error: cant find or activate mysql_fdw extension for PostgreSQL.') 117 118 self._query(f'DROP SCHEMA IF EXISTS {self.mindsdb_database} CASCADE') 119 120 self._query(f"DROP USER MAPPING IF EXISTS FOR {self.user} SERVER server_{self.mindsdb_database}") 121 122 self._query(f'DROP SERVER IF EXISTS server_{self.mindsdb_database} CASCADE') 123 124 self._query(f''' 125 CREATE SERVER server_{self.mindsdb_database} 126 FOREIGN DATA WRAPPER mysql_fdw 127 OPTIONS (host '{host}', port '{port}'); 128 ''') 129 130 self._query(f''' 131 CREATE USER MAPPING FOR {self.user} 132 SERVER server_{self.mindsdb_database} 133 OPTIONS (username '{user}', password '{password}'); 134 ''') 135 136 self._query(f'CREATE SCHEMA {self.mindsdb_database}') 137 138 q = f""" 139 CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.predictors ( 140 name text, 141 status text, 142 accuracy text, 143 predict text, 144 select_data_query text, 145 external_datasource text, 146 training_options text 147 ) 148 SERVER server_{self.mindsdb_database} 149 OPTIONS (dbname 'mindsdb', table_name 'predictors'); 150 """ 151 self._query(q) 152 153 q = f""" 154 CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.commands ( 155 command text 156 ) SERVER server_{self.mindsdb_database} 157 OPTIONS (dbname 'mindsdb', table_name 'commands'); 158 """ 159 self._query(q) 160 161 def register_predictors(self, model_data_arr): 162 for model_meta in model_data_arr: 163 name = model_meta['name'] 164 predict = model_meta['predict'] 165 if not isinstance(predict, list): 166 predict = [predict] 167 columns_sql = ','.join(self._to_postgres_table( 168 model_meta['dtype_dict'], 169 predict, 170 list(model_meta['dtype_dict'].keys()) 171 )) 172 columns_sql += ',"select_data_query" text' 173 columns_sql += ',"external_datasource" text' 174 for col in predict: 175 columns_sql += f',"{col}_confidence" float8' 176 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float): 177 columns_sql += f',"{col}_min" float8' 178 columns_sql += f',"{col}_max" float8' 179 columns_sql += f',"{col}_explain" text' 180 181 self.unregister_predictor(name) 182 q = f""" 183 CREATE FOREIGN TABLE {self.mindsdb_database}.{self._escape_table_name(name)} ( 184 {columns_sql} 185 ) SERVER server_{self.mindsdb_database} 186 OPTIONS (dbname 'mindsdb', table_name '{name}'); 187 """ 188 self._query(q) 189 190 def unregister_predictor(self, name): 191 q = f""" 192 DROP FOREIGN TABLE IF EXISTS {self.mindsdb_database}.{self._escape_table_name(name)}; 193 """ 194 self._query(q) 195 196 def get_row_count(self, query): 197 q = f""" 198 SELECT COUNT(*) as count 199 FROM ({query}) as query;""" 200 result = self._query(q) 201 return result[0]['count'] 202 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mindsdb/integrations/postgres/postgres.py b/mindsdb/integrations/postgres/postgres.py --- a/mindsdb/integrations/postgres/postgres.py +++ b/mindsdb/integrations/postgres/postgres.py @@ -199,3 +199,13 @@ FROM ({query}) as query;""" result = self._query(q) return result[0]['count'] + + def get_tables_list(self): + q = f""" SELECT table_schema, table_name + FROM information_schema.tables + WHERE table_schema != 'pg_catalog' + AND table_schema != 'information_schema' + ORDER BY table_schema, table_name""" + tables_list = self._query(q) + tables= [f"{table['table_schema']}.{table['table_name']}" for table in tables_list] + return tables \ No newline at end of file
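The added method is a plain `information_schema` query restricted to user schemas. As a rough stand-alone equivalent, the sketch below runs the same query directly over `pg8000`, the driver the integration already uses; the connection parameters are placeholders, and the patch's two `!=` filters are folded into a single `NOT IN` for brevity.

```python
import pg8000

# Placeholder connection parameters; substitute real credentials.
con = pg8000.connect(database="postgres", user="user",
                     password="secret", host="127.0.0.1", port=5432)
cur = con.cursor()
cur.execute(
    """SELECT table_schema, table_name
       FROM information_schema.tables
       WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
       ORDER BY table_schema, table_name"""
)
# Rows come back as (schema, name) pairs; format them the way the patch does.
tables = [f"{schema}.{name}" for schema, name in cur.fetchall()]
con.close()
print(tables)  # e.g. ['public.house_sales', 'public.predictors']
```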
{"golden_diff": "diff --git a/mindsdb/integrations/postgres/postgres.py b/mindsdb/integrations/postgres/postgres.py\n--- a/mindsdb/integrations/postgres/postgres.py\n+++ b/mindsdb/integrations/postgres/postgres.py\n@@ -199,3 +199,13 @@\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n+ \n+ def get_tables_list(self):\n+ q = f\"\"\" SELECT table_schema, table_name\n+ FROM information_schema.tables\n+ WHERE table_schema != 'pg_catalog'\n+ AND table_schema != 'information_schema'\n+ ORDER BY table_schema, table_name\"\"\"\n+ tables_list = self._query(q)\n+ tables= [f\"{table['table_schema']}.{table['table_name']}\" for table in tables_list]\n+ return tables\n\\ No newline at end of file\n", "issue": "Add option to list tables in PostgreSQL integration :bookmark_tabs: \nWhen users create a connection to the database it will be useful to show them tips with a list of tables. To be able to do this we need a new method `get_tables_list` implemented in the PostgreSQL integration class.\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Frok MindsDB repo\r\n- Add new implementation in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/postgres/postgres.py#L37\r\n- Make a PR to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\n", "before_files": [{"content": "from contextlib import closing\nimport pg8000\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass PostgreSQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.database = kwargs.get('database', 'postgres')\n\n def _get_connection(self):\n return pg8000.connect(\n database=self.database,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port\n )\n\n def check_connection(self):\n try:\n con = self._get_connection()\n with closing(con) as con:\n con.run('select 1;')\n connected = True\n except Exception:\n connected = False\n return connected\n\n\nclass PostgreSQL(Integration, PostgreSQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.database = db_info.get('database', 'postgres')\n\n def _to_postgres_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: ' int8',\n dtype.float: 'float8',\n dtype.binary: 'bool',\n dtype.date: 'date',\n dtype.datetime: 'timestamp',\n dtype.binary: 'text',\n dtype.categorical: 'text',\n dtype.tags: 'text',\n dtype.image: 'text',\n dtype.video: 'text',\n dtype.audio: 'text',\n dtype.short_text: 'text',\n dtype.rich_text: 'text',\n dtype.array: 'text'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' \"{name}\" {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' \"{name}_original\" {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine postgres data type for column {name}: 
{e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '\"' + name.replace('\"', '\"\"') + '\"'\n\n def _query(self, query):\n con = self._get_connection()\n with closing(con) as con:\n\n cur = con.cursor()\n res = True\n cur.execute(query)\n\n try:\n rows = cur.fetchall()\n keys = [k[0] if isinstance(k[0], str) else k[0].decode('ascii') for k in cur.description]\n res = [dict(zip(keys, row)) for row in rows]\n except Exception:\n pass\n\n con.commit()\n\n return res\n\n def setup(self):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n try:\n self._query('''\n DO $$\n begin\n if not exists (SELECT 1 FROM pg_extension where extname = 'mysql_fdw') then\n CREATE EXTENSION mysql_fdw;\n end if;\n END\n $$;\n ''')\n except Exception:\n print('Error: cant find or activate mysql_fdw extension for PostgreSQL.')\n\n self._query(f'DROP SCHEMA IF EXISTS {self.mindsdb_database} CASCADE')\n\n self._query(f\"DROP USER MAPPING IF EXISTS FOR {self.user} SERVER server_{self.mindsdb_database}\")\n\n self._query(f'DROP SERVER IF EXISTS server_{self.mindsdb_database} CASCADE')\n\n self._query(f'''\n CREATE SERVER server_{self.mindsdb_database}\n FOREIGN DATA WRAPPER mysql_fdw\n OPTIONS (host '{host}', port '{port}');\n ''')\n\n self._query(f'''\n CREATE USER MAPPING FOR {self.user}\n SERVER server_{self.mindsdb_database}\n OPTIONS (username '{user}', password '{password}');\n ''')\n\n self._query(f'CREATE SCHEMA {self.mindsdb_database}')\n\n q = f\"\"\"\n CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name text,\n status text,\n accuracy text,\n predict text,\n select_data_query text,\n external_datasource text,\n training_options text\n )\n SERVER server_{self.mindsdb_database}\n OPTIONS (dbname 'mindsdb', table_name 'predictors');\n \"\"\"\n self._query(q)\n\n q = f\"\"\"\n CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command text\n ) SERVER server_{self.mindsdb_database}\n OPTIONS (dbname 'mindsdb', table_name 'commands');\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_postgres_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',\"select_data_query\" text'\n columns_sql += ',\"external_datasource\" text'\n for col in predict:\n columns_sql += f',\"{col}_confidence\" float8'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',\"{col}_min\" float8'\n columns_sql += f',\"{col}_max\" float8'\n columns_sql += f',\"{col}_explain\" text'\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE FOREIGN TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql}\n ) SERVER server_{self.mindsdb_database}\n OPTIONS (dbname 'mindsdb', table_name '{name}');\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n DROP FOREIGN TABLE IF EXISTS {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n", "path": 
"mindsdb/integrations/postgres/postgres.py"}], "after_files": [{"content": "from contextlib import closing\nimport pg8000\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass PostgreSQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.database = kwargs.get('database', 'postgres')\n\n def _get_connection(self):\n return pg8000.connect(\n database=self.database,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port\n )\n\n def check_connection(self):\n try:\n con = self._get_connection()\n with closing(con) as con:\n con.run('select 1;')\n connected = True\n except Exception:\n connected = False\n return connected\n\n\nclass PostgreSQL(Integration, PostgreSQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.database = db_info.get('database', 'postgres')\n\n def _to_postgres_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: ' int8',\n dtype.float: 'float8',\n dtype.binary: 'bool',\n dtype.date: 'date',\n dtype.datetime: 'timestamp',\n dtype.binary: 'text',\n dtype.categorical: 'text',\n dtype.tags: 'text',\n dtype.image: 'text',\n dtype.video: 'text',\n dtype.audio: 'text',\n dtype.short_text: 'text',\n dtype.rich_text: 'text',\n dtype.array: 'text'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' \"{name}\" {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' \"{name}_original\" {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine postgres data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '\"' + name.replace('\"', '\"\"') + '\"'\n\n def _query(self, query):\n con = self._get_connection()\n with closing(con) as con:\n\n cur = con.cursor()\n res = True\n cur.execute(query)\n\n try:\n rows = cur.fetchall()\n keys = [k[0] if isinstance(k[0], str) else k[0].decode('ascii') for k in cur.description]\n res = [dict(zip(keys, row)) for row in rows]\n except Exception:\n pass\n\n con.commit()\n\n return res\n\n def setup(self):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n try:\n self._query('''\n DO $$\n begin\n if not exists (SELECT 1 FROM pg_extension where extname = 'mysql_fdw') then\n CREATE EXTENSION mysql_fdw;\n end if;\n END\n $$;\n ''')\n except Exception:\n print('Error: cant find or activate mysql_fdw extension for PostgreSQL.')\n\n self._query(f'DROP SCHEMA IF EXISTS {self.mindsdb_database} CASCADE')\n\n self._query(f\"DROP USER MAPPING IF EXISTS FOR {self.user} SERVER server_{self.mindsdb_database}\")\n\n self._query(f'DROP SERVER IF EXISTS server_{self.mindsdb_database} CASCADE')\n\n self._query(f'''\n CREATE SERVER server_{self.mindsdb_database}\n FOREIGN DATA WRAPPER mysql_fdw\n OPTIONS (host '{host}', port '{port}');\n ''')\n\n self._query(f'''\n CREATE USER MAPPING FOR {self.user}\n SERVER 
server_{self.mindsdb_database}\n OPTIONS (username '{user}', password '{password}');\n ''')\n\n self._query(f'CREATE SCHEMA {self.mindsdb_database}')\n\n q = f\"\"\"\n CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name text,\n status text,\n accuracy text,\n predict text,\n select_data_query text,\n external_datasource text,\n training_options text\n )\n SERVER server_{self.mindsdb_database}\n OPTIONS (dbname 'mindsdb', table_name 'predictors');\n \"\"\"\n self._query(q)\n\n q = f\"\"\"\n CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command text\n ) SERVER server_{self.mindsdb_database}\n OPTIONS (dbname 'mindsdb', table_name 'commands');\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_postgres_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',\"select_data_query\" text'\n columns_sql += ',\"external_datasource\" text'\n for col in predict:\n columns_sql += f',\"{col}_confidence\" float8'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',\"{col}_min\" float8'\n columns_sql += f',\"{col}_max\" float8'\n columns_sql += f',\"{col}_explain\" text'\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE FOREIGN TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql}\n ) SERVER server_{self.mindsdb_database}\n OPTIONS (dbname 'mindsdb', table_name '{name}');\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n DROP FOREIGN TABLE IF EXISTS {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n \n def get_tables_list(self):\n q = f\"\"\" SELECT table_schema, table_name\n FROM information_schema.tables\n WHERE table_schema != 'pg_catalog'\n AND table_schema != 'information_schema'\n ORDER BY table_schema, table_name\"\"\"\n tables_list = self._query(q)\n tables= [f\"{table['table_schema']}.{table['table_name']}\" for table in tables_list]\n return tables", "path": "mindsdb/integrations/postgres/postgres.py"}]}
num_tokens: 2,427
num_tokens_diff: 204
problem_id: gh_patches_debug_27460
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__python-bigquery-442
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Too noise logging about telemetry Hello, In the Apache Airflow project, we use the BigQuery library, but recently we've started to see annoying log message when the library is loaded. It is enough that the library is loaded and there is an message every time. In my opinion, this message should be of a lower level (DEBUG) so that it is not displayed much less often or is displayed only when the client is initialized. ``` import logging logging.basicConfig(level=logging.INFO) from google.cloud import bigquery ``` Output: ``` INFO:google.cloud.bigquery.opentelemetry_tracing:This service is instrumented using OpenTelemetry. OpenTelemetry could not be imported; please add opentelemetry-api and opentelemetry-instrumentation packages in order to get BigQuery Tracing data. ``` Related issue: https://github.com/apache/airflow/issues/13131 CC: @tswast --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/cloud/bigquery/opentelemetry_tracing.py` Content: ``` 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import logging 16 from contextlib import contextmanager 17 from google.api_core.exceptions import GoogleAPICallError 18 19 logger = logging.getLogger(__name__) 20 try: 21 from opentelemetry import trace 22 from opentelemetry.instrumentation.utils import http_status_to_canonical_code 23 from opentelemetry.trace.status import Status 24 25 HAS_OPENTELEMETRY = True 26 27 except ImportError: 28 logger.info( 29 "This service is instrumented using OpenTelemetry. " 30 "OpenTelemetry could not be imported; please " 31 "add opentelemetry-api and opentelemetry-instrumentation " 32 "packages in order to get BigQuery Tracing data." 33 ) 34 35 HAS_OPENTELEMETRY = False 36 37 _default_attributes = { 38 "db.system": "BigQuery" 39 } # static, default values assigned to all spans 40 41 42 @contextmanager 43 def create_span(name, attributes=None, client=None, job_ref=None): 44 """Creates a ContextManager for a Span to be exported to the configured exporter. 45 If no configuration exists yields None. 46 47 Args: 48 name (str): Name that will be set for the span being created 49 attributes (Optional[dict]): 50 Additional attributes that pertain to 51 the specific API call (i.e. not a default attribute) 52 client (Optional[google.cloud.bigquery.client.Client]): 53 Pass in a Client object to extract any attributes that may be 54 relevant to it and add them to the created spans. 55 job_ref (Optional[google.cloud.bigquery.job._AsyncJob]) 56 Pass in a _AsyncJob object to extract any attributes that may be 57 relevant to it and add them to the created spans. 58 59 Yields: 60 opentelemetry.trace.Span: Yields the newly created Span. 
61 62 Raises: 63 google.api_core.exceptions.GoogleAPICallError: 64 Raised if a span could not be yielded or issue with call to 65 OpenTelemetry. 66 """ 67 final_attributes = _get_final_span_attributes(attributes, client, job_ref) 68 if not HAS_OPENTELEMETRY: 69 yield None 70 return 71 tracer = trace.get_tracer(__name__) 72 73 # yield new span value 74 with tracer.start_as_current_span(name=name, attributes=final_attributes) as span: 75 try: 76 yield span 77 except GoogleAPICallError as error: 78 if error.code is not None: 79 span.set_status(Status(http_status_to_canonical_code(error.code))) 80 raise 81 82 83 def _get_final_span_attributes(attributes=None, client=None, job_ref=None): 84 final_attributes = {} 85 final_attributes.update(_default_attributes.copy()) 86 if client: 87 client_attributes = _set_client_attributes(client) 88 final_attributes.update(client_attributes) 89 if job_ref: 90 job_attributes = _set_job_attributes(job_ref) 91 final_attributes.update(job_attributes) 92 if attributes: 93 final_attributes.update(attributes) 94 return final_attributes 95 96 97 def _set_client_attributes(client): 98 return {"db.name": client.project, "location": client.location} 99 100 101 def _set_job_attributes(job_ref): 102 job_attributes = { 103 "db.name": job_ref.project, 104 "location": job_ref.location, 105 "num_child_jobs": job_ref.num_child_jobs, 106 "job_id": job_ref.job_id, 107 "parent_job_id": job_ref.parent_job_id, 108 "state": job_ref.state, 109 } 110 111 job_attributes["hasErrors"] = job_ref.error_result is not None 112 113 if job_ref.created is not None: 114 job_attributes["timeCreated"] = job_ref.created.isoformat() 115 116 if job_ref.started is not None: 117 job_attributes["timeStarted"] = job_ref.started.isoformat() 118 119 if job_ref.ended is not None: 120 job_attributes["timeEnded"] = job_ref.ended.isoformat() 121 122 return job_attributes 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py --- a/google/cloud/bigquery/opentelemetry_tracing.py +++ b/google/cloud/bigquery/opentelemetry_tracing.py @@ -23,16 +23,11 @@ from opentelemetry.trace.status import Status HAS_OPENTELEMETRY = True + _warned_telemetry = True except ImportError: - logger.info( - "This service is instrumented using OpenTelemetry. " - "OpenTelemetry could not be imported; please " - "add opentelemetry-api and opentelemetry-instrumentation " - "packages in order to get BigQuery Tracing data." - ) - HAS_OPENTELEMETRY = False + _warned_telemetry = False _default_attributes = { "db.system": "BigQuery" @@ -64,8 +59,18 @@ Raised if a span could not be yielded or issue with call to OpenTelemetry. """ + global _warned_telemetry final_attributes = _get_final_span_attributes(attributes, client, job_ref) if not HAS_OPENTELEMETRY: + if not _warned_telemetry: + logger.debug( + "This service is instrumented using OpenTelemetry. " + "OpenTelemetry could not be imported; please " + "add opentelemetry-api and opentelemetry-instrumentation " + "packages in order to get BigQuery Tracing data." + ) + _warned_telemetry = True + yield None return tracer = trace.get_tracer(__name__)
{"golden_diff": "diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py\n--- a/google/cloud/bigquery/opentelemetry_tracing.py\n+++ b/google/cloud/bigquery/opentelemetry_tracing.py\n@@ -23,16 +23,11 @@\n from opentelemetry.trace.status import Status\n \n HAS_OPENTELEMETRY = True\n+ _warned_telemetry = True\n \n except ImportError:\n- logger.info(\n- \"This service is instrumented using OpenTelemetry. \"\n- \"OpenTelemetry could not be imported; please \"\n- \"add opentelemetry-api and opentelemetry-instrumentation \"\n- \"packages in order to get BigQuery Tracing data.\"\n- )\n-\n HAS_OPENTELEMETRY = False\n+ _warned_telemetry = False\n \n _default_attributes = {\n \"db.system\": \"BigQuery\"\n@@ -64,8 +59,18 @@\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n+ global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n+ if not _warned_telemetry:\n+ logger.debug(\n+ \"This service is instrumented using OpenTelemetry. \"\n+ \"OpenTelemetry could not be imported; please \"\n+ \"add opentelemetry-api and opentelemetry-instrumentation \"\n+ \"packages in order to get BigQuery Tracing data.\"\n+ )\n+ _warned_telemetry = True\n+\n yield None\n return\n tracer = trace.get_tracer(__name__)\n", "issue": "Too noise logging about telemetry\nHello,\r\n\r\nIn the Apache Airflow project, we use the BigQuery library, but recently we've started to see annoying log message when the library is loaded. It is enough that the library is loaded and there is an message every time. \r\n\r\nIn my opinion, this message should be of a lower level (DEBUG) so that it is not displayed much less often or is displayed only when the client is initialized. \r\n```\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nfrom google.cloud import bigquery\r\n```\r\nOutput: \r\n```\r\nINFO:google.cloud.bigquery.opentelemetry_tracing:This service is instrumented using OpenTelemetry. OpenTelemetry could not be imported; please add opentelemetry-api and opentelemetry-instrumentation packages in order to get BigQuery Tracing data.\r\n```\r\n\r\nRelated issue: https://github.com/apache/airflow/issues/13131\r\n\r\nCC: @tswast \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace\n from opentelemetry.instrumentation.utils import http_status_to_canonical_code\n from opentelemetry.trace.status import Status\n\n HAS_OPENTELEMETRY = True\n\nexcept ImportError:\n logger.info(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry could not be imported; please \"\n \"add opentelemetry-api and opentelemetry-instrumentation \"\n \"packages in order to get BigQuery Tracing data.\"\n )\n\n HAS_OPENTELEMETRY = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_canonical_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"location\": job_ref.location,\n \"num_child_jobs\": job_ref.num_child_jobs,\n \"job_id\": job_ref.job_id,\n \"parent_job_id\": job_ref.parent_job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See 
the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace\n from opentelemetry.instrumentation.utils import http_status_to_canonical_code\n from opentelemetry.trace.status import Status\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry could not be imported; please \"\n \"add opentelemetry-api and opentelemetry-instrumentation \"\n \"packages in order to get BigQuery Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_canonical_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"location\": job_ref.location,\n \"num_child_jobs\": job_ref.num_child_jobs,\n \"job_id\": job_ref.job_id,\n \"parent_job_id\": job_ref.parent_job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}]}
1,662
377
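A minimal standalone sketch of the pattern this record's golden diff applies: the import-time INFO message is demoted to a one-shot DEBUG message emitted lazily on first use, so merely importing the library stays silent. This is an illustration of the technique, not the patch itself; the module layout and the helper name `warn_if_missing_telemetry` are assumptions introduced here.

```python
import logging

logger = logging.getLogger(__name__)

try:
    from opentelemetry import trace  # noqa: F401 -- presence check only
    HAS_OPENTELEMETRY = True
    _warned_telemetry = True   # dependency present: nothing to warn about
except ImportError:
    HAS_OPENTELEMETRY = False
    _warned_telemetry = False  # warn lazily, once, on first actual use


def warn_if_missing_telemetry() -> None:
    """Log the missing-dependency notice at DEBUG level, at most once."""
    global _warned_telemetry
    if not _warned_telemetry:
        logger.debug(
            "OpenTelemetry could not be imported; install opentelemetry-api "
            "and opentelemetry-instrumentation to enable tracing."
        )
        _warned_telemetry = True
```

Callers check `HAS_OPENTELEMETRY` at each traced operation and call the helper only on the `False` branch, so applications that never trace never see the message at all.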
gh_patches_debug_16868
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2544
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement Drop aggregation See [Drop aggregation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#drop-aggregation) > The Drop Aggregation informs the SDK to ignore/drop all Instrument Measurements for this Aggregation. This is a bit of a special case. The implementation should probably avoid creating a `ViewInstrumentMatch` altogether when we encounter this aggregation to save memory. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from abc import ABC, abstractmethod 16 from bisect import bisect_left 17 from dataclasses import replace 18 from logging import getLogger 19 from math import inf 20 from threading import Lock 21 from typing import Generic, List, Optional, Sequence, TypeVar 22 23 from opentelemetry._metrics.instrument import ( 24 Asynchronous, 25 Instrument, 26 Synchronous, 27 _Monotonic, 28 ) 29 from opentelemetry.sdk._metrics.measurement import Measurement 30 from opentelemetry.sdk._metrics.point import ( 31 AggregationTemporality, 32 Gauge, 33 Histogram, 34 PointT, 35 Sum, 36 ) 37 from opentelemetry.util._time import _time_ns 38 39 _PointVarT = TypeVar("_PointVarT", bound=PointT) 40 41 _logger = getLogger(__name__) 42 43 44 class _Aggregation(ABC, Generic[_PointVarT]): 45 def __init__(self): 46 self._lock = Lock() 47 48 @abstractmethod 49 def aggregate(self, measurement: Measurement) -> None: 50 pass 51 52 @abstractmethod 53 def collect(self) -> Optional[_PointVarT]: 54 pass 55 56 57 class _SumAggregation(_Aggregation[Sum]): 58 def __init__( 59 self, 60 instrument_is_monotonic: bool, 61 instrument_temporality: AggregationTemporality, 62 ): 63 super().__init__() 64 65 self._start_time_unix_nano = _time_ns() 66 self._instrument_temporality = instrument_temporality 67 self._instrument_is_monotonic = instrument_is_monotonic 68 69 if self._instrument_temporality is AggregationTemporality.DELTA: 70 self._value = 0 71 else: 72 self._value = None 73 74 def aggregate(self, measurement: Measurement) -> None: 75 with self._lock: 76 if self._value is None: 77 self._value = 0 78 self._value = self._value + measurement.value 79 80 def collect(self) -> Optional[Sum]: 81 """ 82 Atomically return a point for the current value of the metric and 83 reset the aggregation value. 
84 """ 85 now = _time_ns() 86 87 if self._instrument_temporality is AggregationTemporality.DELTA: 88 89 with self._lock: 90 value = self._value 91 start_time_unix_nano = self._start_time_unix_nano 92 93 self._value = 0 94 self._start_time_unix_nano = now + 1 95 96 return Sum( 97 aggregation_temporality=AggregationTemporality.DELTA, 98 is_monotonic=self._instrument_is_monotonic, 99 start_time_unix_nano=start_time_unix_nano, 100 time_unix_nano=now, 101 value=value, 102 ) 103 104 with self._lock: 105 if self._value is None: 106 return None 107 value = self._value 108 self._value = None 109 110 return Sum( 111 aggregation_temporality=AggregationTemporality.CUMULATIVE, 112 is_monotonic=self._instrument_is_monotonic, 113 start_time_unix_nano=self._start_time_unix_nano, 114 time_unix_nano=now, 115 value=value, 116 ) 117 118 119 class _LastValueAggregation(_Aggregation[Gauge]): 120 def __init__(self): 121 super().__init__() 122 self._value = None 123 124 def aggregate(self, measurement: Measurement): 125 with self._lock: 126 self._value = measurement.value 127 128 def collect(self) -> Optional[Gauge]: 129 """ 130 Atomically return a point for the current value of the metric. 131 """ 132 with self._lock: 133 if self._value is None: 134 return None 135 value = self._value 136 self._value = None 137 138 return Gauge( 139 time_unix_nano=_time_ns(), 140 value=value, 141 ) 142 143 144 class _ExplicitBucketHistogramAggregation(_Aggregation[Histogram]): 145 def __init__( 146 self, 147 boundaries: Sequence[float] = ( 148 0.0, 149 5.0, 150 10.0, 151 25.0, 152 50.0, 153 75.0, 154 100.0, 155 250.0, 156 500.0, 157 1000.0, 158 ), 159 record_min_max: bool = True, 160 ): 161 super().__init__() 162 self._boundaries = tuple(boundaries) 163 self._bucket_counts = self._get_empty_bucket_counts() 164 self._min = inf 165 self._max = -inf 166 self._sum = 0 167 self._record_min_max = record_min_max 168 self._start_time_unix_nano = _time_ns() 169 170 def _get_empty_bucket_counts(self) -> List[int]: 171 return [0] * (len(self._boundaries) + 1) 172 173 def aggregate(self, measurement: Measurement) -> None: 174 175 value = measurement.value 176 177 if self._record_min_max: 178 self._min = min(self._min, value) 179 self._max = max(self._max, value) 180 181 self._sum += value 182 183 self._bucket_counts[bisect_left(self._boundaries, value)] += 1 184 185 def collect(self) -> Histogram: 186 """ 187 Atomically return a point for the current value of the metric. 188 """ 189 now = _time_ns() 190 191 with self._lock: 192 value = self._bucket_counts 193 start_time_unix_nano = self._start_time_unix_nano 194 histogram_sum = self._sum 195 196 self._bucket_counts = self._get_empty_bucket_counts() 197 self._start_time_unix_nano = now + 1 198 self._sum = 0 199 200 return Histogram( 201 start_time_unix_nano=start_time_unix_nano, 202 time_unix_nano=now, 203 bucket_counts=tuple(value), 204 explicit_bounds=self._boundaries, 205 aggregation_temporality=AggregationTemporality.DELTA, 206 sum=histogram_sum, 207 ) 208 209 210 # pylint: disable=too-many-return-statements,too-many-branches 211 def _convert_aggregation_temporality( 212 previous_point: Optional[_PointVarT], 213 current_point: _PointVarT, 214 aggregation_temporality: AggregationTemporality, 215 ) -> _PointVarT: 216 """Converts `current_point` to the requested `aggregation_temporality` 217 given the `previous_point`. 218 219 `previous_point` must have `CUMULATIVE` temporality. `current_point` may 220 have `DELTA` or `CUMULATIVE` temporality. 
221 222 The output point will have temporality `aggregation_temporality`. Since 223 `GAUGE` points have no temporality, they are returned unchanged. 224 """ 225 226 current_point_type = type(current_point) 227 228 if current_point_type is Gauge: 229 return current_point 230 231 if previous_point is not None and type(previous_point) is not type( 232 current_point 233 ): 234 _logger.warning( 235 "convert_aggregation_temporality called with mismatched " 236 "point types: %s and %s", 237 type(previous_point), 238 current_point_type, 239 ) 240 241 return current_point 242 243 if current_point_type is Sum: 244 if previous_point is None: 245 # Output CUMULATIVE for a synchronous instrument 246 # There is no previous value, return the delta point as a 247 # cumulative 248 return replace( 249 current_point, aggregation_temporality=aggregation_temporality 250 ) 251 if previous_point.aggregation_temporality is not ( 252 AggregationTemporality.CUMULATIVE 253 ): 254 raise Exception( 255 "previous_point aggregation temporality must be CUMULATIVE" 256 ) 257 258 if current_point.aggregation_temporality is aggregation_temporality: 259 # Output DELTA for a synchronous instrument 260 # Output CUMULATIVE for an asynchronous instrument 261 return current_point 262 263 if aggregation_temporality is AggregationTemporality.DELTA: 264 # Output temporality DELTA for an asynchronous instrument 265 value = current_point.value - previous_point.value 266 output_start_time_unix_nano = previous_point.time_unix_nano 267 268 else: 269 # Output CUMULATIVE for a synchronous instrument 270 value = current_point.value + previous_point.value 271 output_start_time_unix_nano = previous_point.start_time_unix_nano 272 273 is_monotonic = ( 274 previous_point.is_monotonic and current_point.is_monotonic 275 ) 276 277 return Sum( 278 start_time_unix_nano=output_start_time_unix_nano, 279 time_unix_nano=current_point.time_unix_nano, 280 value=value, 281 aggregation_temporality=aggregation_temporality, 282 is_monotonic=is_monotonic, 283 ) 284 285 if current_point_type is Histogram: 286 if previous_point is None: 287 return replace( 288 current_point, aggregation_temporality=aggregation_temporality 289 ) 290 if previous_point.aggregation_temporality is not ( 291 AggregationTemporality.CUMULATIVE 292 ): 293 raise Exception( 294 "previous_point aggregation temporality must be CUMULATIVE" 295 ) 296 297 if current_point.aggregation_temporality is aggregation_temporality: 298 return current_point 299 300 if aggregation_temporality is AggregationTemporality.CUMULATIVE: 301 start_time_unix_nano = previous_point.start_time_unix_nano 302 sum_ = current_point.sum + previous_point.sum 303 bucket_counts = [ 304 curr_count + prev_count 305 for curr_count, prev_count in zip( 306 current_point.bucket_counts, previous_point.bucket_counts 307 ) 308 ] 309 else: 310 start_time_unix_nano = previous_point.time_unix_nano 311 sum_ = current_point.sum - previous_point.sum 312 bucket_counts = [ 313 curr_count - prev_count 314 for curr_count, prev_count in zip( 315 current_point.bucket_counts, previous_point.bucket_counts 316 ) 317 ] 318 319 return Histogram( 320 start_time_unix_nano=start_time_unix_nano, 321 time_unix_nano=current_point.time_unix_nano, 322 bucket_counts=bucket_counts, 323 explicit_bounds=current_point.explicit_bounds, 324 sum=sum_, 325 aggregation_temporality=aggregation_temporality, 326 ) 327 return None 328 329 330 class _AggregationFactory(ABC): 331 @abstractmethod 332 def _create_aggregation(self, instrument: Instrument) -> _Aggregation: 333 
"""Creates an aggregation""" 334 335 336 class ExplicitBucketHistogramAggregation(_AggregationFactory): 337 def __init__( 338 self, 339 boundaries: Sequence[float] = ( 340 0.0, 341 5.0, 342 10.0, 343 25.0, 344 50.0, 345 75.0, 346 100.0, 347 250.0, 348 500.0, 349 1000.0, 350 ), 351 record_min_max: bool = True, 352 ) -> None: 353 self._boundaries = boundaries 354 self._record_min_max = record_min_max 355 356 def _create_aggregation(self, instrument: Instrument) -> _Aggregation: 357 return _ExplicitBucketHistogramAggregation( 358 boundaries=self._boundaries, 359 record_min_max=self._record_min_max, 360 ) 361 362 363 class SumAggregation(_AggregationFactory): 364 def _create_aggregation(self, instrument: Instrument) -> _Aggregation: 365 366 temporality = AggregationTemporality.UNSPECIFIED 367 if isinstance(instrument, Synchronous): 368 temporality = AggregationTemporality.DELTA 369 elif isinstance(instrument, Asynchronous): 370 temporality = AggregationTemporality.CUMULATIVE 371 372 return _SumAggregation( 373 isinstance(instrument, _Monotonic), 374 temporality, 375 ) 376 377 378 class LastValueAggregation(_AggregationFactory): 379 def _create_aggregation(self, instrument: Instrument) -> _Aggregation: 380 return _LastValueAggregation() 381 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py --- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py @@ -54,6 +54,14 @@ pass +class _DropAggregation(_Aggregation): + def aggregate(self, measurement: Measurement) -> None: + pass + + def collect(self) -> Optional[_PointVarT]: + pass + + class _SumAggregation(_Aggregation[Sum]): def __init__( self, @@ -378,3 +386,10 @@ class LastValueAggregation(_AggregationFactory): def _create_aggregation(self, instrument: Instrument) -> _Aggregation: return _LastValueAggregation() + + +class DropAggregation(_AggregationFactory): + """Using this aggregation will make all measurements be ignored.""" + + def _create_aggregation(self, instrument: Instrument) -> _Aggregation: + return _DropAggregation()
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n@@ -54,6 +54,14 @@\n pass\n \n \n+class _DropAggregation(_Aggregation):\n+ def aggregate(self, measurement: Measurement) -> None:\n+ pass\n+\n+ def collect(self) -> Optional[_PointVarT]:\n+ pass\n+\n+\n class _SumAggregation(_Aggregation[Sum]):\n def __init__(\n self,\n@@ -378,3 +386,10 @@\n class LastValueAggregation(_AggregationFactory):\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n return _LastValueAggregation()\n+\n+\n+class DropAggregation(_AggregationFactory):\n+ \"\"\"Using this aggregation will make all measurements be ignored.\"\"\"\n+\n+ def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n+ return _DropAggregation()\n", "issue": "Implement Drop aggregation\nSee [Drop aggregation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#drop-aggregation)\r\n\r\n> The Drop Aggregation informs the SDK to ignore/drop all Instrument Measurements for this Aggregation.\r\n\r\nThis is a bit of a special case. The implementation should probably avoid creating a `ViewInstrumentMatch` altogether when we encounter this aggregation to save memory.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom bisect import bisect_left\nfrom dataclasses import replace\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, List, Optional, Sequence, TypeVar\n\nfrom opentelemetry._metrics.instrument import (\n Asynchronous,\n Instrument,\n Synchronous,\n _Monotonic,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass _Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass _SumAggregation(_Aggregation[Sum]):\n def __init__(\n self,\n instrument_is_monotonic: bool,\n instrument_temporality: AggregationTemporality,\n ):\n super().__init__()\n\n self._start_time_unix_nano = _time_ns()\n self._instrument_temporality = instrument_temporality\n self._instrument_is_monotonic = instrument_is_monotonic\n\n if self._instrument_temporality is AggregationTemporality.DELTA:\n self._value = 0\n else:\n self._value = None\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n if 
self._value is None:\n self._value = 0\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n if self._instrument_temporality is AggregationTemporality.DELTA:\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n with self._lock:\n if self._value is None:\n return None\n value = self._value\n self._value = None\n\n return Sum(\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n\nclass _LastValueAggregation(_Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n with self._lock:\n if self._value is None:\n return None\n value = self._value\n self._value = None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=value,\n )\n\n\nclass _ExplicitBucketHistogramAggregation(_Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[float] = (\n 0.0,\n 5.0,\n 10.0,\n 25.0,\n 50.0,\n 75.0,\n 100.0,\n 250.0,\n 500.0,\n 1000.0,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n self._boundaries = tuple(boundaries)\n self._bucket_counts = self._get_empty_bucket_counts()\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n\n def _get_empty_bucket_counts(self) -> List[int]:\n return [0] * (len(self._boundaries) + 1)\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n self._bucket_counts[bisect_left(self._boundaries, value)] += 1\n\n def collect(self) -> Histogram:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._bucket_counts\n start_time_unix_nano = self._start_time_unix_nano\n histogram_sum = self._sum\n\n self._bucket_counts = self._get_empty_bucket_counts()\n self._start_time_unix_nano = now + 1\n self._sum = 0\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n sum=histogram_sum,\n )\n\n\n# pylint: disable=too-many-return-statements,too-many-branches\ndef _convert_aggregation_temporality(\n previous_point: Optional[_PointVarT],\n current_point: _PointVarT,\n aggregation_temporality: AggregationTemporality,\n) -> _PointVarT:\n \"\"\"Converts `current_point` to the requested `aggregation_temporality`\n given the `previous_point`.\n\n `previous_point` must have `CUMULATIVE` temporality. 
`current_point` may\n have `DELTA` or `CUMULATIVE` temporality.\n\n The output point will have temporality `aggregation_temporality`. Since\n `GAUGE` points have no temporality, they are returned unchanged.\n \"\"\"\n\n current_point_type = type(current_point)\n\n if current_point_type is Gauge:\n return current_point\n\n if previous_point is not None and type(previous_point) is not type(\n current_point\n ):\n _logger.warning(\n \"convert_aggregation_temporality called with mismatched \"\n \"point types: %s and %s\",\n type(previous_point),\n current_point_type,\n )\n\n return current_point\n\n if current_point_type is Sum:\n if previous_point is None:\n # Output CUMULATIVE for a synchronous instrument\n # There is no previous value, return the delta point as a\n # cumulative\n return replace(\n current_point, aggregation_temporality=aggregation_temporality\n )\n if previous_point.aggregation_temporality is not (\n AggregationTemporality.CUMULATIVE\n ):\n raise Exception(\n \"previous_point aggregation temporality must be CUMULATIVE\"\n )\n\n if current_point.aggregation_temporality is aggregation_temporality:\n # Output DELTA for a synchronous instrument\n # Output CUMULATIVE for an asynchronous instrument\n return current_point\n\n if aggregation_temporality is AggregationTemporality.DELTA:\n # Output temporality DELTA for an asynchronous instrument\n value = current_point.value - previous_point.value\n output_start_time_unix_nano = previous_point.time_unix_nano\n\n else:\n # Output CUMULATIVE for a synchronous instrument\n value = current_point.value + previous_point.value\n output_start_time_unix_nano = previous_point.start_time_unix_nano\n\n is_monotonic = (\n previous_point.is_monotonic and current_point.is_monotonic\n )\n\n return Sum(\n start_time_unix_nano=output_start_time_unix_nano,\n time_unix_nano=current_point.time_unix_nano,\n value=value,\n aggregation_temporality=aggregation_temporality,\n is_monotonic=is_monotonic,\n )\n\n if current_point_type is Histogram:\n if previous_point is None:\n return replace(\n current_point, aggregation_temporality=aggregation_temporality\n )\n if previous_point.aggregation_temporality is not (\n AggregationTemporality.CUMULATIVE\n ):\n raise Exception(\n \"previous_point aggregation temporality must be CUMULATIVE\"\n )\n\n if current_point.aggregation_temporality is aggregation_temporality:\n return current_point\n\n if aggregation_temporality is AggregationTemporality.CUMULATIVE:\n start_time_unix_nano = previous_point.start_time_unix_nano\n sum_ = current_point.sum + previous_point.sum\n bucket_counts = [\n curr_count + prev_count\n for curr_count, prev_count in zip(\n current_point.bucket_counts, previous_point.bucket_counts\n )\n ]\n else:\n start_time_unix_nano = previous_point.time_unix_nano\n sum_ = current_point.sum - previous_point.sum\n bucket_counts = [\n curr_count - prev_count\n for curr_count, prev_count in zip(\n current_point.bucket_counts, previous_point.bucket_counts\n )\n ]\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=current_point.time_unix_nano,\n bucket_counts=bucket_counts,\n explicit_bounds=current_point.explicit_bounds,\n sum=sum_,\n aggregation_temporality=aggregation_temporality,\n )\n return None\n\n\nclass _AggregationFactory(ABC):\n @abstractmethod\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n \"\"\"Creates an aggregation\"\"\"\n\n\nclass ExplicitBucketHistogramAggregation(_AggregationFactory):\n def __init__(\n self,\n boundaries: 
Sequence[float] = (\n 0.0,\n 5.0,\n 10.0,\n 25.0,\n 50.0,\n 75.0,\n 100.0,\n 250.0,\n 500.0,\n 1000.0,\n ),\n record_min_max: bool = True,\n ) -> None:\n self._boundaries = boundaries\n self._record_min_max = record_min_max\n\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n return _ExplicitBucketHistogramAggregation(\n boundaries=self._boundaries,\n record_min_max=self._record_min_max,\n )\n\n\nclass SumAggregation(_AggregationFactory):\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n\n temporality = AggregationTemporality.UNSPECIFIED\n if isinstance(instrument, Synchronous):\n temporality = AggregationTemporality.DELTA\n elif isinstance(instrument, Asynchronous):\n temporality = AggregationTemporality.CUMULATIVE\n\n return _SumAggregation(\n isinstance(instrument, _Monotonic),\n temporality,\n )\n\n\nclass LastValueAggregation(_AggregationFactory):\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n return _LastValueAggregation()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom bisect import bisect_left\nfrom dataclasses import replace\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, List, Optional, Sequence, TypeVar\n\nfrom opentelemetry._metrics.instrument import (\n Asynchronous,\n Instrument,\n Synchronous,\n _Monotonic,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass _Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass _DropAggregation(_Aggregation):\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass _SumAggregation(_Aggregation[Sum]):\n def __init__(\n self,\n instrument_is_monotonic: bool,\n instrument_temporality: AggregationTemporality,\n ):\n super().__init__()\n\n self._start_time_unix_nano = _time_ns()\n self._instrument_temporality = instrument_temporality\n self._instrument_is_monotonic = instrument_is_monotonic\n\n if self._instrument_temporality is AggregationTemporality.DELTA:\n self._value = 0\n else:\n self._value = None\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n if self._value is None:\n self._value = 0\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric 
and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n if self._instrument_temporality is AggregationTemporality.DELTA:\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n with self._lock:\n if self._value is None:\n return None\n value = self._value\n self._value = None\n\n return Sum(\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n\nclass _LastValueAggregation(_Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n with self._lock:\n if self._value is None:\n return None\n value = self._value\n self._value = None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=value,\n )\n\n\nclass _ExplicitBucketHistogramAggregation(_Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[float] = (\n 0.0,\n 5.0,\n 10.0,\n 25.0,\n 50.0,\n 75.0,\n 100.0,\n 250.0,\n 500.0,\n 1000.0,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n self._boundaries = tuple(boundaries)\n self._bucket_counts = self._get_empty_bucket_counts()\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n\n def _get_empty_bucket_counts(self) -> List[int]:\n return [0] * (len(self._boundaries) + 1)\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n self._bucket_counts[bisect_left(self._boundaries, value)] += 1\n\n def collect(self) -> Histogram:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._bucket_counts\n start_time_unix_nano = self._start_time_unix_nano\n histogram_sum = self._sum\n\n self._bucket_counts = self._get_empty_bucket_counts()\n self._start_time_unix_nano = now + 1\n self._sum = 0\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n sum=histogram_sum,\n )\n\n\n# pylint: disable=too-many-return-statements,too-many-branches\ndef _convert_aggregation_temporality(\n previous_point: Optional[_PointVarT],\n current_point: _PointVarT,\n aggregation_temporality: AggregationTemporality,\n) -> _PointVarT:\n \"\"\"Converts `current_point` to the requested `aggregation_temporality`\n given the `previous_point`.\n\n `previous_point` must have `CUMULATIVE` temporality. `current_point` may\n have `DELTA` or `CUMULATIVE` temporality.\n\n The output point will have temporality `aggregation_temporality`. 
Since\n `GAUGE` points have no temporality, they are returned unchanged.\n \"\"\"\n\n current_point_type = type(current_point)\n\n if current_point_type is Gauge:\n return current_point\n\n if previous_point is not None and type(previous_point) is not type(\n current_point\n ):\n _logger.warning(\n \"convert_aggregation_temporality called with mismatched \"\n \"point types: %s and %s\",\n type(previous_point),\n current_point_type,\n )\n\n return current_point\n\n if current_point_type is Sum:\n if previous_point is None:\n # Output CUMULATIVE for a synchronous instrument\n # There is no previous value, return the delta point as a\n # cumulative\n return replace(\n current_point, aggregation_temporality=aggregation_temporality\n )\n if previous_point.aggregation_temporality is not (\n AggregationTemporality.CUMULATIVE\n ):\n raise Exception(\n \"previous_point aggregation temporality must be CUMULATIVE\"\n )\n\n if current_point.aggregation_temporality is aggregation_temporality:\n # Output DELTA for a synchronous instrument\n # Output CUMULATIVE for an asynchronous instrument\n return current_point\n\n if aggregation_temporality is AggregationTemporality.DELTA:\n # Output temporality DELTA for an asynchronous instrument\n value = current_point.value - previous_point.value\n output_start_time_unix_nano = previous_point.time_unix_nano\n\n else:\n # Output CUMULATIVE for a synchronous instrument\n value = current_point.value + previous_point.value\n output_start_time_unix_nano = previous_point.start_time_unix_nano\n\n is_monotonic = (\n previous_point.is_monotonic and current_point.is_monotonic\n )\n\n return Sum(\n start_time_unix_nano=output_start_time_unix_nano,\n time_unix_nano=current_point.time_unix_nano,\n value=value,\n aggregation_temporality=aggregation_temporality,\n is_monotonic=is_monotonic,\n )\n\n if current_point_type is Histogram:\n if previous_point is None:\n return replace(\n current_point, aggregation_temporality=aggregation_temporality\n )\n if previous_point.aggregation_temporality is not (\n AggregationTemporality.CUMULATIVE\n ):\n raise Exception(\n \"previous_point aggregation temporality must be CUMULATIVE\"\n )\n\n if current_point.aggregation_temporality is aggregation_temporality:\n return current_point\n\n if aggregation_temporality is AggregationTemporality.CUMULATIVE:\n start_time_unix_nano = previous_point.start_time_unix_nano\n sum_ = current_point.sum + previous_point.sum\n bucket_counts = [\n curr_count + prev_count\n for curr_count, prev_count in zip(\n current_point.bucket_counts, previous_point.bucket_counts\n )\n ]\n else:\n start_time_unix_nano = previous_point.time_unix_nano\n sum_ = current_point.sum - previous_point.sum\n bucket_counts = [\n curr_count - prev_count\n for curr_count, prev_count in zip(\n current_point.bucket_counts, previous_point.bucket_counts\n )\n ]\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=current_point.time_unix_nano,\n bucket_counts=bucket_counts,\n explicit_bounds=current_point.explicit_bounds,\n sum=sum_,\n aggregation_temporality=aggregation_temporality,\n )\n return None\n\n\nclass _AggregationFactory(ABC):\n @abstractmethod\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n \"\"\"Creates an aggregation\"\"\"\n\n\nclass ExplicitBucketHistogramAggregation(_AggregationFactory):\n def __init__(\n self,\n boundaries: Sequence[float] = (\n 0.0,\n 5.0,\n 10.0,\n 25.0,\n 50.0,\n 75.0,\n 100.0,\n 250.0,\n 500.0,\n 1000.0,\n ),\n record_min_max: bool = True,\n ) -> 
None:\n self._boundaries = boundaries\n self._record_min_max = record_min_max\n\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n return _ExplicitBucketHistogramAggregation(\n boundaries=self._boundaries,\n record_min_max=self._record_min_max,\n )\n\n\nclass SumAggregation(_AggregationFactory):\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n\n temporality = AggregationTemporality.UNSPECIFIED\n if isinstance(instrument, Synchronous):\n temporality = AggregationTemporality.DELTA\n elif isinstance(instrument, Asynchronous):\n temporality = AggregationTemporality.CUMULATIVE\n\n return _SumAggregation(\n isinstance(instrument, _Monotonic),\n temporality,\n )\n\n\nclass LastValueAggregation(_AggregationFactory):\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n return _LastValueAggregation()\n\n\nclass DropAggregation(_AggregationFactory):\n \"\"\"Using this aggregation will make all measurements be ignored.\"\"\"\n\n def _create_aggregation(self, instrument: Instrument) -> _Aggregation:\n return _DropAggregation()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}]}
4,087
253
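The Drop-aggregation fix in this record is an instance of the null-object pattern: an aggregation that accepts the same calls as every other aggregation but discards all input and exports nothing. The sketch below shows the pattern in isolation, assuming a simplified `Measurement` stand-in rather than the SDK's real point and measurement types.

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Measurement:
    value: float  # stand-in for the SDK's Measurement type


class DropAggregation:
    """Null-object aggregation: ignores every measurement, exports nothing."""

    def aggregate(self, measurement: Measurement) -> None:
        pass  # intentionally drop the measurement

    def collect(self) -> Optional[object]:
        return None  # nothing was accumulated, so there is no point to export


# Usage: measurements go in, no data ever comes out.
agg = DropAggregation()
agg.aggregate(Measurement(42.0))
assert agg.collect() is None
```

Keeping the interface identical to the other aggregations lets the pipeline treat a drop view uniformly, while a stricter variant could avoid allocating per-view state entirely, as the issue suggests.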
gh_patches_debug_9151
rasdani/github-patches
git_diff
python-discord__bot-1199
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Race condition in wait_for_deletion between a manual delete and reacting Sentry Issue: [BOT-86](https://sentry.io/organizations/python-discord/issues/1861537034/?referrer=github_integration) The message may be deleted before `wait_for_deletion` has a chance to add a reaction. ``` NotFound: 404 Not Found (error code: 10008): Unknown Message File "bot/utils/messages.py", line 38, in wait_for_deletion await message.add_reaction(emoji) File "discord/message.py", line 953, in add_reaction await self._state.http.add_reaction(self.channel.id, self.id, emoji) File "discord/http.py", line 243, in request raise NotFound(r, data) Task exception was never retrieved future: <Task finished name='Task-333258' coro=<wait_for_deletion() done, defined at /bot/bot/utils/messages.py:19> exception=NotFound('404 Not Found (error code: 10008): Unknown Message')> ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bot/utils/messages.py` Content: ``` 1 import asyncio 2 import contextlib 3 import logging 4 import random 5 import re 6 from io import BytesIO 7 from typing import List, Optional, Sequence, Union 8 9 import discord 10 from discord.errors import HTTPException 11 from discord.ext.commands import Context 12 13 from bot.constants import Emojis, NEGATIVE_REPLIES 14 15 log = logging.getLogger(__name__) 16 17 18 async def wait_for_deletion( 19 message: discord.Message, 20 user_ids: Sequence[discord.abc.Snowflake], 21 client: discord.Client, 22 deletion_emojis: Sequence[str] = (Emojis.trashcan,), 23 timeout: float = 60 * 5, 24 attach_emojis: bool = True, 25 ) -> None: 26 """ 27 Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message. 28 29 An `attach_emojis` bool may be specified to determine whether to attach the given 30 `deletion_emojis` to the message in the given `context`. 31 """ 32 if message.guild is None: 33 raise ValueError("Message must be sent on a guild") 34 35 if attach_emojis: 36 for emoji in deletion_emojis: 37 await message.add_reaction(emoji) 38 39 def check(reaction: discord.Reaction, user: discord.Member) -> bool: 40 """Check that the deletion emoji is reacted by the appropriate user.""" 41 return ( 42 reaction.message.id == message.id 43 and str(reaction.emoji) in deletion_emojis 44 and user.id in user_ids 45 ) 46 47 with contextlib.suppress(asyncio.TimeoutError): 48 await client.wait_for('reaction_add', check=check, timeout=timeout) 49 await message.delete() 50 51 52 async def send_attachments( 53 message: discord.Message, 54 destination: Union[discord.TextChannel, discord.Webhook], 55 link_large: bool = True 56 ) -> List[str]: 57 """ 58 Re-upload the message's attachments to the destination and return a list of their new URLs. 59 60 Each attachment is sent as a separate message to more easily comply with the request/file size 61 limit. If link_large is True, attachments which are too large are instead grouped into a single 62 embed which links to them. 63 """ 64 large = [] 65 urls = [] 66 for attachment in message.attachments: 67 failure_msg = ( 68 f"Failed to re-upload attachment {attachment.filename} from message {message.id}" 69 ) 70 71 try: 72 # Allow 512 bytes of leeway for the rest of the request. 73 # This should avoid most files that are too large, 74 # but some may get through hence the try-catch. 
75 if attachment.size <= destination.guild.filesize_limit - 512: 76 with BytesIO() as file: 77 await attachment.save(file, use_cached=True) 78 attachment_file = discord.File(file, filename=attachment.filename) 79 80 if isinstance(destination, discord.TextChannel): 81 msg = await destination.send(file=attachment_file) 82 urls.append(msg.attachments[0].url) 83 else: 84 await destination.send( 85 file=attachment_file, 86 username=sub_clyde(message.author.display_name), 87 avatar_url=message.author.avatar_url 88 ) 89 elif link_large: 90 large.append(attachment) 91 else: 92 log.info(f"{failure_msg} because it's too large.") 93 except HTTPException as e: 94 if link_large and e.status == 413: 95 large.append(attachment) 96 else: 97 log.warning(f"{failure_msg} with status {e.status}.", exc_info=e) 98 99 if link_large and large: 100 desc = "\n".join(f"[{attachment.filename}]({attachment.url})" for attachment in large) 101 embed = discord.Embed(description=desc) 102 embed.set_footer(text="Attachments exceed upload size limit.") 103 104 if isinstance(destination, discord.TextChannel): 105 await destination.send(embed=embed) 106 else: 107 await destination.send( 108 embed=embed, 109 username=sub_clyde(message.author.display_name), 110 avatar_url=message.author.avatar_url 111 ) 112 113 return urls 114 115 116 def sub_clyde(username: Optional[str]) -> Optional[str]: 117 """ 118 Replace "e"/"E" in any "clyde" in `username` with a Cyrillic "е"/"E" and return the new string. 119 120 Discord disallows "clyde" anywhere in the username for webhooks. It will return a 400. 121 Return None only if `username` is None. 122 """ 123 def replace_e(match: re.Match) -> str: 124 char = "е" if match[2] == "e" else "Е" 125 return match[1] + char 126 127 if username: 128 return re.sub(r"(clyd)(e)", replace_e, username, flags=re.I) 129 else: 130 return username # Empty string or None 131 132 133 async def send_denial(ctx: Context, reason: str) -> None: 134 """Send an embed denying the user with the given reason.""" 135 embed = discord.Embed() 136 embed.colour = discord.Colour.red() 137 embed.title = random.choice(NEGATIVE_REPLIES) 138 embed.description = reason 139 140 await ctx.send(embed=embed) 141 142 143 def format_user(user: discord.abc.User) -> str: 144 """Return a string for `user` which has their mention and ID.""" 145 return f"{user.mention} (`{user.id}`)" 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bot/utils/messages.py b/bot/utils/messages.py --- a/bot/utils/messages.py +++ b/bot/utils/messages.py @@ -34,7 +34,11 @@ if attach_emojis: for emoji in deletion_emojis: - await message.add_reaction(emoji) + try: + await message.add_reaction(emoji) + except discord.NotFound: + log.trace(f"Aborting wait_for_deletion: message {message.id} deleted prematurely.") + return def check(reaction: discord.Reaction, user: discord.Member) -> bool: """Check that the deletion emoji is reacted by the appropriate user."""
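The guard from the diff above, pulled out as a reusable helper for illustration — a hedged sketch of handling the delete-versus-react race, not code from the repository. The function name, parameter types, and the boolean return convention are assumptions introduced here.

```python
from typing import Sequence

import discord


async def try_attach_reactions(
    message: discord.Message, emojis: Sequence[str]
) -> bool:
    """Attach deletion emojis; return False if the message was already deleted."""
    for emoji in emojis:
        try:
            await message.add_reaction(emoji)
        except discord.NotFound:
            # Lost the race: the message vanished before we could react,
            # so there is nothing left to wait on.
            return False
    return True
```

Returning early lets the caller abort the whole `wait_for_deletion` flow quietly instead of surfacing an unhandled `NotFound` from a background task.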
{"golden_diff": "diff --git a/bot/utils/messages.py b/bot/utils/messages.py\n--- a/bot/utils/messages.py\n+++ b/bot/utils/messages.py\n@@ -34,7 +34,11 @@\n \n if attach_emojis:\n for emoji in deletion_emojis:\n- await message.add_reaction(emoji)\n+ try:\n+ await message.add_reaction(emoji)\n+ except discord.NotFound:\n+ log.trace(f\"Aborting wait_for_deletion: message {message.id} deleted prematurely.\")\n+ return\n \n def check(reaction: discord.Reaction, user: discord.Member) -> bool:\n \"\"\"Check that the deletion emoji is reacted by the appropriate user.\"\"\"\n", "issue": "Race condition in wait_for_deletion between a manual delete and reacting\nSentry Issue: [BOT-86](https://sentry.io/organizations/python-discord/issues/1861537034/?referrer=github_integration)\r\n\r\nThe message may be deleted before `wait_for_deletion` has a chance to add a reaction.\r\n\r\n```\r\nNotFound: 404 Not Found (error code: 10008): Unknown Message\r\n File \"bot/utils/messages.py\", line 38, in wait_for_deletion\r\n await message.add_reaction(emoji)\r\n File \"discord/message.py\", line 953, in add_reaction\r\n await self._state.http.add_reaction(self.channel.id, self.id, emoji)\r\n File \"discord/http.py\", line 243, in request\r\n raise NotFound(r, data)\r\n\r\nTask exception was never retrieved\r\nfuture: <Task finished name='Task-333258' coro=<wait_for_deletion() done, defined at /bot/bot/utils/messages.py:19> exception=NotFound('404 Not Found (error code: 10008): Unknown Message')>\r\n```\n", "before_files": [{"content": "import asyncio\nimport contextlib\nimport logging\nimport random\nimport re\nfrom io import BytesIO\nfrom typing import List, Optional, Sequence, Union\n\nimport discord\nfrom discord.errors import HTTPException\nfrom discord.ext.commands import Context\n\nfrom bot.constants import Emojis, NEGATIVE_REPLIES\n\nlog = logging.getLogger(__name__)\n\n\nasync def wait_for_deletion(\n message: discord.Message,\n user_ids: Sequence[discord.abc.Snowflake],\n client: discord.Client,\n deletion_emojis: Sequence[str] = (Emojis.trashcan,),\n timeout: float = 60 * 5,\n attach_emojis: bool = True,\n) -> None:\n \"\"\"\n Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message.\n\n An `attach_emojis` bool may be specified to determine whether to attach the given\n `deletion_emojis` to the message in the given `context`.\n \"\"\"\n if message.guild is None:\n raise ValueError(\"Message must be sent on a guild\")\n\n if attach_emojis:\n for emoji in deletion_emojis:\n await message.add_reaction(emoji)\n\n def check(reaction: discord.Reaction, user: discord.Member) -> bool:\n \"\"\"Check that the deletion emoji is reacted by the appropriate user.\"\"\"\n return (\n reaction.message.id == message.id\n and str(reaction.emoji) in deletion_emojis\n and user.id in user_ids\n )\n\n with contextlib.suppress(asyncio.TimeoutError):\n await client.wait_for('reaction_add', check=check, timeout=timeout)\n await message.delete()\n\n\nasync def send_attachments(\n message: discord.Message,\n destination: Union[discord.TextChannel, discord.Webhook],\n link_large: bool = True\n) -> List[str]:\n \"\"\"\n Re-upload the message's attachments to the destination and return a list of their new URLs.\n\n Each attachment is sent as a separate message to more easily comply with the request/file size\n limit. 
If link_large is True, attachments which are too large are instead grouped into a single\n embed which links to them.\n \"\"\"\n large = []\n urls = []\n for attachment in message.attachments:\n failure_msg = (\n f\"Failed to re-upload attachment {attachment.filename} from message {message.id}\"\n )\n\n try:\n # Allow 512 bytes of leeway for the rest of the request.\n # This should avoid most files that are too large,\n # but some may get through hence the try-catch.\n if attachment.size <= destination.guild.filesize_limit - 512:\n with BytesIO() as file:\n await attachment.save(file, use_cached=True)\n attachment_file = discord.File(file, filename=attachment.filename)\n\n if isinstance(destination, discord.TextChannel):\n msg = await destination.send(file=attachment_file)\n urls.append(msg.attachments[0].url)\n else:\n await destination.send(\n file=attachment_file,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n elif link_large:\n large.append(attachment)\n else:\n log.info(f\"{failure_msg} because it's too large.\")\n except HTTPException as e:\n if link_large and e.status == 413:\n large.append(attachment)\n else:\n log.warning(f\"{failure_msg} with status {e.status}.\", exc_info=e)\n\n if link_large and large:\n desc = \"\\n\".join(f\"[{attachment.filename}]({attachment.url})\" for attachment in large)\n embed = discord.Embed(description=desc)\n embed.set_footer(text=\"Attachments exceed upload size limit.\")\n\n if isinstance(destination, discord.TextChannel):\n await destination.send(embed=embed)\n else:\n await destination.send(\n embed=embed,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n\n return urls\n\n\ndef sub_clyde(username: Optional[str]) -> Optional[str]:\n \"\"\"\n Replace \"e\"/\"E\" in any \"clyde\" in `username` with a Cyrillic \"\u0435\"/\"E\" and return the new string.\n\n Discord disallows \"clyde\" anywhere in the username for webhooks. 
It will return a 400.\n Return None only if `username` is None.\n \"\"\"\n def replace_e(match: re.Match) -> str:\n char = \"\u0435\" if match[2] == \"e\" else \"\u0415\"\n return match[1] + char\n\n if username:\n return re.sub(r\"(clyd)(e)\", replace_e, username, flags=re.I)\n else:\n return username # Empty string or None\n\n\nasync def send_denial(ctx: Context, reason: str) -> None:\n \"\"\"Send an embed denying the user with the given reason.\"\"\"\n embed = discord.Embed()\n embed.colour = discord.Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = reason\n\n await ctx.send(embed=embed)\n\n\ndef format_user(user: discord.abc.User) -> str:\n \"\"\"Return a string for `user` which has their mention and ID.\"\"\"\n return f\"{user.mention} (`{user.id}`)\"\n", "path": "bot/utils/messages.py"}], "after_files": [{"content": "import asyncio\nimport contextlib\nimport logging\nimport random\nimport re\nfrom io import BytesIO\nfrom typing import List, Optional, Sequence, Union\n\nimport discord\nfrom discord.errors import HTTPException\nfrom discord.ext.commands import Context\n\nfrom bot.constants import Emojis, NEGATIVE_REPLIES\n\nlog = logging.getLogger(__name__)\n\n\nasync def wait_for_deletion(\n message: discord.Message,\n user_ids: Sequence[discord.abc.Snowflake],\n client: discord.Client,\n deletion_emojis: Sequence[str] = (Emojis.trashcan,),\n timeout: float = 60 * 5,\n attach_emojis: bool = True,\n) -> None:\n \"\"\"\n Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message.\n\n An `attach_emojis` bool may be specified to determine whether to attach the given\n `deletion_emojis` to the message in the given `context`.\n \"\"\"\n if message.guild is None:\n raise ValueError(\"Message must be sent on a guild\")\n\n if attach_emojis:\n for emoji in deletion_emojis:\n try:\n await message.add_reaction(emoji)\n except discord.NotFound:\n log.trace(f\"Aborting wait_for_deletion: message {message.id} deleted prematurely.\")\n return\n\n def check(reaction: discord.Reaction, user: discord.Member) -> bool:\n \"\"\"Check that the deletion emoji is reacted by the appropriate user.\"\"\"\n return (\n reaction.message.id == message.id\n and str(reaction.emoji) in deletion_emojis\n and user.id in user_ids\n )\n\n with contextlib.suppress(asyncio.TimeoutError):\n await client.wait_for('reaction_add', check=check, timeout=timeout)\n await message.delete()\n\n\nasync def send_attachments(\n message: discord.Message,\n destination: Union[discord.TextChannel, discord.Webhook],\n link_large: bool = True\n) -> List[str]:\n \"\"\"\n Re-upload the message's attachments to the destination and return a list of their new URLs.\n\n Each attachment is sent as a separate message to more easily comply with the request/file size\n limit. 
If link_large is True, attachments which are too large are instead grouped into a single\n embed which links to them.\n \"\"\"\n large = []\n urls = []\n for attachment in message.attachments:\n failure_msg = (\n f\"Failed to re-upload attachment {attachment.filename} from message {message.id}\"\n )\n\n try:\n # Allow 512 bytes of leeway for the rest of the request.\n # This should avoid most files that are too large,\n # but some may get through hence the try-catch.\n if attachment.size <= destination.guild.filesize_limit - 512:\n with BytesIO() as file:\n await attachment.save(file, use_cached=True)\n attachment_file = discord.File(file, filename=attachment.filename)\n\n if isinstance(destination, discord.TextChannel):\n msg = await destination.send(file=attachment_file)\n urls.append(msg.attachments[0].url)\n else:\n await destination.send(\n file=attachment_file,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n elif link_large:\n large.append(attachment)\n else:\n log.info(f\"{failure_msg} because it's too large.\")\n except HTTPException as e:\n if link_large and e.status == 413:\n large.append(attachment)\n else:\n log.warning(f\"{failure_msg} with status {e.status}.\", exc_info=e)\n\n if link_large and large:\n desc = \"\\n\".join(f\"[{attachment.filename}]({attachment.url})\" for attachment in large)\n embed = discord.Embed(description=desc)\n embed.set_footer(text=\"Attachments exceed upload size limit.\")\n\n if isinstance(destination, discord.TextChannel):\n await destination.send(embed=embed)\n else:\n await destination.send(\n embed=embed,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n\n return urls\n\n\ndef sub_clyde(username: Optional[str]) -> Optional[str]:\n \"\"\"\n Replace \"e\"/\"E\" in any \"clyde\" in `username` with a Cyrillic \"\u0435\"/\"E\" and return the new string.\n\n Discord disallows \"clyde\" anywhere in the username for webhooks. It will return a 400.\n Return None only if `username` is None.\n \"\"\"\n def replace_e(match: re.Match) -> str:\n char = \"\u0435\" if match[2] == \"e\" else \"\u0415\"\n return match[1] + char\n\n if username:\n return re.sub(r\"(clyd)(e)\", replace_e, username, flags=re.I)\n else:\n return username # Empty string or None\n\n\nasync def send_denial(ctx: Context, reason: str) -> None:\n \"\"\"Send an embed denying the user with the given reason.\"\"\"\n embed = discord.Embed()\n embed.colour = discord.Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = reason\n\n await ctx.send(embed=embed)\n\n\ndef format_user(user: discord.abc.User) -> str:\n \"\"\"Return a string for `user` which has their mention and ID.\"\"\"\n return f\"{user.mention} (`{user.id}`)\"\n", "path": "bot/utils/messages.py"}]}
1,989
147
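A note on the fix in the record above: the golden diff treats a 404 from `add_reaction` as an expected outcome rather than a crash. Below is a minimal, self-contained sketch of the same pattern; it relies only on discord.py's public API (`Message.add_reaction` raising `discord.NotFound` once the message is gone), and the helper name `attach_deletion_emojis` is an invention for illustration, not part of the bot.

```python
# Sketch of the race-tolerant pattern from the golden diff above.
# discord.NotFound (HTTP 404) is what discord.py raises when the target
# message was deleted between our check and the add_reaction call.
import logging

import discord

log = logging.getLogger(__name__)


async def attach_deletion_emojis(message: discord.Message, emojis) -> bool:
    """React with each emoji; return False if the message vanished first."""
    for emoji in emojis:
        try:
            await message.add_reaction(emoji)
        except discord.NotFound:
            # The race from the issue: a manual delete won. Abort quietly
            # instead of letting the task die on an unhandled 404.
            log.debug("Message %s deleted before reactions were attached.", message.id)
            return False
    return True
```

The window between any availability check and the HTTP call cannot be closed on the client side, which is why catching `NotFound` at the await point, rather than checking first, is the idiomatic discord.py answer.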
gh_patches_debug_7832
rasdani/github-patches
git_diff
Mailu__Mailu-1974
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DEFER_ON_TLS_ERROR in mailu.env doesn't appear to be applied. Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests. For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net). To be able to help you best, we need some more information. ## Before you open your issue - [X] Check if no issue or pull-request for this already exists. - [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page) - [X] You understand `Mailu` is made by volunteers in their **free time** — be conscise, civil and accept that delays can occur. - [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title. ## Environment & Versions ### Environment - [X] docker-compose - [ ] kubernetes - [ ] docker swarm ### Versions master (d464187) ## Description DEFER_ON_TLS_ERROR in mailu.env doesn't appear to be applied. I've set this environment variable to "false" and my emails still appear to be deferred according to the SMTP container. `(status=deferred (client TLS configuration problem))` I've set this to "false" because MTA-STS isn't as widely deployed as one would hope, and I don't want my emails blackholed and deferred until my destination's policy is published (which may be never)... ## Replication Steps In mailu.env - set `DEFER_ON_TLS_ERROR=false`, reload containers, then try sending an email to a domain without an MTA-STS policy... Then output logs for the SMTP container and note the message deferral. ## Expected behaviour I expect the email to continue being sent despite the MTA-STS error. ## Logs ``` smtp_1 | Sep 09 13:42:08 mail postfix/cleanup[416]: 85B6147DDA: message-id=<[email protected]> smtp_1 | Sep 09 13:42:09 mail postfix/qmgr[391]: 85B6147DDA: from=<[email protected]>, size=6074, nrcpt=1 (queue active) smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: connect to /tmp/mta-sts.socket: No such file or directory smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: table socketmap:unix:/tmp/mta-sts.socket:postfix lookup error: No such file or directory smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: socketmap:unix:/tmp/mta-sts.socket:postfix lookup error for "ssl-tools.net" smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: smtp_tls_policy_maps, next-hop destination "ssl-tools.net": policy table lookup error smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: TLS policy lookup for ssl-tools.net/ssl-tools.net: client TLS configuration problem smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: TLS policy lookup for ssl-tools.net/ssl-tools.net: client TLS configuration problem smtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: 85B6147DDA: to=<[email protected]>, relay=none, delay=1.1, delays=1/0.03/0.01/0, dsn=4.7.5, status=deferred (client TLS configuration problem) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `core/postfix/start.py` Content: ``` 1 #!/usr/bin/python3 2 3 import os 4 import glob 5 import shutil 6 import multiprocessing 7 import logging as log 8 import sys 9 10 from podop import run_server 11 from pwd import getpwnam 12 from socrate import system, conf 13 14 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 15 16 def start_podop(): 17 os.setuid(getpwnam('postfix').pw_uid) 18 os.mkdir('/dev/shm/postfix',mode=0o700) 19 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/" 20 # TODO: Remove verbosity setting from Podop? 21 run_server(0, "postfix", "/tmp/podop.socket", [ 22 ("transport", "url", url + "transport/§"), 23 ("alias", "url", url + "alias/§"), 24 ("dane", "url", url + "dane/§"), 25 ("domain", "url", url + "domain/§"), 26 ("mailbox", "url", url + "mailbox/§"), 27 ("recipientmap", "url", url + "recipient/map/§"), 28 ("sendermap", "url", url + "sender/map/§"), 29 ("senderaccess", "url", url + "sender/access/§"), 30 ("senderlogin", "url", url + "sender/login/§"), 31 ("senderrate", "url", url + "sender/rate/§") 32 ]) 33 34 def start_mta_sts_daemon(): 35 os.chmod("/root/", 0o755) # read access to /root/.netrc required 36 os.setuid(getpwnam('postfix').pw_uid) 37 from postfix_mta_sts_resolver import daemon 38 daemon.main() 39 40 def is_valid_postconf_line(line): 41 return not line.startswith("#") \ 42 and not line == '' 43 44 # Actual startup script 45 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front") 46 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin") 47 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332") 48 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525") 49 os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0] 50 try: 51 _to_lookup = os.environ["OUTCLEAN"] 52 # Ensure we lookup a FQDN: @see #1884 53 if not _to_lookup.endswith('.'): 54 _to_lookup += '.' 
55 os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(_to_lookup) 56 except: 57 os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10" 58 59 for postfix_file in glob.glob("/conf/*.cf"): 60 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file))) 61 62 if os.path.exists("/overrides/postfix.cf"): 63 for line in open("/overrides/postfix.cf").read().strip().split("\n"): 64 if is_valid_postconf_line(line): 65 os.system('postconf -e "{}"'.format(line)) 66 67 if os.path.exists("/overrides/postfix.master"): 68 for line in open("/overrides/postfix.master").read().strip().split("\n"): 69 if is_valid_postconf_line(line): 70 os.system('postconf -Me "{}"'.format(line)) 71 72 for map_file in glob.glob("/overrides/*.map"): 73 destination = os.path.join("/etc/postfix", os.path.basename(map_file)) 74 shutil.copyfile(map_file, destination) 75 os.system("postmap {}".format(destination)) 76 os.remove(destination) 77 78 if os.path.exists("/overrides/mta-sts-daemon.yml"): 79 shutil.copyfile("/overrides/mta-sts-daemon.yml", "/etc/mta-sts-daemon.yml") 80 conf.jinja("/etc/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml") 81 82 if not os.path.exists("/etc/postfix/tls_policy.map.lmdb"): 83 open("/etc/postfix/tls_policy.map", "a").close() 84 os.system("postmap /etc/postfix/tls_policy.map") 85 86 if "RELAYUSER" in os.environ: 87 path = "/etc/postfix/sasl_passwd" 88 conf.jinja("/conf/sasl_passwd", os.environ, path) 89 os.system("postmap {}".format(path)) 90 91 # Run Podop and Postfix 92 multiprocessing.Process(target=start_podop).start() 93 multiprocessing.Process(target=start_mta_sts_daemon).start() 94 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing") 95 # Before starting postfix, we need to check permissions on /queue 96 # in the event that postfix,postdrop id have changed 97 os.system("postfix set-permissions") 98 os.system("postfix start-fg") 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/postfix/start.py b/core/postfix/start.py --- a/core/postfix/start.py +++ b/core/postfix/start.py @@ -77,7 +77,8 @@ if os.path.exists("/overrides/mta-sts-daemon.yml"): shutil.copyfile("/overrides/mta-sts-daemon.yml", "/etc/mta-sts-daemon.yml") -conf.jinja("/etc/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml") +else: + conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml") if not os.path.exists("/etc/postfix/tls_policy.map.lmdb"): open("/etc/postfix/tls_policy.map", "a").close()
{"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -77,7 +77,8 @@\n \n if os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\n-conf.jinja(\"/etc/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n+else:\n+ conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n \n if not os.path.exists(\"/etc/postfix/tls_policy.map.lmdb\"):\n open(\"/etc/postfix/tls_policy.map\", \"a\").close()\n", "issue": "DEFER_ON_TLS_ERROR in mailu.env doesn't appear to be applied.\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\n## Before you open your issue\r\n- [X] Check if no issue or pull-request for this already exists.\r\n- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [X] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [X] docker-compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\nmaster (d464187)\r\n\r\n## Description\r\nDEFER_ON_TLS_ERROR in mailu.env doesn't appear to be applied. I've set this environment variable to \"false\" and my emails still appear to be deferred according to the SMTP container. \r\n`(status=deferred (client TLS configuration problem))`\r\n\r\nI've set this to \"false\" because MTA-STS isn't as widely deployed as one would hope, and I don't want my emails blackholed and deferred until my destination's policy is published (which may be never)... 
\r\n\r\n## Replication Steps\r\nIn mailu.env - set `DEFER_ON_TLS_ERROR=false`, reload containers, then try sending an email to a domain without an MTA-STS policy...\r\n\r\nThen output logs for the SMTP container and note the message deferral.\r\n\r\n## Expected behaviour\r\nI expect the email to continue being sent despite the MTA-STS error.\r\n\r\n## Logs\r\n```\r\nsmtp_1 | Sep 09 13:42:08 mail postfix/cleanup[416]: 85B6147DDA: message-id=<[email protected]>\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/qmgr[391]: 85B6147DDA: from=<[email protected]>, size=6074, nrcpt=1 (queue active)\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: connect to /tmp/mta-sts.socket: No such file or directory\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: table socketmap:unix:/tmp/mta-sts.socket:postfix lookup error: No such file or directory\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: socketmap:unix:/tmp/mta-sts.socket:postfix lookup error for \"ssl-tools.net\"\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: smtp_tls_policy_maps, next-hop destination \"ssl-tools.net\": policy table lookup error\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: TLS policy lookup for ssl-tools.net/ssl-tools.net: client TLS configuration problem\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: warning: TLS policy lookup for ssl-tools.net/ssl-tools.net: client TLS configuration problem\r\nsmtp_1 | Sep 09 13:42:09 mail postfix/smtp[417]: 85B6147DDA: to=<[email protected]>, relay=none, delay=1.1, delays=1/0.03/0.01/0, dsn=4.7.5, status=deferred (client TLS configuration problem)\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom pwd import getpwnam\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.mkdir('/dev/shm/postfix',mode=0o700)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n os.setuid(getpwnam('postfix').pw_uid)\n from postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", 
\"imap:2525\")\nos.environ[\"OUTCLEAN\"] = os.environ[\"HOSTNAMES\"].split(\",\")[0]\ntry:\n _to_lookup = os.environ[\"OUTCLEAN\"]\n # Ensure we lookup a FQDN: @see #1884\n if not _to_lookup.endswith('.'):\n _to_lookup += '.'\n os.environ[\"OUTCLEAN_ADDRESS\"] = system.resolve_hostname(_to_lookup)\nexcept:\n os.environ[\"OUTCLEAN_ADDRESS\"] = \"10.10.10.10\"\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nconf.jinja(\"/etc/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nif not os.path.exists(\"/etc/postfix/tls_policy.map.lmdb\"):\n open(\"/etc/postfix/tls_policy.map\", \"a\").close()\n os.system(\"postmap /etc/postfix/tls_policy.map\")\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom pwd import getpwnam\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.mkdir('/dev/shm/postfix',mode=0o700)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n os.setuid(getpwnam('postfix').pw_uid)\n from postfix_mta_sts_resolver 
import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\nos.environ[\"OUTCLEAN\"] = os.environ[\"HOSTNAMES\"].split(\",\")[0]\ntry:\n _to_lookup = os.environ[\"OUTCLEAN\"]\n # Ensure we lookup a FQDN: @see #1884\n if not _to_lookup.endswith('.'):\n _to_lookup += '.'\n os.environ[\"OUTCLEAN_ADDRESS\"] = system.resolve_hostname(_to_lookup)\nexcept:\n os.environ[\"OUTCLEAN_ADDRESS\"] = \"10.10.10.10\"\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nif not os.path.exists(\"/etc/postfix/tls_policy.map.lmdb\"):\n open(\"/etc/postfix/tls_policy.map\", \"a\").close()\n os.system(\"postmap /etc/postfix/tls_policy.map\")\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]}
2,494
180
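The root cause in the record above is easy to miss: on a fresh container `/etc/mta-sts-daemon.yml` does not exist yet, and the old unconditional `conf.jinja` call used that path as its own template source, so without an override there was nothing valid to render, the MTA-STS daemon had no config, and Postfix deferred mail regardless of `DEFER_ON_TLS_ERROR` (hence the logged "connect to /tmp/mta-sts.socket: No such file or directory"). A sketch of the corrected selection logic follows; `render_jinja` stands in for socrate's `conf.jinja(source, env, destination)`, and the paths mirror Mailu's layout.

```python
# Sketch of the config selection fixed by the golden diff above.
# `render_jinja` stands in for socrate's conf.jinja(source, env, dest).
import os
import shutil


def write_mta_sts_config(render_jinja) -> None:
    override = "/overrides/mta-sts-daemon.yml"  # operator-supplied file
    packaged = "/conf/mta-sts-daemon.yml"       # template shipped in the image
    target = "/etc/mta-sts-daemon.yml"          # what the daemon reads
    if os.path.exists(override):
        # An override is taken verbatim; no template rendering at all.
        shutil.copyfile(override, target)
    else:
        # Render the packaged template so environment variables such as
        # DEFER_ON_TLS_ERROR are substituted into the daemon config.
        render_jinja(packaged, os.environ, target)
```

The old code's mistake was rendering the target from itself, which can only work if some earlier step had already created that file.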
gh_patches_debug_23666
rasdani/github-patches
git_diff
pypi__warehouse-7927
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support for properly displaying the site translated to RTL languages **What's the problem this feature will solve?** Display PyPI.org propely when translated to RTL languages such as Arabic, Farsi and Hebrew. **Describe the solution you'd like** I can't suggest a specific technical solution, since I'm not familiar with the website's design and implementation. **Additional context** The context is the recent work on translation into additional languages. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/i18n/__init__.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import functools 14 15 from babel.core import Locale 16 from pyramid import viewderivers 17 from pyramid.i18n import TranslationStringFactory, default_locale_negotiator 18 from pyramid.threadlocal import get_current_request 19 20 from warehouse.cache.http import add_vary 21 22 # Taken from: 23 # https://github.com/django/django/blob/master/django/conf/locale/__init__.py 24 KNOWN_LOCALES = { 25 "en": "English", # English 26 "es": "español", # Spanish 27 "fr": "français", # French 28 "ja": "日本語", # Japanese 29 "pt_BR": "Português Brasileiro", # Brazilian Portugeuse 30 "uk": "Українська", # Ukrainian 31 "el": "Ελληνικά", # Greek 32 "de": "Deutsch", # German 33 "zh_Hans": "简体中文", # Simplified Chinese 34 "ru": "Русский", # Russian 35 } 36 37 LOCALE_ATTR = "_LOCALE_" 38 39 _translation_factory = TranslationStringFactory("messages") 40 41 42 class LazyString: 43 def __init__(self, fn, *args, **kwargs): 44 self.fn = fn 45 self.args = args 46 self.mapping = kwargs.get("mapping", {}) 47 self.kwargs = kwargs 48 49 def __json__(self, request): 50 return str(self) 51 52 def __mod__(self, new_mapping): 53 mapping = self.mapping.copy() 54 mapping.update(new_mapping) 55 return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs) 56 57 def __str__(self): 58 return self.fn(*self.args, **self.kwargs) 59 60 61 def _locale(request): 62 """ 63 Computes a babel.core:Locale() object for this request. 64 """ 65 return Locale.parse(request.locale_name, sep="_") 66 67 68 def _negotiate_locale(request): 69 locale_name = getattr(request, LOCALE_ATTR, None) 70 if locale_name is not None: 71 return locale_name 72 73 locale_name = request.params.get(LOCALE_ATTR) 74 if locale_name is not None: 75 return locale_name 76 77 locale_name = request.cookies.get(LOCALE_ATTR) 78 if locale_name is not None: 79 return locale_name 80 81 if not request.accept_language: 82 return default_locale_negotiator(request) 83 84 return request.accept_language.best_match( 85 tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request) 86 ) 87 88 89 def _localize(request, message, **kwargs): 90 """ 91 To be used on the request directly, e.g. 
`request._(message)` 92 """ 93 return request.localizer.translate(_translation_factory(message, **kwargs)) 94 95 96 def localize(message, **kwargs): 97 """ 98 To be used when we don't have the request context, e.g. 99 `from warehouse.i18n import localize as _` 100 """ 101 102 def _lazy_localize(message, **kwargs): 103 request = get_current_request() 104 return _localize(request, message, **kwargs) 105 106 return LazyString(_lazy_localize, message, **kwargs) 107 108 109 class InvalidLocalizer: 110 def _fail(self): 111 raise RuntimeError("Cannot use localizer without has_translations=True") 112 113 @property 114 def locale_name(self): 115 self._fail() 116 117 def pluralize(self, *args, **kwargs): 118 self._fail() 119 120 def translate(self, *args, **kwargs): 121 self._fail() 122 123 124 def translated_view(view, info): 125 if info.options.get("has_translations"): 126 # If this page can be translated, then we'll add a Vary: PyPI-Locale 127 # Vary header. 128 # Note: This will give weird results if hitting PyPI directly instead of through 129 # the Fastly VCL which sets PyPI-Locale. 130 return add_vary("PyPI-Locale")(view) 131 elif info.exception_only: 132 return view 133 else: 134 # If we're not using translations on this view, then we'll wrap the view 135 # with a wrapper that just ensures that the localizer cannot be used. 136 @functools.wraps(view) 137 def wrapped(context, request): 138 # This whole method is a little bit of an odd duck, we want to make 139 # sure that we don't actually *access* request.localizer, because 140 # doing so triggers the machinery to create a new localizer. So 141 # instead we will dig into the request object __dict__ to 142 # effectively do the same thing, just without triggering an access 143 # on request.localizer. 144 145 # Save the original session so that we can restore it once the 146 # inner views have been called. 147 nothing = object() 148 original_localizer = request.__dict__.get("localizer", nothing) 149 150 # This particular view hasn't been set to allow access to the 151 # translations, so we'll just assign an InvalidLocalizer to 152 # request.localizer 153 request.__dict__["localizer"] = InvalidLocalizer() 154 155 try: 156 # Invoke the real view 157 return view(context, request) 158 finally: 159 # Restore the original session so that things like 160 # pyramid_debugtoolbar can access it. 161 if original_localizer is nothing: 162 del request.__dict__["localizer"] 163 else: 164 request.__dict__["localizer"] = original_localizer 165 166 return wrapped 167 168 169 translated_view.options = {"has_translations"} 170 171 172 def includeme(config): 173 # Add the request attributes 174 config.add_request_method(_locale, name="locale", reify=True) 175 config.add_request_method(_localize, name="_") 176 177 # Register our translation directory. 
178 config.add_translation_dirs("warehouse:locale/") 179 180 config.set_locale_negotiator(_negotiate_locale) 181 182 # Register our i18n/l10n filters for Jinja2 183 filters = config.get_settings().setdefault("jinja2.filters", {}) 184 filters.setdefault("format_date", "warehouse.i18n.filters:format_date") 185 filters.setdefault("format_datetime", "warehouse.i18n.filters:format_datetime") 186 filters.setdefault( 187 "format_rfc822_datetime", "warehouse.i18n.filters:format_rfc822_datetime" 188 ) 189 filters.setdefault("format_number", "warehouse.i18n.filters:format_number") 190 191 jglobals = config.get_settings().setdefault("jinja2.globals", {}) 192 jglobals.setdefault("KNOWN_LOCALES", "warehouse.i18n:KNOWN_LOCALES") 193 194 config.add_view_deriver( 195 translated_view, over="rendered_view", under=viewderivers.INGRESS 196 ) 197 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py --- a/warehouse/i18n/__init__.py +++ b/warehouse/i18n/__init__.py @@ -19,19 +19,21 @@ from warehouse.cache.http import add_vary -# Taken from: -# https://github.com/django/django/blob/master/django/conf/locale/__init__.py KNOWN_LOCALES = { - "en": "English", # English - "es": "español", # Spanish - "fr": "français", # French - "ja": "日本語", # Japanese - "pt_BR": "Português Brasileiro", # Brazilian Portugeuse - "uk": "Українська", # Ukrainian - "el": "Ελληνικά", # Greek - "de": "Deutsch", # German - "zh_Hans": "简体中文", # Simplified Chinese - "ru": "Русский", # Russian + identifier: Locale.parse(identifier, sep="_") + for identifier in [ + "en", # English + "es", # Spanish + "fr", # French + "ja", # Japanese + "pt_BR", # Brazilian Portugeuse + "uk", # Ukranian + "el", # Greek + "de", # German + "zh_Hans", # Simplified Chinese + "ru", # Russian + "he", # Hebrew + ] } LOCALE_ATTR = "_LOCALE_" @@ -60,9 +62,9 @@ def _locale(request): """ - Computes a babel.core:Locale() object for this request. + Gets a babel.core:Locale() object for this request. """ - return Locale.parse(request.locale_name, sep="_") + return KNOWN_LOCALES.get(request.locale_name, "en") def _negotiate_locale(request):
{"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -19,19 +19,21 @@\n \n from warehouse.cache.http import add_vary\n \n-# Taken from:\n-# https://github.com/django/django/blob/master/django/conf/locale/__init__.py\n KNOWN_LOCALES = {\n- \"en\": \"English\", # English\n- \"es\": \"espa\u00f1ol\", # Spanish\n- \"fr\": \"fran\u00e7ais\", # French\n- \"ja\": \"\u65e5\u672c\u8a9e\", # Japanese\n- \"pt_BR\": \"Portugu\u00eas Brasileiro\", # Brazilian Portugeuse\n- \"uk\": \"\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430\", # Ukrainian\n- \"el\": \"\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac\", # Greek\n- \"de\": \"Deutsch\", # German\n- \"zh_Hans\": \"\u7b80\u4f53\u4e2d\u6587\", # Simplified Chinese\n- \"ru\": \"\u0420\u0443\u0441\u0441\u043a\u0438\u0439\", # Russian\n+ identifier: Locale.parse(identifier, sep=\"_\")\n+ for identifier in [\n+ \"en\", # English\n+ \"es\", # Spanish\n+ \"fr\", # French\n+ \"ja\", # Japanese\n+ \"pt_BR\", # Brazilian Portugeuse\n+ \"uk\", # Ukranian\n+ \"el\", # Greek\n+ \"de\", # German\n+ \"zh_Hans\", # Simplified Chinese\n+ \"ru\", # Russian\n+ \"he\", # Hebrew\n+ ]\n }\n \n LOCALE_ATTR = \"_LOCALE_\"\n@@ -60,9 +62,9 @@\n \n def _locale(request):\n \"\"\"\n- Computes a babel.core:Locale() object for this request.\n+ Gets a babel.core:Locale() object for this request.\n \"\"\"\n- return Locale.parse(request.locale_name, sep=\"_\")\n+ return KNOWN_LOCALES.get(request.locale_name, \"en\")\n \n \n def _negotiate_locale(request):\n", "issue": "Support for properly displaying the site translated to RTL languages\n**What's the problem this feature will solve?**\r\nDisplay PyPI.org propely when translated to RTL languages such as Arabic, Farsi and Hebrew.\r\n\r\n**Describe the solution you'd like**\r\nI can't suggest a specific technical solution, since I'm not familiar with the website's design and implementation.\r\n\r\n**Additional context**\r\nThe context is the recent work on translation into additional languages.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\n\nfrom babel.core import Locale\nfrom pyramid import viewderivers\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nfrom warehouse.cache.http import add_vary\n\n# Taken from:\n# https://github.com/django/django/blob/master/django/conf/locale/__init__.py\nKNOWN_LOCALES = {\n \"en\": \"English\", # English\n \"es\": \"espa\u00f1ol\", # Spanish\n \"fr\": \"fran\u00e7ais\", # French\n \"ja\": \"\u65e5\u672c\u8a9e\", # Japanese\n \"pt_BR\": \"Portugu\u00eas Brasileiro\", # Brazilian Portugeuse\n \"uk\": \"\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430\", # Ukrainian\n \"el\": \"\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac\", # Greek\n \"de\": \"Deutsch\", # German\n \"zh_Hans\": \"\u7b80\u4f53\u4e2d\u6587\", # Simplified Chinese\n \"ru\": 
\"\u0420\u0443\u0441\u0441\u043a\u0438\u0439\", # Russian\n}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef _localize(request, message, **kwargs):\n \"\"\"\n To be used on the request directly, e.g. `request._(message)`\n \"\"\"\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n\ndef localize(message, **kwargs):\n \"\"\"\n To be used when we don't have the request context, e.g.\n `from warehouse.i18n import localize as _`\n \"\"\"\n\n def _lazy_localize(message, **kwargs):\n request = get_current_request()\n return _localize(request, message, **kwargs)\n\n return LazyString(_lazy_localize, message, **kwargs)\n\n\nclass InvalidLocalizer:\n def _fail(self):\n raise RuntimeError(\"Cannot use localizer without has_translations=True\")\n\n @property\n def locale_name(self):\n self._fail()\n\n def pluralize(self, *args, **kwargs):\n self._fail()\n\n def translate(self, *args, **kwargs):\n self._fail()\n\n\ndef translated_view(view, info):\n if info.options.get(\"has_translations\"):\n # If this page can be translated, then we'll add a Vary: PyPI-Locale\n # Vary header.\n # Note: This will give weird results if hitting PyPI directly instead of through\n # the Fastly VCL which sets PyPI-Locale.\n return add_vary(\"PyPI-Locale\")(view)\n elif info.exception_only:\n return view\n else:\n # If we're not using translations on this view, then we'll wrap the view\n # with a wrapper that just ensures that the localizer cannot be used.\n @functools.wraps(view)\n def wrapped(context, request):\n # This whole method is a little bit of an odd duck, we want to make\n # sure that we don't actually *access* request.localizer, because\n # doing so triggers the machinery to create a new localizer. 
So\n # instead we will dig into the request object __dict__ to\n # effectively do the same thing, just without triggering an access\n # on request.localizer.\n\n # Save the original session so that we can restore it once the\n # inner views have been called.\n nothing = object()\n original_localizer = request.__dict__.get(\"localizer\", nothing)\n\n # This particular view hasn't been set to allow access to the\n # translations, so we'll just assign an InvalidLocalizer to\n # request.localizer\n request.__dict__[\"localizer\"] = InvalidLocalizer()\n\n try:\n # Invoke the real view\n return view(context, request)\n finally:\n # Restore the original session so that things like\n # pyramid_debugtoolbar can access it.\n if original_localizer is nothing:\n del request.__dict__[\"localizer\"]\n else:\n request.__dict__[\"localizer\"] = original_localizer\n\n return wrapped\n\n\ntranslated_view.options = {\"has_translations\"}\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n config.add_request_method(_localize, name=\"_\")\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", \"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n\n config.add_view_deriver(\n translated_view, over=\"rendered_view\", under=viewderivers.INGRESS\n )\n", "path": "warehouse/i18n/__init__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\n\nfrom babel.core import Locale\nfrom pyramid import viewderivers\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nfrom warehouse.cache.http import add_vary\n\nKNOWN_LOCALES = {\n identifier: Locale.parse(identifier, sep=\"_\")\n for identifier in [\n \"en\", # English\n \"es\", # Spanish\n \"fr\", # French\n \"ja\", # Japanese\n \"pt_BR\", # Brazilian Portugeuse\n \"uk\", # Ukranian\n \"el\", # Greek\n \"de\", # German\n \"zh_Hans\", # Simplified Chinese\n \"ru\", # Russian\n \"he\", # Hebrew\n ]\n}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, 
new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Gets a babel.core:Locale() object for this request.\n \"\"\"\n return KNOWN_LOCALES.get(request.locale_name, \"en\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef _localize(request, message, **kwargs):\n \"\"\"\n To be used on the request directly, e.g. `request._(message)`\n \"\"\"\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n\ndef localize(message, **kwargs):\n \"\"\"\n To be used when we don't have the request context, e.g.\n `from warehouse.i18n import localize as _`\n \"\"\"\n\n def _lazy_localize(message, **kwargs):\n request = get_current_request()\n return _localize(request, message, **kwargs)\n\n return LazyString(_lazy_localize, message, **kwargs)\n\n\nclass InvalidLocalizer:\n def _fail(self):\n raise RuntimeError(\"Cannot use localizer without has_translations=True\")\n\n @property\n def locale_name(self):\n self._fail()\n\n def pluralize(self, *args, **kwargs):\n self._fail()\n\n def translate(self, *args, **kwargs):\n self._fail()\n\n\ndef translated_view(view, info):\n if info.options.get(\"has_translations\"):\n # If this page can be translated, then we'll add a Vary: PyPI-Locale\n # Vary header.\n # Note: This will give weird results if hitting PyPI directly instead of through\n # the Fastly VCL which sets PyPI-Locale.\n return add_vary(\"PyPI-Locale\")(view)\n elif info.exception_only:\n return view\n else:\n # If we're not using translations on this view, then we'll wrap the view\n # with a wrapper that just ensures that the localizer cannot be used.\n @functools.wraps(view)\n def wrapped(context, request):\n # This whole method is a little bit of an odd duck, we want to make\n # sure that we don't actually *access* request.localizer, because\n # doing so triggers the machinery to create a new localizer. 
So\n # instead we will dig into the request object __dict__ to\n # effectively do the same thing, just without triggering an access\n # on request.localizer.\n\n # Save the original session so that we can restore it once the\n # inner views have been called.\n nothing = object()\n original_localizer = request.__dict__.get(\"localizer\", nothing)\n\n # This particular view hasn't been set to allow access to the\n # translations, so we'll just assign an InvalidLocalizer to\n # request.localizer\n request.__dict__[\"localizer\"] = InvalidLocalizer()\n\n try:\n # Invoke the real view\n return view(context, request)\n finally:\n # Restore the original session so that things like\n # pyramid_debugtoolbar can access it.\n if original_localizer is nothing:\n del request.__dict__[\"localizer\"]\n else:\n request.__dict__[\"localizer\"] = original_localizer\n\n return wrapped\n\n\ntranslated_view.options = {\"has_translations\"}\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n config.add_request_method(_localize, name=\"_\")\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", \"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n\n config.add_view_deriver(\n translated_view, over=\"rendered_view\", under=viewderivers.INGRESS\n )\n", "path": "warehouse/i18n/__init__.py"}]}
2,413
492
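The warehouse patch above swaps display-name strings for babel `Locale` objects precisely so the rest of the stack can ask each locale for its direction. A small sketch of what that enables, using babel's documented `Locale.parse`, `get_display_name` and `text_direction`; the template line in the comment is an assumption about how a layout template would consume it, not something the diff itself adds.

```python
# Sketch of what storing babel Locale objects (per the golden diff above)
# makes possible. Uses only babel's documented Locale API.
from babel.core import Locale

KNOWN_LOCALES = {
    identifier: Locale.parse(identifier, sep="_")
    for identifier in ["en", "ru", "zh_Hans", "he"]
}

for identifier, locale in KNOWN_LOCALES.items():
    # A layout template could now render
    #   <html lang="{{ identifier }}" dir="{{ locale.text_direction }}">
    # instead of hard-coding dir="ltr"; Hebrew ("he") reports "rtl".
    print(identifier, locale.get_display_name(), locale.text_direction)
```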
gh_patches_debug_13887
rasdani/github-patches
git_diff
apluslms__a-plus-474
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The exercise results page does not show that a module is open for reading while the exercises are still closed. The UI leaves the impression that the module is completely closed (between the reading opening time and the opening time). The "opens" badge in the module box also only shows the normal open time. The UI should show clearly that a module is open for reading. The collapsing box should be opened by default for read-open modules. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exercise/templatetags/exercise.py` Content: ``` 1 import json 2 from django import template 3 from django.db.models import Max, Min 4 from django.template.loader import render_to_string 5 from django.utils import timezone 6 from django.utils.translation import ugettext_lazy as _ 7 8 from course.models import CourseModule 9 from lib.errors import TagUsageError 10 from ..cache.content import CachedContent 11 from ..cache.points import CachedPoints 12 from ..exercise_summary import UserExerciseSummary 13 from ..models import LearningObjectDisplay, LearningObject, Submission, BaseExercise 14 15 16 register = template.Library() 17 18 19 def _prepare_now(context): 20 if not 'now' in context: 21 context['now'] = timezone.now() 22 return context['now'] 23 24 25 def _prepare_context(context, student=None): 26 if not 'instance' in context: 27 raise TagUsageError() 28 instance = context['instance'] 29 _prepare_now(context) 30 if not 'content' in context: 31 context['content'] = CachedContent(instance) 32 def points(user, key): 33 if not key in context: 34 context[key] = CachedPoints(instance, user, context['content']) 35 return context[key] 36 if student: 37 return points(student, 'studentpoints') 38 return points(context['request'].user, 'points') 39 40 41 def _get_toc(context, student=None): 42 points = _prepare_context(context, student) 43 context = context.flatten() 44 context.update({ 45 'modules': points.modules_flatted(), 46 'categories': points.categories(), 47 'total': points.total(), 48 'is_course_staff': context.get('is_course_staff', False), 49 }) 50 return context 51 52 53 def _is_accessible(context, entry, t): 54 if t and t > _prepare_now(context): 55 return False 56 if entry.get('requirements'): 57 points = _prepare_context(context) 58 module = CourseModule.objects.get(id=entry['id']) 59 return module.are_requirements_passed(points) 60 return True 61 62 63 @register.inclusion_tag("exercise/_user_results.html", takes_context=True) 64 def user_results(context, student=None): 65 values = _get_toc(context, student) 66 values['total_json'] = json.dumps(values['total']) 67 if student: 68 values['is_course_staff'] = False 69 return values 70 71 72 @register.inclusion_tag("exercise/_user_toc.html", takes_context=True) 73 def user_toc(context, student=None): 74 return _get_toc(context, student) 75 76 77 @register.inclusion_tag("exercise/_user_last.html", takes_context=True) 78 def user_last(context): 79 user = context['request'].user 80 points = _prepare_context(context) 81 if user.is_authenticated: 82 last = LearningObjectDisplay.objects.filter( 83 profile=user.userprofile, 84 learning_object__status=LearningObject.STATUS.READY, 85 learning_object__course_module__course_instance=context['instance'], 86 ).select_related('learning_object').order_by('-timestamp').first() 87 if last: 88 entry,_,_,_ = 
points.find(last.learning_object) 89 return { 90 'last': entry, 91 'last_time': last.timestamp, 92 } 93 return { 94 'begin': points.begin(), 95 'instance': context['instance'], 96 } 97 98 99 @register.inclusion_tag("exercise/_category_points.html", takes_context=True) 100 def category_points(context, student=None): 101 return _get_toc(context, student) 102 103 104 @register.inclusion_tag("exercise/_submission_list.html", takes_context=True) 105 def latest_submissions(context): 106 submissions = context["profile"].submissions \ 107 .filter(exercise__course_module__course_instance=context["instance"]) \ 108 .order_by("-id")[:10] 109 return { 110 "submissions": submissions, 111 "title": _("Latest submissions"), 112 "empty": _("No submissions for this course."), 113 } 114 115 116 @register.filter 117 def max_submissions(exercise, user_profile): 118 return exercise.max_submissions_for_student(user_profile) 119 120 121 @register.filter 122 def percent(decimal): 123 return int(decimal * 100) 124 125 126 @register.filter 127 def submission_status(status): 128 return Submission.STATUS[status] 129 130 131 def _points_data(obj, classes=None): 132 if isinstance(obj, UserExerciseSummary): 133 exercise = obj.exercise 134 data = { 135 'points': obj.get_points(), 136 'max': exercise.max_points, 137 'difficulty': exercise.difficulty, 138 'required': exercise.points_to_pass, 139 'confirm_the_level': exercise.category.confirm_the_level, 140 'missing_points': obj.is_missing_points(), 141 'passed': obj.is_passed(), 142 'full_score': obj.is_full_points(), 143 'submitted': obj.is_submitted(), 144 'graded': obj.is_graded(), 145 'official': not obj.is_unofficial(), 146 'exercise_page': True, 147 } 148 elif isinstance(obj, Submission): 149 exercise = obj.exercise 150 data = { 151 'points': obj.grade, 152 'max': exercise.max_points, 153 'difficulty': exercise.difficulty, 154 'required': exercise.points_to_pass, 155 'confirm_the_level': exercise.category.confirm_the_level, 156 'missing_points': obj.grade < exercise.points_to_pass, 157 'passed': obj.grade >= exercise.points_to_pass, 158 'full_score': obj.grade >= exercise.max_points, 159 'submitted': True, 160 'graded': obj.is_graded, 161 'official': obj.status != Submission.STATUS.UNOFFICIAL, 162 } 163 if not obj.is_graded and ( 164 not exercise.category.confirm_the_level 165 or obj.status != Submission.STATUS.WAITING 166 ): 167 data['status'] = obj.status 168 else: 169 points = obj.get('points', 0) 170 max_points = obj.get('max_points', 0) 171 required = obj.get('points_to_pass', 0) 172 data = { 173 'points': points, 174 'max': max_points, 175 'difficulty': obj.get('difficulty', ''), 176 'required': required, 177 'confirm_the_level': obj.get('confirm_the_level', False), 178 'missing_points': points < required, 179 'passed': obj.get('passed', True), 180 'full_score': points >= max_points, 181 'submitted': obj.get('submission_count', 0) > 0, 182 'graded': obj.get('graded', True), 183 'status': obj.get('submission_status', False), 184 'unconfirmed': obj.get('unconfirmed', False), 185 'official': not obj.get('unofficial', False), 186 'confirmable_points': obj.get('confirmable_points', False), 187 } 188 percentage = 0 189 required_percentage = None 190 if data['max'] > 0: 191 percentage = int(round(100.0 * data['points'] / data['max'])) 192 if data['required']: 193 required_percentage = int(round(100.0 * data['required'] / data['max'])) 194 data.update({ 195 'classes': classes, 196 'percentage': percentage, 197 'required_percentage': required_percentage, 198 }) 199 
return data 200 201 202 @register.inclusion_tag("exercise/_points_progress.html") 203 def points_progress(obj): 204 return _points_data(obj) 205 206 207 @register.inclusion_tag("exercise/_points_badge.html") 208 def points_badge(obj, classes=None): 209 return _points_data(obj, classes) 210 211 212 @register.simple_tag(takes_context=True) 213 def max_group_size(context): 214 points = _prepare_context(context) 215 return points.total()['max_group_size'] 216 217 218 @register.simple_tag(takes_context=True) 219 def min_group_size(context): 220 points = _prepare_context(context) 221 return points.total()['min_group_size'] 222 223 224 @register.simple_tag(takes_context=True) 225 def module_accessible(context, entry): 226 t = entry.get('reading_opening_time', entry.get('opening_time')) 227 return _is_accessible(context, entry, t) 228 229 230 @register.simple_tag(takes_context=True) 231 def exercise_accessible(context, entry): 232 t = entry.get('opening_time') 233 return _is_accessible(context, entry, t) 234 235 236 @register.simple_tag 237 def get_grading_errors(submission): 238 if not isinstance(submission.grading_data, dict): 239 return "" 240 grading_data = submission.grading_data.get('grading_data') 241 if not isinstance(grading_data, str): 242 return "" 243 if grading_data.startswith('<pre>'): 244 return grading_data[5:-6] 245 try: 246 return json.loads(grading_data).get('errors', "") 247 except (AttributeError, TypeError, ValueError): 248 return "" 249 250 251 @register.inclusion_tag("exercise/_text_stats.html", takes_context=True) 252 def exercise_text_stats(context, exercise): 253 if not 'instance' in context: 254 raise TagUsageError() 255 instance = context['instance'] 256 257 if not 'student_count' in context: 258 context['student_count'] = instance.students.count() 259 total = context['student_count'] 260 261 if isinstance(exercise, int): 262 num = instance.students.filter(submissions__exercise_id=exercise).distinct().count() 263 else: 264 num = exercise.number_of_submitters() if exercise else 0 265 return { 266 "number": num, 267 "percentage": int(100 * num / total) if total else 0, 268 } 269 270 271 @register.simple_tag 272 def get_format_info(format): 273 format_infos = { 274 'json' : { 275 'name': 'json', 276 'verbose_name': 'JSON', 277 }, 278 'csv': { 279 'name': 'csv', 280 'verbose_name': 'CSV', 281 }, 282 'excel.csv': { 283 'name': 'excel.csv', 284 'verbose_name': _('Excel compatible CSV'), 285 }, 286 } 287 try: 288 return format_infos[format] 289 except KeyError as e: 290 raise RuntimeError('Invalid format: \'{}\''.format(format)) from e 291 292 293 @register.simple_tag 294 def get_format_info_list(formats): 295 return [get_format_info(format) for format in formats.split()] 296 297 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exercise/templatetags/exercise.py b/exercise/templatetags/exercise.py --- a/exercise/templatetags/exercise.py +++ b/exercise/templatetags/exercise.py @@ -223,8 +223,11 @@ @register.simple_tag(takes_context=True) def module_accessible(context, entry): - t = entry.get('reading_opening_time', entry.get('opening_time')) - return _is_accessible(context, entry, t) + t = entry.get('reading_opening_time') + if t: + return _is_accessible(context, entry, t) + else: + return exercise_accessible(context, entry) @register.simple_tag(takes_context=True) @@ -293,4 +296,3 @@ @register.simple_tag def get_format_info_list(formats): return [get_format_info(format) for format in formats.split()] -
{"golden_diff": "diff --git a/exercise/templatetags/exercise.py b/exercise/templatetags/exercise.py\n--- a/exercise/templatetags/exercise.py\n+++ b/exercise/templatetags/exercise.py\n@@ -223,8 +223,11 @@\n \n @register.simple_tag(takes_context=True)\n def module_accessible(context, entry):\n- t = entry.get('reading_opening_time', entry.get('opening_time'))\n- return _is_accessible(context, entry, t)\n+ t = entry.get('reading_opening_time')\n+ if t:\n+ return _is_accessible(context, entry, t)\n+ else:\n+ return exercise_accessible(context, entry)\n \n \n @register.simple_tag(takes_context=True)\n@@ -293,4 +296,3 @@\n @register.simple_tag\n def get_format_info_list(formats):\n return [get_format_info(format) for format in formats.split()]\n-\n", "issue": "The exercise results page does not show that a module is open for reading while the exercises are still closed.\nThe UI leaves the impression that the module is completely closed (between the reading opening time and the opening time). The \"opens\" badge in the module box also only shows the normal open time. The UI should show clearly that a module is open for reading. The collapsing box should be opened by default for read-open modules.\n", "before_files": [{"content": "import json\nfrom django import template\nfrom django.db.models import Max, Min\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom course.models import CourseModule\nfrom lib.errors import TagUsageError\nfrom ..cache.content import CachedContent\nfrom ..cache.points import CachedPoints\nfrom ..exercise_summary import UserExerciseSummary\nfrom ..models import LearningObjectDisplay, LearningObject, Submission, BaseExercise\n\n\nregister = template.Library()\n\n\ndef _prepare_now(context):\n if not 'now' in context:\n context['now'] = timezone.now()\n return context['now']\n\n\ndef _prepare_context(context, student=None):\n if not 'instance' in context:\n raise TagUsageError()\n instance = context['instance']\n _prepare_now(context)\n if not 'content' in context:\n context['content'] = CachedContent(instance)\n def points(user, key):\n if not key in context:\n context[key] = CachedPoints(instance, user, context['content'])\n return context[key]\n if student:\n return points(student, 'studentpoints')\n return points(context['request'].user, 'points')\n\n\ndef _get_toc(context, student=None):\n points = _prepare_context(context, student)\n context = context.flatten()\n context.update({\n 'modules': points.modules_flatted(),\n 'categories': points.categories(),\n 'total': points.total(),\n 'is_course_staff': context.get('is_course_staff', False),\n })\n return context\n\n\ndef _is_accessible(context, entry, t):\n if t and t > _prepare_now(context):\n return False\n if entry.get('requirements'):\n points = _prepare_context(context)\n module = CourseModule.objects.get(id=entry['id'])\n return module.are_requirements_passed(points)\n return True\n\n\[email protected]_tag(\"exercise/_user_results.html\", takes_context=True)\ndef user_results(context, student=None):\n values = _get_toc(context, student)\n values['total_json'] = json.dumps(values['total'])\n if student:\n values['is_course_staff'] = False\n return values\n\n\[email protected]_tag(\"exercise/_user_toc.html\", takes_context=True)\ndef user_toc(context, student=None):\n return _get_toc(context, student)\n\n\[email protected]_tag(\"exercise/_user_last.html\", takes_context=True)\ndef user_last(context):\n user = 
context['request'].user\n points = _prepare_context(context)\n if user.is_authenticated:\n last = LearningObjectDisplay.objects.filter(\n profile=user.userprofile,\n learning_object__status=LearningObject.STATUS.READY,\n learning_object__course_module__course_instance=context['instance'],\n ).select_related('learning_object').order_by('-timestamp').first()\n if last:\n entry,_,_,_ = points.find(last.learning_object)\n return {\n 'last': entry,\n 'last_time': last.timestamp,\n }\n return {\n 'begin': points.begin(),\n 'instance': context['instance'],\n }\n\n\[email protected]_tag(\"exercise/_category_points.html\", takes_context=True)\ndef category_points(context, student=None):\n return _get_toc(context, student)\n\n\[email protected]_tag(\"exercise/_submission_list.html\", takes_context=True)\ndef latest_submissions(context):\n submissions = context[\"profile\"].submissions \\\n .filter(exercise__course_module__course_instance=context[\"instance\"]) \\\n .order_by(\"-id\")[:10]\n return {\n \"submissions\": submissions,\n \"title\": _(\"Latest submissions\"),\n \"empty\": _(\"No submissions for this course.\"),\n }\n\n\[email protected]\ndef max_submissions(exercise, user_profile):\n return exercise.max_submissions_for_student(user_profile)\n\n\[email protected]\ndef percent(decimal):\n return int(decimal * 100)\n\n\[email protected]\ndef submission_status(status):\n return Submission.STATUS[status]\n\n\ndef _points_data(obj, classes=None):\n if isinstance(obj, UserExerciseSummary):\n exercise = obj.exercise\n data = {\n 'points': obj.get_points(),\n 'max': exercise.max_points,\n 'difficulty': exercise.difficulty,\n 'required': exercise.points_to_pass,\n 'confirm_the_level': exercise.category.confirm_the_level,\n 'missing_points': obj.is_missing_points(),\n 'passed': obj.is_passed(),\n 'full_score': obj.is_full_points(),\n 'submitted': obj.is_submitted(),\n 'graded': obj.is_graded(),\n 'official': not obj.is_unofficial(),\n 'exercise_page': True,\n }\n elif isinstance(obj, Submission):\n exercise = obj.exercise\n data = {\n 'points': obj.grade,\n 'max': exercise.max_points,\n 'difficulty': exercise.difficulty,\n 'required': exercise.points_to_pass,\n 'confirm_the_level': exercise.category.confirm_the_level,\n 'missing_points': obj.grade < exercise.points_to_pass,\n 'passed': obj.grade >= exercise.points_to_pass,\n 'full_score': obj.grade >= exercise.max_points,\n 'submitted': True,\n 'graded': obj.is_graded,\n 'official': obj.status != Submission.STATUS.UNOFFICIAL,\n }\n if not obj.is_graded and (\n not exercise.category.confirm_the_level\n or obj.status != Submission.STATUS.WAITING\n ):\n data['status'] = obj.status\n else:\n points = obj.get('points', 0)\n max_points = obj.get('max_points', 0)\n required = obj.get('points_to_pass', 0)\n data = {\n 'points': points,\n 'max': max_points,\n 'difficulty': obj.get('difficulty', ''),\n 'required': required,\n 'confirm_the_level': obj.get('confirm_the_level', False),\n 'missing_points': points < required,\n 'passed': obj.get('passed', True),\n 'full_score': points >= max_points,\n 'submitted': obj.get('submission_count', 0) > 0,\n 'graded': obj.get('graded', True),\n 'status': obj.get('submission_status', False),\n 'unconfirmed': obj.get('unconfirmed', False),\n 'official': not obj.get('unofficial', False),\n 'confirmable_points': obj.get('confirmable_points', False),\n }\n percentage = 0\n required_percentage = None\n if data['max'] > 0:\n percentage = int(round(100.0 * data['points'] / data['max']))\n if data['required']:\n 
required_percentage = int(round(100.0 * data['required'] / data['max']))\n data.update({\n 'classes': classes,\n 'percentage': percentage,\n 'required_percentage': required_percentage,\n })\n return data\n\n\[email protected]_tag(\"exercise/_points_progress.html\")\ndef points_progress(obj):\n return _points_data(obj)\n\n\[email protected]_tag(\"exercise/_points_badge.html\")\ndef points_badge(obj, classes=None):\n return _points_data(obj, classes)\n\n\[email protected]_tag(takes_context=True)\ndef max_group_size(context):\n points = _prepare_context(context)\n return points.total()['max_group_size']\n\n\[email protected]_tag(takes_context=True)\ndef min_group_size(context):\n points = _prepare_context(context)\n return points.total()['min_group_size']\n\n\[email protected]_tag(takes_context=True)\ndef module_accessible(context, entry):\n t = entry.get('reading_opening_time', entry.get('opening_time'))\n return _is_accessible(context, entry, t)\n\n\[email protected]_tag(takes_context=True)\ndef exercise_accessible(context, entry):\n t = entry.get('opening_time')\n return _is_accessible(context, entry, t)\n\n\[email protected]_tag\ndef get_grading_errors(submission):\n if not isinstance(submission.grading_data, dict):\n return \"\"\n grading_data = submission.grading_data.get('grading_data')\n if not isinstance(grading_data, str):\n return \"\"\n if grading_data.startswith('<pre>'):\n return grading_data[5:-6]\n try:\n return json.loads(grading_data).get('errors', \"\")\n except (AttributeError, TypeError, ValueError):\n return \"\"\n\n\[email protected]_tag(\"exercise/_text_stats.html\", takes_context=True)\ndef exercise_text_stats(context, exercise):\n if not 'instance' in context:\n raise TagUsageError()\n instance = context['instance']\n\n if not 'student_count' in context:\n context['student_count'] = instance.students.count()\n total = context['student_count']\n\n if isinstance(exercise, int):\n num = instance.students.filter(submissions__exercise_id=exercise).distinct().count()\n else:\n num = exercise.number_of_submitters() if exercise else 0\n return {\n \"number\": num,\n \"percentage\": int(100 * num / total) if total else 0,\n }\n\n\[email protected]_tag\ndef get_format_info(format):\n format_infos = {\n 'json' : {\n 'name': 'json',\n 'verbose_name': 'JSON',\n },\n 'csv': {\n 'name': 'csv',\n 'verbose_name': 'CSV',\n },\n 'excel.csv': {\n 'name': 'excel.csv',\n 'verbose_name': _('Excel compatible CSV'),\n },\n }\n try:\n return format_infos[format]\n except KeyError as e:\n raise RuntimeError('Invalid format: \\'{}\\''.format(format)) from e\n\n\[email protected]_tag\ndef get_format_info_list(formats):\n return [get_format_info(format) for format in formats.split()]\n\n", "path": "exercise/templatetags/exercise.py"}], "after_files": [{"content": "import json\nfrom django import template\nfrom django.db.models import Max, Min\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom course.models import CourseModule\nfrom lib.errors import TagUsageError\nfrom ..cache.content import CachedContent\nfrom ..cache.points import CachedPoints\nfrom ..exercise_summary import UserExerciseSummary\nfrom ..models import LearningObjectDisplay, LearningObject, Submission, BaseExercise\n\n\nregister = template.Library()\n\n\ndef _prepare_now(context):\n if not 'now' in context:\n context['now'] = timezone.now()\n return context['now']\n\n\ndef _prepare_context(context, student=None):\n if not 
'instance' in context:\n raise TagUsageError()\n instance = context['instance']\n _prepare_now(context)\n if not 'content' in context:\n context['content'] = CachedContent(instance)\n def points(user, key):\n if not key in context:\n context[key] = CachedPoints(instance, user, context['content'])\n return context[key]\n if student:\n return points(student, 'studentpoints')\n return points(context['request'].user, 'points')\n\n\ndef _get_toc(context, student=None):\n points = _prepare_context(context, student)\n context = context.flatten()\n context.update({\n 'modules': points.modules_flatted(),\n 'categories': points.categories(),\n 'total': points.total(),\n 'is_course_staff': context.get('is_course_staff', False),\n })\n return context\n\n\ndef _is_accessible(context, entry, t):\n if t and t > _prepare_now(context):\n return False\n if entry.get('requirements'):\n points = _prepare_context(context)\n module = CourseModule.objects.get(id=entry['id'])\n return module.are_requirements_passed(points)\n return True\n\n\[email protected]_tag(\"exercise/_user_results.html\", takes_context=True)\ndef user_results(context, student=None):\n values = _get_toc(context, student)\n values['total_json'] = json.dumps(values['total'])\n if student:\n values['is_course_staff'] = False\n return values\n\n\[email protected]_tag(\"exercise/_user_toc.html\", takes_context=True)\ndef user_toc(context, student=None):\n return _get_toc(context, student)\n\n\[email protected]_tag(\"exercise/_user_last.html\", takes_context=True)\ndef user_last(context):\n user = context['request'].user\n points = _prepare_context(context)\n if user.is_authenticated:\n last = LearningObjectDisplay.objects.filter(\n profile=user.userprofile,\n learning_object__status=LearningObject.STATUS.READY,\n learning_object__course_module__course_instance=context['instance'],\n ).select_related('learning_object').order_by('-timestamp').first()\n if last:\n entry,_,_,_ = points.find(last.learning_object)\n return {\n 'last': entry,\n 'last_time': last.timestamp,\n }\n return {\n 'begin': points.begin(),\n 'instance': context['instance'],\n }\n\n\[email protected]_tag(\"exercise/_category_points.html\", takes_context=True)\ndef category_points(context, student=None):\n return _get_toc(context, student)\n\n\[email protected]_tag(\"exercise/_submission_list.html\", takes_context=True)\ndef latest_submissions(context):\n submissions = context[\"profile\"].submissions \\\n .filter(exercise__course_module__course_instance=context[\"instance\"]) \\\n .order_by(\"-id\")[:10]\n return {\n \"submissions\": submissions,\n \"title\": _(\"Latest submissions\"),\n \"empty\": _(\"No submissions for this course.\"),\n }\n\n\[email protected]\ndef max_submissions(exercise, user_profile):\n return exercise.max_submissions_for_student(user_profile)\n\n\[email protected]\ndef percent(decimal):\n return int(decimal * 100)\n\n\[email protected]\ndef submission_status(status):\n return Submission.STATUS[status]\n\n\ndef _points_data(obj, classes=None):\n if isinstance(obj, UserExerciseSummary):\n exercise = obj.exercise\n data = {\n 'points': obj.get_points(),\n 'max': exercise.max_points,\n 'difficulty': exercise.difficulty,\n 'required': exercise.points_to_pass,\n 'confirm_the_level': exercise.category.confirm_the_level,\n 'missing_points': obj.is_missing_points(),\n 'passed': obj.is_passed(),\n 'full_score': obj.is_full_points(),\n 'submitted': obj.is_submitted(),\n 'graded': obj.is_graded(),\n 'official': not obj.is_unofficial(),\n 'exercise_page': True,\n }\n 
elif isinstance(obj, Submission):\n exercise = obj.exercise\n data = {\n 'points': obj.grade,\n 'max': exercise.max_points,\n 'difficulty': exercise.difficulty,\n 'required': exercise.points_to_pass,\n 'confirm_the_level': exercise.category.confirm_the_level,\n 'missing_points': obj.grade < exercise.points_to_pass,\n 'passed': obj.grade >= exercise.points_to_pass,\n 'full_score': obj.grade >= exercise.max_points,\n 'submitted': True,\n 'graded': obj.is_graded,\n 'official': obj.status != Submission.STATUS.UNOFFICIAL,\n }\n if not obj.is_graded and (\n not exercise.category.confirm_the_level\n or obj.status != Submission.STATUS.WAITING\n ):\n data['status'] = obj.status\n else:\n points = obj.get('points', 0)\n max_points = obj.get('max_points', 0)\n required = obj.get('points_to_pass', 0)\n data = {\n 'points': points,\n 'max': max_points,\n 'difficulty': obj.get('difficulty', ''),\n 'required': required,\n 'confirm_the_level': obj.get('confirm_the_level', False),\n 'missing_points': points < required,\n 'passed': obj.get('passed', True),\n 'full_score': points >= max_points,\n 'submitted': obj.get('submission_count', 0) > 0,\n 'graded': obj.get('graded', True),\n 'status': obj.get('submission_status', False),\n 'unconfirmed': obj.get('unconfirmed', False),\n 'official': not obj.get('unofficial', False),\n 'confirmable_points': obj.get('confirmable_points', False),\n }\n percentage = 0\n required_percentage = None\n if data['max'] > 0:\n percentage = int(round(100.0 * data['points'] / data['max']))\n if data['required']:\n required_percentage = int(round(100.0 * data['required'] / data['max']))\n data.update({\n 'classes': classes,\n 'percentage': percentage,\n 'required_percentage': required_percentage,\n })\n return data\n\n\[email protected]_tag(\"exercise/_points_progress.html\")\ndef points_progress(obj):\n return _points_data(obj)\n\n\[email protected]_tag(\"exercise/_points_badge.html\")\ndef points_badge(obj, classes=None):\n return _points_data(obj, classes)\n\n\[email protected]_tag(takes_context=True)\ndef max_group_size(context):\n points = _prepare_context(context)\n return points.total()['max_group_size']\n\n\[email protected]_tag(takes_context=True)\ndef min_group_size(context):\n points = _prepare_context(context)\n return points.total()['min_group_size']\n\n\[email protected]_tag(takes_context=True)\ndef module_accessible(context, entry):\n t = entry.get('reading_opening_time')\n if t:\n return _is_accessible(context, entry, t)\n else:\n return exercise_accessible(context, entry)\n\n\[email protected]_tag(takes_context=True)\ndef exercise_accessible(context, entry):\n t = entry.get('opening_time')\n return _is_accessible(context, entry, t)\n\n\[email protected]_tag\ndef get_grading_errors(submission):\n if not isinstance(submission.grading_data, dict):\n return \"\"\n grading_data = submission.grading_data.get('grading_data')\n if not isinstance(grading_data, str):\n return \"\"\n if grading_data.startswith('<pre>'):\n return grading_data[5:-6]\n try:\n return json.loads(grading_data).get('errors', \"\")\n except (AttributeError, TypeError, ValueError):\n return \"\"\n\n\[email protected]_tag(\"exercise/_text_stats.html\", takes_context=True)\ndef exercise_text_stats(context, exercise):\n if not 'instance' in context:\n raise TagUsageError()\n instance = context['instance']\n\n if not 'student_count' in context:\n context['student_count'] = instance.students.count()\n total = context['student_count']\n\n if isinstance(exercise, int):\n num = 
instance.students.filter(submissions__exercise_id=exercise).distinct().count()\n else:\n num = exercise.number_of_submitters() if exercise else 0\n return {\n \"number\": num,\n \"percentage\": int(100 * num / total) if total else 0,\n }\n\n\[email protected]_tag\ndef get_format_info(format):\n format_infos = {\n 'json' : {\n 'name': 'json',\n 'verbose_name': 'JSON',\n },\n 'csv': {\n 'name': 'csv',\n 'verbose_name': 'CSV',\n },\n 'excel.csv': {\n 'name': 'excel.csv',\n 'verbose_name': _('Excel compatible CSV'),\n },\n }\n try:\n return format_infos[format]\n except KeyError as e:\n raise RuntimeError('Invalid format: \\'{}\\''.format(format)) from e\n\n\[email protected]_tag\ndef get_format_info_list(formats):\n return [get_format_info(format) for format in formats.split()]\n", "path": "exercise/templatetags/exercise.py"}]}
3219
208
gh_patches_debug_35
rasdani/github-patches
git_diff
StackStorm__st2-5104
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add version string to st2tests to make it installable Prior to this change, this will fail: cd st2tests/st2tests pip install . After this change that command successfully installs the `st2tests` package. This will also work for installing via GitHub as in: pip install -e git+https://github.com/StackStorm/[email protected]#egg=st2tests&subdirectory=st2tests The original request in #2574 is to get st2tests onto PyPI, and I'm not sure if this will accomplish that request, but this is a good first step. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `st2tests/st2tests/__init__.py` Content: ``` 1 # Copyright 2020 The StackStorm Authors. 2 # Copyright 2019 Extreme Networks, Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from __future__ import absolute_import 17 18 from st2tests.base import EventletTestCase 19 from st2tests.base import DbTestCase 20 from st2tests.base import ExecutionDbTestCase 21 from st2tests.base import DbModelTestCase 22 from st2tests.base import WorkflowTestCase 23 24 25 __all__ = [ 26 'EventletTestCase', 27 'DbTestCase', 28 'ExecutionDbTestCase', 29 'DbModelTestCase', 30 'WorkflowTestCase' 31 ] 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/st2tests/st2tests/__init__.py b/st2tests/st2tests/__init__.py --- a/st2tests/st2tests/__init__.py +++ b/st2tests/st2tests/__init__.py @@ -29,3 +29,5 @@ 'DbModelTestCase', 'WorkflowTestCase' ] + +__version__ = '3.3dev'
{"golden_diff": "diff --git a/st2tests/st2tests/__init__.py b/st2tests/st2tests/__init__.py\n--- a/st2tests/st2tests/__init__.py\n+++ b/st2tests/st2tests/__init__.py\n@@ -29,3 +29,5 @@\n 'DbModelTestCase',\n 'WorkflowTestCase'\n ]\n+\n+__version__ = '3.3dev'\n", "issue": "Add version string to st2tests to make it installable\nPrior to this change, this will fail:\r\n\r\n cd st2tests/st2tests\r\n pip install .\r\n\r\nAfter this change that command successfully installs the `st2tests` package. This will also work for installing via GitHub as in:\r\n\r\n pip install -e git+https://github.com/StackStorm/[email protected]#egg=st2tests&subdirectory=st2tests\r\n\r\nThe original request in #2574 is to get st2tests onto PyPI, and I'm not sure if this will accomplish that request, but this is a good first step.\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2tests.base import EventletTestCase\nfrom st2tests.base import DbTestCase\nfrom st2tests.base import ExecutionDbTestCase\nfrom st2tests.base import DbModelTestCase\nfrom st2tests.base import WorkflowTestCase\n\n\n__all__ = [\n 'EventletTestCase',\n 'DbTestCase',\n 'ExecutionDbTestCase',\n 'DbModelTestCase',\n 'WorkflowTestCase'\n]\n", "path": "st2tests/st2tests/__init__.py"}], "after_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2tests.base import EventletTestCase\nfrom st2tests.base import DbTestCase\nfrom st2tests.base import ExecutionDbTestCase\nfrom st2tests.base import DbModelTestCase\nfrom st2tests.base import WorkflowTestCase\n\n\n__all__ = [\n 'EventletTestCase',\n 'DbTestCase',\n 'ExecutionDbTestCase',\n 'DbModelTestCase',\n 'WorkflowTestCase'\n]\n\n__version__ = '3.3dev'\n", "path": "st2tests/st2tests/__init__.py"}]}
696
88
gh_patches_debug_24126
rasdani/github-patches
git_diff
kartoza__prj.app-279
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sponsors for a release need to be filtered for the project and the date There is a serious bug with the sponsors whereby if you set the release date for a release in one project, it will show on the release page sponsors from any other project. <img width="1387" alt="screen shot 2016-03-31 at 13 36 43" src="https://cloud.githubusercontent.com/assets/178003/14167536/1eff7c90-f746-11e5-9a2e-ed648c9d22d6.png"> Note that you need to have a date for the version otherwise it does not replicate: <img width="1210" alt="screen shot 2016-03-31 at 13 37 21" src="https://cloud.githubusercontent.com/assets/178003/14167565/50e68c58-f746-11e5-96b6-e59970bf6c14.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django_project/changes/models/version.py` Content: ``` 1 # coding=utf-8 2 from django.core.urlresolvers import reverse 3 # from django.utils.text import slugify 4 from common.utilities import version_slugify 5 import os 6 import logging 7 from core.settings.contrib import STOP_WORDS 8 from django.conf.global_settings import MEDIA_ROOT 9 from django.db import models 10 from .entry import Entry 11 from .sponsorship_period import SponsorshipPeriod 12 from django.contrib.auth.models import User 13 from django.utils.translation import ugettext_lazy as _ 14 15 logger = logging.getLogger(__name__) 16 17 18 class ApprovedVersionManager(models.Manager): 19 """Custom version manager that shows only approved records.""" 20 21 def get_queryset(self): 22 """Query set generator""" 23 return super( 24 ApprovedVersionManager, self).get_queryset().filter( 25 approved=True) 26 27 28 class UnapprovedVersionManager(models.Manager): 29 """Custom version manager that shows only unapproved records.""" 30 31 def get_queryset(self): 32 """Query set generator""" 33 return super( 34 UnapprovedVersionManager, self).get_queryset().filter( 35 approved=False) 36 37 38 # noinspection PyUnresolvedReferences 39 class Version(models.Model): 40 """A version model that the changelog is associated with..""" 41 42 name = models.CharField( 43 help_text='Name of this release e.g. 1.0.1.', 44 max_length=255, 45 null=False, 46 blank=False, 47 unique=False) 48 49 padded_version = models.CharField( 50 help_text=( 51 'Numeric version for this release e.g. 001000001 for 1.0.1 ' 52 'calculated by zero padding each component of maj/minor/bugfix ' 53 'elements from name.'), 54 max_length=9, 55 null=False, 56 blank=True, 57 unique=False) 58 59 approved = models.BooleanField( 60 help_text=( 61 'Whether this version has been approved for use by the ' 62 'project owner.'), 63 default=False) 64 65 image_file = models.ImageField( 66 help_text=( 67 'An optional image for this version e.g. a splashscreen. ' 68 'Most browsers support dragging the image directly on to the ' 69 '"Choose File" button above.'), 70 upload_to=os.path.join(MEDIA_ROOT, 'images/projects'), 71 blank=True) 72 73 description = models.TextField( 74 null=True, 75 blank=True, 76 help_text='Describe the new version. 
Markdown is supported.') 77 78 release_date = models.DateField( 79 _('Release date (yyyy-mm-dd)'), 80 help_text='Date of official release', 81 null=True, 82 blank=True) 83 84 author = models.ForeignKey(User) 85 slug = models.SlugField() 86 project = models.ForeignKey('base.Project') 87 objects = models.Manager() 88 approved_objects = ApprovedVersionManager() 89 unapproved_objects = UnapprovedVersionManager() 90 91 # noinspection PyClassicStyleClass 92 class Meta: 93 """Meta options for the version class.""" 94 unique_together = ( 95 ('name', 'project'), 96 ('slug', 'project'), 97 ) 98 app_label = 'changes' 99 # ordering = ['-datetime_created'] 100 101 def save(self, *args, **kwargs): 102 if not self.pk: 103 words = self.name.split() 104 filtered_words = [t for t in words if t.lower() not in STOP_WORDS] 105 new_list = ' '.join(filtered_words) 106 self.slug = version_slugify(new_list)[:50] 107 self.padded_version = self.pad_name(self.name) 108 super(Version, self).save(*args, **kwargs) 109 110 def pad_name(self, version): 111 """Create a 0 padded version of the version name. 112 113 e.g. input: 2.10.1 114 e.g. output: 002010100 115 116 This will ensure we have sortable version names. 117 118 :param version: A text version in the form 0.0.0 - if the version is 119 not in this form, we return the version unaltered. 120 :type version: str 121 122 :returns: Zero padded representation of the version e.g. 001010100 123 :rtype: str 124 125 """ 126 tokens = version.split('.') 127 if len(tokens) != 3: 128 return version 129 result = '' 130 for token in tokens: 131 result += token.zfill(3) 132 return result 133 134 def __unicode__(self): 135 return u'%s : %s' % (self.project.name, self.name) 136 137 def get_absolute_url(self): 138 return reverse('version-detail', kwargs={ 139 'slug': self.slug, 140 'project_slug': self.project.slug 141 }) 142 143 def entries(self): 144 """Get the entries for this version.""" 145 qs = Entry.objects.filter(version=self).order_by('category') 146 return qs 147 148 def _entries_for_category(self, category): 149 """All entries for this version and filtered by the given category. 150 151 :param category: Category to filter by. 152 :type category: Category 153 154 .. note:: only approved entries returned. 155 """ 156 qs = Entry.objects.filter(version=self, 157 category=category, 158 approved=True) 159 return qs 160 161 def categories(self): 162 """Get a list of categories where there are one or more entries. 163 164 Example use in template:: 165 {% for row in version.categories %} 166 <h2 class="text-muted">{{ row.category.name }}</h2> 167 <ul> 168 {% for entry in row.entries %} 169 <li>{{ entry.name }}</li> 170 {% endfor %} 171 </ul> 172 {% endfor %} 173 """ 174 qs = self.entries() 175 used = [] 176 categories = [] 177 for entry in qs: 178 category = entry.category 179 if category not in used: 180 row = { 181 'category': category, 182 'entries': self._entries_for_category(category) 183 } 184 categories.append(row) 185 used.append(category) 186 return categories 187 188 def sponsors(self): 189 """Return a list of sponsors current at time of this version release. 190 191 :returns: A list of SponsorPeriod objects whose release date coincides 192 with the version release date. Only approved sponsors are returned. 193 Returns None if the release date (which is optional) is not set. 
194 :rtype: Queryset, None 195 """ 196 if self.release_date is None: 197 return None 198 sponsors = SponsorshipPeriod.approved_objects.filter( 199 end_date__gte=self.release_date).filter( 200 start_date__lte=self.release_date).order_by( 201 'start_date').order_by( 202 '-sponsorship_level__value') 203 return sponsors 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django_project/changes/models/version.py b/django_project/changes/models/version.py --- a/django_project/changes/models/version.py +++ b/django_project/changes/models/version.py @@ -188,8 +188,9 @@ def sponsors(self): """Return a list of sponsors current at time of this version release. - :returns: A list of SponsorPeriod objects whose release date coincides - with the version release date. Only approved sponsors are returned. + :returns: A list of SponsorPeriod objects for current project + whose release date coincides with the version release date. + Only approved sponsors are returned. Returns None if the release date (which is optional) is not set. :rtype: Queryset, None """ @@ -197,7 +198,8 @@ return None sponsors = SponsorshipPeriod.approved_objects.filter( end_date__gte=self.release_date).filter( - start_date__lte=self.release_date).order_by( + start_date__lte=self.release_date).filter( + project=self.project).order_by( 'start_date').order_by( '-sponsorship_level__value') return sponsors
{"golden_diff": "diff --git a/django_project/changes/models/version.py b/django_project/changes/models/version.py\n--- a/django_project/changes/models/version.py\n+++ b/django_project/changes/models/version.py\n@@ -188,8 +188,9 @@\n def sponsors(self):\n \"\"\"Return a list of sponsors current at time of this version release.\n \n- :returns: A list of SponsorPeriod objects whose release date coincides\n- with the version release date. Only approved sponsors are returned.\n+ :returns: A list of SponsorPeriod objects for current project\n+ whose release date coincides with the version release date.\n+ Only approved sponsors are returned.\n Returns None if the release date (which is optional) is not set.\n :rtype: Queryset, None\n \"\"\"\n@@ -197,7 +198,8 @@\n return None\n sponsors = SponsorshipPeriod.approved_objects.filter(\n end_date__gte=self.release_date).filter(\n- start_date__lte=self.release_date).order_by(\n+ start_date__lte=self.release_date).filter(\n+ project=self.project).order_by(\n 'start_date').order_by(\n '-sponsorship_level__value')\n return sponsors\n", "issue": "Sponsors for a release need to be filtered for the project and the date\nThere is a serious bug with the sponsors whereby if you set the release date for a release in one project, it will show on the release page sponsors from any other project.\n\n<img width=\"1387\" alt=\"screen shot 2016-03-31 at 13 36 43\" src=\"https://cloud.githubusercontent.com/assets/178003/14167536/1eff7c90-f746-11e5-9a2e-ed648c9d22d6.png\">\n\nNote that you need to have a date for the version otherwise it does not replicate:\n\n<img width=\"1210\" alt=\"screen shot 2016-03-31 at 13 37 21\" src=\"https://cloud.githubusercontent.com/assets/178003/14167565/50e68c58-f746-11e5-96b6-e59970bf6c14.png\">\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.core.urlresolvers import reverse\n# from django.utils.text import slugify\nfrom common.utilities import version_slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom .entry import Entry\nfrom .sponsorship_period import SponsorshipPeriod\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedVersionManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedVersionManager, self).get_queryset().filter(\n approved=False)\n\n\n# noinspection PyUnresolvedReferences\nclass Version(models.Model):\n \"\"\"A version model that the changelog is associated with..\"\"\"\n\n name = models.CharField(\n help_text='Name of this release e.g. 1.0.1.',\n max_length=255,\n null=False,\n blank=False,\n unique=False)\n\n padded_version = models.CharField(\n help_text=(\n 'Numeric version for this release e.g. 
001000001 for 1.0.1 '\n 'calculated by zero padding each component of maj/minor/bugfix '\n 'elements from name.'),\n max_length=9,\n null=False,\n blank=True,\n unique=False)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this version has been approved for use by the '\n 'project owner.'),\n default=False)\n\n image_file = models.ImageField(\n help_text=(\n 'An optional image for this version e.g. a splashscreen. '\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True)\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new version. Markdown is supported.')\n\n release_date = models.DateField(\n _('Release date (yyyy-mm-dd)'),\n help_text='Date of official release',\n null=True,\n blank=True)\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedVersionManager()\n unapproved_objects = UnapprovedVersionManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('name', 'project'),\n ('slug', 'project'),\n )\n app_label = 'changes'\n # ordering = ['-datetime_created']\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = version_slugify(new_list)[:50]\n self.padded_version = self.pad_name(self.name)\n super(Version, self).save(*args, **kwargs)\n\n def pad_name(self, version):\n \"\"\"Create a 0 padded version of the version name.\n\n e.g. input: 2.10.1\n e.g. output: 002010100\n\n This will ensure we have sortable version names.\n\n :param version: A text version in the form 0.0.0 - if the version is\n not in this form, we return the version unaltered.\n :type version: str\n\n :returns: Zero padded representation of the version e.g. 001010100\n :rtype: str\n\n \"\"\"\n tokens = version.split('.')\n if len(tokens) != 3:\n return version\n result = ''\n for token in tokens:\n result += token.zfill(3)\n return result\n\n def __unicode__(self):\n return u'%s : %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n return reverse('version-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def entries(self):\n \"\"\"Get the entries for this version.\"\"\"\n qs = Entry.objects.filter(version=self).order_by('category')\n return qs\n\n def _entries_for_category(self, category):\n \"\"\"All entries for this version and filtered by the given category.\n\n :param category: Category to filter by.\n :type category: Category\n\n .. 
note:: only approved entries returned.\n \"\"\"\n qs = Entry.objects.filter(version=self,\n category=category,\n approved=True)\n return qs\n\n def categories(self):\n \"\"\"Get a list of categories where there are one or more entries.\n\n Example use in template::\n {% for row in version.categories %}\n <h2 class=\"text-muted\">{{ row.category.name }}</h2>\n <ul>\n {% for entry in row.entries %}\n <li>{{ entry.name }}</li>\n {% endfor %}\n </ul>\n {% endfor %}\n \"\"\"\n qs = self.entries()\n used = []\n categories = []\n for entry in qs:\n category = entry.category\n if category not in used:\n row = {\n 'category': category,\n 'entries': self._entries_for_category(category)\n }\n categories.append(row)\n used.append(category)\n return categories\n\n def sponsors(self):\n \"\"\"Return a list of sponsors current at time of this version release.\n\n :returns: A list of SponsorPeriod objects whose release date coincides\n with the version release date. Only approved sponsors are returned.\n Returns None if the release date (which is optional) is not set.\n :rtype: Queryset, None\n \"\"\"\n if self.release_date is None:\n return None\n sponsors = SponsorshipPeriod.approved_objects.filter(\n end_date__gte=self.release_date).filter(\n start_date__lte=self.release_date).order_by(\n 'start_date').order_by(\n '-sponsorship_level__value')\n return sponsors\n", "path": "django_project/changes/models/version.py"}], "after_files": [{"content": "# coding=utf-8\nfrom django.core.urlresolvers import reverse\n# from django.utils.text import slugify\nfrom common.utilities import version_slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom .entry import Entry\nfrom .sponsorship_period import SponsorshipPeriod\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedVersionManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedVersionManager, self).get_queryset().filter(\n approved=False)\n\n\n# noinspection PyUnresolvedReferences\nclass Version(models.Model):\n \"\"\"A version model that the changelog is associated with..\"\"\"\n\n name = models.CharField(\n help_text='Name of this release e.g. 1.0.1.',\n max_length=255,\n null=False,\n blank=False,\n unique=False)\n\n padded_version = models.CharField(\n help_text=(\n 'Numeric version for this release e.g. 001000001 for 1.0.1 '\n 'calculated by zero padding each component of maj/minor/bugfix '\n 'elements from name.'),\n max_length=9,\n null=False,\n blank=True,\n unique=False)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this version has been approved for use by the '\n 'project owner.'),\n default=False)\n\n image_file = models.ImageField(\n help_text=(\n 'An optional image for this version e.g. a splashscreen. 
'\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True)\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new version. Markdown is supported.')\n\n release_date = models.DateField(\n _('Release date (yyyy-mm-dd)'),\n help_text='Date of official release',\n null=True,\n blank=True)\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedVersionManager()\n unapproved_objects = UnapprovedVersionManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('name', 'project'),\n ('slug', 'project'),\n )\n app_label = 'changes'\n # ordering = ['-datetime_created']\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = version_slugify(new_list)[:50]\n self.padded_version = self.pad_name(self.name)\n super(Version, self).save(*args, **kwargs)\n\n def pad_name(self, version):\n \"\"\"Create a 0 padded version of the version name.\n\n e.g. input: 2.10.1\n e.g. output: 002010100\n\n This will ensure we have sortable version names.\n\n :param version: A text version in the form 0.0.0 - if the version is\n not in this form, we return the version unaltered.\n :type version: str\n\n :returns: Zero padded representation of the version e.g. 001010100\n :rtype: str\n\n \"\"\"\n tokens = version.split('.')\n if len(tokens) != 3:\n return version\n result = ''\n for token in tokens:\n result += token.zfill(3)\n return result\n\n def __unicode__(self):\n return u'%s : %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n return reverse('version-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def entries(self):\n \"\"\"Get the entries for this version.\"\"\"\n qs = Entry.objects.filter(version=self).order_by('category')\n return qs\n\n def _entries_for_category(self, category):\n \"\"\"All entries for this version and filtered by the given category.\n\n :param category: Category to filter by.\n :type category: Category\n\n .. 
note:: only approved entries returned.\n \"\"\"\n qs = Entry.objects.filter(version=self,\n category=category,\n approved=True)\n return qs\n\n def categories(self):\n \"\"\"Get a list of categories where there are one or more entries.\n\n Example use in template::\n {% for row in version.categories %}\n <h2 class=\"text-muted\">{{ row.category.name }}</h2>\n <ul>\n {% for entry in row.entries %}\n <li>{{ entry.name }}</li>\n {% endfor %}\n </ul>\n {% endfor %}\n \"\"\"\n qs = self.entries()\n used = []\n categories = []\n for entry in qs:\n category = entry.category\n if category not in used:\n row = {\n 'category': category,\n 'entries': self._entries_for_category(category)\n }\n categories.append(row)\n used.append(category)\n return categories\n\n def sponsors(self):\n \"\"\"Return a list of sponsors current at time of this version release.\n\n :returns: A list of SponsorPeriod objects for current project\n whose release date coincides with the version release date.\n Only approved sponsors are returned.\n Returns None if the release date (which is optional) is not set.\n :rtype: Queryset, None\n \"\"\"\n if self.release_date is None:\n return None\n sponsors = SponsorshipPeriod.approved_objects.filter(\n end_date__gte=self.release_date).filter(\n start_date__lte=self.release_date).filter(\n project=self.project).order_by(\n 'start_date').order_by(\n '-sponsorship_level__value')\n return sponsors\n", "path": "django_project/changes/models/version.py"}]}
2412
268