problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_32962 | rasdani/github-patches | git_diff | microsoft__torchgeo-250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
torchgeo.models.RFC should have a seed argument
The parameters of this model are randomly initialized, but it is not trainable. To have repeatable results with this we need a seed parameter so we can guarantee that parameter init happens the same.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/models/rcf.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Implementation of a random convolutional feature projection model."""
5
6 from typing import cast
7
8 import torch
9 import torch.nn.functional as F
10 from torch import Tensor
11 from torch.nn.modules import Conv2d, Module
12
13 Module.__module__ = "torch.nn"
14 Conv2d.__module__ = "torch.nn"
15
16
17 class RCF(Module):
18 """This model extracts random convolutional features (RCFs) from its input.
19
20 RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks
21 (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z.
22
23 .. note::
24
25 This Module is *not* trainable. It is only used as a feature extractor.
26 """
27
28 def __init__(
29 self,
30 in_channels: int = 4,
31 features: int = 16,
32 kernel_size: int = 3,
33 bias: float = -1.0,
34 ) -> None:
35 """Initializes the RCF model.
36
37 This is a static model that serves to extract fixed length feature vectors from
38 input patches.
39
40 Args:
41 in_channels: number of input channels
42 features: number of features to compute, must be divisible by 2
43 kernel_size: size of the kernel used to compute the RCFs
44 bias: bias of the convolutional layer
45 """
46 super().__init__()
47
48 assert features % 2 == 0
49
50 # We register the weight and bias tensors as "buffers". This does two things:
51 # makes them behave correctly when we call .to(...) on the module, and makes
52 # them explicitely _not_ Parameters of the model (which might get updated) if
53 # a user tries to train with this model.
54 self.register_buffer(
55 "weights",
56 torch.randn(
57 features // 2,
58 in_channels,
59 kernel_size,
60 kernel_size,
61 requires_grad=False,
62 ),
63 )
64 self.register_buffer(
65 "biases",
66 torch.zeros( # type: ignore[attr-defined]
67 features // 2, requires_grad=False
68 )
69 + bias,
70 )
71
72 def forward(self, x: Tensor) -> Tensor:
73 """Forward pass of the RCF model.
74
75 Args:
76 x: a tensor with shape (B, C, H, W)
77
78 Returns:
79 a tensor of size (B, ``self.num_features``)
80 """
81 x1a = F.relu(
82 F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),
83 inplace=True,
84 )
85 x1b = F.relu(
86 -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),
87 inplace=False,
88 )
89
90 x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze()
91 x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze()
92
93 if len(x1a.shape) == 1: # case where we passed a single input
94 output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined]
95 return cast(Tensor, output)
96 else: # case where we passed a batch of > 1 inputs
97 assert len(x1a.shape) == 2
98 output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined]
99 return cast(Tensor, output)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchgeo/models/rcf.py b/torchgeo/models/rcf.py
--- a/torchgeo/models/rcf.py
+++ b/torchgeo/models/rcf.py
@@ -3,7 +3,7 @@
"""Implementation of a random convolutional feature projection model."""
-from typing import cast
+from typing import Optional, cast
import torch
import torch.nn.functional as F
@@ -31,6 +31,7 @@
features: int = 16,
kernel_size: int = 3,
bias: float = -1.0,
+ seed: Optional[int] = None,
) -> None:
"""Initializes the RCF model.
@@ -42,11 +43,19 @@
features: number of features to compute, must be divisible by 2
kernel_size: size of the kernel used to compute the RCFs
bias: bias of the convolutional layer
+ seed: random seed used to initialize the convolutional layer
"""
super().__init__()
assert features % 2 == 0
+ if seed is None:
+ generator = None
+ else:
+ generator = torch.Generator().manual_seed( # type: ignore[attr-defined]
+ seed
+ )
+
# We register the weight and bias tensors as "buffers". This does two things:
# makes them behave correctly when we call .to(...) on the module, and makes
# them explicitely _not_ Parameters of the model (which might get updated) if
@@ -59,6 +68,7 @@
kernel_size,
kernel_size,
requires_grad=False,
+ generator=generator,
),
)
self.register_buffer(
| {"golden_diff": "diff --git a/torchgeo/models/rcf.py b/torchgeo/models/rcf.py\n--- a/torchgeo/models/rcf.py\n+++ b/torchgeo/models/rcf.py\n@@ -3,7 +3,7 @@\n \n \"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n \n-from typing import cast\n+from typing import Optional, cast\n \n import torch\n import torch.nn.functional as F\n@@ -31,6 +31,7 @@\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n+ seed: Optional[int] = None,\n ) -> None:\n \"\"\"Initializes the RCF model.\n \n@@ -42,11 +43,19 @@\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n+ seed: random seed used to initialize the convolutional layer\n \"\"\"\n super().__init__()\n \n assert features % 2 == 0\n \n+ if seed is None:\n+ generator = None\n+ else:\n+ generator = torch.Generator().manual_seed( # type: ignore[attr-defined]\n+ seed\n+ )\n+\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n@@ -59,6 +68,7 @@\n kernel_size,\n kernel_size,\n requires_grad=False,\n+ generator=generator,\n ),\n )\n self.register_buffer(\n", "issue": "torchgeo.models.RFC should have a seed argument\nThe parameters of this model are randomly initialized, but it is not trainable. To have repeatable results with this we need a seed parameter so we can guarantee that parameter init happens the same.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n\nfrom typing import cast\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn.modules import Conv2d, Module\n\nModule.__module__ = \"torch.nn\"\nConv2d.__module__ = \"torch.nn\"\n\n\nclass RCF(Module):\n \"\"\"This model extracts random convolutional features (RCFs) from its input.\n\n RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks\n (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z.\n\n .. note::\n\n This Module is *not* trainable. It is only used as a feature extractor.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 4,\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n ) -> None:\n \"\"\"Initializes the RCF model.\n\n This is a static model that serves to extract fixed length feature vectors from\n input patches.\n\n Args:\n in_channels: number of input channels\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n \"\"\"\n super().__init__()\n\n assert features % 2 == 0\n\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) 
on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n # a user tries to train with this model.\n self.register_buffer(\n \"weights\",\n torch.randn(\n features // 2,\n in_channels,\n kernel_size,\n kernel_size,\n requires_grad=False,\n ),\n )\n self.register_buffer(\n \"biases\",\n torch.zeros( # type: ignore[attr-defined]\n features // 2, requires_grad=False\n )\n + bias,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the RCF model.\n\n Args:\n x: a tensor with shape (B, C, H, W)\n\n Returns:\n a tensor of size (B, ``self.num_features``)\n \"\"\"\n x1a = F.relu(\n F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=True,\n )\n x1b = F.relu(\n -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=False,\n )\n\n x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze()\n x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze()\n\n if len(x1a.shape) == 1: # case where we passed a single input\n output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined]\n return cast(Tensor, output)\n else: # case where we passed a batch of > 1 inputs\n assert len(x1a.shape) == 2\n output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined]\n return cast(Tensor, output)\n", "path": "torchgeo/models/rcf.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n\nfrom typing import Optional, cast\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn.modules import Conv2d, Module\n\nModule.__module__ = \"torch.nn\"\nConv2d.__module__ = \"torch.nn\"\n\n\nclass RCF(Module):\n \"\"\"This model extracts random convolutional features (RCFs) from its input.\n\n RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks\n (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z.\n\n .. note::\n\n This Module is *not* trainable. It is only used as a feature extractor.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 4,\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n seed: Optional[int] = None,\n ) -> None:\n \"\"\"Initializes the RCF model.\n\n This is a static model that serves to extract fixed length feature vectors from\n input patches.\n\n Args:\n in_channels: number of input channels\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n seed: random seed used to initialize the convolutional layer\n \"\"\"\n super().__init__()\n\n assert features % 2 == 0\n\n if seed is None:\n generator = None\n else:\n generator = torch.Generator().manual_seed( # type: ignore[attr-defined]\n seed\n )\n\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) 
on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n # a user tries to train with this model.\n self.register_buffer(\n \"weights\",\n torch.randn(\n features // 2,\n in_channels,\n kernel_size,\n kernel_size,\n requires_grad=False,\n generator=generator,\n ),\n )\n self.register_buffer(\n \"biases\",\n torch.zeros( # type: ignore[attr-defined]\n features // 2, requires_grad=False\n )\n + bias,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the RCF model.\n\n Args:\n x: a tensor with shape (B, C, H, W)\n\n Returns:\n a tensor of size (B, ``self.num_features``)\n \"\"\"\n x1a = F.relu(\n F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=True,\n )\n x1b = F.relu(\n -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=False,\n )\n\n x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze()\n x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze()\n\n if len(x1a.shape) == 1: # case where we passed a single input\n output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined]\n return cast(Tensor, output)\n else: # case where we passed a batch of > 1 inputs\n assert len(x1a.shape) == 2\n output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined]\n return cast(Tensor, output)\n", "path": "torchgeo/models/rcf.py"}]} | 1,301 | 380 |
gh_patches_debug_1893 | rasdani/github-patches | git_diff | rasterio__rasterio-778 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Copy colormap when rasters are merged
I'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap?
I have an initial pass of this change at:
https://github.com/kapadia/rasterio/tree/rio-merge-colormap
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/merge.py`
Content:
```
1 """Merge command."""
2
3 import logging
4
5 import click
6 from cligj import files_inout_arg, format_opt
7
8 from .helpers import resolve_inout
9 from . import options
10 import rasterio
11
12
13 @click.command(short_help="Merge a stack of raster datasets.")
14 @files_inout_arg
15 @options.output_opt
16 @format_opt
17 @options.bounds_opt
18 @options.resolution_opt
19 @options.nodata_opt
20 @options.force_overwrite_opt
21 @click.option('--precision', type=int, default=7,
22 help="Number of decimal places of precision in alignment of "
23 "pixels")
24 @options.creation_options
25 @click.pass_context
26 def merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,
27 precision, creation_options):
28 """Copy valid pixels from input files to an output file.
29
30 All files must have the same number of bands, data type, and
31 coordinate reference system.
32
33 Input files are merged in their listed order using the reverse
34 painter's algorithm. If the output file exists, its values will be
35 overwritten by input values.
36
37 Geospatial bounds and resolution of a new output file in the
38 units of the input file coordinate reference system may be provided
39 and are otherwise taken from the first input file.
40
41 Note: --res changed from 2 parameters in 0.25.
42
43 \b
44 --res 0.1 0.1 => --res 0.1 (square)
45 --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)
46 """
47 from rasterio.merge import merge as merge_tool
48
49 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
50
51 output, files = resolve_inout(
52 files=files, output=output, force_overwrite=force_overwrite)
53
54 with rasterio.Env(CPL_DEBUG=verbosity > 2):
55 sources = [rasterio.open(f) for f in files]
56 dest, output_transform = merge_tool(sources, bounds=bounds, res=res,
57 nodata=nodata, precision=precision)
58
59 profile = sources[0].profile
60 profile.pop('affine')
61 profile['transform'] = output_transform
62 profile['height'] = dest.shape[1]
63 profile['width'] = dest.shape[2]
64 profile['driver'] = driver
65
66 profile.update(**creation_options)
67
68 with rasterio.open(output, 'w', **profile) as dst:
69 dst.write(dest)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py
--- a/rasterio/rio/merge.py
+++ b/rasterio/rio/merge.py
@@ -67,3 +67,10 @@
with rasterio.open(output, 'w', **profile) as dst:
dst.write(dest)
+
+ # uses the colormap in the first input raster.
+ try:
+ colormap = sources[0].colormap(1)
+ dst.write_colormap(1, colormap)
+ except ValueError:
+ pass
| {"golden_diff": "diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py\n--- a/rasterio/rio/merge.py\n+++ b/rasterio/rio/merge.py\n@@ -67,3 +67,10 @@\n \n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n+\n+ # uses the colormap in the first input raster.\n+ try:\n+ colormap = sources[0].colormap(1)\n+ dst.write_colormap(1, colormap)\n+ except ValueError:\n+ pass\n", "issue": "Copy colormap when rasters are merged\nI'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap?\n\nI have an initial pass of this change at:\n\nhttps://github.com/kapadia/rasterio/tree/rio-merge-colormap\n\n", "before_files": [{"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n", "path": "rasterio/rio/merge.py"}], "after_files": [{"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . 
import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n\n # uses the colormap in the first input raster.\n try:\n colormap = sources[0].colormap(1)\n dst.write_colormap(1, colormap)\n except ValueError:\n pass\n", "path": "rasterio/rio/merge.py"}]} | 1,023 | 129 |
gh_patches_debug_36937 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`get_follow_object_pk` errors out if `obj.follow_object` is `None`
Occurs when the follow object has been deleted and the follow is not cleaned up. See https://sentry.io/organizations/grand-challenge/issues/2511041483/?project=303639&query=is%3Aunresolved
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/notifications/signals.py`
Content:
```
1 from actstream import action
2 from actstream.actions import follow
3 from actstream.models import Action, Follow, followers
4 from django.db.models.signals import post_save
5 from django.dispatch import receiver
6 from guardian.shortcuts import assign_perm
7 from machina.apps.forum_conversation.models import Post, Topic
8
9 from grandchallenge.notifications.models import Notification
10
11
12 @receiver(post_save, sender=Topic)
13 def create_topic_action(sender, *, instance, created, **_):
14 if created:
15 follow(
16 user=instance.poster,
17 obj=instance,
18 actor_only=False,
19 send_action=False,
20 )
21
22 if int(instance.type) == int(Topic.TOPIC_ANNOUNCE):
23 action.send(
24 sender=instance.poster,
25 verb="announced",
26 action_object=instance,
27 target=instance.forum,
28 context_class="info",
29 )
30 else:
31 action.send(
32 sender=instance.poster,
33 verb="posted",
34 action_object=instance,
35 target=instance.forum,
36 )
37
38
39 @receiver(post_save, sender=Post)
40 def create_post_action(sender, *, instance, created, **_):
41 if (
42 created
43 and instance.topic.posts_count != 0
44 and not instance.is_topic_head
45 ):
46 follow(
47 user=instance.poster,
48 obj=instance.topic,
49 actor_only=False,
50 send_action=False,
51 )
52
53 action.send(
54 sender=instance.poster, verb="replied to", target=instance.topic,
55 )
56
57
58 @receiver(post_save, sender=Action)
59 def create_notification(*, instance, **_):
60 if instance.target:
61 follower_group = followers(instance.target)
62 for follower in follower_group:
63 # only send notifications to followers other than the poster
64 if follower != instance.actor:
65 Notification(user=follower, action=instance).save()
66 else:
67 follower_group = followers(instance.actor)
68 for follower in follower_group:
69 # only send notifications to followers other than the poster
70 if follower != instance.actor:
71 Notification(user=follower, action=instance).save()
72
73
74 @receiver(post_save, sender=Follow)
75 def add_permissions(*, instance, created, **_):
76 if created:
77 assign_perm("change_follow", instance.user, instance)
78 assign_perm("delete_follow", instance.user, instance)
79 assign_perm("view_follow", instance.user, instance)
80
```
Path: `app/grandchallenge/forum_conversation/templatetags/forum_extras.py`
Content:
```
1 from actstream.models import Follow
2 from django import template
3 from django.contrib.contenttypes.models import ContentType
4
5 from grandchallenge.notifications.forms import FollowForm
6
7 register = template.Library()
8
9
10 @register.simple_tag
11 def get_follow_object_pk(user, follow_object):
12 object_follows_for_user = Follow.objects.filter(
13 user=user,
14 content_type=ContentType.objects.get(
15 app_label=follow_object._meta.app_label,
16 model=follow_object._meta.model_name,
17 ),
18 ).all()
19 current_follow_object = []
20 for obj in object_follows_for_user:
21 if obj.follow_object.id == follow_object.id:
22 current_follow_object = obj.pk
23 return current_follow_object
24
25
26 @register.simple_tag
27 def follow_form(*, user, object_id, content_type):
28 return FollowForm(
29 user=user,
30 initial={
31 "object_id": object_id,
32 "content_type": content_type,
33 "actor_only": False,
34 },
35 )
36
37
38 @register.simple_tag()
39 def get_content_type(follow_object):
40 ct = ContentType.objects.get(
41 app_label=follow_object._meta.app_label,
42 model=follow_object._meta.model_name,
43 )
44 return ct
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
@@ -16,10 +16,16 @@
model=follow_object._meta.model_name,
),
).all()
- current_follow_object = []
- for obj in object_follows_for_user:
- if obj.follow_object.id == follow_object.id:
- current_follow_object = obj.pk
+
+ if not object_follows_for_user:
+ current_follow_object = []
+ else:
+ current_follow_object = []
+ for obj in object_follows_for_user:
+ if not obj.follow_object:
+ continue
+ elif obj.follow_object.id == follow_object.id:
+ current_follow_object = obj.pk
return current_follow_object
@@ -37,8 +43,11 @@
@register.simple_tag()
def get_content_type(follow_object):
- ct = ContentType.objects.get(
- app_label=follow_object._meta.app_label,
- model=follow_object._meta.model_name,
- )
+ try:
+ ct = ContentType.objects.get(
+ app_label=follow_object._meta.app_label,
+ model=follow_object._meta.model_name,
+ )
+ except AttributeError:
+ ct = None
return ct
diff --git a/app/grandchallenge/notifications/signals.py b/app/grandchallenge/notifications/signals.py
--- a/app/grandchallenge/notifications/signals.py
+++ b/app/grandchallenge/notifications/signals.py
@@ -1,9 +1,11 @@
from actstream import action
from actstream.actions import follow
from actstream.models import Action, Follow, followers
-from django.db.models.signals import post_save
+from django.contrib.contenttypes.models import ContentType
+from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from guardian.shortcuts import assign_perm
+from machina.apps.forum.models import Forum
from machina.apps.forum_conversation.models import Post, Topic
from grandchallenge.notifications.models import Notification
@@ -77,3 +79,13 @@
assign_perm("change_follow", instance.user, instance)
assign_perm("delete_follow", instance.user, instance)
assign_perm("view_follow", instance.user, instance)
+
+
+@receiver(pre_delete, sender=Topic)
+@receiver(pre_delete, sender=Forum)
+@receiver(pre_delete, sender=Post)
+def clean_up_follows(*, instance, **_):
+ ct = ContentType.objects.filter(
+ app_label=instance._meta.app_label, model=instance._meta.model_name
+ ).get()
+ Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()
| {"golden_diff": "diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n@@ -16,10 +16,16 @@\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n- current_follow_object = []\r\n- for obj in object_follows_for_user:\r\n- if obj.follow_object.id == follow_object.id:\r\n- current_follow_object = obj.pk\r\n+\r\n+ if not object_follows_for_user:\r\n+ current_follow_object = []\r\n+ else:\r\n+ current_follow_object = []\r\n+ for obj in object_follows_for_user:\r\n+ if not obj.follow_object:\r\n+ continue\r\n+ elif obj.follow_object.id == follow_object.id:\r\n+ current_follow_object = obj.pk\r\n return current_follow_object\r\n \r\n \r\n@@ -37,8 +43,11 @@\n \r\n @register.simple_tag()\r\n def get_content_type(follow_object):\r\n- ct = ContentType.objects.get(\r\n- app_label=follow_object._meta.app_label,\r\n- model=follow_object._meta.model_name,\r\n- )\r\n+ try:\r\n+ ct = ContentType.objects.get(\r\n+ app_label=follow_object._meta.app_label,\r\n+ model=follow_object._meta.model_name,\r\n+ )\r\n+ except AttributeError:\r\n+ ct = None\r\n return ct\r\ndiff --git a/app/grandchallenge/notifications/signals.py b/app/grandchallenge/notifications/signals.py\n--- a/app/grandchallenge/notifications/signals.py\n+++ b/app/grandchallenge/notifications/signals.py\n@@ -1,9 +1,11 @@\n from actstream import action\n from actstream.actions import follow\n from actstream.models import Action, Follow, followers\n-from django.db.models.signals import post_save\n+from django.contrib.contenttypes.models import ContentType\n+from django.db.models.signals import post_save, pre_delete\n from django.dispatch import receiver\n from guardian.shortcuts import assign_perm\n+from machina.apps.forum.models import Forum\n from machina.apps.forum_conversation.models import Post, Topic\n \n from grandchallenge.notifications.models import Notification\n@@ -77,3 +79,13 @@\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n+\n+\n+@receiver(pre_delete, sender=Topic)\n+@receiver(pre_delete, sender=Forum)\n+@receiver(pre_delete, sender=Post)\n+def clean_up_follows(*, instance, **_):\n+ ct = ContentType.objects.filter(\n+ app_label=instance._meta.app_label, model=instance._meta.model_name\n+ ).get()\n+ Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()\n", "issue": "`get_follow_object_pk` errors out if `obj.follow_object` is `None`\nOccurs when the follow object has been deleted and the follow is not cleaned up. 
See https://sentry.io/organizations/grand-challenge/issues/2511041483/?project=303639&query=is%3Aunresolved\n", "before_files": [{"content": "from actstream import action\nfrom actstream.actions import follow\nfrom actstream.models import Action, Follow, followers\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom guardian.shortcuts import assign_perm\nfrom machina.apps.forum_conversation.models import Post, Topic\n\nfrom grandchallenge.notifications.models import Notification\n\n\n@receiver(post_save, sender=Topic)\ndef create_topic_action(sender, *, instance, created, **_):\n if created:\n follow(\n user=instance.poster,\n obj=instance,\n actor_only=False,\n send_action=False,\n )\n\n if int(instance.type) == int(Topic.TOPIC_ANNOUNCE):\n action.send(\n sender=instance.poster,\n verb=\"announced\",\n action_object=instance,\n target=instance.forum,\n context_class=\"info\",\n )\n else:\n action.send(\n sender=instance.poster,\n verb=\"posted\",\n action_object=instance,\n target=instance.forum,\n )\n\n\n@receiver(post_save, sender=Post)\ndef create_post_action(sender, *, instance, created, **_):\n if (\n created\n and instance.topic.posts_count != 0\n and not instance.is_topic_head\n ):\n follow(\n user=instance.poster,\n obj=instance.topic,\n actor_only=False,\n send_action=False,\n )\n\n action.send(\n sender=instance.poster, verb=\"replied to\", target=instance.topic,\n )\n\n\n@receiver(post_save, sender=Action)\ndef create_notification(*, instance, **_):\n if instance.target:\n follower_group = followers(instance.target)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n else:\n follower_group = followers(instance.actor)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n\n\n@receiver(post_save, sender=Follow)\ndef add_permissions(*, instance, created, **_):\n if created:\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n", "path": "app/grandchallenge/notifications/signals.py"}, {"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n return ct\r\n", "path": 
"app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}], "after_files": [{"content": "from actstream import action\nfrom actstream.actions import follow\nfrom actstream.models import Action, Follow, followers\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\nfrom guardian.shortcuts import assign_perm\nfrom machina.apps.forum.models import Forum\nfrom machina.apps.forum_conversation.models import Post, Topic\n\nfrom grandchallenge.notifications.models import Notification\n\n\n@receiver(post_save, sender=Topic)\ndef create_topic_action(sender, *, instance, created, **_):\n if created:\n follow(\n user=instance.poster,\n obj=instance,\n actor_only=False,\n send_action=False,\n )\n\n if int(instance.type) == int(Topic.TOPIC_ANNOUNCE):\n action.send(\n sender=instance.poster,\n verb=\"announced\",\n action_object=instance,\n target=instance.forum,\n context_class=\"info\",\n )\n else:\n action.send(\n sender=instance.poster,\n verb=\"posted\",\n action_object=instance,\n target=instance.forum,\n )\n\n\n@receiver(post_save, sender=Post)\ndef create_post_action(sender, *, instance, created, **_):\n if (\n created\n and instance.topic.posts_count != 0\n and not instance.is_topic_head\n ):\n follow(\n user=instance.poster,\n obj=instance.topic,\n actor_only=False,\n send_action=False,\n )\n\n action.send(\n sender=instance.poster, verb=\"replied to\", target=instance.topic,\n )\n\n\n@receiver(post_save, sender=Action)\ndef create_notification(*, instance, **_):\n if instance.target:\n follower_group = followers(instance.target)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n else:\n follower_group = followers(instance.actor)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n\n\n@receiver(post_save, sender=Follow)\ndef add_permissions(*, instance, created, **_):\n if created:\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n\n\n@receiver(pre_delete, sender=Topic)\n@receiver(pre_delete, sender=Forum)\n@receiver(pre_delete, sender=Post)\ndef clean_up_follows(*, instance, **_):\n ct = ContentType.objects.filter(\n app_label=instance._meta.app_label, model=instance._meta.model_name\n ).get()\n Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()\n", "path": "app/grandchallenge/notifications/signals.py"}, {"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n\r\n if not object_follows_for_user:\r\n current_follow_object = []\r\n else:\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if not obj.follow_object:\r\n continue\r\n elif obj.follow_object.id == 
follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n try:\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n except AttributeError:\r\n ct = None\r\n return ct\r\n", "path": "app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}]} | 1,349 | 645 |
gh_patches_debug_23374 | rasdani/github-patches | git_diff | gratipay__gratipay.com-4390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Localhost not loading in Firefox
Just found this problem in Firefox while setting up Gratipay locally on @dmk246 laptop. For some reason the page never loads when you `make run` and try to open localhost:8537 in Firefox it hangs. We believe it is because `gratipay.report_uri.io`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/security/__init__.py`
Content:
```
1 from aspen import Response
2
3
4 _requesting_asset = lambda r: r.path.raw.startswith('/assets/')
5
6
7 def only_allow_certain_methods(request):
8 method = request.method.upper()
9 whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST')
10 # POSTing to /assets/ interferes with the csrf.* functions if we're not careful
11 if method not in whitelist:
12 raise Response(405)
13
14
15 def add_headers_to_response(response):
16 """Add security headers.
17 """
18
19 # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options
20 if 'X-Frame-Options' not in response.headers:
21 response.headers['X-Frame-Options'] = 'SAMEORIGIN'
22 elif response.headers['X-Frame-Options'] == 'ALLOWALL':
23
24 # ALLOWALL is non-standard. It's useful as a signal from a simplate
25 # that it doesn't want X-Frame-Options set at all, but because it's
26 # non-standard we don't send it. Instead we unset the header entirely,
27 # which has the desired effect of allowing framing indiscriminately.
28 #
29 # Refs.:
30 #
31 # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options
32 # http://ipsec.pl/node/1094
33
34 del response.headers['X-Frame-Options']
35
36 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
37 if 'X-Content-Type-Options' not in response.headers:
38 response.headers['X-Content-Type-Options'] = 'nosniff'
39
40 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers
41 if 'X-XSS-Protection' not in response.headers:
42 response.headers['X-XSS-Protection'] = '1; mode=block'
43
44 # https://www.w3.org/TR/referrer-policy/
45 if 'Referrer-Policy' not in response.headers:
46 response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'
47
48 # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
49 if 'content-security-policy-report-only' not in response.headers:
50 response.headers['content-security-policy-report-only'] = (
51 "default-src 'self';"
52 "script-src 'self' assets.gratipay.com 'unsafe-inline';"
53 "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;"
54 "img-src *;"
55 "font-src 'self' assets.gratipay.com cloud.typography.com data:;"
56 "upgrade-insecure-requests;"
57 "block-all-mixed-content;"
58 "reflected-xss block;"
59 "report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;"
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gratipay/security/__init__.py b/gratipay/security/__init__.py
--- a/gratipay/security/__init__.py
+++ b/gratipay/security/__init__.py
@@ -43,7 +43,8 @@
# https://www.w3.org/TR/referrer-policy/
if 'Referrer-Policy' not in response.headers:
- response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'
+ response.headers['Referrer-Policy'] = \
+ 'no-referrer-when-downgrade, strict-origin-when-cross-origin'
# https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
if 'content-security-policy-report-only' not in response.headers:
@@ -53,8 +54,6 @@
"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;"
"img-src *;"
"font-src 'self' assets.gratipay.com cloud.typography.com data:;"
- "upgrade-insecure-requests;"
"block-all-mixed-content;"
- "reflected-xss block;"
"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;"
)
| {"golden_diff": "diff --git a/gratipay/security/__init__.py b/gratipay/security/__init__.py\n--- a/gratipay/security/__init__.py\n+++ b/gratipay/security/__init__.py\n@@ -43,7 +43,8 @@\n \n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n- response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'\n+ response.headers['Referrer-Policy'] = \\\n+ 'no-referrer-when-downgrade, strict-origin-when-cross-origin'\n \n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n@@ -53,8 +54,6 @@\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n- \"upgrade-insecure-requests;\"\n \"block-all-mixed-content;\"\n- \"reflected-xss block;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "issue": "Localhost not loading in Firefox\nJust found this problem in Firefox while setting up Gratipay locally on @dmk246 laptop. For some reason the page never loads when you `make run` and try to open localhost:8537 in Firefox it hangs. We believe it is because `gratipay.report_uri.io` \n", "before_files": [{"content": "from aspen import Response\n\n\n_requesting_asset = lambda r: r.path.raw.startswith('/assets/')\n\n\ndef only_allow_certain_methods(request):\n method = request.method.upper()\n whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST')\n # POSTing to /assets/ interferes with the csrf.* functions if we're not careful\n if method not in whitelist:\n raise Response(405)\n\n\ndef add_headers_to_response(response):\n \"\"\"Add security headers.\n \"\"\"\n\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n if 'X-Frame-Options' not in response.headers:\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n elif response.headers['X-Frame-Options'] == 'ALLOWALL':\n\n # ALLOWALL is non-standard. It's useful as a signal from a simplate\n # that it doesn't want X-Frame-Options set at all, but because it's\n # non-standard we don't send it. 
Instead we unset the header entirely,\n # which has the desired effect of allowing framing indiscriminately.\n #\n # Refs.:\n #\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n # http://ipsec.pl/node/1094\n\n del response.headers['X-Frame-Options']\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-Content-Type-Options' not in response.headers:\n response.headers['X-Content-Type-Options'] = 'nosniff'\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-XSS-Protection' not in response.headers:\n response.headers['X-XSS-Protection'] = '1; mode=block'\n\n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n response.headers['content-security-policy-report-only'] = (\n \"default-src 'self';\"\n \"script-src 'self' assets.gratipay.com 'unsafe-inline';\"\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n \"upgrade-insecure-requests;\"\n \"block-all-mixed-content;\"\n \"reflected-xss block;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "path": "gratipay/security/__init__.py"}], "after_files": [{"content": "from aspen import Response\n\n\n_requesting_asset = lambda r: r.path.raw.startswith('/assets/')\n\n\ndef only_allow_certain_methods(request):\n method = request.method.upper()\n whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST')\n # POSTing to /assets/ interferes with the csrf.* functions if we're not careful\n if method not in whitelist:\n raise Response(405)\n\n\ndef add_headers_to_response(response):\n \"\"\"Add security headers.\n \"\"\"\n\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n if 'X-Frame-Options' not in response.headers:\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n elif response.headers['X-Frame-Options'] == 'ALLOWALL':\n\n # ALLOWALL is non-standard. It's useful as a signal from a simplate\n # that it doesn't want X-Frame-Options set at all, but because it's\n # non-standard we don't send it. 
Instead we unset the header entirely,\n # which has the desired effect of allowing framing indiscriminately.\n #\n # Refs.:\n #\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n # http://ipsec.pl/node/1094\n\n del response.headers['X-Frame-Options']\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-Content-Type-Options' not in response.headers:\n response.headers['X-Content-Type-Options'] = 'nosniff'\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-XSS-Protection' not in response.headers:\n response.headers['X-XSS-Protection'] = '1; mode=block'\n\n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n response.headers['Referrer-Policy'] = \\\n 'no-referrer-when-downgrade, strict-origin-when-cross-origin'\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n response.headers['content-security-policy-report-only'] = (\n \"default-src 'self';\"\n \"script-src 'self' assets.gratipay.com 'unsafe-inline';\"\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n \"block-all-mixed-content;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "path": "gratipay/security/__init__.py"}]} | 1,066 | 269 |
gh_patches_debug_1920 | rasdani/github-patches | git_diff | mozilla__bugbug-598 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use new 'everchanged' operator instead of changedafter 1970
Depends on https://bugzilla.mozilla.org/show_bug.cgi?id=1546624.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/get_type_labels.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import csv
8 import sys
9
10 import requests
11
12
13 def parse_args(args):
14 parser = argparse.ArgumentParser()
15 parser.add_argument(
16 "--types",
17 help="Types to retrieve",
18 default=["defect", "enhancement", "task"],
19 nargs="*",
20 )
21 return parser.parse_args(args)
22
23
24 def main(args):
25 params = {
26 "columnlist": "bug_type",
27 "order": "bug_id",
28 "j_top": "OR",
29 "f1": "bug_type",
30 "o1": "changedafter",
31 "v1": "1970-01-01",
32 "f2": "OP",
33 "f3": "bug_type",
34 "o3": "anyexact",
35 "v3": "task,enhancement",
36 "f4": "bug_id",
37 "o4": "greaterthan",
38 "v4": 1540807,
39 "f5": "CP",
40 "ctype": "csv",
41 }
42
43 r = requests.get("https://bugzilla.mozilla.org/buglist.cgi", params=params)
44 r.raise_for_status()
45
46 with open("bugbug/labels/defect_enhancement_task_h.csv", "r") as f:
47 reader = csv.reader(f)
48 headers = next(reader)
49 bug_type_map = {int(row[0]): row[1] for row in reader}
50
51 # We add to our csv both labels that were changed, and labels that are in
52 # the list of requested types.
53 reader = csv.reader(r.text.splitlines())
54 next(reader)
55 for row in reader:
56 if int(row[0]) in bug_type_map or row[1] in args.types:
57 bug_type_map[int(row[0])] = row[1]
58
59 with open("bugbug/labels/defect_enhancement_task_h.csv", "w") as f:
60 writer = csv.writer(f)
61 writer.writerow(headers)
62 writer.writerows(sorted(bug_type_map.items()))
63
64
65 if __name__ == "__main__":
66 main(parse_args(sys.argv[1:]))
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/get_type_labels.py b/scripts/get_type_labels.py
--- a/scripts/get_type_labels.py
+++ b/scripts/get_type_labels.py
@@ -27,8 +27,7 @@
"order": "bug_id",
"j_top": "OR",
"f1": "bug_type",
- "o1": "changedafter",
- "v1": "1970-01-01",
+ "o1": "everchanged",
"f2": "OP",
"f3": "bug_type",
"o3": "anyexact",
| {"golden_diff": "diff --git a/scripts/get_type_labels.py b/scripts/get_type_labels.py\n--- a/scripts/get_type_labels.py\n+++ b/scripts/get_type_labels.py\n@@ -27,8 +27,7 @@\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n- \"o1\": \"changedafter\",\n- \"v1\": \"1970-01-01\",\n+ \"o1\": \"everchanged\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n", "issue": "Use new 'everchanged' operator instead of changedafter 1970\nDepends on https://bugzilla.mozilla.org/show_bug.cgi?id=1546624.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport sys\n\nimport requests\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--types\",\n help=\"Types to retrieve\",\n default=[\"defect\", \"enhancement\", \"task\"],\n nargs=\"*\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n params = {\n \"columnlist\": \"bug_type\",\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n \"o1\": \"changedafter\",\n \"v1\": \"1970-01-01\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n \"v3\": \"task,enhancement\",\n \"f4\": \"bug_id\",\n \"o4\": \"greaterthan\",\n \"v4\": 1540807,\n \"f5\": \"CP\",\n \"ctype\": \"csv\",\n }\n\n r = requests.get(\"https://bugzilla.mozilla.org/buglist.cgi\", params=params)\n r.raise_for_status()\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"r\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n bug_type_map = {int(row[0]): row[1] for row in reader}\n\n # We add to our csv both labels that were changed, and labels that are in\n # the list of requested types.\n reader = csv.reader(r.text.splitlines())\n next(reader)\n for row in reader:\n if int(row[0]) in bug_type_map or row[1] in args.types:\n bug_type_map[int(row[0])] = row[1]\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(sorted(bug_type_map.items()))\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "scripts/get_type_labels.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport sys\n\nimport requests\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--types\",\n help=\"Types to retrieve\",\n default=[\"defect\", \"enhancement\", \"task\"],\n nargs=\"*\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n params = {\n \"columnlist\": \"bug_type\",\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n \"o1\": \"everchanged\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n \"v3\": \"task,enhancement\",\n \"f4\": \"bug_id\",\n \"o4\": \"greaterthan\",\n \"v4\": 1540807,\n \"f5\": \"CP\",\n \"ctype\": \"csv\",\n }\n\n r = requests.get(\"https://bugzilla.mozilla.org/buglist.cgi\", params=params)\n r.raise_for_status()\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"r\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n bug_type_map = {int(row[0]): row[1] for row in reader}\n\n # We add to our csv both labels that were changed, and labels that are in\n # the list of requested types.\n reader = csv.reader(r.text.splitlines())\n next(reader)\n for row in reader:\n if int(row[0]) in bug_type_map or row[1] in args.types:\n bug_type_map[int(row[0])] = row[1]\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(sorted(bug_type_map.items()))\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "scripts/get_type_labels.py"}]} | 946 | 133 |
gh_patches_debug_17121 | rasdani/github-patches | git_diff | opendatacube__datacube-core-905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update release process documentation
Many steps described in the document have since been automated; the documentation should reflect that:
- Upload to pypi is done by Travis
- Updates for conda-forge are done by some bot that creates a PR
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 tests_require = [
6 'compliance-checker>=4.0.0',
7 'hypothesis',
8 'mock',
9 'pycodestyle',
10 'pylint',
11 'pytest',
12 'pytest-cov',
13 'pytest-timeout',
14 'pytest-httpserver',
15 'moto',
16 ]
17
18 extras_require = {
19 'performance': ['ciso8601', 'bottleneck'],
20 'interactive': ['matplotlib', 'fiona'],
21 'distributed': ['distributed', 'dask[distributed]'],
22 'doc': ['Sphinx', 'setuptools'],
23 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],
24 'celery': ['celery>=4', 'redis'],
25 's3': ['boto3'],
26 'test': tests_require,
27 }
28 # An 'all' option, following ipython naming conventions.
29 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
30
31 extra_plugins = dict(read=[], write=[], index=[])
32
33 setup(
34 name='datacube',
35 python_requires='>=3.5.2',
36
37 url='https://github.com/opendatacube/datacube-core',
38 author='Open Data Cube',
39 maintainer='Open Data Cube',
40 maintainer_email='',
41 description='An analysis environment for satellite and other earth observation data',
42 long_description=open('README.rst').read(),
43 long_description_content_type='text/x-rst',
44 license='Apache License 2.0',
45 classifiers=[
46 "Development Status :: 4 - Beta",
47 "Intended Audience :: Developers",
48 "Intended Audience :: Science/Research",
49 "License :: OSI Approved :: Apache Software License",
50 "Natural Language :: English",
51 "Operating System :: MacOS :: MacOS X",
52 "Operating System :: POSIX",
53 "Operating System :: POSIX :: BSD",
54 "Operating System :: POSIX :: Linux",
55 "Operating System :: Microsoft :: Windows",
56 "Programming Language :: Python",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.5",
59 "Programming Language :: Python :: 3.6",
60 "Topic :: Scientific/Engineering :: GIS",
61 "Topic :: Scientific/Engineering :: Information Analysis",
62 ],
63
64 packages=find_packages(
65 exclude=('tests', 'tests.*',
66 'integration_tests', 'integration_tests.*')
67 ),
68 package_data={
69 '': ['*.yaml', '*/*.yaml'],
70 },
71 scripts=[
72 'datacube_apps/scripts/pbs_helpers.sh'
73 ],
74 install_requires=[
75 'affine',
76 'pyproj>=2.5',
77 'shapely>=1.6.4',
78 'cachetools',
79 'click>=5.0',
80 'cloudpickle>=0.4',
81 'dask[array]',
82 'distributed',
83 'jsonschema',
84 'netcdf4',
85 'numpy',
86 'psycopg2',
87 'lark-parser>=0.6.7',
88 'python-dateutil',
89 'pyyaml',
90 'rasterio>=1.0.2', # Multi-band re-project fixed in that version
91 'sqlalchemy',
92 'toolz',
93 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
94 ],
95 extras_require=extras_require,
96 tests_require=tests_require,
97
98 entry_points={
99 'console_scripts': [
100 'datacube = datacube.scripts.cli_app:cli',
101 'datacube-search = datacube.scripts.search_tool:cli',
102 'datacube-stacker = datacube_apps.stacker:main',
103 'datacube-worker = datacube.execution.worker:main',
104 'datacube-fixer = datacube_apps.stacker:fixer_main',
105 'datacube-ncml = datacube_apps.ncml:ncml_app',
106 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',
107 'movie_generator = datacube_apps.movie_generator:main',
108 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'
109 ],
110 'datacube.plugins.io.read': [
111 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
112 *extra_plugins['read'],
113 ],
114 'datacube.plugins.io.write': [
115 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
116 *extra_plugins['write'],
117 ],
118 'datacube.plugins.index': [
119 'default = datacube.index.index:index_driver_init',
120 *extra_plugins['index'],
121 ],
122 },
123 )
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@
setup(
name='datacube',
- python_requires='>=3.5.2',
+ python_requires='>=3.6.0',
url='https://github.com/opendatacube/datacube-core',
author='Open Data Cube',
@@ -55,8 +55,8 @@
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Scientific/Engineering :: Information Analysis",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n \n setup(\n name='datacube',\n- python_requires='>=3.5.2',\n+ python_requires='>=3.6.0',\n \n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n@@ -55,8 +55,8 @@\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n", "issue": "Update release process documentation\nMany steps described in the document have since been automated, documentation should reflect that:\r\n\r\n- Upload to pypi is done by Travis\r\n- Updates for conda-forge are done by some bot that creates PR\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker>=4.0.0',\n 'hypothesis',\n 'mock',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.5.2',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark-parser>=0.6.7',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.0.2', # Multi-band re-project fixed in that version\n 'sqlalchemy',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n 
extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker>=4.0.0',\n 'hypothesis',\n 'mock',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.6.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark-parser>=0.6.7',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.0.2', # Multi-band re-project fixed in that version\n 'sqlalchemy',\n 'toolz',\n 
'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]} | 1,565 | 187 |
gh_patches_debug_27280 | rasdani/github-patches | git_diff | Pylons__pyramid-2620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pcreate -s shows wrong link to tutorials
after a
```
pcreate -s alchemy scaffold-alchemy
```
I see a link to tutorials, but this link is a 404:
```
Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/scaffolds/__init__.py`
Content:
```
1 import binascii
2 import os
3 from textwrap import dedent
4
5 from pyramid.compat import native_
6
7 from pyramid.scaffolds.template import Template # API
8
9 class PyramidTemplate(Template):
10 """
11 A class that can be used as a base class for Pyramid scaffolding
12 templates.
13 """
14 def pre(self, command, output_dir, vars):
15 """ Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding
16 several variables to the default variables list (including
17 ``random_string``, and ``package_logger``). It also prevents common
18 misnamings (such as naming a package "site" or naming a package
19 logger "root".
20 """
21 vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))
22 package_logger = vars['package']
23 if package_logger == 'root':
24 # Rename the app logger in the rare case a project is named 'root'
25 package_logger = 'app'
26 vars['package_logger'] = package_logger
27 return Template.pre(self, command, output_dir, vars)
28
29 def post(self, command, output_dir, vars): # pragma: no cover
30 """ Overrides :meth:`pyramid.scaffolds.template.Template.post`, to
31 print "Welcome to Pyramid. Sorry for the convenience." after a
32 successful scaffolding rendering."""
33
34 separator = "=" * 79
35 msg = dedent(
36 """
37 %(separator)s
38 Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials
39 Documentation: http://docs.pylonsproject.org/projects/pyramid
40
41 Twitter (tips & updates): http://twitter.com/pylons
42 Mailing List: http://groups.google.com/group/pylons-discuss
43
44 Welcome to Pyramid. Sorry for the convenience.
45 %(separator)s
46 """ % {'separator': separator})
47
48 self.out(msg)
49 return Template.post(self, command, output_dir, vars)
50
51 def out(self, msg): # pragma: no cover (replaceable testing hook)
52 print(msg)
53
54 class StarterProjectTemplate(PyramidTemplate):
55 _template_dir = 'starter'
56 summary = 'Pyramid starter project'
57
58 class ZODBProjectTemplate(PyramidTemplate):
59 _template_dir = 'zodb'
60 summary = 'Pyramid ZODB project using traversal'
61
62 class AlchemyProjectTemplate(PyramidTemplate):
63 _template_dir = 'alchemy'
64 summary = 'Pyramid SQLAlchemy project using url dispatch'
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py
--- a/pyramid/scaffolds/__init__.py
+++ b/pyramid/scaffolds/__init__.py
@@ -35,11 +35,10 @@
msg = dedent(
"""
%(separator)s
- Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials
- Documentation: http://docs.pylonsproject.org/projects/pyramid
-
- Twitter (tips & updates): http://twitter.com/pylons
- Mailing List: http://groups.google.com/group/pylons-discuss
+ Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
+ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
+ Twitter: https://twitter.com/trypyramid
+ Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss
Welcome to Pyramid. Sorry for the convenience.
%(separator)s
@@ -53,12 +52,13 @@
class StarterProjectTemplate(PyramidTemplate):
_template_dir = 'starter'
- summary = 'Pyramid starter project'
+ summary = 'Pyramid starter project using URL dispatch and Chameleon'
class ZODBProjectTemplate(PyramidTemplate):
_template_dir = 'zodb'
- summary = 'Pyramid ZODB project using traversal'
+ summary = 'Pyramid project using ZODB, traversal, and Chameleon'
class AlchemyProjectTemplate(PyramidTemplate):
_template_dir = 'alchemy'
- summary = 'Pyramid SQLAlchemy project using url dispatch'
+ summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'
+ ' Chameleon'
| {"golden_diff": "diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py\n--- a/pyramid/scaffolds/__init__.py\n+++ b/pyramid/scaffolds/__init__.py\n@@ -35,11 +35,10 @@\n msg = dedent(\n \"\"\"\n %(separator)s\n- Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n- Documentation: http://docs.pylonsproject.org/projects/pyramid\n-\n- Twitter (tips & updates): http://twitter.com/pylons\n- Mailing List: http://groups.google.com/group/pylons-discuss\n+ Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/\n+ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/\n+ Twitter: https://twitter.com/trypyramid\n+ Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss\n \n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n@@ -53,12 +52,13 @@\n \n class StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n- summary = 'Pyramid starter project'\n+ summary = 'Pyramid starter project using URL dispatch and Chameleon'\n \n class ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n- summary = 'Pyramid ZODB project using traversal'\n+ summary = 'Pyramid project using ZODB, traversal, and Chameleon'\n \n class AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n- summary = 'Pyramid SQLAlchemy project using url dispatch'\n+ summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'\n+ ' Chameleon'\n", "issue": "pcreate -s shows wrong link to tutorials\nafter a \n\n```\npcreate -s alchemy scaffold-alchemy\n```\n\nI see a link to tutorials, but this link is a 404: \n\n```\nTutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n```\n\n", "before_files": [{"content": "import binascii\nimport os\nfrom textwrap import dedent\n\nfrom pyramid.compat import native_\n\nfrom pyramid.scaffolds.template import Template # API\n\nclass PyramidTemplate(Template):\n \"\"\"\n A class that can be used as a base class for Pyramid scaffolding\n templates.\n \"\"\"\n def pre(self, command, output_dir, vars):\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding\n several variables to the default variables list (including\n ``random_string``, and ``package_logger``). It also prevents common\n misnamings (such as naming a package \"site\" or naming a package\n logger \"root\".\n \"\"\"\n vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))\n package_logger = vars['package']\n if package_logger == 'root':\n # Rename the app logger in the rare case a project is named 'root'\n package_logger = 'app'\n vars['package_logger'] = package_logger\n return Template.pre(self, command, output_dir, vars)\n\n def post(self, command, output_dir, vars): # pragma: no cover\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to\n print \"Welcome to Pyramid. Sorry for the convenience.\" after a\n successful scaffolding rendering.\"\"\"\n\n separator = \"=\" * 79\n msg = dedent(\n \"\"\"\n %(separator)s\n Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n Documentation: http://docs.pylonsproject.org/projects/pyramid\n\n Twitter (tips & updates): http://twitter.com/pylons\n Mailing List: http://groups.google.com/group/pylons-discuss\n\n Welcome to Pyramid. 
Sorry for the convenience.\n %(separator)s\n \"\"\" % {'separator': separator})\n\n self.out(msg)\n return Template.post(self, command, output_dir, vars)\n\n def out(self, msg): # pragma: no cover (replaceable testing hook)\n print(msg)\n\nclass StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n summary = 'Pyramid starter project'\n\nclass ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n summary = 'Pyramid ZODB project using traversal'\n\nclass AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n summary = 'Pyramid SQLAlchemy project using url dispatch'\n", "path": "pyramid/scaffolds/__init__.py"}], "after_files": [{"content": "import binascii\nimport os\nfrom textwrap import dedent\n\nfrom pyramid.compat import native_\n\nfrom pyramid.scaffolds.template import Template # API\n\nclass PyramidTemplate(Template):\n \"\"\"\n A class that can be used as a base class for Pyramid scaffolding\n templates.\n \"\"\"\n def pre(self, command, output_dir, vars):\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding\n several variables to the default variables list (including\n ``random_string``, and ``package_logger``). It also prevents common\n misnamings (such as naming a package \"site\" or naming a package\n logger \"root\".\n \"\"\"\n vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))\n package_logger = vars['package']\n if package_logger == 'root':\n # Rename the app logger in the rare case a project is named 'root'\n package_logger = 'app'\n vars['package_logger'] = package_logger\n return Template.pre(self, command, output_dir, vars)\n\n def post(self, command, output_dir, vars): # pragma: no cover\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to\n print \"Welcome to Pyramid. Sorry for the convenience.\" after a\n successful scaffolding rendering.\"\"\"\n\n separator = \"=\" * 79\n msg = dedent(\n \"\"\"\n %(separator)s\n Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/\n Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/\n Twitter: https://twitter.com/trypyramid\n Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss\n\n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n \"\"\" % {'separator': separator})\n\n self.out(msg)\n return Template.post(self, command, output_dir, vars)\n\n def out(self, msg): # pragma: no cover (replaceable testing hook)\n print(msg)\n\nclass StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n summary = 'Pyramid starter project using URL dispatch and Chameleon'\n\nclass ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n summary = 'Pyramid project using ZODB, traversal, and Chameleon'\n\nclass AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'\n ' Chameleon'\n", "path": "pyramid/scaffolds/__init__.py"}]} | 979 | 398 |
gh_patches_debug_61017 | rasdani/github-patches | git_diff | lnbits__lnbits-2283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature request] Add server url to "API keys and API docs" section
**Is your feature request related to a problem? Please describe.**
When linking lnbits with external services (e.g. [zaprite](https://zaprite.com/)), one needs to specify two things: node url and invoice key. 

Invoice key is clearly visible in the "API keys and API docs" section, but it's sometimes unclear what my "LNbits Node URL" is.

**Describe the solution you'd like**
Display "LNbits Node URL" in "Node URL, API keys and docs"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/i18n-ai-tool.py`
Content:
```
1 # 1. Always check the results of the procedure
2 # 2. Always run "npx prettier -w lnbits/static/i18n/XX.js" to reformat the result
3
4 import os
5 import re
6 import sys
7
8 import json5
9 from openai import OpenAI
10
11 if len(sys.argv) < 2:
12 print("Usage: python3 tools/i18n-tool.py <code> [language]")
13 sys.exit(1)
14 lang = sys.argv[1]
15
16
17 def load_language(lang):
18 s = open(f"lnbits/static/i18n/{lang}.js", "rt").read()
19 prefix = "window.localisation.%s = {\n" % lang
20 assert s.startswith(prefix)
21 s = s[len(prefix) - 2 :]
22 return json5.loads(s)
23
24
25 def save_language(lang, data):
26 with open(f"lnbits/static/i18n/{lang}.js", "wt") as f:
27 f.write("window.localisation.%s = {\n" % lang)
28 row = 0
29 for k, v in data.items():
30 row += 1
31 f.write(" %s:\n" % k)
32 if "'" in v:
33 f.write(' "%s"' % v)
34 else:
35 f.write(" '%s'" % v)
36 if row == len(data):
37 f.write("\n")
38 else:
39 f.write(",\n")
40 f.write("}\n")
41
42
43 def string_variables_match(str1, str2):
44 pat = re.compile(r"%\{[a-z0-9_]*\}")
45 m1 = re.findall(pat, str1)
46 m2 = re.findall(pat, str2)
47 return sorted(m1) == sorted(m2)
48
49
50 def translate_string(lang_from, lang_to, text):
51 target = {
52 "de": "German",
53 "es": "Spanish",
54 "jp": "Japan",
55 "cn": "Chinese",
56 "fr": "French",
57 "it": "Italian",
58 "pi": "Pirate",
59 "nl": "Dutch",
60 "we": "Welsh",
61 "pl": "Polish",
62 "pt": "Portuguese",
63 "br": "Brazilian Portugese",
64 "cs": "Czech",
65 "sk": "Slovak",
66 "kr": "Korean",
67 }[lang_to]
68 assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY env var not set"
69 client = OpenAI()
70 try:
71 chat_completion = client.chat.completions.create(
72 messages=[
73 {
74 "role": "system",
75 "content": "You are a language expert that speaks all languages in the world. You are about to translate text from English to another language. The text is a part of the software you are translating. If the given text contains a phrase enclosed by curly preceded with a percent sign, do not translate the given phrase, just keep it verbatim. So for example, the phrase %{amount} translated to target language should still be kept as %{amount}. Never output anything else, just the translated string.", # noqa: E501
76 },
77 {
78 "role": "user",
79 "content": f"Translate the following string from English to {target}: {text}", # noqa: E501
80 },
81 ],
82 model="gpt-4-1106-preview", # aka GPT-4 Turbo
83 )
84 translated = chat_completion.choices[0].message.content.strip()
85 # return translated string only if variables were not broken
86 if string_variables_match(text, translated):
87 return translated
88 else:
89 return None
90 except Exception:
91 return None
92
93
94 data_en = load_language("en")
95 data = load_language(lang)
96
97 missing = set(data_en.keys()) - set(data.keys())
98 print(f"Missing {len(missing)} keys in language '{lang}'")
99
100 if len(missing) > 0:
101 new = {}
102 for k in data_en:
103 if k in data:
104 new[k] = data[k]
105 else:
106 print(f"Translating key '{k}'")
107 print(f"{data_en[k]}")
108 translated = translate_string("en", lang, data_en[k])
109 print("->")
110 if translated:
111 print(f"{translated}")
112 new[k] = translated
113 else:
114 print("ERROR")
115 print()
116 save_language(lang, new)
117 else:
118 # check whether variables match for each string
119 for k in data_en:
120 if not string_variables_match(data_en[k], data[k]):
121 print(f"Variables mismatch ({k}):")
122 print(data_en[k])
123 print(data[k])
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/i18n-ai-tool.py b/tools/i18n-ai-tool.py
--- a/tools/i18n-ai-tool.py
+++ b/tools/i18n-ai-tool.py
@@ -64,6 +64,7 @@
"cs": "Czech",
"sk": "Slovak",
"kr": "Korean",
+ "fi": "Finnish",
}[lang_to]
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY env var not set"
client = OpenAI()
| {"golden_diff": "diff --git a/tools/i18n-ai-tool.py b/tools/i18n-ai-tool.py\n--- a/tools/i18n-ai-tool.py\n+++ b/tools/i18n-ai-tool.py\n@@ -64,6 +64,7 @@\n \"cs\": \"Czech\",\n \"sk\": \"Slovak\",\n \"kr\": \"Korean\",\n+ \"fi\": \"Finnish\",\n }[lang_to]\n assert os.getenv(\"OPENAI_API_KEY\"), \"OPENAI_API_KEY env var not set\"\n client = OpenAI()\n", "issue": "[Feature request] Add server url to \"API keys and API docs\" section\n**Is your feature request related to a problem? Please describe.**\r\nWhen linking lnbits with external services, (e.g. [zaprite](https://zaprite.com/)) one needs to specify two things: node url and invoice key. \r\n\r\n\r\n\r\nInvoice key is clearly visible in the \"API keys and API docs\" section, but it's sometimes unclear what my \"LNbits Node URL\" is. \r\n\r\n\r\n\r\n**Describe the solution you'd like**\r\nDisplay \"LNbits Node URL\" in \"Node URL, API keys and docs\"\n", "before_files": [{"content": "# 1. Always check the results of the procedure\n# 2. Always run \"npx prettier -w lnbits/static/i18n/XX.js\" to reformat the result\n\nimport os\nimport re\nimport sys\n\nimport json5\nfrom openai import OpenAI\n\nif len(sys.argv) < 2:\n print(\"Usage: python3 tools/i18n-tool.py <code> [language]\")\n sys.exit(1)\nlang = sys.argv[1]\n\n\ndef load_language(lang):\n s = open(f\"lnbits/static/i18n/{lang}.js\", \"rt\").read()\n prefix = \"window.localisation.%s = {\\n\" % lang\n assert s.startswith(prefix)\n s = s[len(prefix) - 2 :]\n return json5.loads(s)\n\n\ndef save_language(lang, data):\n with open(f\"lnbits/static/i18n/{lang}.js\", \"wt\") as f:\n f.write(\"window.localisation.%s = {\\n\" % lang)\n row = 0\n for k, v in data.items():\n row += 1\n f.write(\" %s:\\n\" % k)\n if \"'\" in v:\n f.write(' \"%s\"' % v)\n else:\n f.write(\" '%s'\" % v)\n if row == len(data):\n f.write(\"\\n\")\n else:\n f.write(\",\\n\")\n f.write(\"}\\n\")\n\n\ndef string_variables_match(str1, str2):\n pat = re.compile(r\"%\\{[a-z0-9_]*\\}\")\n m1 = re.findall(pat, str1)\n m2 = re.findall(pat, str2)\n return sorted(m1) == sorted(m2)\n\n\ndef translate_string(lang_from, lang_to, text):\n target = {\n \"de\": \"German\",\n \"es\": \"Spanish\",\n \"jp\": \"Japan\",\n \"cn\": \"Chinese\",\n \"fr\": \"French\",\n \"it\": \"Italian\",\n \"pi\": \"Pirate\",\n \"nl\": \"Dutch\",\n \"we\": \"Welsh\",\n \"pl\": \"Polish\",\n \"pt\": \"Portuguese\",\n \"br\": \"Brazilian Portugese\",\n \"cs\": \"Czech\",\n \"sk\": \"Slovak\",\n \"kr\": \"Korean\",\n }[lang_to]\n assert os.getenv(\"OPENAI_API_KEY\"), \"OPENAI_API_KEY env var not set\"\n client = OpenAI()\n try:\n chat_completion = client.chat.completions.create(\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a language expert that speaks all languages in the world. You are about to translate text from English to another language. The text is a part of the software you are translating. If the given text contains a phrase enclosed by curly preceded with a percent sign, do not translate the given phrase, just keep it verbatim. So for example, the phrase %{amount} translated to target language should still be kept as %{amount}. 
Never output anything else, just the translated string.\", # noqa: E501\n },\n {\n \"role\": \"user\",\n \"content\": f\"Translate the following string from English to {target}: {text}\", # noqa: E501\n },\n ],\n model=\"gpt-4-1106-preview\", # aka GPT-4 Turbo\n )\n translated = chat_completion.choices[0].message.content.strip()\n # return translated string only if variables were not broken\n if string_variables_match(text, translated):\n return translated\n else:\n return None\n except Exception:\n return None\n\n\ndata_en = load_language(\"en\")\ndata = load_language(lang)\n\nmissing = set(data_en.keys()) - set(data.keys())\nprint(f\"Missing {len(missing)} keys in language '{lang}'\")\n\nif len(missing) > 0:\n new = {}\n for k in data_en:\n if k in data:\n new[k] = data[k]\n else:\n print(f\"Translating key '{k}'\")\n print(f\"{data_en[k]}\")\n translated = translate_string(\"en\", lang, data_en[k])\n print(\"->\")\n if translated:\n print(f\"{translated}\")\n new[k] = translated\n else:\n print(\"ERROR\")\n print()\n save_language(lang, new)\nelse:\n # check whether variables match for each string\n for k in data_en:\n if not string_variables_match(data_en[k], data[k]):\n print(f\"Variables mismatch ({k}):\")\n print(data_en[k])\n print(data[k])\n", "path": "tools/i18n-ai-tool.py"}], "after_files": [{"content": "# 1. Always check the results of the procedure\n# 2. Always run \"npx prettier -w lnbits/static/i18n/XX.js\" to reformat the result\n\nimport os\nimport re\nimport sys\n\nimport json5\nfrom openai import OpenAI\n\nif len(sys.argv) < 2:\n print(\"Usage: python3 tools/i18n-tool.py <code> [language]\")\n sys.exit(1)\nlang = sys.argv[1]\n\n\ndef load_language(lang):\n s = open(f\"lnbits/static/i18n/{lang}.js\", \"rt\").read()\n prefix = \"window.localisation.%s = {\\n\" % lang\n assert s.startswith(prefix)\n s = s[len(prefix) - 2 :]\n return json5.loads(s)\n\n\ndef save_language(lang, data):\n with open(f\"lnbits/static/i18n/{lang}.js\", \"wt\") as f:\n f.write(\"window.localisation.%s = {\\n\" % lang)\n row = 0\n for k, v in data.items():\n row += 1\n f.write(\" %s:\\n\" % k)\n if \"'\" in v:\n f.write(' \"%s\"' % v)\n else:\n f.write(\" '%s'\" % v)\n if row == len(data):\n f.write(\"\\n\")\n else:\n f.write(\",\\n\")\n f.write(\"}\\n\")\n\n\ndef string_variables_match(str1, str2):\n pat = re.compile(r\"%\\{[a-z0-9_]*\\}\")\n m1 = re.findall(pat, str1)\n m2 = re.findall(pat, str2)\n return sorted(m1) == sorted(m2)\n\n\ndef translate_string(lang_from, lang_to, text):\n target = {\n \"de\": \"German\",\n \"es\": \"Spanish\",\n \"jp\": \"Japan\",\n \"cn\": \"Chinese\",\n \"fr\": \"French\",\n \"it\": \"Italian\",\n \"pi\": \"Pirate\",\n \"nl\": \"Dutch\",\n \"we\": \"Welsh\",\n \"pl\": \"Polish\",\n \"pt\": \"Portuguese\",\n \"br\": \"Brazilian Portugese\",\n \"cs\": \"Czech\",\n \"sk\": \"Slovak\",\n \"kr\": \"Korean\",\n \"fi\": \"Finnish\",\n }[lang_to]\n assert os.getenv(\"OPENAI_API_KEY\"), \"OPENAI_API_KEY env var not set\"\n client = OpenAI()\n try:\n chat_completion = client.chat.completions.create(\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a language expert that speaks all languages in the world. You are about to translate text from English to another language. The text is a part of the software you are translating. If the given text contains a phrase enclosed by curly preceded with a percent sign, do not translate the given phrase, just keep it verbatim. 
So for example, the phrase %{amount} translated to target language should still be kept as %{amount}. Never output anything else, just the translated string.\", # noqa: E501\n },\n {\n \"role\": \"user\",\n \"content\": f\"Translate the following string from English to {target}: {text}\", # noqa: E501\n },\n ],\n model=\"gpt-4-1106-preview\", # aka GPT-4 Turbo\n )\n translated = chat_completion.choices[0].message.content.strip()\n # return translated string only if variables were not broken\n if string_variables_match(text, translated):\n return translated\n else:\n return None\n except Exception:\n return None\n\n\ndata_en = load_language(\"en\")\ndata = load_language(lang)\n\nmissing = set(data_en.keys()) - set(data.keys())\nprint(f\"Missing {len(missing)} keys in language '{lang}'\")\n\nif len(missing) > 0:\n new = {}\n for k in data_en:\n if k in data:\n new[k] = data[k]\n else:\n print(f\"Translating key '{k}'\")\n print(f\"{data_en[k]}\")\n translated = translate_string(\"en\", lang, data_en[k])\n print(\"->\")\n if translated:\n print(f\"{translated}\")\n new[k] = translated\n else:\n print(\"ERROR\")\n print()\n save_language(lang, new)\nelse:\n # check whether variables match for each string\n for k in data_en:\n if not string_variables_match(data_en[k], data[k]):\n print(f\"Variables mismatch ({k}):\")\n print(data_en[k])\n print(data[k])\n", "path": "tools/i18n-ai-tool.py"}]} | 1,784 | 126 |
gh_patches_debug_9196 | rasdani/github-patches | git_diff | conda__conda-build-1470 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda metapackage
Hello,
I was wondering why the behaviour of `conda metapackage` has changed. Previously, it outputted helpful information about the location of the recently created package. However, this is the output now:
```
BUILD START: cgat-devel-0.4-py27r3.2.2_6
Package: cgat-devel-0.4-py27r3.2.2_6
source tree in: /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780260959/work
number of files: 1
Fixing permissions
Detected hard-coded path in text file bin/cgat
Fixing permissions
```
Moreover, the command also creates temporary folders that are left empty after the package has been built:
```
sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720264845
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476695297317
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718035758
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718312877
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476721899323
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476698228374
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476696744782
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476719724225
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720123351
/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780047095
```
Is this required?
Here is additional info about my environment:
```
$ conda info
Current conda install:
platform : linux-64
conda version : 4.2.9
conda is private : False
conda-env version : 4.2.9
conda-build version : 2.0.6
python version : 2.7.12.final.0
requests version : 2.11.1
root environment : /sebastian/conda/conda-build/build-testing (writable)
default environment : /sebastian/conda/conda-build/build-testing
envs directories : /sebastian/conda/conda-build/build-testing/envs
package cache : /sebastian/conda/conda-build/build-testing/pkgs
channel URLs : https://conda.anaconda.org/cgat/linux-64/
https://conda.anaconda.org/cgat/noarch/
https://repo.continuum.io/pkgs/free/linux-64/
https://repo.continuum.io/pkgs/free/noarch/
https://repo.continuum.io/pkgs/pro/linux-64/
https://repo.continuum.io/pkgs/pro/noarch/
https://conda.anaconda.org/conda-forge/linux-64/
https://conda.anaconda.org/conda-forge/noarch/
https://conda.anaconda.org/r/linux-64/
https://conda.anaconda.org/r/noarch/
https://conda.anaconda.org/bioconda/linux-64/
https://conda.anaconda.org/bioconda/noarch/
config file : /ifs/home/sebastian/.condarc
offline mode : False
```
Many thanks,
Sebastian
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/metapackage.py`
Content:
```
1 from collections import defaultdict
2 from conda_build.config import Config
3 from conda_build.metadata import MetaData
4
5
6 def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,
7 dependencies=(), home=None, license_name=None, summary=None, config=None):
8 # local import to avoid circular import, we provid create_metapackage in api
9 from conda_build.build import build
10
11 if not config:
12 config = Config()
13
14 d = defaultdict(dict)
15 d['package']['name'] = name
16 d['package']['version'] = version
17 d['build']['number'] = build_number
18 d['build']['entry_points'] = entry_points
19 # MetaData does the auto stuff if the build string is None
20 d['build']['string'] = build_string
21 d['requirements']['run'] = dependencies
22 d['about']['home'] = home
23 d['about']['license'] = license_name
24 d['about']['summary'] = summary
25 d = dict(d)
26 m = MetaData.fromdict(d, config=config)
27 config.compute_build_id(m.name())
28
29 return build(m, config=config, need_source_download=False)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/metapackage.py b/conda_build/metapackage.py
--- a/conda_build/metapackage.py
+++ b/conda_build/metapackage.py
@@ -6,7 +6,7 @@
def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,
dependencies=(), home=None, license_name=None, summary=None, config=None):
# local import to avoid circular import, we provid create_metapackage in api
- from conda_build.build import build
+ from conda_build.api import build
if not config:
config = Config()
| {"golden_diff": "diff --git a/conda_build/metapackage.py b/conda_build/metapackage.py\n--- a/conda_build/metapackage.py\n+++ b/conda_build/metapackage.py\n@@ -6,7 +6,7 @@\n def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n- from conda_build.build import build\n+ from conda_build.api import build\n \n if not config:\n config = Config()\n", "issue": "conda metapackage \nHello,\n\nI was wondering why the behaviour of `conda metapackage` has changed. Previously, it outputted helpful information about the location of the recently created package. However, this is the output now:\n\n```\nBUILD START: cgat-devel-0.4-py27r3.2.2_6\nPackage: cgat-devel-0.4-py27r3.2.2_6\nsource tree in: /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780260959/work\nnumber of files: 1\nFixing permissions\nDetected hard-coded path in text file bin/cgat\nFixing permissions\n```\n\nMoreover, the command also creates temporary folders that are left empty after the package has been built:\n\n```\nsebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720264845\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476695297317\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718035758\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718312877\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476721899323\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476698228374\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476696744782\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476719724225\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720123351\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780047095\n```\n\nIs this required?\n\nHere is additional info about my environment:\n\n```\n$ conda info\nCurrent conda install:\n\n platform : linux-64\n conda version : 4.2.9\n conda is private : False\n conda-env version : 4.2.9\n conda-build version : 2.0.6\n python version : 2.7.12.final.0\n requests version : 2.11.1\n root environment : /sebastian/conda/conda-build/build-testing (writable)\n default environment : /sebastian/conda/conda-build/build-testing\n envs directories : /sebastian/conda/conda-build/build-testing/envs\n package cache : /sebastian/conda/conda-build/build-testing/pkgs\n channel URLs : https://conda.anaconda.org/cgat/linux-64/\n https://conda.anaconda.org/cgat/noarch/\n https://repo.continuum.io/pkgs/free/linux-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/linux-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n https://conda.anaconda.org/conda-forge/linux-64/\n https://conda.anaconda.org/conda-forge/noarch/\n https://conda.anaconda.org/r/linux-64/\n https://conda.anaconda.org/r/noarch/\n https://conda.anaconda.org/bioconda/linux-64/\n https://conda.anaconda.org/bioconda/noarch/\n config file : /ifs/home/sebastian/.condarc\n offline mode : False\n```\n\nMany thanks,\nSebastian\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom conda_build.config import Config\nfrom conda_build.metadata import MetaData\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, 
config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n from conda_build.build import build\n\n if not config:\n config = Config()\n\n d = defaultdict(dict)\n d['package']['name'] = name\n d['package']['version'] = version\n d['build']['number'] = build_number\n d['build']['entry_points'] = entry_points\n # MetaData does the auto stuff if the build string is None\n d['build']['string'] = build_string\n d['requirements']['run'] = dependencies\n d['about']['home'] = home\n d['about']['license'] = license_name\n d['about']['summary'] = summary\n d = dict(d)\n m = MetaData.fromdict(d, config=config)\n config.compute_build_id(m.name())\n\n return build(m, config=config, need_source_download=False)\n", "path": "conda_build/metapackage.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom conda_build.config import Config\nfrom conda_build.metadata import MetaData\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n from conda_build.api import build\n\n if not config:\n config = Config()\n\n d = defaultdict(dict)\n d['package']['name'] = name\n d['package']['version'] = version\n d['build']['number'] = build_number\n d['build']['entry_points'] = entry_points\n # MetaData does the auto stuff if the build string is None\n d['build']['string'] = build_string\n d['requirements']['run'] = dependencies\n d['about']['home'] = home\n d['about']['license'] = license_name\n d['about']['summary'] = summary\n d = dict(d)\n m = MetaData.fromdict(d, config=config)\n config.compute_build_id(m.name())\n\n return build(m, config=config, need_source_download=False)\n", "path": "conda_build/metapackage.py"}]} | 1,461 | 137 |
gh_patches_debug_3382 | rasdani/github-patches | git_diff | cocotb__cocotb-275 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in BusMonitor Causes python Exception
In the bus monitor function in_reset(), there is a typo causing a problem.
The code at lines 168-169 tests if self._reset is valid, but then it accesses self._reset_n when it should be accessing self._reset.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/monitors/__init__.py`
Content:
```
1 #!/bin/env python
2
3 ''' Copyright (c) 2013 Potential Ventures Ltd
4 Copyright (c) 2013 SolarFlare Communications Inc
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9 * Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
11 * Redistributions in binary form must reproduce the above copyright
12 notice, this list of conditions and the following disclaimer in the
13 documentation and/or other materials provided with the distribution.
14 * Neither the name of Potential Ventures Ltd,
15 SolarFlare Communications Inc nor the
16 names of its contributors may be used to endorse or promote products
17 derived from this software without specific prior written permission.
18
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
29
30 """
31
32 Class defining the standard interface for a monitor within a testbench
33
34 The monitor is responsible for watching the pins of the DUT and recreating
35 the transactions
36 """
37
38 import math
39
40 import cocotb
41 from cocotb.decorators import coroutine
42 from cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer
43 from cocotb.binary import BinaryValue
44 from cocotb.bus import Bus
45 from cocotb.log import SimLog
46 from cocotb.result import ReturnValue
47
48
49 class MonitorStatistics(object):
50 """Wrapper class for storing Monitor statistics"""
51 def __init__(self):
52 self.received_transactions = 0
53
54
55 class Monitor(object):
56
57 def __init__(self, callback=None, event=None):
58 """
59 Constructor for a monitor instance
60
61 callback will be called with each recovered transaction as the argument
62
63 If the callback isn't used, received transactions will be placed on a
64 queue and the event used to notify any consumers.
65 """
66 self._event = event
67 self._wait_event = None
68 self._recvQ = []
69 self._callbacks = []
70 self.stats = MonitorStatistics()
71 self._wait_event = Event()
72
73 # Subclasses may already set up logging
74 if not hasattr(self, "log"):
75 self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))
76
77 if callback is not None:
78 self.add_callback(callback)
79
80 # Create an independent coroutine which can receive stuff
81 self._thread = cocotb.scheduler.add(self._monitor_recv())
82
83 def kill(self):
84 if self._thread:
85 self._thread.kill()
86 self._thread = None
87
88 def __len__(self):
89 return len(self._recvQ)
90
91 def __getitem__(self, idx):
92 return self._recvQ[idx]
93
94 def add_callback(self, callback):
95 self.log.debug("Adding callback of function %s to monitor" %
96 (callback.__name__))
97 self._callbacks.append(callback)
98
99 @coroutine
100 def wait_for_recv(self, timeout=None):
101 if timeout:
102 t = Timer(timeout)
103 fired = yield [self._wait_event.wait(), t]
104 if fired is t:
105 raise ReturnValue(None)
106 else:
107 yield self._wait_event.wait()
108
109 pkt = self._wait_event.data
110 raise ReturnValue(pkt)
111
112 @coroutine
113 def _monitor_recv(self):
114 """
115 actual impementation of the receiver
116
117 subclasses should override this method to implement the actual receive
118 routine and call self._recv() with the recovered transaction
119 """
120 raise NotImplementedError("Attempt to use base monitor class without "
121 "providing a _monitor_recv method")
122
123 def _recv(self, transaction):
124 """Common handling of a received transaction."""
125
126 self.stats.received_transactions += 1
127
128 # either callback based consumer
129 for callback in self._callbacks:
130 callback(transaction)
131
132 # Or queued with a notification
133 if not self._callbacks:
134 self._recvQ.append(transaction)
135
136 if self._event is not None:
137 self._event.set()
138
139 # If anyone was waiting then let them know
140 if self._wait_event is not None:
141 self._wait_event.set(data=transaction)
142 self._wait_event.clear()
143
144
145 class BusMonitor(Monitor):
146 """
147 Wrapper providing common functionality for monitoring busses
148 """
149 _signals = []
150 _optional_signals = []
151
152 def __init__(self, entity, name, clock, reset=None, reset_n=None,
153 callback=None, event=None):
154 self.log = SimLog("cocotb.%s.%s" % (entity.name, name))
155 self.entity = entity
156 self.name = name
157 self.clock = clock
158 self.bus = Bus(self.entity, self.name, self._signals,
159 optional_signals=self._optional_signals)
160 self._reset = reset
161 self._reset_n = reset_n
162 Monitor.__init__(self, callback=callback, event=event)
163
164 @property
165 def in_reset(self):
166 if self._reset_n is not None:
167 return not bool(self._reset_n.value.integer)
168 if self._reset is not None:
169 return bool(self._reset_n.value.integer)
170 return False
171
172 def __str__(self):
173 return "%s(%s)" % (self.__class__.__name__, self.name)
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cocotb/monitors/__init__.py b/cocotb/monitors/__init__.py
--- a/cocotb/monitors/__init__.py
+++ b/cocotb/monitors/__init__.py
@@ -166,7 +166,7 @@
if self._reset_n is not None:
return not bool(self._reset_n.value.integer)
if self._reset is not None:
- return bool(self._reset_n.value.integer)
+ return bool(self._reset.value.integer)
return False
def __str__(self):
| {"golden_diff": "diff --git a/cocotb/monitors/__init__.py b/cocotb/monitors/__init__.py\n--- a/cocotb/monitors/__init__.py\n+++ b/cocotb/monitors/__init__.py\n@@ -166,7 +166,7 @@\n if self._reset_n is not None:\n return not bool(self._reset_n.value.integer)\n if self._reset is not None:\n- return bool(self._reset_n.value.integer)\n+ return bool(self._reset.value.integer)\n return False\n \n def __str__(self):\n", "issue": "Typo in BusMonitor Causes python Exception\nIn the bus monitor function in_reset(), there is a typo causing a problem.\n\nThe code at lines 168-169, tests if self._reset is valid, but then it accesses self._reset_n when it should be accessing self._reset.\n\n", "before_files": [{"content": "#!/bin/env python\n\n''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''\n\n\"\"\"\n\n Class defining the standard interface for a monitor within a testbench\n\n The monitor is responsible for watching the pins of the DUT and recreating\n the transactions\n\"\"\"\n\nimport math\n\nimport cocotb\nfrom cocotb.decorators import coroutine\nfrom cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer\nfrom cocotb.binary import BinaryValue\nfrom cocotb.bus import Bus\nfrom cocotb.log import SimLog\nfrom cocotb.result import ReturnValue\n\n\nclass MonitorStatistics(object):\n \"\"\"Wrapper class for storing Monitor statistics\"\"\"\n def __init__(self):\n self.received_transactions = 0\n\n\nclass Monitor(object):\n\n def __init__(self, callback=None, event=None):\n \"\"\"\n Constructor for a monitor instance\n\n callback will be called with each recovered transaction as the argument\n\n If the callback isn't used, received transactions will be placed on a\n queue and the event used to notify any consumers.\n \"\"\"\n self._event = event\n self._wait_event = None\n self._recvQ = []\n self._callbacks = []\n self.stats = MonitorStatistics()\n self._wait_event = Event()\n\n # Subclasses may already set up logging\n if not hasattr(self, \"log\"):\n self.log = SimLog(\"cocotb.monitor.%s\" % (self.__class__.__name__))\n\n if callback is not None:\n self.add_callback(callback)\n\n # Create an independent coroutine which can receive stuff\n self._thread = cocotb.scheduler.add(self._monitor_recv())\n\n def kill(self):\n if self._thread:\n self._thread.kill()\n self._thread = None\n\n def __len__(self):\n return len(self._recvQ)\n\n def __getitem__(self, idx):\n return self._recvQ[idx]\n\n def add_callback(self, callback):\n self.log.debug(\"Adding callback of function %s to monitor\" %\n (callback.__name__))\n self._callbacks.append(callback)\n\n @coroutine\n def wait_for_recv(self, timeout=None):\n if timeout:\n t = Timer(timeout)\n fired = yield [self._wait_event.wait(), t]\n if fired is t:\n raise ReturnValue(None)\n else:\n yield self._wait_event.wait()\n\n pkt = self._wait_event.data\n raise ReturnValue(pkt)\n\n @coroutine\n def _monitor_recv(self):\n \"\"\"\n actual impementation of the receiver\n\n subclasses should override this method to implement the actual receive\n routine and call self._recv() with the recovered transaction\n \"\"\"\n raise NotImplementedError(\"Attempt to use base monitor class without \"\n \"providing a _monitor_recv method\")\n\n def _recv(self, transaction):\n \"\"\"Common handling of a received transaction.\"\"\"\n\n self.stats.received_transactions += 1\n\n # either callback based consumer\n for callback in self._callbacks:\n callback(transaction)\n\n # Or queued with a notification\n if not self._callbacks:\n self._recvQ.append(transaction)\n\n if self._event is not None:\n self._event.set()\n\n # If anyone was waiting then let them know\n if self._wait_event is not None:\n self._wait_event.set(data=transaction)\n self._wait_event.clear()\n\n\nclass BusMonitor(Monitor):\n \"\"\"\n Wrapper providing common functionality for monitoring busses\n \"\"\"\n _signals = []\n _optional_signals = []\n\n def __init__(self, entity, name, clock, reset=None, reset_n=None,\n callback=None, event=None):\n self.log = SimLog(\"cocotb.%s.%s\" % (entity.name, name))\n self.entity = entity\n self.name = name\n self.clock = clock\n self.bus = Bus(self.entity, self.name, self._signals,\n optional_signals=self._optional_signals)\n self._reset = reset\n self._reset_n = reset_n\n Monitor.__init__(self, callback=callback, event=event)\n\n @property\n def 
in_reset(self):\n if self._reset_n is not None:\n return not bool(self._reset_n.value.integer)\n if self._reset is not None:\n return bool(self._reset_n.value.integer)\n return False\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.name)\n", "path": "cocotb/monitors/__init__.py"}], "after_files": [{"content": "#!/bin/env python\n\n''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''\n\n\"\"\"\n\n Class defining the standard interface for a monitor within a testbench\n\n The monitor is responsible for watching the pins of the DUT and recreating\n the transactions\n\"\"\"\n\nimport math\n\nimport cocotb\nfrom cocotb.decorators import coroutine\nfrom cocotb.triggers import Edge, Event, RisingEdge, ReadOnly, Timer\nfrom cocotb.binary import BinaryValue\nfrom cocotb.bus import Bus\nfrom cocotb.log import SimLog\nfrom cocotb.result import ReturnValue\n\n\nclass MonitorStatistics(object):\n \"\"\"Wrapper class for storing Monitor statistics\"\"\"\n def __init__(self):\n self.received_transactions = 0\n\n\nclass Monitor(object):\n\n def __init__(self, callback=None, event=None):\n \"\"\"\n Constructor for a monitor instance\n\n callback will be called with each recovered transaction as the argument\n\n If the callback isn't used, received transactions will be placed on a\n queue and the event used to notify any consumers.\n \"\"\"\n self._event = event\n self._wait_event = None\n self._recvQ = []\n self._callbacks = []\n self.stats = MonitorStatistics()\n self._wait_event = Event()\n\n # Subclasses may already set up logging\n if not hasattr(self, \"log\"):\n self.log = SimLog(\"cocotb.monitor.%s\" % (self.__class__.__name__))\n\n if callback is not None:\n self.add_callback(callback)\n\n # Create an independent coroutine which can receive stuff\n self._thread = cocotb.scheduler.add(self._monitor_recv())\n\n def kill(self):\n if self._thread:\n self._thread.kill()\n self._thread = None\n\n def __len__(self):\n return len(self._recvQ)\n\n def __getitem__(self, idx):\n return self._recvQ[idx]\n\n def add_callback(self, callback):\n self.log.debug(\"Adding callback of function %s to monitor\" %\n (callback.__name__))\n self._callbacks.append(callback)\n\n @coroutine\n def wait_for_recv(self, timeout=None):\n if timeout:\n t = Timer(timeout)\n fired = yield [self._wait_event.wait(), t]\n if fired is t:\n raise ReturnValue(None)\n else:\n yield self._wait_event.wait()\n\n pkt = self._wait_event.data\n raise ReturnValue(pkt)\n\n @coroutine\n def _monitor_recv(self):\n \"\"\"\n actual impementation of the receiver\n\n subclasses should override this method to implement the actual receive\n routine and call self._recv() with the recovered transaction\n \"\"\"\n raise NotImplementedError(\"Attempt to use base monitor class without \"\n \"providing a _monitor_recv method\")\n\n def _recv(self, transaction):\n \"\"\"Common handling of a received transaction.\"\"\"\n\n self.stats.received_transactions += 1\n\n # either callback based consumer\n for callback in self._callbacks:\n callback(transaction)\n\n # Or queued with a notification\n if not self._callbacks:\n self._recvQ.append(transaction)\n\n if self._event is not None:\n self._event.set()\n\n # If anyone was waiting then let them know\n if self._wait_event is not None:\n self._wait_event.set(data=transaction)\n self._wait_event.clear()\n\n\nclass BusMonitor(Monitor):\n \"\"\"\n Wrapper providing common functionality for monitoring busses\n \"\"\"\n _signals = []\n _optional_signals = []\n\n def __init__(self, entity, name, clock, reset=None, reset_n=None,\n callback=None, event=None):\n self.log = SimLog(\"cocotb.%s.%s\" % (entity.name, name))\n self.entity = entity\n self.name = name\n self.clock = clock\n self.bus = Bus(self.entity, self.name, self._signals,\n optional_signals=self._optional_signals)\n self._reset = reset\n self._reset_n = reset_n\n Monitor.__init__(self, callback=callback, event=event)\n\n @property\n def 
in_reset(self):\n if self._reset_n is not None:\n return not bool(self._reset_n.value.integer)\n if self._reset is not None:\n return bool(self._reset.value.integer)\n return False\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.name)\n", "path": "cocotb/monitors/__init__.py"}]} | 2,016 | 132 |
gh_patches_debug_17275 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5892 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove private messages from the Django administration interface
At the moment, a super-admin can read every private message on the site through the Django admin interface. Granted, the interface is rather impractical for that (no notion of threads, etc.), but I still find it quite undesirable.

After discussing this with @gcodeur, I therefore propose to **remove private messages from the Django administration interface**. Someone with production access could still read them (since they are not end-to-end encrypted), but it would reduce their exposure accordingly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/mp/admin.py`
Content:
```
1 from django.contrib import admin
2
3 from .models import PrivatePost, PrivateTopic, PrivateTopicRead
4
5
6 class PrivatePostAdmin(admin.ModelAdmin):
7
8 """Representation of PrivatePost model in the admin interface."""
9
10 list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')
11 raw_id_fields = ('privatetopic', 'author')
12
13
14 class PrivateTopicAdmin(admin.ModelAdmin):
15
16 """Representation of PrivateTopic model in the admin interface."""
17
18 list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')
19 raw_id_fields = ('author', 'participants', 'last_message')
20
21
22 class PrivateTopicReadAdmin(admin.ModelAdmin):
23
24 """Representation of PrivateTopicRead model in the admin interface."""
25
26 list_display = ('privatetopic', 'privatepost', 'user')
27 raw_id_fields = ('privatetopic', 'privatepost', 'user')
28
29
30 admin.site.register(PrivatePost, PrivatePostAdmin)
31 admin.site.register(PrivateTopic, PrivateTopicAdmin)
32 admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/mp/admin.py b/zds/mp/admin.py
deleted file mode 100644
--- a/zds/mp/admin.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from django.contrib import admin
-
-from .models import PrivatePost, PrivateTopic, PrivateTopicRead
-
-
-class PrivatePostAdmin(admin.ModelAdmin):
-
- """Representation of PrivatePost model in the admin interface."""
-
- list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')
- raw_id_fields = ('privatetopic', 'author')
-
-
-class PrivateTopicAdmin(admin.ModelAdmin):
-
- """Representation of PrivateTopic model in the admin interface."""
-
- list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')
- raw_id_fields = ('author', 'participants', 'last_message')
-
-
-class PrivateTopicReadAdmin(admin.ModelAdmin):
-
- """Representation of PrivateTopicRead model in the admin interface."""
-
- list_display = ('privatetopic', 'privatepost', 'user')
- raw_id_fields = ('privatetopic', 'privatepost', 'user')
-
-
-admin.site.register(PrivatePost, PrivatePostAdmin)
-admin.site.register(PrivateTopic, PrivateTopicAdmin)
-admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)
| {"golden_diff": "diff --git a/zds/mp/admin.py b/zds/mp/admin.py\ndeleted file mode 100644\n--- a/zds/mp/admin.py\n+++ /dev/null\n@@ -1,32 +0,0 @@\n-from django.contrib import admin\n-\n-from .models import PrivatePost, PrivateTopic, PrivateTopicRead\n-\n-\n-class PrivatePostAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivatePost model in the admin interface.\"\"\"\n-\n- list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')\n- raw_id_fields = ('privatetopic', 'author')\n-\n-\n-class PrivateTopicAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivateTopic model in the admin interface.\"\"\"\n-\n- list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')\n- raw_id_fields = ('author', 'participants', 'last_message')\n-\n-\n-class PrivateTopicReadAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivateTopicRead model in the admin interface.\"\"\"\n-\n- list_display = ('privatetopic', 'privatepost', 'user')\n- raw_id_fields = ('privatetopic', 'privatepost', 'user')\n-\n-\n-admin.site.register(PrivatePost, PrivatePostAdmin)\n-admin.site.register(PrivateTopic, PrivateTopicAdmin)\n-admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)\n", "issue": "Supprimer les messages priv\u00e9s de l'interface d'administration de Django\n\u00c0 l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour \u00e7a (aucune notion de fil, etc.), mais je trouve tout de m\u00eame bien peu souhaitable.\r\n\r\n\r\n\r\nApr\u00e8s discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. Une personne avec les acc\u00e8s prod pourrait toujours les lire (vu qu'ils ne sont pas chiffr\u00e9s de bout en bout), mais \u00e7a limiterait d'autant l'exposition.\nSupprimer les messages priv\u00e9s de l'interface d'administration de Django\n\u00c0 l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour \u00e7a (aucune notion de fil, etc.), mais je trouve tout de m\u00eame bien peu souhaitable.\r\n\r\n\r\n\r\nApr\u00e8s discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. 
Une personne avec les acc\u00e8s prod pourrait toujours les lire (vu qu'ils ne sont pas chiffr\u00e9s de bout en bout), mais \u00e7a limiterait d'autant l'exposition.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import PrivatePost, PrivateTopic, PrivateTopicRead\n\n\nclass PrivatePostAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivatePost model in the admin interface.\"\"\"\n\n list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')\n raw_id_fields = ('privatetopic', 'author')\n\n\nclass PrivateTopicAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivateTopic model in the admin interface.\"\"\"\n\n list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')\n raw_id_fields = ('author', 'participants', 'last_message')\n\n\nclass PrivateTopicReadAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivateTopicRead model in the admin interface.\"\"\"\n\n list_display = ('privatetopic', 'privatepost', 'user')\n raw_id_fields = ('privatetopic', 'privatepost', 'user')\n\n\nadmin.site.register(PrivatePost, PrivatePostAdmin)\nadmin.site.register(PrivateTopic, PrivateTopicAdmin)\nadmin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)\n", "path": "zds/mp/admin.py"}], "after_files": [{"content": null, "path": "zds/mp/admin.py"}]} | 997 | 299 |
gh_patches_debug_49870 | rasdani/github-patches | git_diff | fossasia__open-event-server-4398 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Attendee: user/<id>/attendee gives Error 400
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
URL
```
https://open-event-api.herokuapp.com/v1/users/5/attendees?include=ticket,event,order
```
ERROR
```
{
"errors":[
{
"title":"Invalid include querystring parameter.",
"source":{
"parameter":"include"
},
"status":400,
"detail":"AttendeeSchemaPublic has no attribute ticket"
}
],
"jsonapi":{
"version":"1.0"
}
}
```
Related Front-end route
```
https://open-event-frontend.herokuapp.com/my-tickets
```
Due to recent changes, the URL gives ERROR 400.
@poush @shubham-padia @enigmaeth @magdalenesuo Please have a look at it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/attendees.py`
Content:
```
1 from flask_jwt import current_identity
2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
3
4 from app.api.bootstrap import api
5 from app.api.helpers.db import safe_query
6 from app.api.helpers.exceptions import ForbiddenException
7 from app.api.helpers.permission_manager import has_access
8 from app.api.helpers.permissions import jwt_required
9 from app.api.helpers.query import event_query
10 from app.api.helpers.utilities import require_relationship
11 from app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic
12 from app.models import db
13 from app.models.order import Order
14 from app.models.ticket import Ticket
15 from app.models.ticket_holder import TicketHolder
16 from app.models.user import User
17
18
19 class AttendeeListPost(ResourceList):
20 """
21 List and create Attendees through direct URL
22 """
23
24 def before_post(self, args, kwargs, data):
25 require_relationship(['ticket', 'event'], data)
26 if not has_access('is_coorganizer', event_id=data['event']):
27 raise ForbiddenException({'source': 'event_id'}, "Access Forbidden")
28
29 methods = ['POST']
30 schema = AttendeeSchema
31 data_layer = {'session': db.session,
32 'model': TicketHolder}
33
34
35 class AttendeeList(ResourceList):
36 """
37 List Attendees
38 """
39 def before_get(self, args, kwargs):
40 if kwargs.get('user_id'):
41 self.schema = AttendeeSchemaPublic
42
43 def query(self, view_kwargs):
44 query_ = self.session.query(TicketHolder)
45
46 if view_kwargs.get('order_identifier'):
47 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
48 if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',
49 id=order.user_id):
50 raise ForbiddenException({'source': ''}, 'Access Forbidden')
51 query_ = query_.join(Order).filter(Order.id == order.id)
52
53 if view_kwargs.get('ticket_id'):
54 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
55 if not has_access('is_registrar', event_id=ticket.event_id):
56 raise ForbiddenException({'source': ''}, 'Access Forbidden')
57 query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)
58
59 if view_kwargs.get('user_id'):
60 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
61 if not has_access('is_user_itself', user_id=user.id):
62 raise ForbiddenException({'source': ''}, 'Access Forbidden')
63 query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)
64
65 query_ = event_query(self, query_, view_kwargs, permission='is_registrar')
66 return query_
67
68 view_kwargs = True
69 methods = ['GET', ]
70 schema = AttendeeSchema
71 data_layer = {'session': db.session,
72 'model': TicketHolder,
73 'methods': {
74 'query': query
75 }}
76
77
78 class AttendeeDetail(ResourceDetail):
79 """
80 Attendee detail by id
81 """
82 def before_get_object(self, view_kwargs):
83 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')
84 if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):
85 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
86
87 def before_delete_object(self, obj, kwargs):
88 if not has_access('is_registrar', event_id=obj.event_id):
89 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
90
91 def before_update_object(self, obj, data, kwargs):
92 if not has_access('is_registrar', event_id=obj.event_id):
93 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
94
95 decorators = (jwt_required,)
96 schema = AttendeeSchema
97 data_layer = {'session': db.session,
98 'model': TicketHolder,
99 'methods': {
100 'before_get_object': before_get_object,
101 'before_update_object': before_update_object,
102 'before_delete_object': before_delete_object
103 }}
104
105
106 class AttendeeRelationshipRequired(ResourceRelationship):
107 """
108 Attendee Relationship (Required)
109 """
110 decorators = (jwt_required,)
111 methods = ['GET', 'PATCH']
112 schema = AttendeeSchema
113 data_layer = {'session': db.session,
114 'model': TicketHolder}
115
116
117 class AttendeeRelationshipOptional(ResourceRelationship):
118 """
119 Attendee Relationship(Optional)
120 """
121 decorators = (api.has_permission('is_user_itself', fetch="user_id", fetch_as="id", model=TicketHolder),)
122 schema = AttendeeSchema
123 data_layer = {'session': db.session,
124 'model': TicketHolder}
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/attendees.py b/app/api/attendees.py
--- a/app/api/attendees.py
+++ b/app/api/attendees.py
@@ -36,10 +36,6 @@
"""
List Attendees
"""
- def before_get(self, args, kwargs):
- if kwargs.get('user_id'):
- self.schema = AttendeeSchemaPublic
-
def query(self, view_kwargs):
query_ = self.session.query(TicketHolder)
| {"golden_diff": "diff --git a/app/api/attendees.py b/app/api/attendees.py\n--- a/app/api/attendees.py\n+++ b/app/api/attendees.py\n@@ -36,10 +36,6 @@\n \"\"\"\n List Attendees\n \"\"\"\n- def before_get(self, args, kwargs):\n- if kwargs.get('user_id'):\n- self.schema = AttendeeSchemaPublic\n-\n def query(self, view_kwargs):\n query_ = self.session.query(TicketHolder)\n", "issue": "Attendee : user/<id>/attendee gives Error 400\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nURL\r\n```\r\nhttps://open-event-api.herokuapp.com/v1/users/5/attendees?include=ticket,event,order\r\n```\r\n\r\nERROR\r\n```\r\n{\r\n \"errors\":[\r\n {\r\n \"title\":\"Invalid include querystring parameter.\",\r\n \"source\":{\r\n \"parameter\":\"include\"\r\n },\r\n \"status\":400,\r\n \"detail\":\"AttendeeSchemaPublic has no attribute ticket\"\r\n }\r\n ],\r\n \"jsonapi\":{\r\n \"version\":\"1.0\"\r\n }\r\n}\r\n```\r\nRelated Front-end route\r\n```\r\nhttps://open-event-frontend.herokuapp.com/my-tickets\r\n```\r\nDue to recent changes the URL gives ERROR 400.\r\n@poush @shubham-padia @enigmaeth @magdalenesuo Please have a look at it\n", "before_files": [{"content": "from flask_jwt import current_identity\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n require_relationship(['ticket', 'event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': 'event_id'}, \"Access Forbidden\")\n\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def before_get(self, args, kwargs):\n if kwargs.get('user_id'):\n self.schema = AttendeeSchemaPublic\n\n def query(self, view_kwargs):\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',\n id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if 
view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n", "path": "app/api/attendees.py"}], "after_files": [{"content": "from flask_jwt import current_identity\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n require_relationship(['ticket', 'event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': 'event_id'}, \"Access Forbidden\")\n\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass 
AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def query(self, view_kwargs):\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',\n id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n", "path": "app/api/attendees.py"}]} | 1,825 | 108 |
gh_patches_debug_15406 | rasdani/github-patches | git_diff | vega__altair-3303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Verify versions of both VegaFusion packages
See https://github.com/altair-viz/altair/pull/3281#issuecomment-1867599879
We should check the version of `vegafusion-python-embed` as well as the version of `vegafusion` since it's possible for these to get out of sync.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/utils/_importers.py`
Content:
```
1 from types import ModuleType
2 from packaging.version import Version
3 from importlib.metadata import version as importlib_version
4
5
6 def import_vegafusion() -> ModuleType:
7 min_version = "1.5.0"
8 try:
9 version = importlib_version("vegafusion")
10 if Version(version) < Version(min_version):
11 raise RuntimeError(
12 f"The vegafusion package must be version {min_version} or greater. "
13 f"Found version {version}"
14 )
15 import vegafusion as vf # type: ignore
16
17 return vf
18 except ImportError as err:
19 raise ImportError(
20 'The "vegafusion" data transformer and chart.transformed_data feature requires\n'
21 f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n"
22 "These can be installed with pip using:\n"
23 f' pip install "vegafusion[embed]>={min_version}"\n'
24 "Or with conda using:\n"
25 f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" '
26 f'"vegafusion>={min_version}"\n\n'
27 f"ImportError: {err.args[0]}"
28 ) from err
29
30
31 def import_vl_convert() -> ModuleType:
32 min_version = "1.1.0"
33 try:
34 version = importlib_version("vl-convert-python")
35 if Version(version) < Version(min_version):
36 raise RuntimeError(
37 f"The vl-convert-python package must be version {min_version} or greater. "
38 f"Found version {version}"
39 )
40 import vl_convert as vlc
41
42 return vlc
43 except ImportError as err:
44 raise ImportError(
45 f"The vl-convert Vega-Lite compiler and file export feature requires\n"
46 f"version {min_version} or greater of the 'vl-convert-python' package. \n"
47 f"This can be installed with pip using:\n"
48 f' pip install "vl-convert-python>={min_version}"\n'
49 "or conda:\n"
50 f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n'
51 f"ImportError: {err.args[0]}"
52 ) from err
53
54
55 def vl_version_for_vl_convert() -> str:
56 from ..vegalite import SCHEMA_VERSION
57
58 # Compute VlConvert's vl_version string (of the form 'v5_2')
59 # from SCHEMA_VERSION (of the form 'v5.2.0')
60 return "_".join(SCHEMA_VERSION.split(".")[:2])
61
62
63 def import_pyarrow_interchange() -> ModuleType:
64 min_version = "11.0.0"
65 try:
66 version = importlib_version("pyarrow")
67
68 if Version(version) < Version(min_version):
69 raise RuntimeError(
70 f"The pyarrow package must be version {min_version} or greater. "
71 f"Found version {version}"
72 )
73 import pyarrow.interchange as pi
74
75 return pi
76 except ImportError as err:
77 raise ImportError(
78 f"Usage of the DataFrame Interchange Protocol requires\n"
79 f"version {min_version} or greater of the pyarrow package. \n"
80 f"This can be installed with pip using:\n"
81 f' pip install "pyarrow>={min_version}"\n'
82 "or conda:\n"
83 f' conda install -c conda-forge "pyarrow>={min_version}"\n\n'
84 f"ImportError: {err.args[0]}"
85 ) from err
86
87
88 def pyarrow_available() -> bool:
89 try:
90 import_pyarrow_interchange()
91 return True
92 except ImportError:
93 return False
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py
--- a/altair/utils/_importers.py
+++ b/altair/utils/_importers.py
@@ -7,10 +7,14 @@
min_version = "1.5.0"
try:
version = importlib_version("vegafusion")
- if Version(version) < Version(min_version):
+ embed_version = importlib_version("vegafusion-python-embed")
+ if version != embed_version or Version(version) < Version(min_version):
raise RuntimeError(
- f"The vegafusion package must be version {min_version} or greater. "
- f"Found version {version}"
+ "The versions of the vegafusion and vegafusion-python-embed packages must match\n"
+ f"and must be version {min_version} or greater.\n"
+ f"Found:\n"
+ f" - vegafusion=={version}\n"
+ f" - vegafusion-python-embed=={embed_version}\n"
)
import vegafusion as vf # type: ignore
| {"golden_diff": "diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py\n--- a/altair/utils/_importers.py\n+++ b/altair/utils/_importers.py\n@@ -7,10 +7,14 @@\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n- if Version(version) < Version(min_version):\n+ embed_version = importlib_version(\"vegafusion-python-embed\")\n+ if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n- f\"The vegafusion package must be version {min_version} or greater. \"\n- f\"Found version {version}\"\n+ \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n+ f\"and must be version {min_version} or greater.\\n\"\n+ f\"Found:\\n\"\n+ f\" - vegafusion=={version}\\n\"\n+ f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n", "issue": "Verify versions of both VegaFusion packages\nSee https://github.com/altair-viz/altair/pull/3281#issuecomment-1867599879\r\n\r\nWe should check the version of `vegafusion-python-embed` as well as the version of `vegafusion` since it's possible for these to get out of sync.\r\n\r\n\n", "before_files": [{"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vegafusion package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.1.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. 
\"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n", "path": "altair/utils/_importers.py"}], "after_files": [{"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n embed_version = importlib_version(\"vegafusion-python-embed\")\n if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n f\"and must be version {min_version} or greater.\\n\"\n f\"Found:\\n\"\n f\" - vegafusion=={version}\\n\"\n f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.1.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. 
\\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n", "path": "altair/utils/_importers.py"}]} | 1,363 | 252 |
gh_patches_debug_4396 | rasdani/github-patches | git_diff | oppia__oppia-1465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In the rich-text editor, auto-prepend "https://" to links which don't specify a protocol
```
Currently the non-interactive link widget will only accept links that begin
with either "http://" or "https://". I propose that whenever a link does not,
e.g. "www.google.com" we automatically prepend "http://www.google.com" to the
link string that is stored.
```
Original issue reported on code.google.com by `[email protected]` on 24 Aug 2014 at 9:43
--- END ISSUE ---
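The original report describes the desired normalization, but none of the files below implement it (the eventual patch only rewords the URL field's help text). A minimal, hypothetical sketch of the prepending behaviour being requested is shown here — the helper name and its placement are assumptions, not Oppia's actual implementation:
```python
def normalize_link_url(url):
    """Prepend a default protocol when the author omits one."""
    # The issue body suggests "http://", while the updated help text promises
    # HTTPS; this sketch follows the HTTPS wording.
    if not url.startswith(('http://', 'https://')):
        return 'https://' + url
    return url

# normalize_link_url('www.google.com')     -> 'https://www.google.com'
# normalize_link_url('http://example.com') -> 'http://example.com'
```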
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `extensions/rich_text_components/Link/Link.py`
Content:
```
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from extensions.rich_text_components import base
18
19
20 class Link(base.BaseRichTextComponent):
21 """A rich-text component for displaying links."""
22
23 name = 'Link'
24 category = 'Basic Input'
25 description = 'A link to a URL.'
26 frontend_name = 'link'
27 tooltip = 'Insert link'
28
29 _customization_arg_specs = [{
30 'name': 'url',
31 'description': (
32 'The link URL. It must start with http:// or https://'),
33 'schema': {
34 'type': 'custom',
35 'obj_type': 'SanitizedUrl',
36 },
37 'default_value': 'https://www.example.com',
38 }, {
39 'name': 'text',
40 'description': (
41 'The link text. If left blank, the link URL will be used.'),
42 'schema': {
43 'type': 'unicode',
44 },
45 'default_value': '',
46 }, {
47 'name': 'open_link_in_same_window',
48 'description': 'Open the link in the same window?',
49 'schema': {
50 'type': 'bool'
51 },
52 'default_value': False,
53 }]
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/extensions/rich_text_components/Link/Link.py b/extensions/rich_text_components/Link/Link.py
--- a/extensions/rich_text_components/Link/Link.py
+++ b/extensions/rich_text_components/Link/Link.py
@@ -29,7 +29,7 @@
_customization_arg_specs = [{
'name': 'url',
'description': (
- 'The link URL. It must start with http:// or https://'),
+ 'The link URL. If no protocol is specified, HTTPS will be used.'),
'schema': {
'type': 'custom',
'obj_type': 'SanitizedUrl',
| {"golden_diff": "diff --git a/extensions/rich_text_components/Link/Link.py b/extensions/rich_text_components/Link/Link.py\n--- a/extensions/rich_text_components/Link/Link.py\n+++ b/extensions/rich_text_components/Link/Link.py\n@@ -29,7 +29,7 @@\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n- 'The link URL. It must start with http:// or https://'),\n+ 'The link URL. If no protocol is specified, HTTPS will be used.'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n", "issue": "In the rich-text editor, auto-prepend \"https://\" to links which don't specify a protocol\n```\nCurrently the non-interactive link widget will only accept links that begin \nwith either \"http://\" or \"https://\". I propose that whenever a link does not, \ne.g. \"www.google.com\" we automatically prepend \"http://www.google.com\" to the \nlink string that is stored.\n```\n\nOriginal issue reported on code.google.com by `[email protected]` on 24 Aug 2014 at 9:43\n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Link(base.BaseRichTextComponent):\n \"\"\"A rich-text component for displaying links.\"\"\"\n\n name = 'Link'\n category = 'Basic Input'\n description = 'A link to a URL.'\n frontend_name = 'link'\n tooltip = 'Insert link'\n\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n 'The link URL. It must start with http:// or https://'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n },\n 'default_value': 'https://www.example.com',\n }, {\n 'name': 'text',\n 'description': (\n 'The link text. If left blank, the link URL will be used.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'open_link_in_same_window',\n 'description': 'Open the link in the same window?',\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n", "path": "extensions/rich_text_components/Link/Link.py"}], "after_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Link(base.BaseRichTextComponent):\n \"\"\"A rich-text component for displaying links.\"\"\"\n\n name = 'Link'\n category = 'Basic Input'\n description = 'A link to a URL.'\n frontend_name = 'link'\n tooltip = 'Insert link'\n\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n 'The link URL. If no protocol is specified, HTTPS will be used.'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n },\n 'default_value': 'https://www.example.com',\n }, {\n 'name': 'text',\n 'description': (\n 'The link text. If left blank, the link URL will be used.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'open_link_in_same_window',\n 'description': 'Open the link in the same window?',\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n", "path": "extensions/rich_text_components/Link/Link.py"}]} | 871 | 141 |
gh_patches_debug_26467 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-891 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import of Bezirksregionen stopped working
`$ manage.py import_geodata --gdal-legacy`
Leads to a `KeyError`, probably the data format has changed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/maps/management/commands/import_geodata.py`
Content:
```
1 import json
2 import os
3 import subprocess
4 import sys
5
6 from django.core.management.base import BaseCommand
7
8 from meinberlin.apps.maps import models as map_models
9
10
11 class Command(BaseCommand):
12 help = 'Create map presets for berlin GEO-Data'
13
14 def add_arguments(self, parser):
15 parser.add_argument(
16 '--gdal-legacy',
17 action='store_true',
18 dest='gdal_legacy',
19 default=False,
20 help='GDAL version <= 1.10',
21 )
22
23 def handle(self, *args, **options):
24 self.is_gdal_legacy = options['gdal_legacy']
25 self._import_districts()
26 self._import_regions()
27
28 def _import_districts(self):
29 category = self._preset_category('Berlin')
30 tmpfile = '/tmp/bezirke.json'
31 url = 'http://fbinter.stadt-berlin.de/fb/' \
32 'wfs/geometry/senstadt/re_bezirke/'
33 self._download_geodata(tmpfile, url, 'fis:re_bezirke')
34 data = json.load(open(tmpfile, 'r'))
35 for feature in data['features']:
36 district = feature['properties']['spatial_alias']
37 if not map_models.MapPreset.objects.filter(name=district).exists():
38 self._create_map_preset(district, feature, category)
39 os.remove(tmpfile)
40
41 def _import_regions(self):
42 url = 'http://fbinter.stadt-berlin.de/fb/' \
43 'wfs/geometry/senstadt/re_bezirksregion'
44 tmpfile = '/tmp/bezirksregions.json'
45 self._download_geodata(tmpfile, url,
46 'fis:re_bezirksregion')
47 data = json.load(open(tmpfile, 'r'))
48 for feature in data['features']:
49 district = feature['properties']['BEZIRK']
50 region = feature['properties']['BZR_NAME']
51 category = self._preset_category(district)
52 if not map_models.MapPreset.objects.filter(name=region).exists():
53 self._create_map_preset(region, feature, category)
54 os.remove(tmpfile)
55
56 def _preset_category(self, name):
57 category, _ = \
58 map_models.MapPresetCategory.objects.get_or_create(name=name)
59 return category
60
61 def _create_map_preset(self, name, feature, category):
62 polygon = {
63 'type': 'FeatureCollection',
64 'features': [feature]
65 }
66 map_preset = map_models.MapPreset(
67 name=name,
68 polygon=polygon,
69 category=category
70 )
71 map_preset.save()
72
73 def _download_geodata(self, filename: str, url: str, layer: str):
74 try:
75 os.remove(filename)
76 except:
77 pass
78
79 src = 'WFS:{}{}'.format(
80 url,
81 '?TYPENAMES=GML2' if self.is_gdal_legacy else ''
82 )
83 try:
84 print('Trying to download file from {}'.format(url))
85 subprocess.check_call([
86 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84',
87 '-f', 'geoJSON', filename, src, layer
88 ])
89 except FileNotFoundError as e:
90 print('Make sure ogr2ogr is installed and in user PATH.')
91 sys.exit(e)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/maps/management/commands/import_geodata.py b/meinberlin/apps/maps/management/commands/import_geodata.py
--- a/meinberlin/apps/maps/management/commands/import_geodata.py
+++ b/meinberlin/apps/maps/management/commands/import_geodata.py
@@ -40,13 +40,13 @@
def _import_regions(self):
url = 'http://fbinter.stadt-berlin.de/fb/' \
- 'wfs/geometry/senstadt/re_bezirksregion'
+ 'wfs/geometry/senstadt/re_bezirksregion/'
tmpfile = '/tmp/bezirksregions.json'
self._download_geodata(tmpfile, url,
'fis:re_bezirksregion')
data = json.load(open(tmpfile, 'r'))
for feature in data['features']:
- district = feature['properties']['BEZIRK']
+ district = feature['properties']['BEZNAME']
region = feature['properties']['BZR_NAME']
category = self._preset_category(district)
if not map_models.MapPreset.objects.filter(name=region).exists():
@@ -78,7 +78,7 @@
src = 'WFS:{}{}'.format(
url,
- '?TYPENAMES=GML2' if self.is_gdal_legacy else ''
+ '?VERSION=1.1.0' if self.is_gdal_legacy else ''
)
try:
print('Trying to download file from {}'.format(url))
| {"golden_diff": "diff --git a/meinberlin/apps/maps/management/commands/import_geodata.py b/meinberlin/apps/maps/management/commands/import_geodata.py\n--- a/meinberlin/apps/maps/management/commands/import_geodata.py\n+++ b/meinberlin/apps/maps/management/commands/import_geodata.py\n@@ -40,13 +40,13 @@\n \n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n- 'wfs/geometry/senstadt/re_bezirksregion'\n+ 'wfs/geometry/senstadt/re_bezirksregion/'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n- district = feature['properties']['BEZIRK']\n+ district = feature['properties']['BEZNAME']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n@@ -78,7 +78,7 @@\n \n src = 'WFS:{}{}'.format(\n url,\n- '?TYPENAMES=GML2' if self.is_gdal_legacy else ''\n+ '?VERSION=1.1.0' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n", "issue": "Import of Bezirksregionen stopped working\n`$ manage.py import_geodata --gdal-legacy`\r\n\r\nLeads to a `KeyError`, probably the data format has changed.\r\n\n", "before_files": [{"content": "import json\nimport os\nimport subprocess\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom meinberlin.apps.maps import models as map_models\n\n\nclass Command(BaseCommand):\n help = 'Create map presets for berlin GEO-Data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--gdal-legacy',\n action='store_true',\n dest='gdal_legacy',\n default=False,\n help='GDAL version <= 1.10',\n )\n\n def handle(self, *args, **options):\n self.is_gdal_legacy = options['gdal_legacy']\n self._import_districts()\n self._import_regions()\n\n def _import_districts(self):\n category = self._preset_category('Berlin')\n tmpfile = '/tmp/bezirke.json'\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirke/'\n self._download_geodata(tmpfile, url, 'fis:re_bezirke')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['spatial_alias']\n if not map_models.MapPreset.objects.filter(name=district).exists():\n self._create_map_preset(district, feature, category)\n os.remove(tmpfile)\n\n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirksregion'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['BEZIRK']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n self._create_map_preset(region, feature, category)\n os.remove(tmpfile)\n\n def _preset_category(self, name):\n category, _ = \\\n map_models.MapPresetCategory.objects.get_or_create(name=name)\n return category\n\n def _create_map_preset(self, name, feature, category):\n polygon = {\n 'type': 'FeatureCollection',\n 'features': [feature]\n }\n map_preset = map_models.MapPreset(\n name=name,\n polygon=polygon,\n category=category\n )\n map_preset.save()\n\n def _download_geodata(self, filename: str, url: str, layer: str):\n try:\n os.remove(filename)\n except:\n pass\n\n src = 'WFS:{}{}'.format(\n url,\n 
'?TYPENAMES=GML2' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n subprocess.check_call([\n 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84',\n '-f', 'geoJSON', filename, src, layer\n ])\n except FileNotFoundError as e:\n print('Make sure ogr2ogr is installed and in user PATH.')\n sys.exit(e)\n", "path": "meinberlin/apps/maps/management/commands/import_geodata.py"}], "after_files": [{"content": "import json\nimport os\nimport subprocess\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom meinberlin.apps.maps import models as map_models\n\n\nclass Command(BaseCommand):\n help = 'Create map presets for berlin GEO-Data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--gdal-legacy',\n action='store_true',\n dest='gdal_legacy',\n default=False,\n help='GDAL version <= 1.10',\n )\n\n def handle(self, *args, **options):\n self.is_gdal_legacy = options['gdal_legacy']\n self._import_districts()\n self._import_regions()\n\n def _import_districts(self):\n category = self._preset_category('Berlin')\n tmpfile = '/tmp/bezirke.json'\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirke/'\n self._download_geodata(tmpfile, url, 'fis:re_bezirke')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['spatial_alias']\n if not map_models.MapPreset.objects.filter(name=district).exists():\n self._create_map_preset(district, feature, category)\n os.remove(tmpfile)\n\n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirksregion/'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['BEZNAME']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n self._create_map_preset(region, feature, category)\n os.remove(tmpfile)\n\n def _preset_category(self, name):\n category, _ = \\\n map_models.MapPresetCategory.objects.get_or_create(name=name)\n return category\n\n def _create_map_preset(self, name, feature, category):\n polygon = {\n 'type': 'FeatureCollection',\n 'features': [feature]\n }\n map_preset = map_models.MapPreset(\n name=name,\n polygon=polygon,\n category=category\n )\n map_preset.save()\n\n def _download_geodata(self, filename: str, url: str, layer: str):\n try:\n os.remove(filename)\n except:\n pass\n\n src = 'WFS:{}{}'.format(\n url,\n '?VERSION=1.1.0' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n subprocess.check_call([\n 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84',\n '-f', 'geoJSON', filename, src, layer\n ])\n except FileNotFoundError as e:\n print('Make sure ogr2ogr is installed and in user PATH.')\n sys.exit(e)\n", "path": "meinberlin/apps/maps/management/commands/import_geodata.py"}]} | 1,215 | 341 |
gh_patches_debug_4242 | rasdani/github-patches | git_diff | kivy__python-for-android-1995 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TestGetSystemPythonExecutable.test_virtualenv test fail
The `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden.
Error was:
```
ModuleNotFoundError: No module named 'pytoml'
```
This can be reproduced locally via:
```sh
pytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv
```
--- END ISSUE ---
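Before looking at the packaging metadata, it can help to check which of the helper packages is actually importable in the failing environment. This is a hypothetical local-debugging sketch, not part of the repository; the idea that a newer `pep517` release stopped pulling in `pytoml` is an assumption suggested by the version pin in the patch further down, not something the issue itself states.
```python
import importlib

# Hypothetical sketch: report whether the packages pythonpackage.py relies on
# are importable, and which versions were resolved in this environment.
for name in ('pep517', 'pytoml', 'virtualenv'):
    try:
        module = importlib.import_module(name)
        version = getattr(module, '__version__', 'importable (version attribute not set)')
        print(f'{name}: {version}')
    except ImportError as exc:
        print(f'{name}: NOT importable ({exc})')
```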
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1
2 import glob
3 from io import open # for open(..,encoding=...) parameter in python 2
4 from os import walk
5 from os.path import join, dirname, sep
6 import os
7 import re
8 from setuptools import setup, find_packages
9
10 # NOTE: All package data should also be set in MANIFEST.in
11
12 packages = find_packages()
13
14 package_data = {'': ['*.tmpl',
15 '*.patch', ], }
16
17 data_files = []
18
19
20
21 # must be a single statement since buildozer is currently parsing it, refs:
22 # https://github.com/kivy/buildozer/issues/722
23 install_reqs = [
24 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
25 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
26 'pep517', 'pytoml', 'virtualenv'
27 ]
28 # (pep517, pytoml and virtualenv are used by pythonpackage.py)
29
30 # By specifying every file manually, package_data will be able to
31 # include them in binary distributions. Note that we have to add
32 # everything as a 'pythonforandroid' rule, using '' apparently doesn't
33 # work.
34 def recursively_include(results, directory, patterns):
35 for root, subfolders, files in walk(directory):
36 for fn in files:
37 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):
38 continue
39 filename = join(root, fn)
40 directory = 'pythonforandroid'
41 if directory not in results:
42 results[directory] = []
43 results[directory].append(join(*filename.split(sep)[1:]))
44
45 recursively_include(package_data, 'pythonforandroid/recipes',
46 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
47 '*.mk', '*.jam', ])
48 recursively_include(package_data, 'pythonforandroid/bootstraps',
49 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
50 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',
51 '*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ])
52 recursively_include(package_data, 'pythonforandroid/bootstraps',
53 ['sdl-config', ])
54 recursively_include(package_data, 'pythonforandroid/bootstraps/webview',
55 ['*.html', ])
56 recursively_include(package_data, 'pythonforandroid',
57 ['liblink', 'biglink', 'liblink.sh'])
58
59 with open(join(dirname(__file__), 'README.md'),
60 encoding="utf-8",
61 errors="replace",
62 ) as fileh:
63 long_description = fileh.read()
64
65 init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')
66 version = None
67 try:
68 with open(init_filen,
69 encoding="utf-8",
70 errors="replace"
71 ) as fileh:
72 lines = fileh.readlines()
73 except IOError:
74 pass
75 else:
76 for line in lines:
77 line = line.strip()
78 if line.startswith('__version__ = '):
79 matches = re.findall(r'["\'].+["\']', line)
80 if matches:
81 version = matches[0].strip("'").strip('"')
82 break
83 if version is None:
84 raise Exception('Error: version could not be loaded from {}'.format(init_filen))
85
86 setup(name='python-for-android',
87 version=version,
88 description='Android APK packager for Python scripts and apps',
89 long_description=long_description,
90 long_description_content_type='text/markdown',
91 author='The Kivy team',
92 author_email='[email protected]',
93 url='https://github.com/kivy/python-for-android',
94 license='MIT',
95 install_requires=install_reqs,
96 entry_points={
97 'console_scripts': [
98 'python-for-android = pythonforandroid.entrypoints:main',
99 'p4a = pythonforandroid.entrypoints:main',
100 ],
101 'distutils.commands': [
102 'apk = pythonforandroid.bdistapk:BdistAPK',
103 ],
104 },
105 classifiers = [
106 'Development Status :: 5 - Production/Stable',
107 'Intended Audience :: Developers',
108 'License :: OSI Approved :: MIT License',
109 'Operating System :: Microsoft :: Windows',
110 'Operating System :: OS Independent',
111 'Operating System :: POSIX :: Linux',
112 'Operating System :: MacOS :: MacOS X',
113 'Operating System :: Android',
114 'Programming Language :: C',
115 'Programming Language :: Python :: 3',
116 'Topic :: Software Development',
117 'Topic :: Utilities',
118 ],
119 packages=packages,
120 package_data=package_data,
121 )
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
install_reqs = [
'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
- 'pep517', 'pytoml', 'virtualenv'
+ 'pep517<0.7.0"', 'pytoml', 'virtualenv'
]
# (pep517, pytoml and virtualenv are used by pythonpackage.py)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n install_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n- 'pep517', 'pytoml', 'virtualenv'\n+ 'pep517<0.7.0\"', 'pytoml', 'virtualenv'\n ]\n # (pep517, pytoml and virtualenv are used by pythonpackage.py)\n", "issue": "TestGetSystemPythonExecutable.test_virtualenv test fail\nThe `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden.\r\nError was:\r\n```\r\nModuleNotFoundError: No module named \\'pytoml\\'\\n'\r\n```\r\nThis ca be reproduced in local via:\r\n```sh\r\npytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv\r\n```\r\n\r\n\n", "before_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517', 'pytoml', 'virtualenv'\n]\n# (pep517, pytoml and virtualenv are used by pythonpackage.py)\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from 
{}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}], "after_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0\"', 'pytoml', 'virtualenv'\n]\n# (pep517, pytoml and virtualenv are used by pythonpackage.py)\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. 
Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from {}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]} | 1,625 | 150 |
gh_patches_debug_36038 | rasdani/github-patches | git_diff | scverse__scanpy-260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes`
I just tried
```python
import scanpy.api as sc
sc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism')
```
I would expect scanpy to complain that it does not know `'strange_organism'`, but instead I get the error
```python
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-13-6a41b361ab41> in <module>()
1 import scanpy.api as sc
----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio')
~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org)
34 s.add_attribute_to_xml('mgi_symbol')
35 else:
---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
37 return None
38 s.add_attribute_to_xml('chromosome_name')
NameError: name 'logg' is not defined
```
It seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement.
Would maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/queries/__init__.py`
Content:
```
1 import pandas as pd
2
3
4 def mitochondrial_genes(host, org):
5 """Mitochondrial gene symbols for specific organism through BioMart.
6
7 Parameters
8 ----------
9 host : {{'www.ensembl.org', ...}}
10 A valid BioMart host URL.
11 org : {{'hsapiens', 'mmusculus'}}
12 Organism to query. Currently available are human ('hsapiens') and mouse
13 ('mmusculus').
14
15 Returns
16 -------
17 A `pd.Index` containing mitochondrial gene symbols.
18 """
19 try:
20 from bioservices import biomart
21 except ImportError:
22 raise ImportError(
23 'You need to install the `bioservices` module.')
24 from io import StringIO
25 s = biomart.BioMart(host=host)
26
27 # building query
28 s.new_query()
29 if org == 'hsapiens':
30 s.add_dataset_to_xml('hsapiens_gene_ensembl')
31 s.add_attribute_to_xml('hgnc_symbol')
32 elif org == 'mmusculus':
33 s.add_dataset_to_xml('mmusculus_gene_ensembl')
34 s.add_attribute_to_xml('mgi_symbol')
35 else:
36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
37 return None
38 s.add_attribute_to_xml('chromosome_name')
39 xml = s.get_xml()
40
41 # parsing mitochondrial gene symbols
42 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
43 res.columns = ['symbol', 'chromosome_name']
44 res = res.dropna()
45 res = res[res['chromosome_name'] == 'MT']
46 res = res.set_index('symbol')
47 res = res[~res.index.duplicated(keep='first')]
48
49 return res.index
50
51
52 def gene_coordinates(host, org, gene, chr_exclude=[]):
53 """Retrieve gene coordinates for specific organism through BioMart.
54 Parameters
55 ----------
56 host : {{'www.ensembl.org', ...}}
57 A valid BioMart host URL. Can be used to control genome build.
58 org : {{'hsapiens', 'mmusculus'}}
59 Organism to query. Currently available are human ('hsapiens') and mouse
60 ('mmusculus').
61 gene :
62 The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve
63 coordinates.
64 chr_exclude :
65 A list of chromosomes to exclude from query.
66 Returns
67 -------
68 A `pd.DataFrame` containing gene coordinates for the specified gene symbol.
69 """
70 try:
71 from bioservices import biomart
72 except ImportError:
73 raise ImportError(
74 'You need to install the `bioservices` module.')
75 from io import StringIO
76 s = biomart.BioMart(host=host)
77
78 # building query
79 s.new_query()
80 if org == 'hsapiens':
81 s.add_dataset_to_xml('hsapiens_gene_ensembl')
82 s.add_attribute_to_xml('hgnc_symbol')
83 elif org == 'mmusculus':
84 s.add_dataset_to_xml('mmusculus_gene_ensembl')
85 s.add_attribute_to_xml('mgi_symbol')
86 else:
87 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
88 return None
89 s.add_attribute_to_xml('chromosome_name')
90 s.add_attribute_to_xml('start_position')
91 s.add_attribute_to_xml('end_position')
92 xml = s.get_xml()
93
94 # parsing gene coordinates
95 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
96 res.columns = ['symbol', 'chromosome_name', 'start', 'end']
97 res = res.dropna()
98 res = res[~res['chromosome_name'].isin(chr_exclude)]
99 res = res.set_index('symbol')
100
101 return res.loc[[gene], :]
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py
--- a/scanpy/queries/__init__.py
+++ b/scanpy/queries/__init__.py
@@ -1,4 +1,5 @@
import pandas as pd
+from .. import logging as logg
def mitochondrial_genes(host, org):
@@ -8,9 +9,9 @@
----------
host : {{'www.ensembl.org', ...}}
A valid BioMart host URL.
- org : {{'hsapiens', 'mmusculus'}}
- Organism to query. Currently available are human ('hsapiens') and mouse
- ('mmusculus').
+ org : {{'hsapiens', 'mmusculus', 'drerio'}}
+ Organism to query. Currently available are human ('hsapiens'), mouse
+ ('mmusculus') and zebrafish ('drerio').
Returns
-------
@@ -32,6 +33,9 @@
elif org == 'mmusculus':
s.add_dataset_to_xml('mmusculus_gene_ensembl')
s.add_attribute_to_xml('mgi_symbol')
+ elif org == 'drerio':
+ s.add_dataset_to_xml('drerio_gene_ensembl')
+ s.add_attribute_to_xml('zfin_id_symbol')
else:
logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
return None
@@ -55,9 +59,9 @@
----------
host : {{'www.ensembl.org', ...}}
A valid BioMart host URL. Can be used to control genome build.
- org : {{'hsapiens', 'mmusculus'}}
- Organism to query. Currently available are human ('hsapiens') and mouse
- ('mmusculus').
+ org : {{'hsapiens', 'mmusculus', 'drerio'}}
+ Organism to query. Currently available are human ('hsapiens'), mouse
+ ('mmusculus') and zebrafish ('drerio').
gene :
The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve
coordinates.
@@ -83,6 +87,9 @@
elif org == 'mmusculus':
s.add_dataset_to_xml('mmusculus_gene_ensembl')
s.add_attribute_to_xml('mgi_symbol')
+ elif org == 'drerio':
+ s.add_dataset_to_xml('drerio_gene_ensembl')
+ s.add_attribute_to_xml('zfin_id_symbol')
else:
logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
return None
| {"golden_diff": "diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py\n--- a/scanpy/queries/__init__.py\n+++ b/scanpy/queries/__init__.py\n@@ -1,4 +1,5 @@\n import pandas as pd\n+from .. import logging as logg\n \n \n def mitochondrial_genes(host, org):\n@@ -8,9 +9,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n \n Returns\n -------\n@@ -32,6 +33,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n@@ -55,9 +59,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n@@ -83,6 +87,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n", "issue": "`NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes`\nI just tried\r\n```python\r\nimport scanpy.api as sc\r\nsc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism')\r\n```\r\nI would expect scanpy complains that it does not know `'strange_organism'`, but I get the error \r\n```python\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-13-6a41b361ab41> in <module>()\r\n 1 import scanpy.api as sc\r\n----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio')\r\n\r\n~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org)\r\n 34 s.add_attribute_to_xml('mgi_symbol')\r\n 35 else:\r\n---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\r\n 37 return None\r\n 38 s.add_attribute_to_xml('chromosome_name')\r\n\r\nNameError: name 'logg' is not defined\r\n```\r\nIt seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement.\r\n\r\nWould maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4?\n", "before_files": [{"content": "import pandas as pd\n\n\ndef mitochondrial_genes(host, org):\n \"\"\"Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. 
Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n\n Returns\n -------\n A `pd.Index` containing mitochondrial gene symbols.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n xml = s.get_xml()\n\n # parsing mitochondrial gene symbols\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name']\n res = res.dropna()\n res = res[res['chromosome_name'] == 'MT']\n res = res.set_index('symbol')\n res = res[~res.index.duplicated(keep='first')]\n\n return res.index\n\n\ndef gene_coordinates(host, org, gene, chr_exclude=[]):\n \"\"\"Retrieve gene coordinates for specific organism through BioMart.\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n chr_exclude :\n A list of chromosomes to exclude from query.\n Returns\n -------\n A `pd.DataFrame` containing gene coordinates for the specified gene symbol.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n s.add_attribute_to_xml('start_position')\n s.add_attribute_to_xml('end_position')\n xml = s.get_xml()\n\n # parsing gene coordinates\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name', 'start', 'end']\n res = res.dropna()\n res = res[~res['chromosome_name'].isin(chr_exclude)]\n res = res.set_index('symbol')\n\n return res.loc[[gene], :]\n", "path": "scanpy/queries/__init__.py"}], "after_files": [{"content": "import pandas as pd\nfrom .. import logging as logg\n\n\ndef mitochondrial_genes(host, org):\n \"\"\"Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n org : {{'hsapiens', 'mmusculus', 'drerio'}}\n Organism to query. 
Currently available are human ('hsapiens'), mouse\n ('mmusculus') and zebrafish ('drerio').\n\n Returns\n -------\n A `pd.Index` containing mitochondrial gene symbols.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n elif org == 'drerio':\n s.add_dataset_to_xml('drerio_gene_ensembl')\n s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n xml = s.get_xml()\n\n # parsing mitochondrial gene symbols\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name']\n res = res.dropna()\n res = res[res['chromosome_name'] == 'MT']\n res = res.set_index('symbol')\n res = res[~res.index.duplicated(keep='first')]\n\n return res.index\n\n\ndef gene_coordinates(host, org, gene, chr_exclude=[]):\n \"\"\"Retrieve gene coordinates for specific organism through BioMart.\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n org : {{'hsapiens', 'mmusculus', 'drerio'}}\n Organism to query. Currently available are human ('hsapiens'), mouse\n ('mmusculus') and zebrafish ('drerio').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n chr_exclude :\n A list of chromosomes to exclude from query.\n Returns\n -------\n A `pd.DataFrame` containing gene coordinates for the specified gene symbol.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n elif org == 'drerio':\n s.add_dataset_to_xml('drerio_gene_ensembl')\n s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n s.add_attribute_to_xml('start_position')\n s.add_attribute_to_xml('end_position')\n xml = s.get_xml()\n\n # parsing gene coordinates\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name', 'start', 'end']\n res = res.dropna()\n res = res[~res['chromosome_name'].isin(chr_exclude)]\n res = res.set_index('symbol')\n\n return res.loc[[gene], :]\n", "path": "scanpy/queries/__init__.py"}]} | 1,604 | 613 |
gh_patches_debug_28469 | rasdani/github-patches | git_diff | fossasia__open-event-server-2390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show image and square crop option (like in wizard) for speakers and ensure it shows up after import

As the above screenshot shows, the speaker's image does not show up as expected. In step 1 of the wizard this is already implemented that way.
Compare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/
--- END ISSUE ---
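For the import part of the report, a small check of what the importer actually stored can narrow things down: if `speaker.photo` is empty after an import, the template has nothing to render. This is a hypothetical sketch built from the helpers visible in the file below; it is not the project's fix.
```python
# Hypothetical sketch: list speakers of an event whose photo field is empty
# after an import, using the DataGetter helper shown in speakers.py.
from app.helpers.data_getter import DataGetter

def speakers_missing_photo(event_id):
    missing = []
    for speaker in DataGetter.get_speakers(event_id):
        if not speaker.photo:  # nothing stored, so nothing for the template to show
            missing.append(speaker.name)
    return missing
```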
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/views/admin/models_views/speakers.py`
Content:
```
1 import json
2
3 from flask.ext.admin import BaseView
4 from flask.ext.restplus import abort
5 from flask_admin import expose
6 from flask.ext import login
7 from flask import request, url_for, redirect, flash
8 from ....helpers.data import delete_from_db, save_to_db
9 from ....helpers.data_getter import DataGetter
10 from ....helpers.storage import upload, UPLOAD_PATHS
11
12
13 def get_speaker_or_throw(speaker_id):
14 session = DataGetter.get_speaker(speaker_id)
15 if not session:
16 abort(404)
17 return session
18
19
20 class SpeakersView(BaseView):
21
22 def is_accessible(self):
23 return login.current_user.is_authenticated
24
25 def _handle_view(self, name, **kwargs):
26 if not self.is_accessible():
27 return redirect(url_for('admin.login_view', next=request.url))
28 event = DataGetter.get_event(kwargs['event_id'])
29 if not event.has_session_speakers:
30 return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)
31
32 @expose('/')
33 def index_view(self, event_id):
34 speakers = DataGetter.get_speakers(event_id)
35 event = DataGetter.get_event(event_id)
36 return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',
37 speakers=speakers, event_id=event_id, event=event)
38
39 @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))
40 def edit_view(self, event_id, speaker_id):
41 speaker = get_speaker_or_throw(speaker_id)
42 event = DataGetter.get_event(event_id)
43 form_elems = DataGetter.get_custom_form_elements(event_id)
44 if not form_elems:
45 flash("Speaker form has been incorrectly configured for this event. Editing has been disabled", "danger")
46 return redirect(url_for('.index_view', event_id=event_id))
47 speaker_form = json.loads(form_elems.speaker_form)
48 if request.method == 'GET':
49 return self.render('/gentelella/admin/event/speakers/edit.html',
50 speaker=speaker, event_id=event_id,
51 event=event, speaker_form=speaker_form)
52 if request.method == 'POST':
53 # set photo
54 if 'photo' in request.files and request.files['photo'].filename != '':
55 speaker_img_file = request.files['photo']
56 speaker_img = upload(
57 speaker_img_file,
58 UPLOAD_PATHS['speakers']['photo'].format(
59 event_id=int(event_id), id=int(speaker.id)
60 ))
61 speaker.photo = speaker_img
62 # set other fields
63 speaker.name = request.form.get('name', None)
64 speaker.short_biography = request.form.get('short_biography', None)
65 speaker.long_biography = request.form.get('long_biography', None)
66 speaker.email = request.form.get('email', None)
67 speaker.mobile = request.form.get('mobile', None)
68 speaker.website = request.form.get('website', None)
69 speaker.twitter = request.form.get('twitter', None)
70 speaker.facebook = request.form.get('facebook', None)
71 speaker.github = request.form.get('github', None)
72 speaker.linkedin = request.form.get('linkedin', None)
73 speaker.organisation = request.form.get('organisation', None)
74 speaker.featured = True if request.form.get('featured', 'false') == 'true' else False
75 speaker.position = request.form.get('position', None)
76 speaker.country = request.form.get('country', None)
77 save_to_db(speaker, "Speaker has been updated")
78 flash("Speaker has been saved", "success")
79
80 return redirect(url_for('.index_view', event_id=event_id))
81
82 @expose('/<int:speaker_id>/delete', methods=('GET',))
83 def delete(self, event_id, speaker_id):
84 speaker = get_speaker_or_throw(speaker_id)
85 delete_from_db(speaker, 'Speaker Rejected')
86 flash("The speaker has been deleted", "danger")
87 return redirect(url_for('.index_view', event_id=event_id))
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py
--- a/app/views/admin/models_views/speakers.py
+++ b/app/views/admin/models_views/speakers.py
@@ -4,10 +4,11 @@
from flask.ext.restplus import abort
from flask_admin import expose
from flask.ext import login
-from flask import request, url_for, redirect, flash
+from flask import request, url_for, redirect, flash, jsonify
from ....helpers.data import delete_from_db, save_to_db
from ....helpers.data_getter import DataGetter
from ....helpers.storage import upload, UPLOAD_PATHS
+from app.helpers.helpers import uploaded_file
def get_speaker_or_throw(speaker_id):
@@ -85,3 +86,23 @@
delete_from_db(speaker, 'Speaker Rejected')
flash("The speaker has been deleted", "danger")
return redirect(url_for('.index_view', event_id=event_id))
+
+ @expose('/<int:speaker_id>/photo_upload', methods=('POST',))
+ def photo_upload(self, event_id, speaker_id):
+ speaker = get_speaker_or_throw(speaker_id)
+ event = DataGetter.get_event(event_id)
+ photo = request.form['photo']
+ if photo:
+ photo_file = uploaded_file(file_content=photo)
+ photo = upload(
+ photo_file,
+ UPLOAD_PATHS['speakers']['photo'].format(
+ event_id=int(event_id), id=int(speaker.id)
+ ))
+ speaker.photo = photo
+ save_to_db(speaker)
+ return jsonify({'status': 'ok', 'photo': photo})
+ else:
+ speaker.photo = None
+ save_to_db(speaker)
+ return jsonify({'status': 'Removed'})
| {"golden_diff": "diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py\n--- a/app/views/admin/models_views/speakers.py\n+++ b/app/views/admin/models_views/speakers.py\n@@ -4,10 +4,11 @@\n from flask.ext.restplus import abort\n from flask_admin import expose\n from flask.ext import login\n-from flask import request, url_for, redirect, flash\n+from flask import request, url_for, redirect, flash, jsonify\n from ....helpers.data import delete_from_db, save_to_db\n from ....helpers.data_getter import DataGetter\n from ....helpers.storage import upload, UPLOAD_PATHS\n+from app.helpers.helpers import uploaded_file\n \n \n def get_speaker_or_throw(speaker_id):\n@@ -85,3 +86,23 @@\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n+\n+ @expose('/<int:speaker_id>/photo_upload', methods=('POST',))\n+ def photo_upload(self, event_id, speaker_id):\n+ speaker = get_speaker_or_throw(speaker_id)\n+ event = DataGetter.get_event(event_id)\n+ photo = request.form['photo']\n+ if photo:\n+ photo_file = uploaded_file(file_content=photo)\n+ photo = upload(\n+ photo_file,\n+ UPLOAD_PATHS['speakers']['photo'].format(\n+ event_id=int(event_id), id=int(speaker.id)\n+ ))\n+ speaker.photo = photo\n+ save_to_db(speaker)\n+ return jsonify({'status': 'ok', 'photo': photo})\n+ else:\n+ speaker.photo = None\n+ save_to_db(speaker)\n+ return jsonify({'status': 'Removed'})\n", "issue": "Show image and square crop option (like in wizard) for speakers and ensure it shows up after import\n\n\nAs the above screenshot shows, the image of the speaker does not show up as expected. In the wizard step 1 it is already implemented in that way.\n\nCompare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/\n\n", "before_files": [{"content": "import json\n\nfrom flask.ext.admin import BaseView\nfrom flask.ext.restplus import abort\nfrom flask_admin import expose\nfrom flask.ext import login\nfrom flask import request, url_for, redirect, flash\nfrom ....helpers.data import delete_from_db, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom ....helpers.storage import upload, UPLOAD_PATHS\n\n\ndef get_speaker_or_throw(speaker_id):\n session = DataGetter.get_speaker(speaker_id)\n if not session:\n abort(404)\n return session\n\n\nclass SpeakersView(BaseView):\n\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n event = DataGetter.get_event(kwargs['event_id'])\n if not event.has_session_speakers:\n return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)\n\n @expose('/')\n def index_view(self, event_id):\n speakers = DataGetter.get_speakers(event_id)\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',\n speakers=speakers, event_id=event_id, event=event)\n\n @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n form_elems = DataGetter.get_custom_form_elements(event_id)\n if not form_elems:\n flash(\"Speaker form has been incorrectly configured for this event. 
Editing has been disabled\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/speakers/edit.html',\n speaker=speaker, event_id=event_id,\n event=event, speaker_form=speaker_form)\n if request.method == 'POST':\n # set photo\n if 'photo' in request.files and request.files['photo'].filename != '':\n speaker_img_file = request.files['photo']\n speaker_img = upload(\n speaker_img_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = speaker_img\n # set other fields\n speaker.name = request.form.get('name', None)\n speaker.short_biography = request.form.get('short_biography', None)\n speaker.long_biography = request.form.get('long_biography', None)\n speaker.email = request.form.get('email', None)\n speaker.mobile = request.form.get('mobile', None)\n speaker.website = request.form.get('website', None)\n speaker.twitter = request.form.get('twitter', None)\n speaker.facebook = request.form.get('facebook', None)\n speaker.github = request.form.get('github', None)\n speaker.linkedin = request.form.get('linkedin', None)\n speaker.organisation = request.form.get('organisation', None)\n speaker.featured = True if request.form.get('featured', 'false') == 'true' else False\n speaker.position = request.form.get('position', None)\n speaker.country = request.form.get('country', None)\n save_to_db(speaker, \"Speaker has been updated\")\n flash(\"Speaker has been saved\", \"success\")\n\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/delete', methods=('GET',))\n def delete(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n", "path": "app/views/admin/models_views/speakers.py"}], "after_files": [{"content": "import json\n\nfrom flask.ext.admin import BaseView\nfrom flask.ext.restplus import abort\nfrom flask_admin import expose\nfrom flask.ext import login\nfrom flask import request, url_for, redirect, flash, jsonify\nfrom ....helpers.data import delete_from_db, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom ....helpers.storage import upload, UPLOAD_PATHS\nfrom app.helpers.helpers import uploaded_file\n\n\ndef get_speaker_or_throw(speaker_id):\n session = DataGetter.get_speaker(speaker_id)\n if not session:\n abort(404)\n return session\n\n\nclass SpeakersView(BaseView):\n\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n event = DataGetter.get_event(kwargs['event_id'])\n if not event.has_session_speakers:\n return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)\n\n @expose('/')\n def index_view(self, event_id):\n speakers = DataGetter.get_speakers(event_id)\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',\n speakers=speakers, event_id=event_id, event=event)\n\n @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n 
form_elems = DataGetter.get_custom_form_elements(event_id)\n if not form_elems:\n flash(\"Speaker form has been incorrectly configured for this event. Editing has been disabled\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/speakers/edit.html',\n speaker=speaker, event_id=event_id,\n event=event, speaker_form=speaker_form)\n if request.method == 'POST':\n # set photo\n if 'photo' in request.files and request.files['photo'].filename != '':\n speaker_img_file = request.files['photo']\n speaker_img = upload(\n speaker_img_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = speaker_img\n # set other fields\n speaker.name = request.form.get('name', None)\n speaker.short_biography = request.form.get('short_biography', None)\n speaker.long_biography = request.form.get('long_biography', None)\n speaker.email = request.form.get('email', None)\n speaker.mobile = request.form.get('mobile', None)\n speaker.website = request.form.get('website', None)\n speaker.twitter = request.form.get('twitter', None)\n speaker.facebook = request.form.get('facebook', None)\n speaker.github = request.form.get('github', None)\n speaker.linkedin = request.form.get('linkedin', None)\n speaker.organisation = request.form.get('organisation', None)\n speaker.featured = True if request.form.get('featured', 'false') == 'true' else False\n speaker.position = request.form.get('position', None)\n speaker.country = request.form.get('country', None)\n save_to_db(speaker, \"Speaker has been updated\")\n flash(\"Speaker has been saved\", \"success\")\n\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/delete', methods=('GET',))\n def delete(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/photo_upload', methods=('POST',))\n def photo_upload(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n photo = request.form['photo']\n if photo:\n photo_file = uploaded_file(file_content=photo)\n photo = upload(\n photo_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = photo\n save_to_db(speaker)\n return jsonify({'status': 'ok', 'photo': photo})\n else:\n speaker.photo = None\n save_to_db(speaker)\n return jsonify({'status': 'Removed'})\n", "path": "app/views/admin/models_views/speakers.py"}]} | 1,450 | 396 |
gh_patches_debug_9587 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Session expiry does not display a localized logout message.
# Bug
## Description
Like #2391, if a source has their session expire, they will not be shown a localized message when they log out.
## Steps to Reproduce
Set the session expiration to 30 seconds. Log in. Set the locale to a non-English one. Wait 30 seconds. Refresh. Observe that the flashed logout message is not localized.
## Expected Behavior
The logout message is localized.
## Actual Behavior
It is not.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/__init__.py`
Content:
```
1 from datetime import datetime, timedelta
2 from flask import (Flask, render_template, flash, Markup, request, g, session,
3 url_for, redirect)
4 from flask_babel import gettext
5 from flask_assets import Environment
6 from flask_wtf.csrf import CSRFProtect
7 from jinja2 import evalcontextfilter
8 from os import path
9 from sqlalchemy.orm.exc import NoResultFound
10
11 import crypto_util
12 import i18n
13 import store
14 import template_filters
15 import version
16
17 from db import Source, db_session
18 from request_that_secures_file_uploads import RequestThatSecuresFileUploads
19 from source_app import main, info, api
20 from source_app.decorators import ignore_static
21 from source_app.utils import logged_in
22
23
24 def create_app(config):
25 app = Flask(__name__,
26 template_folder=config.SOURCE_TEMPLATES_DIR,
27 static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
28 app.request_class = RequestThatSecuresFileUploads
29 app.config.from_object(config.SourceInterfaceFlaskConfig)
30
31 # The default CSRF token expiration is 1 hour. Since large uploads can
32 # take longer than an hour over Tor, we increase the valid window to 24h.
33 app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24
34 CSRFProtect(app)
35
36 assets = Environment(app)
37 app.config['assets'] = assets
38
39 i18n.setup_app(app)
40
41 app.jinja_env.trim_blocks = True
42 app.jinja_env.lstrip_blocks = True
43 app.jinja_env.globals['version'] = version.__version__
44 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
45 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
46 app.jinja_env.globals['use_custom_header_image'] = True
47 else:
48 app.jinja_env.globals['header_image'] = 'logo.png'
49 app.jinja_env.globals['use_custom_header_image'] = False
50
51 app.jinja_env.filters['rel_datetime_format'] = \
52 template_filters.rel_datetime_format
53 app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)
54 app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
55
56 for module in [main, info, api]:
57 app.register_blueprint(module.make_blueprint(config))
58
59 @app.before_request
60 @ignore_static
61 def check_tor2web():
62 # ignore_static here so we only flash a single message warning
63 # about Tor2Web, corresponding to the initial page load.
64 if 'X-tor2web' in request.headers:
65 flash(Markup(gettext(
66 '<strong>WARNING:</strong> You appear to be using Tor2Web. '
67 'This <strong>does not</strong> provide anonymity. '
68 '<a href="{url}">Why is this dangerous?</a>')
69 .format(url=url_for('info.tor2web_warning'))),
70 "banner-warning")
71
72 @app.before_request
73 @ignore_static
74 def setup_g():
75 """Store commonly used values in Flask's special g object"""
76 g.locale = i18n.get_locale()
77 g.text_direction = i18n.get_text_direction(g.locale)
78 g.html_lang = i18n.locale_to_rfc_5646(g.locale)
79 g.locales = i18n.get_locale2name()
80
81 if 'expires' in session and datetime.utcnow() >= session['expires']:
82 session.clear()
83 msg = render_template('session_timeout.html')
84 flash(Markup(msg), "important")
85
86 session['expires'] = datetime.utcnow() + \
87 timedelta(minutes=getattr(config,
88 'SESSION_EXPIRATION_MINUTES',
89 30))
90
91 # ignore_static here because `crypto_util.hash_codename` is scrypt
92 # (very time consuming), and we don't need to waste time running if
93 # we're just serving a static resource that won't need to access
94 # these common values.
95 if logged_in():
96 g.codename = session['codename']
97 g.filesystem_id = crypto_util.hash_codename(g.codename)
98 try:
99 g.source = Source.query \
100 .filter(Source.filesystem_id == g.filesystem_id) \
101 .one()
102 except NoResultFound as e:
103 app.logger.error(
104 "Found no Sources when one was expected: %s" %
105 (e,))
106 del session['logged_in']
107 del session['codename']
108 return redirect(url_for('main.index'))
109 g.loc = store.path(g.filesystem_id)
110
111 @app.teardown_appcontext
112 def shutdown_session(exception=None):
113 """Automatically remove database sessions at the end of the request, or
114 when the application shuts down"""
115 db_session.remove()
116
117 @app.errorhandler(404)
118 def page_not_found(error):
119 return render_template('notfound.html'), 404
120
121 @app.errorhandler(500)
122 def internal_error(error):
123 return render_template('error.html'), 500
124
125 return app
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py
--- a/securedrop/source_app/__init__.py
+++ b/securedrop/source_app/__init__.py
@@ -79,8 +79,11 @@
g.locales = i18n.get_locale2name()
if 'expires' in session and datetime.utcnow() >= session['expires']:
- session.clear()
msg = render_template('session_timeout.html')
+
+ # clear the session after we render the message so it's localized
+ session.clear()
+
flash(Markup(msg), "important")
session['expires'] = datetime.utcnow() + \
| {"golden_diff": "diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py\n--- a/securedrop/source_app/__init__.py\n+++ b/securedrop/source_app/__init__.py\n@@ -79,8 +79,11 @@\n g.locales = i18n.get_locale2name()\n \n if 'expires' in session and datetime.utcnow() >= session['expires']:\n- session.clear()\n msg = render_template('session_timeout.html')\n+\n+ # clear the session after we render the message so it's localized\n+ session.clear()\n+\n flash(Markup(msg), \"important\")\n \n session['expires'] = datetime.utcnow() + \\\n", "issue": "Session expiring do not display a localized logout message.\n# Bug\r\n\r\n## Description\r\n\r\nLike #2391, if a source has their session expire, they will not be shown a localized message when they log out.\r\n\r\n## Steps to Reproduce\r\n\r\nSet session expire to 30 seconds. Log in. Set locale to not-english. Wait 30 seconds. Refresh. See no-localized flashed message.\r\n\r\n## Expected Behavior\r\n\r\nThe logout message is localized.\r\n\r\n## Actual Behavior\r\n\r\nIt is not.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n CSRFProtect(app)\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n session.clear()\n msg = render_template('session_timeout.html')\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 30))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. 
Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n CSRFProtect(app)\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n msg = render_template('session_timeout.html')\n\n # clear the session after we render the message so it's localized\n session.clear()\n\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 30))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}]} | 1,736 | 152 |
gh_patches_debug_1179 | rasdani/github-patches | git_diff | locustio__locust-1395 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update flask version
Our minimum required Flask version is too old (we saw at least one person hit an issue because of it: https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py)
https://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away.
I can do the PR
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import ast
3 import os
4 import re
5 import sys
6
7 from setuptools import find_packages, setup
8
9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
10
11 # parse version from locust/__init__.py
12 _version_re = re.compile(r'__version__\s+=\s+(.*)')
13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py")
14 with open(_init_file, 'rb') as f:
15 version = str(ast.literal_eval(_version_re.search(
16 f.read().decode('utf-8')).group(1)))
17
18 setup(
19 name='locust',
20 version=version,
21 install_requires=[
22 "gevent>=1.5.0",
23 "flask>=0.10.1",
24 "requests>=2.9.1",
25 "msgpack>=0.6.2",
26 "pyzmq>=16.0.2",
27 "geventhttpclient>=1.4.2",
28 "ConfigArgParse>=1.0",
29 "psutil>=5.6.7",
30 "Flask-BasicAuth>=0.2.0"
31 ],
32 test_suite="locust.test",
33 tests_require=[
34 'cryptography',
35 'mock',
36 'pyquery',
37 ],
38 )
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
version=version,
install_requires=[
"gevent>=1.5.0",
- "flask>=0.10.1",
+ "flask>=1.1.2",
"requests>=2.9.1",
"msgpack>=0.6.2",
"pyzmq>=16.0.2",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n- \"flask>=0.10.1\", \n+ \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\",\n", "issue": "Update flask version\nOur minimum required flask version is too old (saw at least one person having an issue https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py)\r\n\r\nhttps://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away.\r\n\r\nI can do the PR\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=0.10.1\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n", "path": "setup.py"}]} | 712 | 115 |
gh_patches_debug_18495 | rasdani/github-patches | git_diff | apache__airflow-8230 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Airflow webserver not starting with SQLAlchemy==1.3.16
**Apache Airflow version**: 1.10.9
**Environment**: Ubuntu 18.04 LTS
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release):Ubuntu 18.04 LTS
**What happened**: airflow webserver error
airflow@airflow:~$ airflow webserver
[2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor
[2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py", line 75, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py", line 900, in webserver
app = cached_app_rbac(None) if settings.RBAC else cached_app(None)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 233, in cached_app
app = create_app(config, testing)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 103, in create_app
models.Chart, Session, name="Charts", category="Data Profiling"))
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 330, in __init__
menu_icon_value=menu_icon_value)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 818, in __init__
self._refresh_cache()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 913, in _refresh_cache
self._search_supported = self.init_search()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 581, in init_search
if tools.is_hybrid_property(self.model, name):
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 209, in is_hybrid_property
return last_name in get_hybrid_properties(last_model)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 190, in get_hybrid_properties
for key, prop in inspect(model).all_orm_descriptors.items()
File "/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py", line 72, in inspect
"available for object of type %s" % type_
sqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>
**What you expected to happen**: to start
<!-- What do you think went wrong? -->
**How to reproduce it**:
Install Airflow with pip3, using Postgres from Ubuntu's packages (Postgres 10).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/models/chart.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Licensed to the Apache Software Foundation (ASF) under one
4 # or more contributor license agreements. See the NOTICE file
5 # distributed with this work for additional information
6 # regarding copyright ownership. The ASF licenses this file
7 # to you under the Apache License, Version 2.0 (the
8 # "License"); you may not use this file except in compliance
9 # with the License. You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing,
14 # software distributed under the License is distributed on an
15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 # KIND, either express or implied. See the License for the
17 # specific language governing permissions and limitations
18 # under the License.
19
20 from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text
21 from sqlalchemy.orm import relationship
22
23 from airflow.models.base import Base, ID_LEN
24 from airflow.utils.sqlalchemy import UtcDateTime
25 from airflow.utils import timezone
26
27
28 class Chart(Base):
29 __tablename__ = "chart"
30
31 id = Column(Integer, primary_key=True)
32 label = Column(String(200))
33 conn_id = Column(String(ID_LEN), nullable=False)
34 user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)
35 chart_type = Column(String(100), default="line")
36 sql_layout = Column(String(50), default="series")
37 sql = Column(Text, default="SELECT series, x, y FROM table")
38 y_log_scale = Column(Boolean)
39 show_datatable = Column(Boolean)
40 show_sql = Column(Boolean, default=True)
41 height = Column(Integer, default=600)
42 default_params = Column(String(5000), default="{}")
43 owner = relationship(
44 "User", cascade=False, cascade_backrefs=False, backref='charts')
45 x_is_date = Column(Boolean, default=True)
46 iteration_no = Column(Integer, default=0)
47 last_modified = Column(UtcDateTime, default=timezone.utcnow)
48
49 def __repr__(self):
50 return self.label
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/models/chart.py b/airflow/models/chart.py
--- a/airflow/models/chart.py
+++ b/airflow/models/chart.py
@@ -21,6 +21,7 @@
from sqlalchemy.orm import relationship
from airflow.models.base import Base, ID_LEN
+from airflow.models.user import User
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils import timezone
@@ -41,7 +42,7 @@
height = Column(Integer, default=600)
default_params = Column(String(5000), default="{}")
owner = relationship(
- "User", cascade=False, cascade_backrefs=False, backref='charts')
+ User, cascade=False, cascade_backrefs=False, backref='charts')
x_is_date = Column(Boolean, default=True)
iteration_no = Column(Integer, default=0)
last_modified = Column(UtcDateTime, default=timezone.utcnow)
| {"golden_diff": "diff --git a/airflow/models/chart.py b/airflow/models/chart.py\n--- a/airflow/models/chart.py\n+++ b/airflow/models/chart.py\n@@ -21,6 +21,7 @@\n from sqlalchemy.orm import relationship\n \n from airflow.models.base import Base, ID_LEN\n+from airflow.models.user import User\n from airflow.utils.sqlalchemy import UtcDateTime\n from airflow.utils import timezone\n \n@@ -41,7 +42,7 @@\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n- \"User\", cascade=False, cascade_backrefs=False, backref='charts')\n+ User, cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n", "issue": "Airflow webserver not starting with SQLAlchemy==1.3.16\n\r\n**Apache Airflow version**: 1.10.9\r\n**Environment**: Ubuntu 18.04 LTS\r\n\r\n- **Cloud provider or hardware configuration**:\r\n- **OS** (e.g. from /etc/os-release):Ubuntu 18.04 LTS\r\n\r\n**What happened**: airflow webserver error\r\n\r\nairflow@airflow:~$ airflow webserver\r\n[2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494\r\n ____________ _____________\r\n ____ |__( )_________ __/__ /________ __\r\n____ /| |_ /__ ___/_ /_ __ /_ __ \\_ | /| / /\r\n___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /\r\n _/_/ |_/_/ /_/ /_/ /_/ \\____/____/|__/\r\n[2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor\r\n[2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py\", line 75, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py\", line 900, in webserver\r\n app = cached_app_rbac(None) if settings.RBAC else cached_app(None)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py\", line 233, in cached_app\r\n app = create_app(config, testing)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py\", line 103, in create_app\r\n models.Chart, Session, name=\"Charts\", category=\"Data Profiling\"))\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py\", line 330, in __init__\r\n menu_icon_value=menu_icon_value)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py\", line 818, in __init__\r\n self._refresh_cache()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py\", line 913, in _refresh_cache\r\n self._search_supported = self.init_search()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py\", line 581, in init_search\r\n if tools.is_hybrid_property(self.model, name):\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py\", line 209, in is_hybrid_property\r\n return last_name in get_hybrid_properties(last_model)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py\", line 190, in get_hybrid_properties\r\n for key, prop in inspect(model).all_orm_descriptors.items()\r\n File 
\"/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py\", line 72, in inspect\r\n \"available for object of type %s\" % type_\r\nsqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>\r\n\r\n**What you expected to happen**: to start\r\n\r\n<!-- What do you think went wrong? -->\r\n\r\n**How to reproduce it**:\r\nInstall airflow with pip3 and postgres from ubuntu which is 10.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text\nfrom sqlalchemy.orm import relationship\n\nfrom airflow.models.base import Base, ID_LEN\nfrom airflow.utils.sqlalchemy import UtcDateTime\nfrom airflow.utils import timezone\n\n\nclass Chart(Base):\n __tablename__ = \"chart\"\n\n id = Column(Integer, primary_key=True)\n label = Column(String(200))\n conn_id = Column(String(ID_LEN), nullable=False)\n user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)\n chart_type = Column(String(100), default=\"line\")\n sql_layout = Column(String(50), default=\"series\")\n sql = Column(Text, default=\"SELECT series, x, y FROM table\")\n y_log_scale = Column(Boolean)\n show_datatable = Column(Boolean)\n show_sql = Column(Boolean, default=True)\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n \"User\", cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n\n def __repr__(self):\n return self.label\n", "path": "airflow/models/chart.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text\nfrom sqlalchemy.orm import relationship\n\nfrom airflow.models.base import Base, ID_LEN\nfrom airflow.models.user import User\nfrom airflow.utils.sqlalchemy import UtcDateTime\nfrom airflow.utils import timezone\n\n\nclass Chart(Base):\n __tablename__ = \"chart\"\n\n id = Column(Integer, primary_key=True)\n label = Column(String(200))\n conn_id = Column(String(ID_LEN), nullable=False)\n user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)\n chart_type = Column(String(100), default=\"line\")\n sql_layout = Column(String(50), default=\"series\")\n sql = Column(Text, default=\"SELECT series, x, y FROM table\")\n y_log_scale = Column(Boolean)\n show_datatable = Column(Boolean)\n show_sql = Column(Boolean, default=True)\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n User, cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n\n def __repr__(self):\n return self.label\n", "path": "airflow/models/chart.py"}]} | 1,792 | 204 |
gh_patches_debug_17420 | rasdani/github-patches | git_diff | pytorch__ignite-2676 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scheduled workflow failed
Oh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**.
Please look into it:
https://github.com/pytorch/ignite/actions/runs/2923090334
Feel free to close this if this was just a one-off error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/gan/utils.py`
Content:
```
1 from typing import Callable, Optional, Union
2
3 import torch
4 from packaging.version import Version
5
6 from ignite.metrics.metric import Metric
7
8
9 class InceptionModel(torch.nn.Module):
10 r"""Inception Model pre-trained on the ImageNet Dataset.
11
12 Args:
13 return_features: set it to `True` if you want the model to return features from the last pooling
14 layer instead of prediction probabilities.
15 device: specifies which device updates are accumulated on. Setting the
16 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
17 non-blocking. By default, CPU.
18 """
19
20 def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None:
21 try:
22 from torchvision import models
23 except ImportError:
24 raise RuntimeError("This module requires torchvision to be installed.")
25 super(InceptionModel, self).__init__()
26 self._device = device
27 if Version(torch.__version__) <= Version("1.7.0"):
28 model_kwargs = {"pretrained": True}
29 else:
30 model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
31
32 self.model = models.inception_v3(**model_kwargs).to(self._device)
33
34 if return_features:
35 self.model.fc = torch.nn.Identity()
36 else:
37 self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))
38 self.model.eval()
39
40 @torch.no_grad()
41 def forward(self, data: torch.Tensor) -> torch.Tensor:
42 if data.dim() != 4:
43 raise ValueError(f"Inputs should be a tensor of dim 4, got {data.dim()}")
44 if data.shape[1] != 3:
45 raise ValueError(f"Inputs should be a tensor with 3 channels, got {data.shape}")
46 if data.device != torch.device(self._device):
47 data = data.to(self._device)
48 return self.model(data)
49
50
51 class _BaseInceptionMetric(Metric):
52 def __init__(
53 self,
54 num_features: Optional[int],
55 feature_extractor: Optional[torch.nn.Module],
56 output_transform: Callable = lambda x: x,
57 device: Union[str, torch.device] = torch.device("cpu"),
58 ) -> None:
59
60 if num_features is None:
61 raise ValueError("Argument num_features must be provided, if feature_extractor is specified.")
62
63 if feature_extractor is None:
64 feature_extractor = torch.nn.Identity()
65
66 if num_features <= 0:
67 raise ValueError(f"Argument num_features must be greater to zero, got: {num_features}")
68
69 if not isinstance(feature_extractor, torch.nn.Module):
70 raise TypeError(
71 f"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}"
72 )
73
74 self._num_features = num_features
75 self._feature_extractor = feature_extractor.to(device)
76
77 super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)
78
79 def _check_feature_shapes(self, samples: torch.Tensor) -> None:
80
81 if samples.dim() != 2:
82 raise ValueError(f"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}")
83
84 if samples.shape[0] == 0:
85 raise ValueError(f"Batch size should be greater than one, got: {samples.shape[0]}")
86
87 if samples.shape[1] != self._num_features:
88 raise ValueError(
89 f"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}"
90 )
91
92 def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:
93
94 inputs = inputs.detach()
95
96 if inputs.device != torch.device(self._device):
97 inputs = inputs.to(self._device)
98
99 with torch.no_grad():
100 outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)
101 self._check_feature_shapes(outputs)
102
103 return outputs
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py
--- a/ignite/metrics/gan/utils.py
+++ b/ignite/metrics/gan/utils.py
@@ -19,12 +19,13 @@
def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None:
try:
+ import torchvision
from torchvision import models
except ImportError:
raise RuntimeError("This module requires torchvision to be installed.")
super(InceptionModel, self).__init__()
self._device = device
- if Version(torch.__version__) <= Version("1.7.0"):
+ if Version(torchvision.__version__) < Version("0.13.0"):
model_kwargs = {"pretrained": True}
else:
model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
| {"golden_diff": "diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py\n--- a/ignite/metrics/gan/utils.py\n+++ b/ignite/metrics/gan/utils.py\n@@ -19,12 +19,13 @@\n \n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n+ import torchvision\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n- if Version(torch.__version__) <= Version(\"1.7.0\"):\n+ if Version(torchvision.__version__) < Version(\"0.13.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n", "issue": "Scheduled workflow failed\nOh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**.\nPlease look into it:\n\nhttps://github.com/pytorch/ignite/actions/runs/2923090334\n\nFeel free to close this if this was just a one-off error.\n\n", "before_files": [{"content": "from typing import Callable, Optional, Union\n\nimport torch\nfrom packaging.version import Version\n\nfrom ignite.metrics.metric import Metric\n\n\nclass InceptionModel(torch.nn.Module):\n r\"\"\"Inception Model pre-trained on the ImageNet Dataset.\n\n Args:\n return_features: set it to `True` if you want the model to return features from the last pooling\n layer instead of prediction probabilities.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \"\"\"\n\n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n if Version(torch.__version__) <= Version(\"1.7.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n\n self.model = models.inception_v3(**model_kwargs).to(self._device)\n\n if return_features:\n self.model.fc = torch.nn.Identity()\n else:\n self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor) -> torch.Tensor:\n if data.dim() != 4:\n raise ValueError(f\"Inputs should be a tensor of dim 4, got {data.dim()}\")\n if data.shape[1] != 3:\n raise ValueError(f\"Inputs should be a tensor with 3 channels, got {data.shape}\")\n if data.device != torch.device(self._device):\n data = data.to(self._device)\n return self.model(data)\n\n\nclass _BaseInceptionMetric(Metric):\n def __init__(\n self,\n num_features: Optional[int],\n feature_extractor: Optional[torch.nn.Module],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if num_features is None:\n raise ValueError(\"Argument num_features must be provided, if feature_extractor is specified.\")\n\n if feature_extractor is None:\n feature_extractor = torch.nn.Identity()\n\n if num_features <= 0:\n raise ValueError(f\"Argument num_features must be greater to zero, got: {num_features}\")\n\n if not isinstance(feature_extractor, torch.nn.Module):\n raise TypeError(\n f\"Argument feature_extractor must be of type torch.nn.Module, got 
{type(self._feature_extractor)}\"\n )\n\n self._num_features = num_features\n self._feature_extractor = feature_extractor.to(device)\n\n super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)\n\n def _check_feature_shapes(self, samples: torch.Tensor) -> None:\n\n if samples.dim() != 2:\n raise ValueError(f\"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}\")\n\n if samples.shape[0] == 0:\n raise ValueError(f\"Batch size should be greater than one, got: {samples.shape[0]}\")\n\n if samples.shape[1] != self._num_features:\n raise ValueError(\n f\"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}\"\n )\n\n def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:\n\n inputs = inputs.detach()\n\n if inputs.device != torch.device(self._device):\n inputs = inputs.to(self._device)\n\n with torch.no_grad():\n outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)\n self._check_feature_shapes(outputs)\n\n return outputs\n", "path": "ignite/metrics/gan/utils.py"}], "after_files": [{"content": "from typing import Callable, Optional, Union\n\nimport torch\nfrom packaging.version import Version\n\nfrom ignite.metrics.metric import Metric\n\n\nclass InceptionModel(torch.nn.Module):\n r\"\"\"Inception Model pre-trained on the ImageNet Dataset.\n\n Args:\n return_features: set it to `True` if you want the model to return features from the last pooling\n layer instead of prediction probabilities.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \"\"\"\n\n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n import torchvision\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n if Version(torchvision.__version__) < Version(\"0.13.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n\n self.model = models.inception_v3(**model_kwargs).to(self._device)\n\n if return_features:\n self.model.fc = torch.nn.Identity()\n else:\n self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor) -> torch.Tensor:\n if data.dim() != 4:\n raise ValueError(f\"Inputs should be a tensor of dim 4, got {data.dim()}\")\n if data.shape[1] != 3:\n raise ValueError(f\"Inputs should be a tensor with 3 channels, got {data.shape}\")\n if data.device != torch.device(self._device):\n data = data.to(self._device)\n return self.model(data)\n\n\nclass _BaseInceptionMetric(Metric):\n def __init__(\n self,\n num_features: Optional[int],\n feature_extractor: Optional[torch.nn.Module],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if num_features is None:\n raise ValueError(\"Argument num_features must be provided, if feature_extractor is specified.\")\n\n if feature_extractor is None:\n feature_extractor = torch.nn.Identity()\n\n if num_features <= 0:\n raise ValueError(f\"Argument num_features must be greater to zero, got: {num_features}\")\n\n if not isinstance(feature_extractor, torch.nn.Module):\n raise TypeError(\n 
f\"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}\"\n )\n\n self._num_features = num_features\n self._feature_extractor = feature_extractor.to(device)\n\n super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)\n\n def _check_feature_shapes(self, samples: torch.Tensor) -> None:\n\n if samples.dim() != 2:\n raise ValueError(f\"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}\")\n\n if samples.shape[0] == 0:\n raise ValueError(f\"Batch size should be greater than one, got: {samples.shape[0]}\")\n\n if samples.shape[1] != self._num_features:\n raise ValueError(\n f\"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}\"\n )\n\n def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:\n\n inputs = inputs.detach()\n\n if inputs.device != torch.device(self._device):\n inputs = inputs.to(self._device)\n\n with torch.no_grad():\n outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)\n self._check_feature_shapes(outputs)\n\n return outputs\n", "path": "ignite/metrics/gan/utils.py"}]} | 1,420 | 198 |
gh_patches_debug_41631 | rasdani/github-patches | git_diff | acl-org__acl-anthology-3045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compact bibfile
Overleaf has a 50 MB file size limit, and `anthology.bib` is now larger than this. We should create a compact BibTeX export using string substitution [as suggested here](https://twitter.com/daniel_hers/status/1744434842895294496). I'm not sure if this should just replace the current Anthology bib file, or become a new export, say `anthology-compact.bib`:
* The advantage of encompactifying the current file is it would work for everyone without having to change anything.
* The disadvantage is it complicates cutting-and-pasting.
* However, we already have [https://aclanthology.org/anthology+abstracts.bib.gz](https://aclanthology.org/anthology+abstracts.bib.gz). I'm not sure how often people cut-and-paste individual entries from the complete file anyway; it seems that its main use is in Overleaf.
I'm therefore inclined to simply replace `anthology.bib`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/create_bibtex.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2019 Marcel Bollmann <[email protected]>
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]
19
20 Creates .bib files for all papers in the Hugo directory.
21
22 Options:
23 --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]
24 --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]
25 --debug Output debug-level log messages.
26 -c, --clean Delete existing files in target directory before generation.
27 -h, --help Display this helpful text.
28 """
29
30 from docopt import docopt
31 from tqdm import tqdm
32 import gzip
33 import logging as log
34 import os
35
36 from anthology import Anthology
37 from anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year
38 from create_hugo_pages import check_directory
39
40
41 def volume_sorter(volume_tuple):
42 """
43 Extracts the year so that we can sort by the year and then
44 the collection ID.
45 """
46 volume_id = volume_tuple[0]
47 collection_id, year, _ = deconstruct_anthology_id(volume_id)
48 year = infer_year(collection_id)
49 return year, volume_id
50
51
52 def create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:
53 """Creates .bib files for all papers.
54
55 :param anthology: The Anthology object.
56 :param trgdir: The target directory to write to
57 :param limit: If nonzero, only generate {limit} entries per volume
58 :param clean: Clean the directory first
59 """
60 if not check_directory("{}/papers".format(trgdir), clean=clean):
61 return
62 if not check_directory("{}/volumes".format(trgdir), clean=clean):
63 return
64
65 log.info("Creating BibTeX files for all papers...")
66 with open(
67 "{}/anthology.bib".format(trgdir), "wt", encoding="utf-8"
68 ) as file_anthology_raw, gzip.open(
69 "{}/anthology.bib.gz".format(trgdir), "wt", encoding="utf-8"
70 ) as file_anthology, gzip.open(
71 "{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8"
72 ) as file_anthology_with_abstracts:
73 for volume_id, volume in tqdm(
74 sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)
75 ):
76 volume_dir = trgdir
77 if not os.path.exists(volume_dir):
78 os.makedirs(volume_dir)
79 with open("{}/volumes/{}.bib".format(trgdir, volume_id), "w") as file_volume:
80 for i, paper in enumerate(volume, 1):
81 if limit and i > limit:
82 break
83
84 with open(
85 "{}/{}.bib".format(volume_dir, paper.full_id), "w"
86 ) as file_paper:
87 contents = paper.as_bibtex()
88 print(contents, file=file_paper)
89 print(contents, file=file_anthology_with_abstracts)
90
91 concise_contents = paper.as_bibtex(concise=True)
92 print(concise_contents, file=file_volume)
93 print(concise_contents, file=file_anthology)
94 print(concise_contents, file=file_anthology_raw)
95
96
97 if __name__ == "__main__":
98 args = docopt(__doc__)
99 scriptdir = os.path.dirname(os.path.abspath(__file__))
100 if "{scriptdir}" in args["--importdir"]:
101 args["--importdir"] = os.path.abspath(
102 args["--importdir"].format(scriptdir=scriptdir)
103 )
104 if "{scriptdir}" in args["--exportdir"]:
105 args["--exportdir"] = os.path.abspath(
106 args["--exportdir"].format(scriptdir=scriptdir)
107 )
108
109 log_level = log.DEBUG if args["--debug"] else log.INFO
110 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level)
111 tracker = SeverityTracker()
112 log.getLogger().addHandler(tracker)
113
114 # If NOBIB is set, generate only three bibs per volume
115 limit = 0 if os.environ.get("NOBIB", "false") == "false" else 3
116 log.info(f"NOBIB=true, generating only {limit} BibTEX files per volume")
117
118 anthology = Anthology(importdir=args["--importdir"], fast_load=True)
119 create_bibtex(anthology, args["--exportdir"], limit=limit, clean=args["--clean"])
120
121 if tracker.highest >= log.ERROR:
122 exit(1)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/create_bibtex.py b/bin/create_bibtex.py
--- a/bin/create_bibtex.py
+++ b/bin/create_bibtex.py
@@ -27,6 +27,7 @@
-h, --help Display this helpful text.
"""
+import re
from docopt import docopt
from tqdm import tqdm
import gzip
@@ -70,9 +71,20 @@
) as file_anthology, gzip.open(
"{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8"
) as file_anthology_with_abstracts:
+ # Add some shortcuts to the consolidated bib file
+ print(
+ "@string{acl = {Association for Computational Linguistics}}",
+ file=file_anthology_raw,
+ )
+ print("@string{anth = {https://aclanthology.org/}}", file=file_anthology_raw)
+ print(file=file_anthology_raw)
+
for volume_id, volume in tqdm(
sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)
):
+ # reset this each time
+ abbrev = None
+
volume_dir = trgdir
if not os.path.exists(volume_dir):
os.makedirs(volume_dir)
@@ -91,6 +103,53 @@
concise_contents = paper.as_bibtex(concise=True)
print(concise_contents, file=file_volume)
print(concise_contents, file=file_anthology)
+
+ # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)
+ # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf
+
+ concise_contents = concise_contents.replace(
+ 'publisher = "Association for Computational Linguistics",',
+ "publisher = acl,",
+ )
+ concise_contents = re.sub(
+ r'url = "https://aclanthology.org/(.*)"',
+ r"url = anth # {\1}",
+ concise_contents,
+ )
+
+ # Abbreviate the booktitle by extracting it and printing it before
+ # the first entry in each volume
+ if concise_contents.startswith("@proceedings"):
+ # Grab the title string and create the alias
+ abbrev = f"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}"
+ try:
+ booktitle = re.search(
+ r" title = \"(.*)\",", concise_contents
+ ).group(1)
+ print(
+ f"@string{{{abbrev} = {{{booktitle}}}}}",
+ file=file_anthology_raw,
+ )
+ except AttributeError:
+ import sys
+
+ print(
+ f"Could not find title for {volume_id}",
+ file=sys.stderr,
+ )
+ abbrev = None
+
+ if abbrev is not None and "booktitle" in concise_contents:
+ # substitute the alias for the booktitle
+ concise_contents = re.sub(
+ r" booktitle = (\".*\"),",
+ f" booktitle = {abbrev},",
+ concise_contents,
+ )
+
+ # Remove newlines, indentations, and double-spaces around author separators
+ concise_contents = re.sub(r"\s+", " ", concise_contents)
+
print(concise_contents, file=file_anthology_raw)
| {"golden_diff": "diff --git a/bin/create_bibtex.py b/bin/create_bibtex.py\n--- a/bin/create_bibtex.py\n+++ b/bin/create_bibtex.py\n@@ -27,6 +27,7 @@\n -h, --help Display this helpful text.\n \"\"\"\n \n+import re\n from docopt import docopt\n from tqdm import tqdm\n import gzip\n@@ -70,9 +71,20 @@\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n+ # Add some shortcuts to the consolidated bib file\n+ print(\n+ \"@string{acl = {Association for Computational Linguistics}}\",\n+ file=file_anthology_raw,\n+ )\n+ print(\"@string{anth = {https://aclanthology.org/}}\", file=file_anthology_raw)\n+ print(file=file_anthology_raw)\n+\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n+ # reset this each time\n+ abbrev = None\n+\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n@@ -91,6 +103,53 @@\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n+\n+ # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)\n+ # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf\n+\n+ concise_contents = concise_contents.replace(\n+ 'publisher = \"Association for Computational Linguistics\",',\n+ \"publisher = acl,\",\n+ )\n+ concise_contents = re.sub(\n+ r'url = \"https://aclanthology.org/(.*)\"',\n+ r\"url = anth # {\\1}\",\n+ concise_contents,\n+ )\n+\n+ # Abbreviate the booktitle by extracting it and printing it before\n+ # the first entry in each volume\n+ if concise_contents.startswith(\"@proceedings\"):\n+ # Grab the title string and create the alias\n+ abbrev = f\"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}\"\n+ try:\n+ booktitle = re.search(\n+ r\" title = \\\"(.*)\\\",\", concise_contents\n+ ).group(1)\n+ print(\n+ f\"@string{{{abbrev} = {{{booktitle}}}}}\",\n+ file=file_anthology_raw,\n+ )\n+ except AttributeError:\n+ import sys\n+\n+ print(\n+ f\"Could not find title for {volume_id}\",\n+ file=sys.stderr,\n+ )\n+ abbrev = None\n+\n+ if abbrev is not None and \"booktitle\" in concise_contents:\n+ # substitute the alias for the booktitle\n+ concise_contents = re.sub(\n+ r\" booktitle = (\\\".*\\\"),\",\n+ f\" booktitle = {abbrev},\",\n+ concise_contents,\n+ )\n+\n+ # Remove newlines, indentations, and double-spaces around author separators\n+ concise_contents = re.sub(r\"\\s+\", \" \", concise_contents)\n+\n print(concise_contents, file=file_anthology_raw)\n", "issue": "Compact bibfile\nOverleaf has a 50 MB file size limit, and `anthology.bib` is now larger than this. We should create a compact BibTeX export using string substitution [as suggested here](https://twitter.com/daniel_hers/status/1744434842895294496). I'm not sure if this should just replace the current Anthology bib file, or become a new export, say `anthology-compact.bib`:\r\n\r\n* The advantage of encompactifying the current file is it would work for everyone without having to change anything.\r\n* The disadvantage is it complicates cutting-and-pasting.\r\n* However, we already have [https://aclanthology.org/anthology+abstracts.bib.gz](https://aclanthology.org/anthology+abstracts.bib.gz). 
I'm not sure how often people cut-and-paste individual entries from the complete file, anyway; it seems that it's main use is in Overleaf.\r\n\r\nI'm therefore include to simply replace `anthology.bib`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]\n\nCreates .bib files for all papers in the Hugo directory.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom tqdm import tqdm\nimport gzip\nimport logging as log\nimport os\n\nfrom anthology import Anthology\nfrom anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year\nfrom create_hugo_pages import check_directory\n\n\ndef volume_sorter(volume_tuple):\n \"\"\"\n Extracts the year so that we can sort by the year and then\n the collection ID.\n \"\"\"\n volume_id = volume_tuple[0]\n collection_id, year, _ = deconstruct_anthology_id(volume_id)\n year = infer_year(collection_id)\n return year, volume_id\n\n\ndef create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:\n \"\"\"Creates .bib files for all papers.\n\n :param anthology: The Anthology object.\n :param trgdir: The target directory to write to\n :param limit: If nonzero, only generate {limit} entries per volume\n :param clean: Clean the directory first\n \"\"\"\n if not check_directory(\"{}/papers\".format(trgdir), clean=clean):\n return\n if not check_directory(\"{}/volumes\".format(trgdir), clean=clean):\n return\n\n log.info(\"Creating BibTeX files for all papers...\")\n with open(\n \"{}/anthology.bib\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_raw, gzip.open(\n \"{}/anthology.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n with open(\"{}/volumes/{}.bib\".format(trgdir, volume_id), \"w\") as file_volume:\n for i, paper in enumerate(volume, 1):\n if limit and i > limit:\n break\n\n with open(\n \"{}/{}.bib\".format(volume_dir, paper.full_id), \"w\"\n ) as file_paper:\n contents = paper.as_bibtex()\n print(contents, file=file_paper)\n print(contents, file=file_anthology_with_abstracts)\n\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n 
print(concise_contents, file=file_anthology_raw)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n if \"{scriptdir}\" in args[\"--exportdir\"]:\n args[\"--exportdir\"] = os.path.abspath(\n args[\"--exportdir\"].format(scriptdir=scriptdir)\n )\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n # If NOBIB is set, generate only three bibs per volume\n limit = 0 if os.environ.get(\"NOBIB\", \"false\") == \"false\" else 3\n log.info(f\"NOBIB=true, generating only {limit} BibTEX files per volume\")\n\n anthology = Anthology(importdir=args[\"--importdir\"], fast_load=True)\n create_bibtex(anthology, args[\"--exportdir\"], limit=limit, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_bibtex.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]\n\nCreates .bib files for all papers in the Hugo directory.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n --exportdir=DIR Directory to write exported files to. 
[default: {scriptdir}/../build/data-export/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nimport re\nfrom docopt import docopt\nfrom tqdm import tqdm\nimport gzip\nimport logging as log\nimport os\n\nfrom anthology import Anthology\nfrom anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year\nfrom create_hugo_pages import check_directory\n\n\ndef volume_sorter(volume_tuple):\n \"\"\"\n Extracts the year so that we can sort by the year and then\n the collection ID.\n \"\"\"\n volume_id = volume_tuple[0]\n collection_id, year, _ = deconstruct_anthology_id(volume_id)\n year = infer_year(collection_id)\n return year, volume_id\n\n\ndef create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:\n \"\"\"Creates .bib files for all papers.\n\n :param anthology: The Anthology object.\n :param trgdir: The target directory to write to\n :param limit: If nonzero, only generate {limit} entries per volume\n :param clean: Clean the directory first\n \"\"\"\n if not check_directory(\"{}/papers\".format(trgdir), clean=clean):\n return\n if not check_directory(\"{}/volumes\".format(trgdir), clean=clean):\n return\n\n log.info(\"Creating BibTeX files for all papers...\")\n with open(\n \"{}/anthology.bib\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_raw, gzip.open(\n \"{}/anthology.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n # Add some shortcuts to the consolidated bib file\n print(\n \"@string{acl = {Association for Computational Linguistics}}\",\n file=file_anthology_raw,\n )\n print(\"@string{anth = {https://aclanthology.org/}}\", file=file_anthology_raw)\n print(file=file_anthology_raw)\n\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n # reset this each time\n abbrev = None\n\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n with open(\"{}/volumes/{}.bib\".format(trgdir, volume_id), \"w\") as file_volume:\n for i, paper in enumerate(volume, 1):\n if limit and i > limit:\n break\n\n with open(\n \"{}/{}.bib\".format(volume_dir, paper.full_id), \"w\"\n ) as file_paper:\n contents = paper.as_bibtex()\n print(contents, file=file_paper)\n print(contents, file=file_anthology_with_abstracts)\n\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n\n # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)\n # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf\n\n concise_contents = concise_contents.replace(\n 'publisher = \"Association for Computational Linguistics\",',\n \"publisher = acl,\",\n )\n concise_contents = re.sub(\n r'url = \"https://aclanthology.org/(.*)\"',\n r\"url = anth # {\\1}\",\n concise_contents,\n )\n\n # Abbreviate the booktitle by extracting it and printing it before\n # the first entry in each volume\n if concise_contents.startswith(\"@proceedings\"):\n # Grab the title string and create the alias\n abbrev = f\"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}\"\n try:\n booktitle = re.search(\n r\" title = \\\"(.*)\\\",\", concise_contents\n ).group(1)\n print(\n f\"@string{{{abbrev} = 
{{{booktitle}}}}}\",\n file=file_anthology_raw,\n )\n except AttributeError:\n import sys\n\n print(\n f\"Could not find title for {volume_id}\",\n file=sys.stderr,\n )\n abbrev = None\n\n if abbrev is not None and \"booktitle\" in concise_contents:\n # substitute the alias for the booktitle\n concise_contents = re.sub(\n r\" booktitle = (\\\".*\\\"),\",\n f\" booktitle = {abbrev},\",\n concise_contents,\n )\n\n # Remove newlines, indentations, and double-spaces around author separators\n concise_contents = re.sub(r\"\\s+\", \" \", concise_contents)\n\n print(concise_contents, file=file_anthology_raw)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n if \"{scriptdir}\" in args[\"--exportdir\"]:\n args[\"--exportdir\"] = os.path.abspath(\n args[\"--exportdir\"].format(scriptdir=scriptdir)\n )\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n # If NOBIB is set, generate only three bibs per volume\n limit = 0 if os.environ.get(\"NOBIB\", \"false\") == \"false\" else 3\n log.info(f\"NOBIB=true, generating only {limit} BibTEX files per volume\")\n\n anthology = Anthology(importdir=args[\"--importdir\"], fast_load=True)\n create_bibtex(anthology, args[\"--exportdir\"], limit=limit, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_bibtex.py"}]} | 1,918 | 769 |
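A minimal standalone sketch of the space-saving substitutions introduced in the golden diff above (the BibTeX entry below is hypothetical, and `acl` / `anth` are the `@string` macros the patch prepends to the consolidated file):

```python
import re

# Hypothetical entry, used only to illustrate the substitutions from the patch above.
entry = '''@inproceedings{example-2023-demo,
    title = "A Demo Paper",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.demo-1.1",
}'''

# Replace the verbose publisher string with the @string macro `acl`.
entry = entry.replace(
    'publisher = "Association for Computational Linguistics",', "publisher = acl,"
)
# Concatenate the `anth` macro with the paper-specific path instead of the full URL.
entry = re.sub(r'url = "https://aclanthology.org/(.*)"', r"url = anth # {\1}", entry)
# Collapse newlines and indentation, as the patch does for the raw consolidated file.
entry = re.sub(r"\s+", " ", entry)
print(entry)
```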
gh_patches_debug_30939 | rasdani/github-patches | git_diff | keras-team__keras-nlp-357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our continuous testing for model presets
Opening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope.
I would like to propose the following changes to our "network_tests" for presets:
- We collocate the preset testing within the model directory, and use test annotations to control how they are run.
- We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code.
- We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_nlp/conftest.py`
Content:
```
1 # Copyright 2022 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import sys
15
16 import pytest
17
18
19 def pytest_addoption(parser):
20 parser.addoption(
21 "--runslow", action="store_true", default=False, help="run slow tests"
22 )
23
24
25 def pytest_configure(config):
26 config.addinivalue_line("markers", "slow: mark test as slow to run")
27
28
29 def pytest_collection_modifyitems(config, items):
30 if config.getoption("--runslow"):
31 # --runslow given in cli: do not skip slow tests
32 return
33 skip_slow = pytest.mark.skip(reason="need --runslow option to run")
34 skip_xla = pytest.mark.skipif(
35 sys.platform == "darwin", reason="XLA unsupported on MacOS."
36 )
37
38 for item in items:
39 if "slow" in item.keywords:
40 item.add_marker(skip_slow)
41 if "jit_compile_true" in item.name:
42 item.add_marker(skip_xla)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py
--- a/keras_nlp/conftest.py
+++ b/keras_nlp/conftest.py
@@ -18,25 +18,48 @@
def pytest_addoption(parser):
parser.addoption(
- "--runslow", action="store_true", default=False, help="run slow tests"
+ "--run_large",
+ action="store_true",
+ default=False,
+ help="run large tests",
+ )
+ parser.addoption(
+ "--run_extra_large",
+ action="store_true",
+ default=False,
+ help="run extra_large tests",
)
def pytest_configure(config):
- config.addinivalue_line("markers", "slow: mark test as slow to run")
+ config.addinivalue_line(
+ "markers", "large: mark test as being slow or requiring a network"
+ )
+ config.addinivalue_line(
+ "markers",
+ "extra_large: mark test as being too large to run continuously",
+ )
def pytest_collection_modifyitems(config, items):
- if config.getoption("--runslow"):
- # --runslow given in cli: do not skip slow tests
- return
- skip_slow = pytest.mark.skip(reason="need --runslow option to run")
+ run_extra_large_tests = config.getoption("--run_extra_large")
+ # Run large tests for --run_extra_large or --run_large.
+ run_large_tests = config.getoption("--run_large") or run_extra_large_tests
+
+ # Messages to annotate skipped tests with.
skip_xla = pytest.mark.skipif(
sys.platform == "darwin", reason="XLA unsupported on MacOS."
)
-
+ skip_large = pytest.mark.skipif(
+ not run_large_tests, reason="need --run_large option to run"
+ )
+ skip_extra_large = pytest.mark.skipif(
+ not run_extra_large_tests, reason="need --run_extra_large option to run"
+ )
for item in items:
- if "slow" in item.keywords:
- item.add_marker(skip_slow)
if "jit_compile_true" in item.name:
item.add_marker(skip_xla)
+ if "large" in item.keywords:
+ item.add_marker(skip_large)
+ if "extra_large" in item.keywords:
+ item.add_marker(skip_extra_large)
| {"golden_diff": "diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py\n--- a/keras_nlp/conftest.py\n+++ b/keras_nlp/conftest.py\n@@ -18,25 +18,48 @@\n \n def pytest_addoption(parser):\n parser.addoption(\n- \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n+ \"--run_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run large tests\",\n+ )\n+ parser.addoption(\n+ \"--run_extra_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run extra_large tests\",\n )\n \n \n def pytest_configure(config):\n- config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n+ config.addinivalue_line(\n+ \"markers\", \"large: mark test as being slow or requiring a network\"\n+ )\n+ config.addinivalue_line(\n+ \"markers\",\n+ \"extra_large: mark test as being too large to run continuously\",\n+ )\n \n \n def pytest_collection_modifyitems(config, items):\n- if config.getoption(\"--runslow\"):\n- # --runslow given in cli: do not skip slow tests\n- return\n- skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n+ run_extra_large_tests = config.getoption(\"--run_extra_large\")\n+ # Run large tests for --run_extra_large or --run_large.\n+ run_large_tests = config.getoption(\"--run_large\") or run_extra_large_tests\n+\n+ # Messages to annotate skipped tests with.\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n-\n+ skip_large = pytest.mark.skipif(\n+ not run_large_tests, reason=\"need --run_large option to run\"\n+ )\n+ skip_extra_large = pytest.mark.skipif(\n+ not run_extra_large_tests, reason=\"need --run_extra_large option to run\"\n+ )\n for item in items:\n- if \"slow\" in item.keywords:\n- item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n+ if \"large\" in item.keywords:\n+ item.add_marker(skip_large)\n+ if \"extra_large\" in item.keywords:\n+ item.add_marker(skip_extra_large)\n", "issue": "Improve our continuous testing for model presets\nOpening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope.\r\n\r\nI would like to propose the following changes to our \"network_tests\" for presets:\r\n\r\n - We collocate the preset testing within the model directory, and use test annotations to control how they are run.\r\n - We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code.\r\n - We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n\n\ndef pytest_configure(config):\n 
config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n", "path": "keras_nlp/conftest.py"}], "after_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--run_large\",\n action=\"store_true\",\n default=False,\n help=\"run large tests\",\n )\n parser.addoption(\n \"--run_extra_large\",\n action=\"store_true\",\n default=False,\n help=\"run extra_large tests\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\", \"large: mark test as being slow or requiring a network\"\n )\n config.addinivalue_line(\n \"markers\",\n \"extra_large: mark test as being too large to run continuously\",\n )\n\n\ndef pytest_collection_modifyitems(config, items):\n run_extra_large_tests = config.getoption(\"--run_extra_large\")\n # Run large tests for --run_extra_large or --run_large.\n run_large_tests = config.getoption(\"--run_large\") or run_extra_large_tests\n\n # Messages to annotate skipped tests with.\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n skip_large = pytest.mark.skipif(\n not run_large_tests, reason=\"need --run_large option to run\"\n )\n skip_extra_large = pytest.mark.skipif(\n not run_extra_large_tests, reason=\"need --run_extra_large option to run\"\n )\n for item in items:\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n if \"large\" in item.keywords:\n item.add_marker(skip_large)\n if \"extra_large\" in item.keywords:\n item.add_marker(skip_extra_large)\n", "path": "keras_nlp/conftest.py"}]} | 809 | 540 |
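Under the marker scheme introduced by the patch above, preset tests would opt in with `pytest.mark` annotations instead of the old `slow` marker. A sketch, assuming the patched `conftest.py` is on the collection path (test names and bodies are illustrative only):

```python
import pytest

@pytest.mark.large          # collected, but skipped unless pytest is run with --run_large
def test_smallest_preset():
    assert True  # placeholder; a real test would load the smallest preset and check outputs

@pytest.mark.extra_large    # skipped unless pytest is run with --run_extra_large
def test_every_preset():
    assert True  # placeholder; too heavy to run continuously
```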
gh_patches_debug_59678 | rasdani/github-patches | git_diff | mozilla__bugbug-31 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a classifier to detect bugs that need QA
Needed for https://github.com/mozilla/relman-auto-nag/issues/227.
To do this, we'll need to collect some labels.
We can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag.
We can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/qaneeded.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from sklearn.feature_extraction import DictVectorizer
8 from sklearn.pipeline import FeatureUnion
9 from sklearn.pipeline import Pipeline
10
11 from bugbug import bug_features
12 from bugbug import labels
13 from bugbug.model import Model
14 from bugbug.utils import DictSelector
15
16
17 class QANeededModel(Model):
18 def __init__(self, lemmatization=False):
19 Model.__init__(self, lemmatization)
20
21 self.classes = labels.get_qa_needed_labels()
22
23 feature_extractors = [
24 bug_features.has_str(),
25 bug_features.has_regression_range(),
26 bug_features.severity(),
27 bug_features.keywords(),
28 bug_features.is_coverity_issue(),
29 bug_features.has_crash_signature(),
30 bug_features.has_url(),
31 bug_features.has_w3c_url(),
32 bug_features.has_github_url(),
33 bug_features.whiteboard(),
34 bug_features.patches(),
35 bug_features.landings(),
36 bug_features.title(),
37 bug_features.comments(),
38 ]
39
40 self.extraction_pipeline = Pipeline([
41 ('bug_extractor', bug_features.BugExtractor(feature_extractors)),
42 ('union', FeatureUnion(
43 transformer_list=[
44 ('data', Pipeline([
45 ('selector', DictSelector(key='data')),
46 ('vect', DictVectorizer()),
47 ])),
48
49 ('title', Pipeline([
50 ('selector', DictSelector(key='title')),
51 ('tfidf', self.text_vectorizer(stop_words='english')),
52 ])),
53
54 ('comments', Pipeline([
55 ('selector', DictSelector(key='comments')),
56 ('tfidf', self.text_vectorizer(stop_words='english')),
57 ])),
58 ],
59 )),
60 ])
61
62 self.clf = xgboost.XGBClassifier(n_jobs=16)
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py
--- a/bugbug/models/qaneeded.py
+++ b/bugbug/models/qaneeded.py
@@ -24,7 +24,7 @@
bug_features.has_str(),
bug_features.has_regression_range(),
bug_features.severity(),
- bug_features.keywords(),
+ bug_features.keywords(set(['qawanted'])),
bug_features.is_coverity_issue(),
bug_features.has_crash_signature(),
bug_features.has_url(),
| {"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -24,7 +24,7 @@\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n- bug_features.keywords(),\n+ bug_features.keywords(set(['qawanted'])),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n", "issue": "Create a classifier to detect bugs that need QA\nNeeded for https://github.com/mozilla/relman-auto-nag/issues/227.\r\n\r\nTo do this, we'll need to collect some labels.\r\nWe can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag.\r\nWe can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import labels\nfrom bugbug.model import Model\nfrom bugbug.utils import DictSelector\n\n\nclass QANeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.classes = labels.get_qa_needed_labels()\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.comments(),\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors)),\n ('union', FeatureUnion(\n transformer_list=[\n ('data', Pipeline([\n ('selector', DictSelector(key='data')),\n ('vect', DictVectorizer()),\n ])),\n\n ('title', Pipeline([\n ('selector', DictSelector(key='title')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n\n ('comments', Pipeline([\n ('selector', DictSelector(key='comments')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n ],\n )),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n", "path": "bugbug/models/qaneeded.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import labels\nfrom bugbug.model import Model\nfrom bugbug.utils import DictSelector\n\n\nclass QANeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.classes = labels.get_qa_needed_labels()\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(set(['qawanted'])),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.comments(),\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors)),\n ('union', FeatureUnion(\n transformer_list=[\n ('data', Pipeline([\n ('selector', DictSelector(key='data')),\n ('vect', DictVectorizer()),\n ])),\n\n ('title', Pipeline([\n ('selector', DictSelector(key='title')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n\n ('comments', Pipeline([\n ('selector', DictSelector(key='comments')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n ],\n )),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n", "path": "bugbug/models/qaneeded.py"}]} | 892 | 115 |
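The one-line fix above passes `set(['qawanted'])` into `bug_features.keywords(...)`, presumably the set of keywords the extractor should ignore, so the keyword that defines the positive label is not also fed to the classifier as a feature. A toy illustration of that leakage guard (names are hypothetical, not bugbug APIs):

```python
# Keywords that define the label must not leak into the feature set.
LABEL_KEYWORDS = {"qawanted"}

def keyword_features(bug_keywords):
    return {kw: True for kw in bug_keywords if kw not in LABEL_KEYWORDS}

print(keyword_features(["qawanted", "crash", "regression"]))
# {'crash': True, 'regression': True}
```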
gh_patches_debug_9063 | rasdani/github-patches | git_diff | pypa__virtualenv-1886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`virtualenv --version` prints spurious error as of 20.0.24
**Issue**
When running `virtualenv --version`, a logger error is printed to stderr, though the return code is still 0.
**Environment**
Tested with Python 3.7 and 3.8, virtualenvs managed with pipenv
Ubuntu 18.04 on WSL
```
$ rm-rf tmp && mkdir tmp && cd tmp
$ pipenv install "virtualenv==20.0.23"
$ pipenv run virtualenv --version
virtualenv 20.0.23 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py
$ rm-rf tmp && mkdir tmp && cd tmp
$ pipenv install "virtualenv==20.0.24"
$ pipenv run virtualenv --version
virtualenv 20.0.24 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py
ERROR:root:SystemExit: 0
$ pipenv run virtualenv --version > /dev/null
ERROR:root:SystemExit: 0
$ echo $?
0
```
Nothing else is printed with `-vvv --with-traceback`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/__main__.py`
Content:
```
1 from __future__ import absolute_import, print_function, unicode_literals
2
3 import logging
4 import os
5 import sys
6 from datetime import datetime
7
8
9 def run(args=None, options=None):
10 start = datetime.now()
11 from virtualenv.util.error import ProcessCallFailed
12 from virtualenv.run import cli_run
13
14 if args is None:
15 args = sys.argv[1:]
16 try:
17 session = cli_run(args, options)
18 logging.warning(LogSession(session, start))
19 except ProcessCallFailed as exception:
20 print("subprocess call failed for {} with code {}".format(exception.cmd, exception.code))
21 print(exception.out, file=sys.stdout, end="")
22 print(exception.err, file=sys.stderr, end="")
23 raise SystemExit(exception.code)
24
25
26 class LogSession(object):
27 def __init__(self, session, start):
28 self.session = session
29 self.start = start
30
31 def __str__(self):
32 from virtualenv.util.six import ensure_text
33
34 spec = self.session.creator.interpreter.spec
35 elapsed = (datetime.now() - self.start).total_seconds() * 1000
36 lines = [
37 "created virtual environment {} in {:.0f}ms".format(spec, elapsed),
38 " creator {}".format(ensure_text(str(self.session.creator))),
39 ]
40 if self.session.seeder.enabled:
41 lines += (
42 " seeder {}".format(ensure_text(str(self.session.seeder))),
43 " added seed packages: {}".format(
44 ", ".join(
45 sorted(
46 "==".join(i.stem.split("-"))
47 for i in self.session.creator.purelib.iterdir()
48 if i.suffix == ".dist-info"
49 ),
50 ),
51 ),
52 )
53 if self.session.activators:
54 lines.append(" activators {}".format(",".join(i.__class__.__name__ for i in self.session.activators)))
55 return os.linesep.join(lines)
56
57
58 def run_with_catch(args=None):
59 from virtualenv.config.cli.parser import VirtualEnvOptions
60
61 options = VirtualEnvOptions()
62 try:
63 run(args, options)
64 except (KeyboardInterrupt, SystemExit, Exception) as exception:
65 try:
66 if getattr(options, "with_traceback", False):
67 raise
68 else:
69 logging.error("%s: %s", type(exception).__name__, exception)
70 code = exception.code if isinstance(exception, SystemExit) else 1
71 sys.exit(code)
72 finally:
73 logging.shutdown() # force flush of log messages before the trace is printed
74
75
76 if __name__ == "__main__": # pragma: no cov
77 run_with_catch() # pragma: no cov
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/virtualenv/__main__.py b/src/virtualenv/__main__.py
--- a/src/virtualenv/__main__.py
+++ b/src/virtualenv/__main__.py
@@ -66,7 +66,8 @@
if getattr(options, "with_traceback", False):
raise
else:
- logging.error("%s: %s", type(exception).__name__, exception)
+ if not (isinstance(exception, SystemExit) and exception.code == 0):
+ logging.error("%s: %s", type(exception).__name__, exception)
code = exception.code if isinstance(exception, SystemExit) else 1
sys.exit(code)
finally:
| {"golden_diff": "diff --git a/src/virtualenv/__main__.py b/src/virtualenv/__main__.py\n--- a/src/virtualenv/__main__.py\n+++ b/src/virtualenv/__main__.py\n@@ -66,7 +66,8 @@\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n- logging.error(\"%s: %s\", type(exception).__name__, exception)\n+ if not (isinstance(exception, SystemExit) and exception.code == 0):\n+ logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if isinstance(exception, SystemExit) else 1\n sys.exit(code)\n finally:\n", "issue": "`virtualenv --version` prints spurious error as of 20.0.24\n**Issue**\r\n\r\nWhen running `virtualenv --version`, a logger error is printed to stderr, though the return code is still 0.\r\n\r\n**Environment**\r\n\r\nTested with Python 3.7 and 3.8, virtualenvs managed with pipenv\r\nUbuntu 18.04 on WSL\r\n\r\n```\r\n$ rm-rf tmp && mkdir tmp && cd tmp\r\n$ pipenv install \"virtualenv==20.0.23\"\r\n$ pipenv run virtualenv --version\r\nvirtualenv 20.0.23 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py\r\n\r\n$ rm-rf tmp && mkdir tmp && cd tmp\r\n$ pipenv install \"virtualenv==20.0.24\"\r\n$ pipenv run virtualenv --version\r\nvirtualenv 20.0.24 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py\r\nERROR:root:SystemExit: 0\r\n$ pipenv run virtualenv --version > /dev/null\r\nERROR:root:SystemExit: 0\r\n$ echo $?\r\n0\r\n```\r\n\r\nNothing else is printed with `-vvv --with-traceback`\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\n\n\ndef run(args=None, options=None):\n start = datetime.now()\n from virtualenv.util.error import ProcessCallFailed\n from virtualenv.run import cli_run\n\n if args is None:\n args = sys.argv[1:]\n try:\n session = cli_run(args, options)\n logging.warning(LogSession(session, start))\n except ProcessCallFailed as exception:\n print(\"subprocess call failed for {} with code {}\".format(exception.cmd, exception.code))\n print(exception.out, file=sys.stdout, end=\"\")\n print(exception.err, file=sys.stderr, end=\"\")\n raise SystemExit(exception.code)\n\n\nclass LogSession(object):\n def __init__(self, session, start):\n self.session = session\n self.start = start\n\n def __str__(self):\n from virtualenv.util.six import ensure_text\n\n spec = self.session.creator.interpreter.spec\n elapsed = (datetime.now() - self.start).total_seconds() * 1000\n lines = [\n \"created virtual environment {} in {:.0f}ms\".format(spec, elapsed),\n \" creator {}\".format(ensure_text(str(self.session.creator))),\n ]\n if self.session.seeder.enabled:\n lines += (\n \" seeder {}\".format(ensure_text(str(self.session.seeder))),\n \" added seed packages: {}\".format(\n \", \".join(\n sorted(\n \"==\".join(i.stem.split(\"-\"))\n for i in self.session.creator.purelib.iterdir()\n if i.suffix == \".dist-info\"\n ),\n ),\n ),\n )\n if self.session.activators:\n lines.append(\" activators {}\".format(\",\".join(i.__class__.__name__ for i in self.session.activators)))\n return os.linesep.join(lines)\n\n\ndef run_with_catch(args=None):\n from virtualenv.config.cli.parser import VirtualEnvOptions\n\n options = VirtualEnvOptions()\n try:\n run(args, options)\n except (KeyboardInterrupt, SystemExit, Exception) as exception:\n try:\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if 
isinstance(exception, SystemExit) else 1\n sys.exit(code)\n finally:\n logging.shutdown() # force flush of log messages before the trace is printed\n\n\nif __name__ == \"__main__\": # pragma: no cov\n run_with_catch() # pragma: no cov\n", "path": "src/virtualenv/__main__.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\n\n\ndef run(args=None, options=None):\n start = datetime.now()\n from virtualenv.util.error import ProcessCallFailed\n from virtualenv.run import cli_run\n\n if args is None:\n args = sys.argv[1:]\n try:\n session = cli_run(args, options)\n logging.warning(LogSession(session, start))\n except ProcessCallFailed as exception:\n print(\"subprocess call failed for {} with code {}\".format(exception.cmd, exception.code))\n print(exception.out, file=sys.stdout, end=\"\")\n print(exception.err, file=sys.stderr, end=\"\")\n raise SystemExit(exception.code)\n\n\nclass LogSession(object):\n def __init__(self, session, start):\n self.session = session\n self.start = start\n\n def __str__(self):\n from virtualenv.util.six import ensure_text\n\n spec = self.session.creator.interpreter.spec\n elapsed = (datetime.now() - self.start).total_seconds() * 1000\n lines = [\n \"created virtual environment {} in {:.0f}ms\".format(spec, elapsed),\n \" creator {}\".format(ensure_text(str(self.session.creator))),\n ]\n if self.session.seeder.enabled:\n lines += (\n \" seeder {}\".format(ensure_text(str(self.session.seeder))),\n \" added seed packages: {}\".format(\n \", \".join(\n sorted(\n \"==\".join(i.stem.split(\"-\"))\n for i in self.session.creator.purelib.iterdir()\n if i.suffix == \".dist-info\"\n ),\n ),\n ),\n )\n if self.session.activators:\n lines.append(\" activators {}\".format(\",\".join(i.__class__.__name__ for i in self.session.activators)))\n return os.linesep.join(lines)\n\n\ndef run_with_catch(args=None):\n from virtualenv.config.cli.parser import VirtualEnvOptions\n\n options = VirtualEnvOptions()\n try:\n run(args, options)\n except (KeyboardInterrupt, SystemExit, Exception) as exception:\n try:\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n if not (isinstance(exception, SystemExit) and exception.code == 0):\n logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if isinstance(exception, SystemExit) else 1\n sys.exit(code)\n finally:\n logging.shutdown() # force flush of log messages before the trace is printed\n\n\nif __name__ == \"__main__\": # pragma: no cov\n run_with_catch() # pragma: no cov\n", "path": "src/virtualenv/__main__.py"}]} | 1,250 | 151 |
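The guard added above keeps a clean `SystemExit(0)` (as raised by `--version` or `--help`) out of the error log while still exiting with the correct code. The check in isolation:

```python
import logging

def log_unless_clean_exit(exception):
    # Same condition as the patch: only a zero-code SystemExit is silenced.
    if not (isinstance(exception, SystemExit) and exception.code == 0):
        logging.error("%s: %s", type(exception).__name__, exception)

log_unless_clean_exit(SystemExit(0))  # nothing logged
log_unless_clean_exit(SystemExit(2))  # logged: "SystemExit: 2"
```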
gh_patches_debug_10902 | rasdani/github-patches | git_diff | google__flax-362 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pooling: passing "sequence of `n` `(low, high)` integer pairs" resulting in TypeError
Trying to pass a tuple or list of tuples to a pool operation's padding parameter gives out the following errors:
`TypeError: Unknown padding type: (1, 1).`
`TypeError : unhashable type: 'list' `
Sample code for reproducing the bug:
```python3
from flax import nn
from jax import random
class FlaxModel(nn.Module):
def apply(self, x):
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])
return x
rng = random.PRNGKey(0)
model, _ = FlaxModel.init_by_shape(rng, [(1, 100, 100, 1)])
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/nn/pooling.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Pooling modules."""
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as onp
21
22
23 def pool(inputs, init, reduce_fn, window_shape, strides, padding):
24 """Helper function to define pooling functions.
25
26 Pooling functions are implemented using the ReduceWindow XLA op.
27 NOTE: Be aware that pooling is not generally differentiable.
28 That means providing a reduce_fn that is differentiable does not imply
29 that pool is differentiable.
30
31 Args:
32 inputs: input data with dimensions (batch, window dims..., features).
33 init: the initial value for the reduction
34 reduce_fn: a reduce function of the form `(T, T) -> T`.
35 window_shape: a shape tuple defining the window to reduce over.
36 strides: a sequence of `n` integers, representing the inter-window
37 strides.
38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
39 of `n` `(low, high)` integer pairs that give the padding to apply before
40 and after each spatial dimension.
41 Returns:
42 The output of the reduction for each window slice.
43 """
44 strides = strides or (1,) * len(window_shape)
45 strides = (1,) + strides + (1,)
46 dims = (1,) + window_shape + (1,)
47 return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
48
49
50 def avg_pool(inputs, window_shape, strides=None, padding="VALID"):
51 """Pools the input by taking the average over a window.
52
53 Args:
54 inputs: input data with dimensions (batch, window dims..., features).
55 window_shape: a shape tuple defining the window to reduce over.
56 strides: a sequence of `n` integers, representing the inter-window
57 strides (default: `(1, ..., 1)`).
58 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
59 of `n` `(low, high)` integer pairs that give the padding to apply before
60 and after each spatial dimension (default: `'VALID'`).
61 Returns:
62 The average for each window slice.
63 """
64 y = pool(inputs, 0., lax.add, window_shape, strides, padding)
65 y = y / onp.prod(window_shape)
66 return y
67
68
69 def max_pool(inputs, window_shape, strides=None, padding="VALID"):
70 """Pools the input by taking the maximum of a window slice.
71
72 Args:
73 inputs: input data with dimensions (batch, window dims..., features).
74 window_shape: a shape tuple defining the window to reduce over.
75 strides: a sequence of `n` integers, representing the inter-window
76 strides (default: `(1, ..., 1)`).
77 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
78 of `n` `(low, high)` integer pairs that give the padding to apply before
79 and after each spatial dimension (default: `'VALID'`).
80 Returns:
81 The maximum for each window slice.
82 """
83 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
84 return y
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/nn/pooling.py b/flax/nn/pooling.py
--- a/flax/nn/pooling.py
+++ b/flax/nn/pooling.py
@@ -44,6 +44,14 @@
strides = strides or (1,) * len(window_shape)
strides = (1,) + strides + (1,)
dims = (1,) + window_shape + (1,)
+ if not isinstance(padding, str):
+ padding = tuple(map(tuple, padding))
+ assert(len(padding) == len(window_shape)), (
+ f"padding {padding} must specify pads for same number of dims as "
+ f"window_shape {window_shape}")
+ assert(all([len(x) == 2 for x in padding])), (
+ f"each entry in padding {padding} must be length 2")
+ padding = ((0,0),) + padding + ((0,0),)
return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
| {"golden_diff": "diff --git a/flax/nn/pooling.py b/flax/nn/pooling.py\n--- a/flax/nn/pooling.py\n+++ b/flax/nn/pooling.py\n@@ -44,6 +44,14 @@\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n+ if not isinstance(padding, str):\n+ padding = tuple(map(tuple, padding))\n+ assert(len(padding) == len(window_shape)), (\n+ f\"padding {padding} must specify pads for same number of dims as \"\n+ f\"window_shape {window_shape}\")\n+ assert(all([len(x) == 2 for x in padding])), (\n+ f\"each entry in padding {padding} must be length 2\")\n+ padding = ((0,0),) + padding + ((0,0),)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n", "issue": "Pooling: passing \"sequence of `n` `(low, high)` integer pairs\" resulting in TypeError\nTrying to pass a tuple or list of tuples to a pool operation's padding parameter gives out the following errors: \r\n`TypeError: Unknown padding type: (1, 1).`\r\n`TypeError : unhashable type: 'list' `\r\n\r\n\r\nSample code for reproducing the bug:\r\n```python3\r\nfrom flax import nn\r\nfrom jax import random\r\n\r\nclass FlaxModel(nn.Module):\r\n def apply(self, x):\r\n x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])\r\n return x\r\n\r\nrng = random.PRNGKey(0)\r\nmodel, _ = FlaxModel.init_by_shape(rng, [(1, 100, 100, 1)])\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as onp\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n 
strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / onp.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n", "path": "flax/nn/pooling.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as onp\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert(len(padding) == len(window_shape)), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert(all([len(x) == 2 for x in padding])), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0,0),) + padding + ((0,0),)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n\n\ndef avg_pool(inputs, window_shape, strides=None, 
padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / onp.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n", "path": "flax/nn/pooling.py"}]} | 1,428 | 225 |
gh_patches_debug_18242 | rasdani/github-patches | git_diff | Mailu__Mailu-1542 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dovecot does not use Redis, so it should be removed from the start script
In core/dovecot/start.py, REDIS_ADDRESS is resolved, but Redis is not used by Dovecot. It should be removed from the script.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dovecot/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import multiprocessing
6 import logging as log
7 import sys
8
9 from podop import run_server
10 from socrate import system, conf
11
12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
13
14 def start_podop():
15 os.setuid(8)
16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
17 run_server(0, "dovecot", "/tmp/podop.socket", [
18 ("quota", "url", url ),
19 ("auth", "url", url),
20 ("sieve", "url", url),
21 ])
22
23 # Actual startup script
24
25 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
26 os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
27 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
28 os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334")
29 if os.environ["WEBMAIL"] != "none":
30 os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
31
32 for dovecot_file in glob.glob("/conf/*.conf"):
33 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
34
35 os.makedirs("/conf/bin", exist_ok=True)
36 for script_file in glob.glob("/conf/*.script"):
37 out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
38 conf.jinja(script_file, os.environ, out_file)
39 os.chmod(out_file, 0o555)
40
41 # Run Podop, then postfix
42 multiprocessing.Process(target=start_podop).start()
43 os.system("chown mail:mail /mail")
44 os.system("chown -R mail:mail /var/lib/dovecot /conf")
45 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -21,13 +21,9 @@
])
# Actual startup script
-
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
-os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334")
-if os.environ["WEBMAIL"] != "none":
- os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
| {"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -21,13 +21,9 @@\n ])\n \n # Actual startup script\n-\n os.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\n-os.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\n os.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\n os.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\n-if os.environ[\"WEBMAIL\"] != \"none\":\n- os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n \n for dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n", "issue": "Dovecot does not use redis, so it should be removed from start script \nIn core/dovecot/start.py REDIS_ADDRESS is resolved but redis is not used on dovecot. It should be removed from the script.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\n\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", 
\"url\", url),\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]} | 866 | 230 |
gh_patches_debug_17431 | rasdani/github-patches | git_diff | translate__pootle-5736 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update_stores' last-updated date doesn't trickle up to the project overview/language list
When updating against templates, the /projects/projectname/ listing doesn't reflect the **last update**, **unless** the update affected a file in the **top-level** directory.
Within a language overview (/lang/projectname), changes deep in a directory hierarchy will also affect the parent directory's last-change date.
using pootle 2.8.0b5 (TDF)
Screenshots to clarify: the overview lists the last update as e.g. 3 weeks ago:

Drilling down into the language reveals that the files in xmlsecurity had actually been updated only 8 hours ago (specifically xmlsecurity/uiconfig/ui.po):

(Also, sorting by last updated is not working properly; it goes from 10 months to 8 hours to 3 weeks…)
--- END ISSUE ---
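For intuition, the behaviour being asked for is that a change deep in the tree should refresh the cached stats of every ancestor, so that higher-level listings pick up the new last-updated date. Below is a minimal, self-contained sketch of that idea; every name and path in it is invented for illustration and is not Pootle's actual API.

```python
from collections import defaultdict

# Toy revision registry: one counter per path, bumped for the path itself and
# for every ancestor directory whenever something below it changes.
_revisions = defaultdict(int)


def bump_revisions(path):
    """Increment the revision of `path` and of every ancestor directory."""
    parts = path.strip("/").split("/")
    for depth in range(1, len(parts) + 1):
        _revisions["/" + "/".join(parts[:depth])] += 1


def stats_cache_key(path):
    """Cache key for a listing; it changes whenever anything below `path` changes."""
    return "stats:%s:%d" % (path, _revisions[path])


# A deep update also refreshes the keys used by the project and language listings.
bump_revisions("/lo/libo_ui/xmlsecurity/uiconfig/ui.po")
print(stats_cache_key("/lo/libo_ui"))              # stats:/lo/libo_ui:1
print(stats_cache_key("/lo/libo_ui/xmlsecurity"))  # stats:/lo/libo_ui/xmlsecurity:1
```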
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_data/project_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from .utils import RelatedStoresDataTool, RelatedTPsDataTool
10
11
12 class ProjectDataTool(RelatedTPsDataTool):
13 """Retrieves aggregate stats for a Project"""
14
15 cache_key_name = "project"
16
17 def filter_data(self, qs):
18 return qs.filter(tp__project=self.context)
19
20
21 class ProjectResourceDataTool(RelatedStoresDataTool):
22 group_by = ("store__translation_project__language__code", )
23 cache_key_name = "project_resource"
24
25 @property
26 def project_path(self):
27 return (
28 "/%s%s"
29 % (self.project_code, self.tp_path))
30
31 @property
32 def tp_path(self):
33 return (
34 "/%s%s"
35 % (self.dir_path,
36 self.filename))
37
38 def filter_data(self, qs):
39 return (
40 qs.filter(store__translation_project__project__code=self.project_code)
41 .filter(store__tp_path__startswith=self.tp_path))
42
43 @property
44 def context_name(self):
45 return "/projects%s" % self.project_path
46
47
48 class ProjectSetDataTool(RelatedTPsDataTool):
49 group_by = ("tp__project__code", )
50 cache_key_name = "projects"
51
52 def get_root_child_path(self, child):
53 return child[self.group_by[0]]
54
55 @property
56 def context_name(self):
57 return "ALL"
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py
--- a/pootle/apps/pootle_data/project_data.py
+++ b/pootle/apps/pootle_data/project_data.py
@@ -6,6 +6,8 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+from pootle.core.delegate import revision
+
from .utils import RelatedStoresDataTool, RelatedTPsDataTool
@@ -17,6 +19,11 @@
def filter_data(self, qs):
return qs.filter(tp__project=self.context)
+ @property
+ def rev_cache_key(self):
+ return revision.get(
+ self.context.__class__)(self.context.directory).get(key="stats")
+
class ProjectResourceDataTool(RelatedStoresDataTool):
group_by = ("store__translation_project__language__code", )
| {"golden_diff": "diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py\n--- a/pootle/apps/pootle_data/project_data.py\n+++ b/pootle/apps/pootle_data/project_data.py\n@@ -6,6 +6,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from pootle.core.delegate import revision\n+\n from .utils import RelatedStoresDataTool, RelatedTPsDataTool\n \n \n@@ -17,6 +19,11 @@\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n \n+ @property\n+ def rev_cache_key(self):\n+ return revision.get(\n+ self.context.__class__)(self.context.directory).get(key=\"stats\")\n+\n \n class ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n", "issue": "update_stores' last updated date doesn't tickle up to project overview/language list\nWhen updating against templates, the /projects/projectname/ listing doesn't reflect the **last update**, **unless** the update affected a file in the **toplevel** dir.\r\n\r\nWithin a language overview (/lang/projectname), changes deep in a directory hierarchy will also affect the parent directory's last-change date.\r\n\r\nusing pootle 2.8.0b5 (TDF)\r\n\r\nscreenshots to clarify. overview lists last update as e.g. 3 weeks ago:\r\n\r\n\r\ndrilling down to the language reveals that the files in xmlsecurity actually had been updated only 8 hours ago (in fact xmlsecurity/uiconfig/ui.po)\r\n\r\n\r\n(also sorting by the last updated is not working properly, goes from 10months to 8 hours, to 3weeks\u2026) \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom .utils import RelatedStoresDataTool, RelatedTPsDataTool\n\n\nclass ProjectDataTool(RelatedTPsDataTool):\n \"\"\"Retrieves aggregate stats for a Project\"\"\"\n\n cache_key_name = \"project\"\n\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n\n\nclass ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n cache_key_name = \"project_resource\"\n\n @property\n def project_path(self):\n return (\n \"/%s%s\"\n % (self.project_code, self.tp_path))\n\n @property\n def tp_path(self):\n return (\n \"/%s%s\"\n % (self.dir_path,\n self.filename))\n\n def filter_data(self, qs):\n return (\n qs.filter(store__translation_project__project__code=self.project_code)\n .filter(store__tp_path__startswith=self.tp_path))\n\n @property\n def context_name(self):\n return \"/projects%s\" % self.project_path\n\n\nclass ProjectSetDataTool(RelatedTPsDataTool):\n group_by = (\"tp__project__code\", )\n cache_key_name = \"projects\"\n\n def get_root_child_path(self, child):\n return child[self.group_by[0]]\n\n @property\n def context_name(self):\n return \"ALL\"\n", "path": "pootle/apps/pootle_data/project_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom pootle.core.delegate import revision\n\nfrom .utils import RelatedStoresDataTool, RelatedTPsDataTool\n\n\nclass ProjectDataTool(RelatedTPsDataTool):\n \"\"\"Retrieves aggregate stats for a Project\"\"\"\n\n cache_key_name = \"project\"\n\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n\n @property\n def rev_cache_key(self):\n return revision.get(\n self.context.__class__)(self.context.directory).get(key=\"stats\")\n\n\nclass ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n cache_key_name = \"project_resource\"\n\n @property\n def project_path(self):\n return (\n \"/%s%s\"\n % (self.project_code, self.tp_path))\n\n @property\n def tp_path(self):\n return (\n \"/%s%s\"\n % (self.dir_path,\n self.filename))\n\n def filter_data(self, qs):\n return (\n qs.filter(store__translation_project__project__code=self.project_code)\n .filter(store__tp_path__startswith=self.tp_path))\n\n @property\n def context_name(self):\n return \"/projects%s\" % self.project_path\n\n\nclass ProjectSetDataTool(RelatedTPsDataTool):\n group_by = (\"tp__project__code\", )\n cache_key_name = \"projects\"\n\n def get_root_child_path(self, child):\n return child[self.group_by[0]]\n\n @property\n def context_name(self):\n return \"ALL\"\n", "path": "pootle/apps/pootle_data/project_data.py"}]} | 1,086 | 219 |
gh_patches_debug_38407 | rasdani/github-patches | git_diff | wagtail__wagtail-556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Search: Make update_index update all backends
Currently, it only updates the default backend. It should update all search backends.
--- END ISSUE ---
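One reading of "all backends" is every entry configured in the `WAGTAILSEARCH_BACKENDS` setting, falling back to the default backend when that setting is absent. A rough sketch of such a helper is shown below; it assumes the Django settings object and the `get_search_backend` import already used by this command, and is illustrative rather than a final implementation.

```python
from django.conf import settings

from wagtail.wagtailsearch import get_search_backend


def get_search_backends():
    """Yield (name, backend) pairs for every configured search backend."""
    if hasattr(settings, "WAGTAILSEARCH_BACKENDS"):
        for name in settings.WAGTAILSEARCH_BACKENDS.keys():
            yield name, get_search_backend(name)
    else:
        # No explicit configuration: fall back to the single default backend.
        yield "default", get_search_backend("default")
```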
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailsearch/management/commands/update_index.py`
Content:
```
1 from django.core.management.base import BaseCommand
2 from django.db import models
3
4 from wagtail.wagtailsearch import Indexed, get_search_backend
5
6
7 class Command(BaseCommand):
8 def handle(self, **options):
9 # Print info
10 self.stdout.write("Getting object list")
11
12 # Get list of indexed models
13 indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]
14
15 # Object set
16 object_set = {}
17
18 # Add all objects to object set and detect any duplicates
19 # Duplicates are caused when both a model and a derived model are indexed
20 # Eg, if BlogPost inherits from Page and both of these models are indexed
21 # If we were to add all objects from both models into the index, all the BlogPosts will have two entries
22 for model in indexed_models:
23 # Get toplevel content type
24 toplevel_content_type = model.indexed_get_toplevel_content_type()
25
26 # Loop through objects
27 for obj in model.get_indexed_objects():
28 # Get key for this object
29 key = toplevel_content_type + ':' + str(obj.pk)
30
31 # Check if this key already exists
32 if key in object_set:
33 # Conflict, work out who should get this space
34 # The object with the longest content type string gets the space
35 # Eg, "wagtailcore.Page-myapp.BlogPost" kicks out "wagtailcore.Page"
36 if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):
37 # Take the spot
38 object_set[key] = obj
39 else:
40 # Space free, take it
41 object_set[key] = obj
42
43 # Search backend
44 if 'backend' in options:
45 s = options['backend']
46 else:
47 s = get_search_backend()
48
49 # Reset the index
50 self.stdout.write("Reseting index")
51 s.reset_index()
52
53 # Add types
54 self.stdout.write("Adding types")
55 for model in indexed_models:
56 s.add_type(model)
57
58 # Add objects to index
59 self.stdout.write("Adding objects")
60 for result in s.add_bulk(object_set.values()):
61 self.stdout.write(result[0] + ' ' + str(result[1]))
62
63 # Refresh index
64 self.stdout.write("Refreshing index")
65 s.refresh_index()
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailsearch/management/commands/update_index.py b/wagtail/wagtailsearch/management/commands/update_index.py
--- a/wagtail/wagtailsearch/management/commands/update_index.py
+++ b/wagtail/wagtailsearch/management/commands/update_index.py
@@ -1,11 +1,22 @@
+from optparse import make_option
+
from django.core.management.base import BaseCommand
from django.db import models
+from django.conf import settings
from wagtail.wagtailsearch import Indexed, get_search_backend
+def get_search_backends():
+ if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):
+ for backend in settings.WAGTAILSEARCH_BACKENDS.keys():
+ yield backend, get_search_backend(backend)
+ else:
+ yield 'default', get_search_backend('default')
+
+
class Command(BaseCommand):
- def handle(self, **options):
+ def get_object_list(self):
# Print info
self.stdout.write("Getting object list")
@@ -40,26 +51,51 @@
# Space free, take it
object_set[key] = obj
- # Search backend
- if 'backend' in options:
- s = options['backend']
- else:
- s = get_search_backend()
+ return indexed_models, object_set.values()
+
+ def update_backend(self, backend, models, object_list, backend_name=''):
+ # Print info
+ self.stdout.write("Updating backend: " + backend_name)
+
+ # Get backend
+ if backend is None:
+ backend = get_search_backend(backend_name)
# Reset the index
- self.stdout.write("Reseting index")
- s.reset_index()
+ self.stdout.write(backend_name + ": Reseting index")
+ backend.reset_index()
# Add types
- self.stdout.write("Adding types")
- for model in indexed_models:
- s.add_type(model)
+ self.stdout.write(backend_name + ": Adding types")
+ for model in models:
+ backend.add_type(model)
# Add objects to index
- self.stdout.write("Adding objects")
- for result in s.add_bulk(object_set.values()):
+ self.stdout.write(backend_name + ": Adding objects")
+ for result in backend.add_bulk(object_list):
self.stdout.write(result[0] + ' ' + str(result[1]))
# Refresh index
- self.stdout.write("Refreshing index")
- s.refresh_index()
+ self.stdout.write(backend_name + ": Refreshing index")
+ backend.refresh_index()
+
+ option_list = BaseCommand.option_list + (
+ make_option('--backend',
+ action='store',
+ dest='backend_name',
+ default=False,
+ help="Specify a backend to update",
+ ),
+ )
+
+ def handle(self, **options):
+ # Get object list
+ models, object_list = self.get_object_list()
+
+ # Update backends
+ if 'backend_name' in options:
+ backend = dict(get_search_backends())[options['backend_name']]
+ self.update_backend(backend, models, object_list, backend_name=options['backend_name'])
+ else:
+ for backend_name, backend in get_search_backends():
+ self.update_backend(backend, models, object_list, backend_name=backend_name)
| {"golden_diff": "diff --git a/wagtail/wagtailsearch/management/commands/update_index.py b/wagtail/wagtailsearch/management/commands/update_index.py\n--- a/wagtail/wagtailsearch/management/commands/update_index.py\n+++ b/wagtail/wagtailsearch/management/commands/update_index.py\n@@ -1,11 +1,22 @@\n+from optparse import make_option\n+\n from django.core.management.base import BaseCommand\n from django.db import models\n+from django.conf import settings\n \n from wagtail.wagtailsearch import Indexed, get_search_backend\n \n \n+def get_search_backends():\n+ if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):\n+ for backend in settings.WAGTAILSEARCH_BACKENDS.keys():\n+ yield backend, get_search_backend(backend)\n+ else:\n+ yield 'default', get_search_backend('default')\n+\n+\n class Command(BaseCommand):\n- def handle(self, **options):\n+ def get_object_list(self):\n # Print info\n self.stdout.write(\"Getting object list\")\n \n@@ -40,26 +51,51 @@\n # Space free, take it\n object_set[key] = obj\n \n- # Search backend\n- if 'backend' in options:\n- s = options['backend']\n- else:\n- s = get_search_backend()\n+ return indexed_models, object_set.values()\n+\n+ def update_backend(self, backend, models, object_list, backend_name=''):\n+ # Print info\n+ self.stdout.write(\"Updating backend: \" + backend_name)\n+\n+ # Get backend\n+ if backend is None:\n+ backend = get_search_backend(backend_name)\n \n # Reset the index\n- self.stdout.write(\"Reseting index\")\n- s.reset_index()\n+ self.stdout.write(backend_name + \": Reseting index\")\n+ backend.reset_index()\n \n # Add types\n- self.stdout.write(\"Adding types\")\n- for model in indexed_models:\n- s.add_type(model)\n+ self.stdout.write(backend_name + \": Adding types\")\n+ for model in models:\n+ backend.add_type(model)\n \n # Add objects to index\n- self.stdout.write(\"Adding objects\")\n- for result in s.add_bulk(object_set.values()):\n+ self.stdout.write(backend_name + \": Adding objects\")\n+ for result in backend.add_bulk(object_list):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n \n # Refresh index\n- self.stdout.write(\"Refreshing index\")\n- s.refresh_index()\n+ self.stdout.write(backend_name + \": Refreshing index\")\n+ backend.refresh_index()\n+\n+ option_list = BaseCommand.option_list + (\n+ make_option('--backend',\n+ action='store',\n+ dest='backend_name',\n+ default=False,\n+ help=\"Specify a backend to update\",\n+ ),\n+ )\n+\n+ def handle(self, **options):\n+ # Get object list\n+ models, object_list = self.get_object_list()\n+\n+ # Update backends\n+ if 'backend_name' in options:\n+ backend = dict(get_search_backends())[options['backend_name']]\n+ self.update_backend(backend, models, object_list, backend_name=options['backend_name'])\n+ else:\n+ for backend_name, backend in get_search_backends():\n+ self.update_backend(backend, models, object_list, backend_name=backend_name)\n", "issue": "Search: Make update_index update all backends\nCurrently, it only updates the default backend. 
It should update all search backends.\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.db import models\n\nfrom wagtail.wagtailsearch import Indexed, get_search_backend\n\n\nclass Command(BaseCommand):\n def handle(self, **options):\n # Print info\n self.stdout.write(\"Getting object list\")\n\n # Get list of indexed models\n indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]\n\n # Object set\n object_set = {}\n\n # Add all objects to object set and detect any duplicates\n # Duplicates are caused when both a model and a derived model are indexed\n # Eg, if BlogPost inherits from Page and both of these models are indexed\n # If we were to add all objects from both models into the index, all the BlogPosts will have two entries\n for model in indexed_models:\n # Get toplevel content type\n toplevel_content_type = model.indexed_get_toplevel_content_type()\n\n # Loop through objects\n for obj in model.get_indexed_objects():\n # Get key for this object\n key = toplevel_content_type + ':' + str(obj.pk)\n\n # Check if this key already exists\n if key in object_set:\n # Conflict, work out who should get this space\n # The object with the longest content type string gets the space\n # Eg, \"wagtailcore.Page-myapp.BlogPost\" kicks out \"wagtailcore.Page\"\n if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):\n # Take the spot\n object_set[key] = obj\n else:\n # Space free, take it\n object_set[key] = obj\n\n # Search backend\n if 'backend' in options:\n s = options['backend']\n else:\n s = get_search_backend()\n\n # Reset the index\n self.stdout.write(\"Reseting index\")\n s.reset_index()\n\n # Add types\n self.stdout.write(\"Adding types\")\n for model in indexed_models:\n s.add_type(model)\n\n # Add objects to index\n self.stdout.write(\"Adding objects\")\n for result in s.add_bulk(object_set.values()):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n\n # Refresh index\n self.stdout.write(\"Refreshing index\")\n s.refresh_index()\n", "path": "wagtail/wagtailsearch/management/commands/update_index.py"}], "after_files": [{"content": "from optparse import make_option\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import models\nfrom django.conf import settings\n\nfrom wagtail.wagtailsearch import Indexed, get_search_backend\n\n\ndef get_search_backends():\n if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):\n for backend in settings.WAGTAILSEARCH_BACKENDS.keys():\n yield backend, get_search_backend(backend)\n else:\n yield 'default', get_search_backend('default')\n\n\nclass Command(BaseCommand):\n def get_object_list(self):\n # Print info\n self.stdout.write(\"Getting object list\")\n\n # Get list of indexed models\n indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]\n\n # Object set\n object_set = {}\n\n # Add all objects to object set and detect any duplicates\n # Duplicates are caused when both a model and a derived model are indexed\n # Eg, if BlogPost inherits from Page and both of these models are indexed\n # If we were to add all objects from both models into the index, all the BlogPosts will have two entries\n for model in indexed_models:\n # Get toplevel content type\n toplevel_content_type = model.indexed_get_toplevel_content_type()\n\n # Loop through objects\n for obj in model.get_indexed_objects():\n # Get key for this object\n key = toplevel_content_type + ':' + str(obj.pk)\n\n # Check if this key 
already exists\n if key in object_set:\n # Conflict, work out who should get this space\n # The object with the longest content type string gets the space\n # Eg, \"wagtailcore.Page-myapp.BlogPost\" kicks out \"wagtailcore.Page\"\n if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):\n # Take the spot\n object_set[key] = obj\n else:\n # Space free, take it\n object_set[key] = obj\n\n return indexed_models, object_set.values()\n\n def update_backend(self, backend, models, object_list, backend_name=''):\n # Print info\n self.stdout.write(\"Updating backend: \" + backend_name)\n\n # Get backend\n if backend is None:\n backend = get_search_backend(backend_name)\n\n # Reset the index\n self.stdout.write(backend_name + \": Reseting index\")\n backend.reset_index()\n\n # Add types\n self.stdout.write(backend_name + \": Adding types\")\n for model in models:\n backend.add_type(model)\n\n # Add objects to index\n self.stdout.write(backend_name + \": Adding objects\")\n for result in backend.add_bulk(object_list):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n\n # Refresh index\n self.stdout.write(backend_name + \": Refreshing index\")\n backend.refresh_index()\n\n option_list = BaseCommand.option_list + (\n make_option('--backend',\n action='store',\n dest='backend_name',\n default=False,\n help=\"Specify a backend to update\",\n ),\n )\n\n def handle(self, **options):\n # Get object list\n models, object_list = self.get_object_list()\n\n # Update backends\n if 'backend_name' in options:\n backend = dict(get_search_backends())[options['backend_name']]\n self.update_backend(backend, models, object_list, backend_name=options['backend_name'])\n else:\n for backend_name, backend in get_search_backends():\n self.update_backend(backend, models, object_list, backend_name=backend_name)\n", "path": "wagtail/wagtailsearch/management/commands/update_index.py"}]} | 924 | 747 |
gh_patches_debug_12742 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Speak to the legal team about updating our T&Cs for GDPR
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `portal/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2018, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django.contrib import admin
38 from django.contrib.auth.models import User
39 from django.contrib.auth.admin import UserAdmin
40
41
42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification
43
44
45 class ClassAdmin(admin.ModelAdmin):
46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
47 list_filter = ['teacher']
48 readonly_fields = ['teacher']
49
50
51 class SchoolAdmin(admin.ModelAdmin):
52 search_fields = ['name', 'country', 'postcode', 'town']
53 list_filter = ['postcode', 'country']
54
55
56 class StudentAdmin(admin.ModelAdmin):
57 search_fields = ['new_user__first_name', 'new_user__last_name']
58 list_filter = ['class_field', 'class_field__teacher']
59 readonly_fields = ['user', 'new_user']
60 raw_id_fields = ['class_field', 'pending_class_request']
61
62
63 class TeacherAdmin(admin.ModelAdmin):
64 search_fields = ['new_user__first_name', 'new_user__last_name']
65 list_filter = ['school']
66 readonly_fields = ['user', 'new_user']
67 raw_id_fields = ['school', 'pending_join_request']
68
69
70 class UserProfileAdmin(admin.ModelAdmin):
71 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
72 list_filter = ['user__date_joined']
73 list_display = ['user', 'joined_recently']
74 readonly_fields = ['user']
75
76
77 class EmailVerificationAdmin(admin.ModelAdmin):
78 search_fields = ['new_user']
79
80
81 UserAdmin.list_display += ('date_joined',)
82 UserAdmin.list_filter += ('date_joined',)
83
84
85 admin.site.register(Class, ClassAdmin)
86 admin.site.register(Student, StudentAdmin)
87 admin.site.register(Guardian)
88 admin.site.register(Teacher, TeacherAdmin)
89 admin.site.register(School, SchoolAdmin)
90 admin.site.unregister(User)
91 admin.site.register(User, UserAdmin)
92 admin.site.register(UserProfile, UserProfileAdmin)
93 admin.site.register(FrontPageNews)
94 admin.site.register(EmailVerification, EmailVerificationAdmin)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/portal/admin.py b/portal/admin.py
--- a/portal/admin.py
+++ b/portal/admin.py
@@ -68,14 +68,14 @@
class UserProfileAdmin(admin.ModelAdmin):
- search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']
list_filter = ['user__date_joined']
list_display = ['user', 'joined_recently']
readonly_fields = ['user']
class EmailVerificationAdmin(admin.ModelAdmin):
- search_fields = ['new_user']
+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']
UserAdmin.list_display += ('date_joined',)
| {"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -68,14 +68,14 @@\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n- search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n- search_fields = ['new_user']\n+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n \n \n UserAdmin.list_display += ('date_joined',)\n", "issue": "Speak to legal team about updating our T&Cs for GDPR\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2018, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n readonly_fields = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['class_field', 'pending_class_request']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['school', 'pending_join_request']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2018, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. 
You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n readonly_fields = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['class_field', 'pending_class_request']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['school', 'pending_join_request']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]} | 1,304 | 193 |
gh_patches_debug_22097 | rasdani/github-patches | git_diff | svthalia__concrexit-2199 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add filter/display of members-only value to document admin
### Is your feature request related to a problem? Please describe.
It is not really easy to see which documents are marked as members-only, and it is impossible to easily get a list of documents filtered by that true/false value.
### Describe the solution you'd like
I'd like to see more information about the documents in the admin page so that I do not have to open the detail page.
### Motivation
Easier to manage these files.
### Describe alternatives you've considered
The only alternative is not doing this.
### Additional context
#2084 could have been prevented.
--- END ISSUE ---
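For illustration, exposing the flag usually only requires adding the boolean field to `list_display` and `list_filter` on the relevant `ModelAdmin` classes. The sketch below shows this for one of the document admins, using the form and model names from the code further down; it is a hedged example, not the exact final patch.

```python
from django.contrib import admin

from documents import forms
from documents.models import AnnualDocument


@admin.register(AnnualDocument)
class AnnualDocumentAdmin(admin.ModelAdmin):
    """Annual documents with the members-only flag visible in the changelist."""

    form = forms.AnnualDocumentForm
    # Show the flag as a column and allow filtering on it, so members-only
    # documents can be listed without opening each detail page.
    list_display = ("__str__", "members_only")
    list_filter = ("created", "last_updated", "members_only")
```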
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/documents/admin.py`
Content:
```
1 """Registers admin interfaces for the documents module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4 from django.utils.translation import gettext_lazy as _
5
6 from documents import forms
7 from documents.models import (
8 AnnualDocument,
9 AssociationDocument,
10 EventDocument,
11 GeneralMeeting,
12 Minutes,
13 MiscellaneousDocument,
14 )
15 from documents.services import is_owner
16
17
18 class MinutesInline(admin.StackedInline):
19 """Inline for minutes of a general meeting."""
20
21 model = Minutes
22 form = forms.MinutesForm
23 extra = 0
24
25
26 @admin.register(GeneralMeeting)
27 class GeneralMeetingAdmin(ModelAdmin):
28 """Manage the general meetings."""
29
30 form = forms.GeneralMeetingForm
31 inlines = [
32 MinutesInline,
33 ]
34 list_filter = ("datetime",)
35
36
37 class LectureYearFilter(admin.SimpleListFilter):
38 """Filter the memberships on those started or ended in a lecture year."""
39
40 title = _("lecture year")
41 parameter_name = "lecture_year"
42
43 def lookups(self, request, model_admin):
44 if AnnualDocument.objects.count() > 0:
45 first_year = AnnualDocument.objects.order_by("year").first().year
46 last_year = AnnualDocument.objects.order_by("year").last().year
47
48 return [
49 (year, f"{year}-{year + 1}")
50 for year in range(last_year, first_year - 1, -1)
51 ]
52 return []
53
54 def queryset(self, request, queryset):
55 if not self.value():
56 return queryset
57
58 year = int(self.value())
59
60 return queryset.filter(year=year)
61
62
63 @admin.register(AnnualDocument)
64 class AnnualDocumentAdmin(ModelAdmin):
65 """Manage the annual documents."""
66
67 form = forms.AnnualDocumentForm
68 list_filter = (
69 LectureYearFilter,
70 "created",
71 "last_updated",
72 )
73
74
75 @admin.register(AssociationDocument)
76 class AssociationDocumentAdmin(ModelAdmin):
77 """Manage the association documents."""
78
79 form = forms.AssociationDocumentForm
80 list_filter = (
81 "created",
82 "last_updated",
83 )
84
85
86 @admin.register(EventDocument)
87 class EventDocumentAdmin(ModelAdmin):
88 """Manage the event documents."""
89
90 form = forms.EventDocumentForm
91 list_filter = (
92 "created",
93 "last_updated",
94 )
95
96 def has_change_permission(self, request, obj=None):
97 """Only allow access to the change form if the user is an owner."""
98 if obj is not None and not is_owner(request.member, obj):
99 return False
100 return super().has_change_permission(request, obj)
101
102 def has_delete_permission(self, request, obj=None):
103 """Only allow delete access if the user is an owner."""
104 if obj is not None and not is_owner(request.member, obj):
105 return False
106 return super().has_delete_permission(request, obj)
107
108
109 @admin.register(MiscellaneousDocument)
110 class MiscellaneousDocumentAdmin(ModelAdmin):
111 """Manage the miscellaneous documents."""
112
113 form = forms.MiscellaneousDocumentForm
114 list_filter = (
115 "created",
116 "last_updated",
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/documents/admin.py b/website/documents/admin.py
--- a/website/documents/admin.py
+++ b/website/documents/admin.py
@@ -69,6 +69,11 @@
LectureYearFilter,
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
@@ -80,6 +85,11 @@
list_filter = (
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
@@ -91,6 +101,11 @@
list_filter = (
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
def has_change_permission(self, request, obj=None):
@@ -114,4 +129,9 @@
list_filter = (
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
| {"golden_diff": "diff --git a/website/documents/admin.py b/website/documents/admin.py\n--- a/website/documents/admin.py\n+++ b/website/documents/admin.py\n@@ -69,6 +69,11 @@\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n \n@@ -80,6 +85,11 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n \n@@ -91,6 +101,11 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n def has_change_permission(self, request, obj=None):\n@@ -114,4 +129,9 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n", "issue": "Add filter/display of members-only value to document admin\n### Is your feature request related to a problem? Please describe.\r\nIt is not really issue to see which documents are marked as members only. And it is impossible to easily get a list with documents that have a true/false value.\r\n\r\n### Describe the solution you'd like\r\nI'd like to see more information about the documents in the admin page so that I do not have to open the detail page.\r\n\r\n### Motivation\r\nEasier to manage these files.\r\n\r\n### Describe alternatives you've considered\r\nThe only alternative is not doing this.\r\n\r\n### Additional context\r\n#2084 could have been prevented.\r\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the documents module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom documents import forms\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n EventDocument,\n GeneralMeeting,\n Minutes,\n MiscellaneousDocument,\n)\nfrom documents.services import is_owner\n\n\nclass MinutesInline(admin.StackedInline):\n \"\"\"Inline for minutes of a general meeting.\"\"\"\n\n model = Minutes\n form = forms.MinutesForm\n extra = 0\n\n\[email protected](GeneralMeeting)\nclass GeneralMeetingAdmin(ModelAdmin):\n \"\"\"Manage the general meetings.\"\"\"\n\n form = forms.GeneralMeetingForm\n inlines = [\n MinutesInline,\n ]\n list_filter = (\"datetime\",)\n\n\nclass LectureYearFilter(admin.SimpleListFilter):\n \"\"\"Filter the memberships on those started or ended in a lecture year.\"\"\"\n\n title = _(\"lecture year\")\n parameter_name = \"lecture_year\"\n\n def lookups(self, request, model_admin):\n if AnnualDocument.objects.count() > 0:\n first_year = AnnualDocument.objects.order_by(\"year\").first().year\n last_year = AnnualDocument.objects.order_by(\"year\").last().year\n\n return [\n (year, f\"{year}-{year + 1}\")\n for year in range(last_year, first_year - 1, -1)\n ]\n return []\n\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n\n year = int(self.value())\n\n return queryset.filter(year=year)\n\n\[email protected](AnnualDocument)\nclass AnnualDocumentAdmin(ModelAdmin):\n \"\"\"Manage the annual documents.\"\"\"\n\n form = forms.AnnualDocumentForm\n list_filter = (\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n )\n\n\[email protected](AssociationDocument)\nclass AssociationDocumentAdmin(ModelAdmin):\n \"\"\"Manage the association documents.\"\"\"\n\n form = forms.AssociationDocumentForm\n list_filter = (\n \"created\",\n 
\"last_updated\",\n )\n\n\[email protected](EventDocument)\nclass EventDocumentAdmin(ModelAdmin):\n \"\"\"Manage the event documents.\"\"\"\n\n form = forms.EventDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n )\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow delete access if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_delete_permission(request, obj)\n\n\[email protected](MiscellaneousDocument)\nclass MiscellaneousDocumentAdmin(ModelAdmin):\n \"\"\"Manage the miscellaneous documents.\"\"\"\n\n form = forms.MiscellaneousDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n )\n", "path": "website/documents/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the documents module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom documents import forms\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n EventDocument,\n GeneralMeeting,\n Minutes,\n MiscellaneousDocument,\n)\nfrom documents.services import is_owner\n\n\nclass MinutesInline(admin.StackedInline):\n \"\"\"Inline for minutes of a general meeting.\"\"\"\n\n model = Minutes\n form = forms.MinutesForm\n extra = 0\n\n\[email protected](GeneralMeeting)\nclass GeneralMeetingAdmin(ModelAdmin):\n \"\"\"Manage the general meetings.\"\"\"\n\n form = forms.GeneralMeetingForm\n inlines = [\n MinutesInline,\n ]\n list_filter = (\"datetime\",)\n\n\nclass LectureYearFilter(admin.SimpleListFilter):\n \"\"\"Filter the memberships on those started or ended in a lecture year.\"\"\"\n\n title = _(\"lecture year\")\n parameter_name = \"lecture_year\"\n\n def lookups(self, request, model_admin):\n if AnnualDocument.objects.count() > 0:\n first_year = AnnualDocument.objects.order_by(\"year\").first().year\n last_year = AnnualDocument.objects.order_by(\"year\").last().year\n\n return [\n (year, f\"{year}-{year + 1}\")\n for year in range(last_year, first_year - 1, -1)\n ]\n return []\n\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n\n year = int(self.value())\n\n return queryset.filter(year=year)\n\n\[email protected](AnnualDocument)\nclass AnnualDocumentAdmin(ModelAdmin):\n \"\"\"Manage the annual documents.\"\"\"\n\n form = forms.AnnualDocumentForm\n list_filter = (\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n \"members_only\",\n )\n list_display = (\n \"__str__\",\n \"members_only\",\n )\n\n\[email protected](AssociationDocument)\nclass AssociationDocumentAdmin(ModelAdmin):\n \"\"\"Manage the association documents.\"\"\"\n\n form = forms.AssociationDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n \"members_only\",\n )\n list_display = (\n \"__str__\",\n \"members_only\",\n )\n\n\[email protected](EventDocument)\nclass EventDocumentAdmin(ModelAdmin):\n \"\"\"Manage the event documents.\"\"\"\n\n form = forms.EventDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n \"members_only\",\n )\n list_display = (\n \"__str__\",\n \"members_only\",\n )\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the 
change form if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow delete access if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_delete_permission(request, obj)\n\n\[email protected](MiscellaneousDocument)\nclass MiscellaneousDocumentAdmin(ModelAdmin):\n \"\"\"Manage the miscellaneous documents.\"\"\"\n\n form = forms.MiscellaneousDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n \"members_only\",\n )\n list_display = (\n \"__str__\",\n \"members_only\",\n )\n", "path": "website/documents/admin.py"}]} | 1,294 | 273 |
gh_patches_debug_8038 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-302 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NumberPrompt doesn't accept retry value
## Version
v4.5
## Describe the bug
When you send an invalid number to a `NumberPrompt`, it sends out a retry prompt.
When attempting to send a 2nd response after being reprompted, you get a timeout error.
## To Reproduce
1. Create a `NumberPrompt` object
2. When it prompts you for a number, send in a non-numeric value (e.g. `"hello"`)
* this will trigger a retry prompt (e.g. `"You must enter a number."`)
3. Try sending in another value--no matter what type of value, you get a timeout error


## Expected behavior
To be able to send in a 2nd value when reprompted
## Additional context
```python
async def test_number_prompt_retry(self):
async def exec_test(turn_context: TurnContext) -> None:
dialog_context: DialogContext = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Enter a number."),
retry_prompt=Activity(
type=ActivityTypes.message, text="You must enter a number."
),
)
await dialog_context.prompt("NumberPrompt", options)
elif results.status == DialogTurnStatus.Complete:
number_result = results.result
await turn_context.send_activity(
MessageFactory.text(f"Bot received the number '{number_result}'.")
)
await convo_state.save_changes(turn_context)
adapter = TestAdapter(exec_test)
convo_state = ConversationState(MemoryStorage())
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
number_prompt = NumberPrompt(
dialog_id="NumberPrompt", validator=None, default_locale=Culture.English
)
dialogs.add(number_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Enter a number.")
# TODO: something is breaking in the validators or retry prompt
# where it does not accept the 2nd answer after reprompting the user
# for another value
step3 = await step2.send("hello")
step4 = await step3.assert_reply("You must enter a number.")
step5 = await step4.send("64")
await step5.assert_reply("Bot received the number '64'.")
```
[bug]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import Callable, Dict
5
6 from recognizers_number import recognize_number
7 from recognizers_text import Culture, ModelResult
8 from babel.numbers import parse_decimal
9
10 from botbuilder.core.turn_context import TurnContext
11 from botbuilder.schema import ActivityTypes
12
13 from .prompt import Prompt, PromptValidatorContext
14 from .prompt_options import PromptOptions
15 from .prompt_recognizer_result import PromptRecognizerResult
16
17
18 class NumberPrompt(Prompt):
19 # TODO: PromptValidator needs to be fixed
20 # Does not accept answer as intended (times out)
21 def __init__(
22 self,
23 dialog_id: str,
24 validator: Callable[[PromptValidatorContext], bool] = None,
25 default_locale: str = None,
26 ):
27 super(NumberPrompt, self).__init__(dialog_id, validator)
28 self.default_locale = default_locale
29
30 async def on_prompt(
31 self,
32 turn_context: TurnContext,
33 state: Dict[str, object],
34 options: PromptOptions,
35 is_retry: bool,
36 ):
37 if not turn_context:
38 raise TypeError("NumberPrompt.on_prompt(): turn_context cannot be None.")
39 if not options:
40 raise TypeError("NumberPrompt.on_prompt(): options cannot be None.")
41
42 if is_retry and options.retry_prompt is not None:
43 turn_context.send_activity(options.retry_prompt)
44 elif options.prompt is not None:
45 await turn_context.send_activity(options.prompt)
46
47 async def on_recognize(
48 self,
49 turn_context: TurnContext,
50 state: Dict[str, object],
51 options: PromptOptions,
52 ) -> PromptRecognizerResult:
53 if not turn_context:
54 raise TypeError("NumberPrompt.on_recognize(): turn_context cannot be None.")
55
56 result = PromptRecognizerResult()
57 if turn_context.activity.type == ActivityTypes.message:
58 message = turn_context.activity
59 culture = self._get_culture(turn_context)
60 results: [ModelResult] = recognize_number(message.text, culture)
61
62 if results:
63 result.succeeded = True
64 result.value = parse_decimal(
65 results[0].resolution["value"], locale=culture.replace("-", "_")
66 )
67
68 return result
69
70 def _get_culture(self, turn_context: TurnContext):
71 culture = (
72 turn_context.activity.locale
73 if turn_context.activity.locale
74 else self.default_locale
75 )
76
77 if not culture:
78 culture = Culture.English
79
80 return culture
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
@@ -40,7 +40,7 @@
raise TypeError("NumberPrompt.on_prompt(): options cannot be None.")
if is_retry and options.retry_prompt is not None:
- turn_context.send_activity(options.retry_prompt)
+ await turn_context.send_activity(options.retry_prompt)
elif options.prompt is not None:
await turn_context.send_activity(options.prompt)
| {"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n@@ -40,7 +40,7 @@\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n \n if is_retry and options.retry_prompt is not None:\n- turn_context.send_activity(options.retry_prompt)\n+ await turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n", "issue": "NumberPrompt doesn't accept retry value\n## Version\r\nv4.5\r\n\r\n## Describe the bug\r\nWhen you send an invalid number to a `NumberPrompt`, it sends out a retry prompt.\r\nWhen attempting to send a 2nd response after being reprompted, you get a timeout error.\r\n\r\n\r\n\r\n## To Reproduce\r\n1. Create a `NumberPrompt` object\r\n2. When it prompts you for a number, send in a non-numeric value (e.g. `\"hello\"`)\r\n * this will trigger a retry prompt (e.g. `\"You must enter a number.\"`)\r\n3. Try sending in another value--no matter what type of value, you get a timeout error\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n## Expected behavior\r\nTo be able to send in a 2nd value when reprompted\r\n\r\n## Additional context\r\n```python\r\nasync def test_number_prompt_retry(self):\r\n async def exec_test(turn_context: TurnContext) -> None:\r\n dialog_context: DialogContext = await dialogs.create_context(turn_context)\r\n\r\n results: DialogTurnResult = await dialog_context.continue_dialog()\r\n\r\n if results.status == DialogTurnStatus.Empty:\r\n options = PromptOptions(\r\n prompt=Activity(type=ActivityTypes.message, text=\"Enter a number.\"),\r\n retry_prompt=Activity(\r\n type=ActivityTypes.message, text=\"You must enter a number.\"\r\n ),\r\n )\r\n await dialog_context.prompt(\"NumberPrompt\", options)\r\n elif results.status == DialogTurnStatus.Complete:\r\n number_result = results.result\r\n await turn_context.send_activity(\r\n MessageFactory.text(f\"Bot received the number '{number_result}'.\")\r\n )\r\n\r\n await convo_state.save_changes(turn_context)\r\n\r\n adapter = TestAdapter(exec_test)\r\n\r\n convo_state = ConversationState(MemoryStorage())\r\n dialog_state = convo_state.create_property(\"dialogState\")\r\n dialogs = DialogSet(dialog_state)\r\n number_prompt = NumberPrompt(\r\n dialog_id=\"NumberPrompt\", validator=None, default_locale=Culture.English\r\n )\r\n dialogs.add(number_prompt)\r\n\r\n step1 = await adapter.send(\"hello\")\r\n step2 = await step1.assert_reply(\"Enter a number.\")\r\n # TODO: something is breaking in the validators or retry prompt\r\n # where it does not accept the 2nd answer after reprompting the user\r\n # for another value\r\n step3 = await step2.send(\"hello\")\r\n step4 = await step3.assert_reply(\"You must enter a number.\")\r\n step5 = await step4.send(\"64\")\r\n await step5.assert_reply(\"Bot received the number '64'.\")\r\n```\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import Callable, Dict\n\nfrom recognizers_number import recognize_number\nfrom recognizers_text import Culture, ModelResult\nfrom babel.numbers import parse_decimal\n\nfrom botbuilder.core.turn_context import TurnContext\nfrom botbuilder.schema import ActivityTypes\n\nfrom .prompt import Prompt, PromptValidatorContext\nfrom .prompt_options import PromptOptions\nfrom .prompt_recognizer_result import PromptRecognizerResult\n\n\nclass NumberPrompt(Prompt):\n # TODO: PromptValidator needs to be fixed\n # Does not accept answer as intended (times out)\n def __init__(\n self,\n dialog_id: str,\n validator: Callable[[PromptValidatorContext], bool] = None,\n default_locale: str = None,\n ):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n\n async def on_prompt(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n is_retry: bool,\n ):\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_prompt(): turn_context cannot be None.\")\n if not options:\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n\n if is_retry and options.retry_prompt is not None:\n turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n\n async def on_recognize(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n ) -> PromptRecognizerResult:\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_recognize(): turn_context cannot be None.\")\n\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n culture = self._get_culture(turn_context)\n results: [ModelResult] = recognize_number(message.text, culture)\n\n if results:\n result.succeeded = True\n result.value = parse_decimal(\n results[0].resolution[\"value\"], locale=culture.replace(\"-\", \"_\")\n )\n\n return result\n\n def _get_culture(self, turn_context: TurnContext):\n culture = (\n turn_context.activity.locale\n if turn_context.activity.locale\n else self.default_locale\n )\n\n if not culture:\n culture = Culture.English\n\n return culture\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import Callable, Dict\n\nfrom recognizers_number import recognize_number\nfrom recognizers_text import Culture, ModelResult\nfrom babel.numbers import parse_decimal\n\nfrom botbuilder.core.turn_context import TurnContext\nfrom botbuilder.schema import ActivityTypes\n\nfrom .prompt import Prompt, PromptValidatorContext\nfrom .prompt_options import PromptOptions\nfrom .prompt_recognizer_result import PromptRecognizerResult\n\n\nclass NumberPrompt(Prompt):\n # TODO: PromptValidator needs to be fixed\n # Does not accept answer as intended (times out)\n def __init__(\n self,\n dialog_id: str,\n validator: Callable[[PromptValidatorContext], bool] = None,\n default_locale: str = None,\n ):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n\n async def on_prompt(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n is_retry: bool,\n ):\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_prompt(): turn_context cannot be None.\")\n if not options:\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n\n if is_retry and options.retry_prompt is not None:\n await turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n\n async def on_recognize(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n ) -> PromptRecognizerResult:\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_recognize(): turn_context cannot be None.\")\n\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n culture = self._get_culture(turn_context)\n results: [ModelResult] = recognize_number(message.text, culture)\n\n if results:\n result.succeeded = True\n result.value = parse_decimal(\n results[0].resolution[\"value\"], locale=culture.replace(\"-\", \"_\")\n )\n\n return result\n\n def _get_culture(self, turn_context: TurnContext):\n culture = (\n turn_context.activity.locale\n if turn_context.activity.locale\n else self.default_locale\n )\n\n if not culture:\n culture = Culture.English\n\n return culture\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py"}]} | 1,610 | 161 |
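
The fix above is a single keyword: `send_activity` is a coroutine, and the retry branch called it without `await`, so the retry prompt was never actually delivered and the test adapter timed out waiting for it. Below is a minimal, self-contained illustration of that failure mode; `send_activity` and `on_prompt` here are simplified stand-ins, not the real botbuilder API.

```python
# Minimal sketch (plain asyncio, not the botbuilder API) of the bug class fixed
# above: calling a coroutine function without "await" only creates a coroutine
# object, so the retry message is never sent.
import asyncio

sent = []

async def send_activity(text: str) -> None:
    await asyncio.sleep(0)      # stand-in for the real network send
    sent.append(text)

async def on_prompt(is_retry: bool) -> None:
    if is_retry:
        send_activity("You must enter a number.")   # bug: coroutine never awaited
    else:
        await send_activity("Enter a number.")      # correct: awaited

async def main() -> None:
    await on_prompt(is_retry=False)
    await on_prompt(is_retry=True)
    print(sent)   # ['Enter a number.'] -- the retry prompt is silently dropped

asyncio.run(main())
```

At runtime Python only reports this as a `RuntimeWarning: coroutine ... was never awaited`, which is easy to miss in server logs, so the symptom users see is the downstream timeout rather than an error at the faulty line.
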
gh_patches_debug_24268 | rasdani/github-patches | git_diff | dmlc__gluon-nlp-832 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ATIS/SNIPS datasets and GLUE datasets don't appear in the website API doc
http://gluon-nlp.mxnet.io/api/modules/data.html
does not show the details of ATISDataset/SNIPSDataset and GlueCoLA, GlueSST2, GlueSTSB, GlueQQP, GlueRTE, GlueMNLI, GlueQNLI, GlueWNLI
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonnlp/data/__init__.py`
Content:
```
1 # coding: utf-8
2
3 # Licensed to the Apache Software Foundation (ASF) under one
4 # or more contributor license agreements. See the NOTICE file
5 # distributed with this work for additional information
6 # regarding copyright ownership. The ASF licenses this file
7 # to you under the Apache License, Version 2.0 (the
8 # "License"); you may not use this file except in compliance
9 # with the License. You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing,
14 # software distributed under the License is distributed on an
15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 # KIND, either express or implied. See the License for the
17 # specific language governing permissions and limitations
18 # under the License.
19
20 # pylint: disable=wildcard-import
21 """This module includes common utilities such as data readers and counter."""
22
23 from . import (batchify, candidate_sampler, conll, corpora, dataloader,
24 dataset, question_answering, registry, sampler, sentiment,
25 stream, transforms, translation, utils,
26 word_embedding_evaluation, intent_slot)
27 from .candidate_sampler import *
28 from .conll import *
29 from .glue import *
30 from .corpora import *
31 from .dataloader import *
32 from .dataset import *
33 from .question_answering import *
34 from .registry import *
35 from .sampler import *
36 from .sentiment import *
37 from .stream import *
38 from .transforms import *
39 from .translation import *
40 from .utils import *
41 from .word_embedding_evaluation import *
42 from .intent_slot import *
43
44 __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__
45 + dataset.__all__ + corpora.__all__ + sentiment.__all__ +
46 word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +
47 translation.__all__ + registry.__all__ + question_answering.__all__
48 + dataloader.__all__ + candidate_sampler.__all__)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gluonnlp/data/__init__.py b/src/gluonnlp/data/__init__.py
--- a/src/gluonnlp/data/__init__.py
+++ b/src/gluonnlp/data/__init__.py
@@ -23,7 +23,7 @@
from . import (batchify, candidate_sampler, conll, corpora, dataloader,
dataset, question_answering, registry, sampler, sentiment,
stream, transforms, translation, utils,
- word_embedding_evaluation, intent_slot)
+ word_embedding_evaluation, intent_slot, glue)
from .candidate_sampler import *
from .conll import *
from .glue import *
@@ -42,7 +42,8 @@
from .intent_slot import *
__all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__
- + dataset.__all__ + corpora.__all__ + sentiment.__all__ +
- word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +
- translation.__all__ + registry.__all__ + question_answering.__all__
- + dataloader.__all__ + candidate_sampler.__all__)
+ + dataset.__all__ + corpora.__all__ + sentiment.__all__
+ + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__
+ + translation.__all__ + registry.__all__ + question_answering.__all__
+ + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__
+ + glue.__all__)
| {"golden_diff": "diff --git a/src/gluonnlp/data/__init__.py b/src/gluonnlp/data/__init__.py\n--- a/src/gluonnlp/data/__init__.py\n+++ b/src/gluonnlp/data/__init__.py\n@@ -23,7 +23,7 @@\n from . import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n- word_embedding_evaluation, intent_slot)\n+ word_embedding_evaluation, intent_slot, glue)\n from .candidate_sampler import *\n from .conll import *\n from .glue import *\n@@ -42,7 +42,8 @@\n from .intent_slot import *\n \n __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n- + dataset.__all__ + corpora.__all__ + sentiment.__all__ +\n- word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +\n- translation.__all__ + registry.__all__ + question_answering.__all__\n- + dataloader.__all__ + candidate_sampler.__all__)\n+ + dataset.__all__ + corpora.__all__ + sentiment.__all__\n+ + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__\n+ + translation.__all__ + registry.__all__ + question_answering.__all__\n+ + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__\n+ + glue.__all__)\n", "issue": "ATIS/SNIPS datasets and GLUE datasets don't appear in the website API doc \nhttp://gluon-nlp.mxnet.io/api/modules/data.html\r\n\r\ndoes not show the details of ATISDataset/SNIPSDataset and GlueCoLA, GlueSST2, GlueSTSB, GlueQQP, GlueRTE, GlueMNLI, GlueQNLI, GlueWNLI\r\n\n", "before_files": [{"content": "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=wildcard-import\n\"\"\"This module includes common utilities such as data readers and counter.\"\"\"\n\nfrom . 
import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n word_embedding_evaluation, intent_slot)\nfrom .candidate_sampler import *\nfrom .conll import *\nfrom .glue import *\nfrom .corpora import *\nfrom .dataloader import *\nfrom .dataset import *\nfrom .question_answering import *\nfrom .registry import *\nfrom .sampler import *\nfrom .sentiment import *\nfrom .stream import *\nfrom .transforms import *\nfrom .translation import *\nfrom .utils import *\nfrom .word_embedding_evaluation import *\nfrom .intent_slot import *\n\n__all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n + dataset.__all__ + corpora.__all__ + sentiment.__all__ +\n word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +\n translation.__all__ + registry.__all__ + question_answering.__all__\n + dataloader.__all__ + candidate_sampler.__all__)\n", "path": "src/gluonnlp/data/__init__.py"}], "after_files": [{"content": "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=wildcard-import\n\"\"\"This module includes common utilities such as data readers and counter.\"\"\"\n\nfrom . import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n word_embedding_evaluation, intent_slot, glue)\nfrom .candidate_sampler import *\nfrom .conll import *\nfrom .glue import *\nfrom .corpora import *\nfrom .dataloader import *\nfrom .dataset import *\nfrom .question_answering import *\nfrom .registry import *\nfrom .sampler import *\nfrom .sentiment import *\nfrom .stream import *\nfrom .transforms import *\nfrom .translation import *\nfrom .utils import *\nfrom .word_embedding_evaluation import *\nfrom .intent_slot import *\n\n__all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n + dataset.__all__ + corpora.__all__ + sentiment.__all__\n + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__\n + translation.__all__ + registry.__all__ + question_answering.__all__\n + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__\n + glue.__all__)\n", "path": "src/gluonnlp/data/__init__.py"}]} | 888 | 347 |
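
The patch above works because the public API of `gluonnlp.data` is whatever the package-level `__all__` advertises: `glue` and `intent_slot` were star-imported, but their `__all__` lists were never merged into the package's, so tooling that enumerates `__all__` (which the missing doc pages imply here) never saw `ATISDataset`, `GlueCoLA` and friends. A rough, runnable sketch of that mechanism, with fake submodules built via `types.ModuleType` instead of the real gluon-nlp code:

```python
# Toy model of the fix: the package-level __all__ is the concatenation of the
# submodules' __all__ lists, and anything missing from that sum is invisible
# to tools that enumerate the package's public names.
import types

glue = types.ModuleType("glue")
glue.GlueCoLA = type("GlueCoLA", (), {})
glue.__all__ = ["GlueCoLA"]

intent_slot = types.ModuleType("intent_slot")
intent_slot.ATISDataset = type("ATISDataset", (), {})
intent_slot.__all__ = ["ATISDataset"]

corpora = types.ModuleType("corpora")
corpora.__all__ = []

# Before the patch: glue / intent_slot never contributed to the package __all__.
all_before = [] + corpora.__all__
# After the patch: their names are part of the public API listing.
all_after = [] + corpora.__all__ + intent_slot.__all__ + glue.__all__

print("documented before:", all_before)   # []
print("documented after: ", all_after)    # ['ATISDataset', 'GlueCoLA']
```
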
gh_patches_debug_29639 | rasdani/github-patches | git_diff | frappe__frappe-2519 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move app installation to background
Long installs cause the installation request to time out, which leads to broken installs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/desk/page/applications/applications.py`
Content:
```
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # MIT License. See license.txt
3
4 from __future__ import unicode_literals
5 import frappe
6 import frappe.utils
7 import frappe.installer
8 import frappe.sessions
9 import subprocess
10 import os
11 import json
12 from frappe import _
13 from distutils.spawn import find_executable
14
15 @frappe.whitelist()
16 def get_app_list():
17 """Get list of all apps with properties, installed, category from hooks and
18 `frappe/data/app_listing/` if an entry exists"""
19 out = {}
20 installed = frappe.get_installed_apps()
21 for app in frappe.get_all_apps(True):
22 app_hooks = frappe.get_hooks(app_name=app)
23
24 if app not in installed and app_hooks.get('hide_in_installer'):
25 continue
26
27 out[app] = {}
28 for key in ("app_name", "app_title", "app_description", "app_icon",
29 "app_publisher", "app_version", "app_url", "app_color"):
30 val = app_hooks.get(key) or []
31 out[app][key] = val[0] if len(val) else ""
32
33 if app in installed:
34 out[app]["installed"] = 1
35
36 for app_from_list in get_app_listing().values():
37 if app_from_list.app_name in out:
38 out[app_from_list.app_name].update(app_from_list)
39 else:
40 if not frappe.conf.disallow_app_listing:
41 out[app_from_list.app_name] = app_from_list
42
43 return out
44
45 def get_app_listing():
46 """Get apps listed in `frappe/data/app_listing/`"""
47 apps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing')
48 out = {}
49 for app in os.listdir(apps_listing_dir):
50 if app.endswith(".json"):
51 with open(os.path.join(apps_listing_dir, app)) as f:
52 out[app[:-5]] = frappe._dict(json.load(f))
53 return out
54
55 @frappe.whitelist()
56 def install_app(name):
57 """Install app, if app is not installed in local environment, install it via git url in
58 `frappe/data/app_listing/`"""
59 frappe.only_for("System Manager")
60
61 if name not in frappe.get_all_apps(True):
62 if not frappe.conf.disallow_app_listing:
63 get_app(name)
64 frappe.cache().delete_value(["app_hooks"])
65 # reload sys.path
66 import site
67 reload(site)
68 else:
69 # will only come via direct API
70 frappe.throw("Listing app not allowed")
71
72 app_hooks = frappe.get_hooks(app_name=name)
73 if app_hooks.get('hide_in_installer'):
74 frappe.throw(_("You cannot install this app"))
75
76 frappe.publish_realtime("install_app_progress", {"status": _("Installing App {0}").format(name)},
77 user=frappe.session.user)
78
79 frappe.installer.install_app(name)
80
81 frappe.publish_realtime("install_app_progress", {"status": _("{0} Installed").format(name)},
82 user=frappe.session.user)
83
84 def get_app(name):
85 """Get app using git clone and install it in bench environment"""
86 app_listing = get_app_listing()
87 if name not in app_listing:
88 frappe.throw(_("Unknown app {0}").format(name))
89 raise frappe.ValidationError
90
91 frappe.publish_realtime("install_app_progress", {"status": _("Downloading App {0}").format(name)},
92 user=frappe.session.user)
93
94 args = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']]
95
96 try:
97 subprocess.check_call(args, cwd=frappe.utils.get_bench_path(),
98 stderr=subprocess.STDOUT)
99 return "okay"
100 except subprocess.CalledProcessError as e:
101 frappe.msgprint("<b>" + " ".join(args) + "</b>")
102 frappe.msgprint(e.output)
103 return e.output
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/desk/page/applications/applications.py b/frappe/desk/page/applications/applications.py
--- a/frappe/desk/page/applications/applications.py
+++ b/frappe/desk/page/applications/applications.py
@@ -11,6 +11,7 @@
import json
from frappe import _
from distutils.spawn import find_executable
+from frappe.utils.background_jobs import enqueue
@frappe.whitelist()
def get_app_list():
@@ -73,6 +74,12 @@
if app_hooks.get('hide_in_installer'):
frappe.throw(_("You cannot install this app"))
+ enqueue('frappe.desk.page.applications.applications.start_install', name=name)
+
+ frappe.msgprint(_('Queued for install'))
+
+
+def start_install(name):
frappe.publish_realtime("install_app_progress", {"status": _("Installing App {0}").format(name)},
user=frappe.session.user)
@@ -81,6 +88,20 @@
frappe.publish_realtime("install_app_progress", {"status": _("{0} Installed").format(name)},
user=frappe.session.user)
[email protected]()
+def remove_app(name):
+ """Remove installed app"""
+ frappe.only_for("System Manager")
+
+ if name in frappe.get_installed_apps():
+ enqueue('frappe.desk.page.applications.applications.start_remove', name=name)
+
+ frappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name)))
+
+def start_remove(name):
+ frappe.installer.remove_app(app_name=name, yes=True)
+ frappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name)))
+
def get_app(name):
"""Get app using git clone and install it in bench environment"""
app_listing = get_app_listing()
| {"golden_diff": "diff --git a/frappe/desk/page/applications/applications.py b/frappe/desk/page/applications/applications.py\n--- a/frappe/desk/page/applications/applications.py\n+++ b/frappe/desk/page/applications/applications.py\n@@ -11,6 +11,7 @@\n import json\n from frappe import _\n from distutils.spawn import find_executable\n+from frappe.utils.background_jobs import enqueue\n \n @frappe.whitelist()\n def get_app_list():\n@@ -73,6 +74,12 @@\n \tif app_hooks.get('hide_in_installer'):\n \t\tfrappe.throw(_(\"You cannot install this app\"))\n \n+\tenqueue('frappe.desk.page.applications.applications.start_install', name=name)\n+\n+\tfrappe.msgprint(_('Queued for install'))\n+\n+\n+def start_install(name):\n \tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n \t\tuser=frappe.session.user)\n \n@@ -81,6 +88,20 @@\n \tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n \t\tuser=frappe.session.user)\n \[email protected]()\n+def remove_app(name):\n+\t\"\"\"Remove installed app\"\"\"\n+\tfrappe.only_for(\"System Manager\")\n+\n+\tif name in frappe.get_installed_apps():\n+\t\tenqueue('frappe.desk.page.applications.applications.start_remove', name=name)\n+\n+\tfrappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name)))\n+\n+def start_remove(name):\n+\tfrappe.installer.remove_app(app_name=name, yes=True)\n+\tfrappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name)))\n+\n def get_app(name):\n \t\"\"\"Get app using git clone and install it in bench environment\"\"\"\n \tapp_listing = get_app_listing()\n", "issue": "Move app installation to background\nLong installs timeout the installation of the app and leads to broken installs.\n\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport frappe.utils\nimport frappe.installer\nimport frappe.sessions\nimport subprocess\nimport os\nimport json\nfrom frappe import _\nfrom distutils.spawn import find_executable\n\[email protected]()\ndef get_app_list():\n\t\"\"\"Get list of all apps with properties, installed, category from hooks and\n\t`frappe/data/app_listing/` if an entry exists\"\"\"\n\tout = {}\n\tinstalled = frappe.get_installed_apps()\n\tfor app in frappe.get_all_apps(True):\n\t\tapp_hooks = frappe.get_hooks(app_name=app)\n\n\t\tif app not in installed and app_hooks.get('hide_in_installer'):\n\t\t\tcontinue\n\n\t\tout[app] = {}\n\t\tfor key in (\"app_name\", \"app_title\", \"app_description\", \"app_icon\",\n\t\t\t\"app_publisher\", \"app_version\", \"app_url\", \"app_color\"):\n\t\t\t val = app_hooks.get(key) or []\n\t\t\t out[app][key] = val[0] if len(val) else \"\"\n\n\t\tif app in installed:\n\t\t\tout[app][\"installed\"] = 1\n\n\tfor app_from_list in get_app_listing().values():\n\t\tif app_from_list.app_name in out:\n\t\t\tout[app_from_list.app_name].update(app_from_list)\n\t\telse:\n\t\t\tif not frappe.conf.disallow_app_listing:\n\t\t\t\tout[app_from_list.app_name] = app_from_list\n\n\treturn out\n\ndef get_app_listing():\n\t\"\"\"Get apps listed in `frappe/data/app_listing/`\"\"\"\n\tapps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing')\n\tout = {}\n\tfor app in os.listdir(apps_listing_dir):\n\t\tif app.endswith(\".json\"):\n\t\t\twith open(os.path.join(apps_listing_dir, app)) as f:\n\t\t\t\tout[app[:-5]] = frappe._dict(json.load(f))\n\treturn out\n\[email protected]()\ndef install_app(name):\n\t\"\"\"Install app, if app is not installed in local environment, install it via git url in\n\t`frappe/data/app_listing/`\"\"\"\n\tfrappe.only_for(\"System Manager\")\n\n\tif name not in frappe.get_all_apps(True):\n\t\tif not frappe.conf.disallow_app_listing:\n\t\t\tget_app(name)\n\t\t\tfrappe.cache().delete_value([\"app_hooks\"])\n\t\t\t# reload sys.path\n\t\t\timport site\n\t\t\treload(site)\n\t\telse:\n\t\t\t# will only come via direct API\n\t\t\tfrappe.throw(\"Listing app not allowed\")\n\n\tapp_hooks = frappe.get_hooks(app_name=name)\n\tif app_hooks.get('hide_in_installer'):\n\t\tfrappe.throw(_(\"You cannot install this app\"))\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\tfrappe.installer.install_app(name)\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n\t\tuser=frappe.session.user)\n\ndef get_app(name):\n\t\"\"\"Get app using git clone and install it in bench environment\"\"\"\n\tapp_listing = get_app_listing()\n\tif name not in app_listing:\n\t\tfrappe.throw(_(\"Unknown app {0}\").format(name))\n\t\traise frappe.ValidationError\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Downloading App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\targs = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']]\n\n\ttry:\n\t\tsubprocess.check_call(args, cwd=frappe.utils.get_bench_path(),\n\t\t\tstderr=subprocess.STDOUT)\n\t\treturn \"okay\"\n\texcept subprocess.CalledProcessError as e:\n\t\tfrappe.msgprint(\"<b>\" + \" \".join(args) + \"</b>\")\n\t\tfrappe.msgprint(e.output)\n\t\treturn e.output\n", "path": "frappe/desk/page/applications/applications.py"}], "after_files": [{"content": "# Copyright (c) 2015, 
Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport frappe.utils\nimport frappe.installer\nimport frappe.sessions\nimport subprocess\nimport os\nimport json\nfrom frappe import _\nfrom distutils.spawn import find_executable\nfrom frappe.utils.background_jobs import enqueue\n\[email protected]()\ndef get_app_list():\n\t\"\"\"Get list of all apps with properties, installed, category from hooks and\n\t`frappe/data/app_listing/` if an entry exists\"\"\"\n\tout = {}\n\tinstalled = frappe.get_installed_apps()\n\tfor app in frappe.get_all_apps(True):\n\t\tapp_hooks = frappe.get_hooks(app_name=app)\n\n\t\tif app not in installed and app_hooks.get('hide_in_installer'):\n\t\t\tcontinue\n\n\t\tout[app] = {}\n\t\tfor key in (\"app_name\", \"app_title\", \"app_description\", \"app_icon\",\n\t\t\t\"app_publisher\", \"app_version\", \"app_url\", \"app_color\"):\n\t\t\t val = app_hooks.get(key) or []\n\t\t\t out[app][key] = val[0] if len(val) else \"\"\n\n\t\tif app in installed:\n\t\t\tout[app][\"installed\"] = 1\n\n\tfor app_from_list in get_app_listing().values():\n\t\tif app_from_list.app_name in out:\n\t\t\tout[app_from_list.app_name].update(app_from_list)\n\t\telse:\n\t\t\tif not frappe.conf.disallow_app_listing:\n\t\t\t\tout[app_from_list.app_name] = app_from_list\n\n\treturn out\n\ndef get_app_listing():\n\t\"\"\"Get apps listed in `frappe/data/app_listing/`\"\"\"\n\tapps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing')\n\tout = {}\n\tfor app in os.listdir(apps_listing_dir):\n\t\tif app.endswith(\".json\"):\n\t\t\twith open(os.path.join(apps_listing_dir, app)) as f:\n\t\t\t\tout[app[:-5]] = frappe._dict(json.load(f))\n\treturn out\n\[email protected]()\ndef install_app(name):\n\t\"\"\"Install app, if app is not installed in local environment, install it via git url in\n\t`frappe/data/app_listing/`\"\"\"\n\tfrappe.only_for(\"System Manager\")\n\n\tif name not in frappe.get_all_apps(True):\n\t\tif not frappe.conf.disallow_app_listing:\n\t\t\tget_app(name)\n\t\t\tfrappe.cache().delete_value([\"app_hooks\"])\n\t\t\t# reload sys.path\n\t\t\timport site\n\t\t\treload(site)\n\t\telse:\n\t\t\t# will only come via direct API\n\t\t\tfrappe.throw(\"Listing app not allowed\")\n\n\tapp_hooks = frappe.get_hooks(app_name=name)\n\tif app_hooks.get('hide_in_installer'):\n\t\tfrappe.throw(_(\"You cannot install this app\"))\n\n\tenqueue('frappe.desk.page.applications.applications.start_install', name=name)\n\n\tfrappe.msgprint(_('Queued for install'))\n\n\ndef start_install(name):\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\tfrappe.installer.install_app(name)\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n\t\tuser=frappe.session.user)\n\[email protected]()\ndef remove_app(name):\n\t\"\"\"Remove installed app\"\"\"\n\tfrappe.only_for(\"System Manager\")\n\n\tif name in frappe.get_installed_apps():\n\t\tenqueue('frappe.desk.page.applications.applications.start_remove', name=name)\n\n\tfrappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name)))\n\ndef start_remove(name):\n\tfrappe.installer.remove_app(app_name=name, yes=True)\n\tfrappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name)))\n\ndef get_app(name):\n\t\"\"\"Get app using git clone and install it in bench 
environment\"\"\"\n\tapp_listing = get_app_listing()\n\tif name not in app_listing:\n\t\tfrappe.throw(_(\"Unknown app {0}\").format(name))\n\t\traise frappe.ValidationError\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Downloading App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\targs = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']]\n\n\ttry:\n\t\tsubprocess.check_call(args, cwd=frappe.utils.get_bench_path(),\n\t\t\tstderr=subprocess.STDOUT)\n\t\treturn \"okay\"\n\texcept subprocess.CalledProcessError as e:\n\t\tfrappe.msgprint(\"<b>\" + \" \".join(args) + \"</b>\")\n\t\tfrappe.msgprint(e.output)\n\t\treturn e.output\n", "path": "frappe/desk/page/applications/applications.py"}]} | 1,381 | 421 |
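
The change above applies a standard rule for web backends: never run a long task inside the HTTP request that triggered it; enqueue it, return immediately, and let a worker report progress (here via `frappe.publish_realtime`). The following stand-alone sketch shows that shape with a thread and a queue standing in for Frappe's background-job worker; the names are illustrative, not Frappe's API.

```python
# Stand-alone sketch of "enqueue and return": the request handler only records
# the job, so it cannot hit a gateway timeout no matter how long the install
# takes, and the worker does the slow part later.
import queue
import threading
import time

jobs: "queue.Queue[str]" = queue.Queue()

def install_app(name: str) -> str:
    """What the whitelisted endpoint now does: enqueue and acknowledge."""
    jobs.put(name)
    return f"Queued for install: {name}"

def worker() -> None:
    while True:
        name = jobs.get()
        time.sleep(0.2)                    # pretend this is the long install
        print(f"{name} installed")         # the real code publishes progress here
        jobs.task_done()

threading.Thread(target=worker, daemon=True).start()
print(install_app("erpnext"))              # returns immediately
jobs.join()                                # only so the demo waits for the worker
```
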
gh_patches_debug_1748 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-750 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in the recognize_google() method
In mycroft/stt/__init__.py, line 74:
Replacing the mistyped 's' parameter with self.lang fixed the problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/stt/__init__.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17 from abc import ABCMeta, abstractmethod
18
19 from speech_recognition import Recognizer
20
21 from mycroft.api import STTApi
22 from mycroft.configuration import ConfigurationManager
23 from mycroft.util.log import getLogger
24
25 __author__ = "jdorleans"
26
27 LOG = getLogger("STT")
28
29
30 class STT(object):
31 __metaclass__ = ABCMeta
32
33 def __init__(self):
34 config_core = ConfigurationManager.get()
35 self.lang = str(self.init_language(config_core))
36 config_stt = config_core.get("stt", {})
37 self.config = config_stt.get(config_stt.get("module"), {})
38 self.credential = self.config.get("credential", {})
39 self.recognizer = Recognizer()
40
41 @staticmethod
42 def init_language(config_core):
43 langs = config_core.get("lang", "en-US").split("-")
44 return langs[0].lower() + "-" + langs[1].upper()
45
46 @abstractmethod
47 def execute(self, audio, language=None):
48 pass
49
50
51 class TokenSTT(STT):
52 __metaclass__ = ABCMeta
53
54 def __init__(self):
55 super(TokenSTT, self).__init__()
56 self.token = str(self.credential.get("token"))
57
58
59 class BasicSTT(STT):
60 __metaclass__ = ABCMeta
61
62 def __init__(self):
63 super(BasicSTT, self).__init__()
64 self.username = str(self.credential.get("username"))
65 self.password = str(self.credential.get("password"))
66
67
68 class GoogleSTT(TokenSTT):
69 def __init__(self):
70 super(GoogleSTT, self).__init__()
71
72 def execute(self, audio, language=None):
73 self.lang = language or self.lang
74 return self.recognizer.recognize_google(audio, self.token, s)
75
76
77 class WITSTT(TokenSTT):
78 def __init__(self):
79 super(WITSTT, self).__init__()
80
81 def execute(self, audio, language=None):
82 LOG.warn("WITSTT language should be configured at wit.ai settings.")
83 return self.recognizer.recognize_wit(audio, self.token)
84
85
86 class IBMSTT(BasicSTT):
87 def __init__(self):
88 super(IBMSTT, self).__init__()
89
90 def execute(self, audio, language=None):
91 self.lang = language or self.lang
92 return self.recognizer.recognize_ibm(audio, self.username,
93 self.password, self.lang)
94
95
96 class MycroftSTT(STT):
97 def __init__(self):
98 super(MycroftSTT, self).__init__()
99 self.api = STTApi()
100
101 def execute(self, audio, language=None):
102 self.lang = language or self.lang
103 return self.api.stt(audio.get_flac_data(), self.lang, 1)[0]
104
105
106 class STTFactory(object):
107 CLASSES = {
108 "mycroft": MycroftSTT,
109 "google": GoogleSTT,
110 "wit": WITSTT,
111 "ibm": IBMSTT
112 }
113
114 @staticmethod
115 def create():
116 config = ConfigurationManager.get().get("stt", {})
117 module = config.get("module", "mycroft")
118 clazz = STTFactory.CLASSES.get(module)
119 return clazz()
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/stt/__init__.py b/mycroft/stt/__init__.py
--- a/mycroft/stt/__init__.py
+++ b/mycroft/stt/__init__.py
@@ -71,7 +71,7 @@
def execute(self, audio, language=None):
self.lang = language or self.lang
- return self.recognizer.recognize_google(audio, self.token, s)
+ return self.recognizer.recognize_google(audio, self.token, self.lang)
class WITSTT(TokenSTT):
| {"golden_diff": "diff --git a/mycroft/stt/__init__.py b/mycroft/stt/__init__.py\n--- a/mycroft/stt/__init__.py\n+++ b/mycroft/stt/__init__.py\n@@ -71,7 +71,7 @@\n \n def execute(self, audio, language=None):\n self.lang = language or self.lang\n- return self.recognizer.recognize_google(audio, self.token, s)\n+ return self.recognizer.recognize_google(audio, self.token, self.lang)\n \n \n class WITSTT(TokenSTT):\n", "issue": "Typing error in recognize_google() methode\nIn mycroft/stt/\\_\\_init\\_\\_.py line 74 :\r\nReplacing mistyped 's' parameter by self.lang fixed the problem.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\nfrom abc import ABCMeta, abstractmethod\n\nfrom speech_recognition import Recognizer\n\nfrom mycroft.api import STTApi\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.util.log import getLogger\n\n__author__ = \"jdorleans\"\n\nLOG = getLogger(\"STT\")\n\n\nclass STT(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n config_core = ConfigurationManager.get()\n self.lang = str(self.init_language(config_core))\n config_stt = config_core.get(\"stt\", {})\n self.config = config_stt.get(config_stt.get(\"module\"), {})\n self.credential = self.config.get(\"credential\", {})\n self.recognizer = Recognizer()\n\n @staticmethod\n def init_language(config_core):\n langs = config_core.get(\"lang\", \"en-US\").split(\"-\")\n return langs[0].lower() + \"-\" + langs[1].upper()\n\n @abstractmethod\n def execute(self, audio, language=None):\n pass\n\n\nclass TokenSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(TokenSTT, self).__init__()\n self.token = str(self.credential.get(\"token\"))\n\n\nclass BasicSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(BasicSTT, self).__init__()\n self.username = str(self.credential.get(\"username\"))\n self.password = str(self.credential.get(\"password\"))\n\n\nclass GoogleSTT(TokenSTT):\n def __init__(self):\n super(GoogleSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_google(audio, self.token, s)\n\n\nclass WITSTT(TokenSTT):\n def __init__(self):\n super(WITSTT, self).__init__()\n\n def execute(self, audio, language=None):\n LOG.warn(\"WITSTT language should be configured at wit.ai settings.\")\n return self.recognizer.recognize_wit(audio, self.token)\n\n\nclass IBMSTT(BasicSTT):\n def __init__(self):\n super(IBMSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_ibm(audio, self.username,\n self.password, self.lang)\n\n\nclass MycroftSTT(STT):\n def __init__(self):\n super(MycroftSTT, self).__init__()\n self.api = STTApi()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return 
self.api.stt(audio.get_flac_data(), self.lang, 1)[0]\n\n\nclass STTFactory(object):\n CLASSES = {\n \"mycroft\": MycroftSTT,\n \"google\": GoogleSTT,\n \"wit\": WITSTT,\n \"ibm\": IBMSTT\n }\n\n @staticmethod\n def create():\n config = ConfigurationManager.get().get(\"stt\", {})\n module = config.get(\"module\", \"mycroft\")\n clazz = STTFactory.CLASSES.get(module)\n return clazz()\n", "path": "mycroft/stt/__init__.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\nfrom abc import ABCMeta, abstractmethod\n\nfrom speech_recognition import Recognizer\n\nfrom mycroft.api import STTApi\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.util.log import getLogger\n\n__author__ = \"jdorleans\"\n\nLOG = getLogger(\"STT\")\n\n\nclass STT(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n config_core = ConfigurationManager.get()\n self.lang = str(self.init_language(config_core))\n config_stt = config_core.get(\"stt\", {})\n self.config = config_stt.get(config_stt.get(\"module\"), {})\n self.credential = self.config.get(\"credential\", {})\n self.recognizer = Recognizer()\n\n @staticmethod\n def init_language(config_core):\n langs = config_core.get(\"lang\", \"en-US\").split(\"-\")\n return langs[0].lower() + \"-\" + langs[1].upper()\n\n @abstractmethod\n def execute(self, audio, language=None):\n pass\n\n\nclass TokenSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(TokenSTT, self).__init__()\n self.token = str(self.credential.get(\"token\"))\n\n\nclass BasicSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(BasicSTT, self).__init__()\n self.username = str(self.credential.get(\"username\"))\n self.password = str(self.credential.get(\"password\"))\n\n\nclass GoogleSTT(TokenSTT):\n def __init__(self):\n super(GoogleSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_google(audio, self.token, self.lang)\n\n\nclass WITSTT(TokenSTT):\n def __init__(self):\n super(WITSTT, self).__init__()\n\n def execute(self, audio, language=None):\n LOG.warn(\"WITSTT language should be configured at wit.ai settings.\")\n return self.recognizer.recognize_wit(audio, self.token)\n\n\nclass IBMSTT(BasicSTT):\n def __init__(self):\n super(IBMSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_ibm(audio, self.username,\n self.password, self.lang)\n\n\nclass MycroftSTT(STT):\n def __init__(self):\n super(MycroftSTT, self).__init__()\n self.api = STTApi()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.api.stt(audio.get_flac_data(), self.lang, 1)[0]\n\n\nclass STTFactory(object):\n CLASSES = {\n \"mycroft\": MycroftSTT,\n \"google\": GoogleSTT,\n \"wit\": 
WITSTT,\n \"ibm\": IBMSTT\n }\n\n @staticmethod\n def create():\n config = ConfigurationManager.get().get(\"stt\", {})\n module = config.get(\"module\", \"mycroft\")\n clazz = STTFactory.CLASSES.get(module)\n return clazz()\n", "path": "mycroft/stt/__init__.py"}]} | 1,436 | 123 |
gh_patches_debug_38147 | rasdani/github-patches | git_diff | WeblateOrg__weblate-8675 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Freezing in certain strings
### Describe the issue
Hi.
We just updated Weblate to 4.15.1, and our instance is hanging when we access certain strings, weird strings (that should be ignored when creating the PO files, I know).
Instance logs sometimes show this:
```
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
```
This is an example of a string that causes the issue:
https://github.com/freebsd/freebsd-doc-translate/blob/main/documentation/content/es/articles/serial-uart/_index.po#L38-L52
```
#. type: Plain text
#: documentation/content/en/articles/serial-uart/_index.adoc:48
msgid "'''"
msgstr "'''"
```
Postgres gets stuck in SELECT queries.
Do you know if there is something we can do here?
Regards.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
Go to any string like this:
```
#. type: Plain text
#: documentation/content/en/articles/serial-uart/_index.adoc:48
msgid "'''"
msgstr "'''"
```
### Expected behavior
_No response_
### Screenshots
_No response_
### Exception traceback
```pytb
Only this:
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
```
### How do you run Weblate?
weblate.org service
### Weblate versions
`4.15.1`
We have updated docker containers from `4.10.1`.
### Weblate deploy checks
_No response_
### Additional context
_No response_
--- END ISSUE ---
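For context on why strings like `'''` are pathological: PostgreSQL's pg_trgm extension only builds trigrams from alphanumeric characters, so a search term with no alphanumerics yields an empty trigram set, the `%` similarity operator cannot be answered from the GIN index, and the SELECT degenerates into a very slow scan. A minimal sketch of one possible mitigation, falling back to a plain case-insensitive lookup for such terms, is shown below; the helper and field names are illustrative assumptions, not code from the repository:

```python
def needs_fallback(term) -> bool:
    # pg_trgm ignores non-alphanumeric characters, so a term without any
    # alphanumerics produces no trigrams and cannot use the trigram index.
    return isinstance(term, str) and not any(char.isalnum() for char in term)


def search_filter(term):
    # Use the trigram-backed "search" lookup normally, but fall back to a
    # plain ILIKE-style lookup (icontains) for terms such as "'''".
    if needs_fallback(term):
        return {"source__icontains": term}
    return {"source__search": term}
```

The same check can be applied inside the custom `search`, `substring` and `ilike` lookups defined in `weblate/utils/db.py` below.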
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/utils/db.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 """Database specific code to extend Django."""
6
7 from django.db import connection, models
8 from django.db.models import Case, IntegerField, Sum, When
9 from django.db.models.lookups import PatternLookup
10
11 ESCAPED = frozenset(".\\+*?[^]$(){}=!<>|:-")
12
13 PG_TRGM = "CREATE INDEX {0}_{1}_fulltext ON trans_{0} USING GIN ({1} gin_trgm_ops {2})"
14 PG_DROP = "DROP INDEX {0}_{1}_fulltext"
15
16 MY_FTX = "CREATE FULLTEXT INDEX {0}_{1}_fulltext ON trans_{0}({1})"
17 MY_DROP = "ALTER TABLE trans_{0} DROP INDEX {0}_{1}_fulltext"
18
19
20 def conditional_sum(value=1, **cond):
21 """Wrapper to generate SUM on boolean/enum values."""
22 return Sum(Case(When(then=value, **cond), default=0, output_field=IntegerField()))
23
24
25 def using_postgresql():
26 return connection.vendor == "postgresql"
27
28
29 def adjust_similarity_threshold(value: float):
30 """
31 Adjusts pg_trgm.similarity_threshold for the % operator.
32
33 Ideally we would use directly similarity() in the search, but that doesn't seem
34 to use index, while using % does.
35 """
36 if not using_postgresql():
37 return
38 with connection.cursor() as cursor:
39 # The SELECT has to be executed first as othervise the trgm extension
40 # might not yet be loaded and GUC setting not possible.
41 if not hasattr(connection, "weblate_similarity"):
42 cursor.execute("SELECT show_limit()")
43 connection.weblate_similarity = cursor.fetchone()[0]
44 # Change setting only for reasonably big difference
45 if abs(connection.weblate_similarity - value) > 0.01:
46 cursor.execute("SELECT set_limit(%s)", [value])
47 connection.weblate_similarity = value
48
49
50 class PostgreSQLSearchLookup(PatternLookup):
51 lookup_name = "search"
52 param_pattern = "%s"
53
54 def as_sql(self, qn, connection):
55 lhs, lhs_params = self.process_lhs(qn, connection)
56 rhs, rhs_params = self.process_rhs(qn, connection)
57 params = lhs_params + rhs_params
58 return f"{lhs} %% {rhs} = true", params
59
60
61 class MySQLSearchLookup(models.Lookup):
62 lookup_name = "search"
63
64 def as_sql(self, compiler, connection):
65 lhs, lhs_params = self.process_lhs(compiler, connection)
66 rhs, rhs_params = self.process_rhs(compiler, connection)
67 params = lhs_params + rhs_params
68 return f"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)", params
69
70
71 class PostgreSQLSubstringLookup(PatternLookup):
72 """
73 Case insensitive substring lookup.
74
75 This is essentially same as icontains in Django, but utilizes ILIKE
76 operator which can use pg_trgm index.
77 """
78
79 lookup_name = "substring"
80
81 def as_sql(self, compiler, connection):
82 lhs, lhs_params = self.process_lhs(compiler, connection)
83 rhs, rhs_params = self.process_rhs(compiler, connection)
84 params = lhs_params + rhs_params
85 return f"{lhs} ILIKE {rhs}", params
86
87
88 class PostgreSQLILikeLookup(PostgreSQLSubstringLookup):
89 """
90 Case insensitive string lookup.
91
92 This is essentially same as iexact in Django, but utilizes ILIKE
93 operator which can use pg_trgm index.
94 """
95
96 lookup_name = "ilike"
97 param_pattern = "%s"
98
99
100 def re_escape(pattern):
101 """Escape for use in database regexp match.
102
103 This is based on re.escape, but that one escapes too much.
104 """
105 string = list(pattern)
106 for i, char in enumerate(pattern):
107 if char == "\000":
108 string[i] = "\\000"
109 elif char in ESCAPED:
110 string[i] = "\\" + char
111 return "".join(string)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/utils/db.py b/weblate/utils/db.py
--- a/weblate/utils/db.py
+++ b/weblate/utils/db.py
@@ -6,7 +6,7 @@
from django.db import connection, models
from django.db.models import Case, IntegerField, Sum, When
-from django.db.models.lookups import PatternLookup
+from django.db.models.lookups import IContains, IExact, PatternLookup
ESCAPED = frozenset(".\\+*?[^]$(){}=!<>|:-")
@@ -47,13 +47,27 @@
connection.weblate_similarity = value
-class PostgreSQLSearchLookup(PatternLookup):
+class PostgreSQLFallbackLookup(PatternLookup):
+ def __init__(self, lhs, rhs):
+ self.orig_lhs = lhs
+ self.orig_rhs = rhs
+ super().__init__(lhs, rhs)
+
+ def needs_fallback(self):
+ return isinstance(self.orig_rhs, str) and not any(
+ char.isalnum() for char in self.orig_rhs
+ )
+
+
+class PostgreSQLSearchLookup(PostgreSQLFallbackLookup):
lookup_name = "search"
param_pattern = "%s"
- def as_sql(self, qn, connection):
- lhs, lhs_params = self.process_lhs(qn, connection)
- rhs, rhs_params = self.process_rhs(qn, connection)
+ def as_sql(self, compiler, connection):
+ if self.needs_fallback():
+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)
+ lhs, lhs_params = self.process_lhs(compiler, connection)
+ rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
return f"{lhs} %% {rhs} = true", params
@@ -68,7 +82,7 @@
return f"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)", params
-class PostgreSQLSubstringLookup(PatternLookup):
+class PostgreSQLSubstringLookup(PostgreSQLFallbackLookup):
"""
Case insensitive substring lookup.
@@ -79,6 +93,8 @@
lookup_name = "substring"
def as_sql(self, compiler, connection):
+ if self.needs_fallback():
+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
@@ -96,6 +112,11 @@
lookup_name = "ilike"
param_pattern = "%s"
+ def as_sql(self, compiler, connection):
+ if self.needs_fallback():
+ return IExact(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)
+ return super().as_sql(compiler, connection)
+
def re_escape(pattern):
"""Escape for use in database regexp match.
| {"golden_diff": "diff --git a/weblate/utils/db.py b/weblate/utils/db.py\n--- a/weblate/utils/db.py\n+++ b/weblate/utils/db.py\n@@ -6,7 +6,7 @@\n \n from django.db import connection, models\n from django.db.models import Case, IntegerField, Sum, When\n-from django.db.models.lookups import PatternLookup\n+from django.db.models.lookups import IContains, IExact, PatternLookup\n \n ESCAPED = frozenset(\".\\\\+*?[^]$(){}=!<>|:-\")\n \n@@ -47,13 +47,27 @@\n connection.weblate_similarity = value\n \n \n-class PostgreSQLSearchLookup(PatternLookup):\n+class PostgreSQLFallbackLookup(PatternLookup):\n+ def __init__(self, lhs, rhs):\n+ self.orig_lhs = lhs\n+ self.orig_rhs = rhs\n+ super().__init__(lhs, rhs)\n+\n+ def needs_fallback(self):\n+ return isinstance(self.orig_rhs, str) and not any(\n+ char.isalnum() for char in self.orig_rhs\n+ )\n+\n+\n+class PostgreSQLSearchLookup(PostgreSQLFallbackLookup):\n lookup_name = \"search\"\n param_pattern = \"%s\"\n \n- def as_sql(self, qn, connection):\n- lhs, lhs_params = self.process_lhs(qn, connection)\n- rhs, rhs_params = self.process_rhs(qn, connection)\n+ def as_sql(self, compiler, connection):\n+ if self.needs_fallback():\n+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n+ lhs, lhs_params = self.process_lhs(compiler, connection)\n+ rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} %% {rhs} = true\", params\n \n@@ -68,7 +82,7 @@\n return f\"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)\", params\n \n \n-class PostgreSQLSubstringLookup(PatternLookup):\n+class PostgreSQLSubstringLookup(PostgreSQLFallbackLookup):\n \"\"\"\n Case insensitive substring lookup.\n \n@@ -79,6 +93,8 @@\n lookup_name = \"substring\"\n \n def as_sql(self, compiler, connection):\n+ if self.needs_fallback():\n+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n@@ -96,6 +112,11 @@\n lookup_name = \"ilike\"\n param_pattern = \"%s\"\n \n+ def as_sql(self, compiler, connection):\n+ if self.needs_fallback():\n+ return IExact(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n+ return super().as_sql(compiler, connection)\n+\n \n def re_escape(pattern):\n \"\"\"Escape for use in database regexp match.\n", "issue": "Freezing in certain strings\n### Describe the issue\r\n\r\nHi.\r\n\r\nWe just updated Weblate to 4.15.1, and our instance is hanging when we access certain strings, weird strings (that should be ignored when creating the PO files, I know).\r\n\r\nInstance logs, sometimes show this:\r\n```\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n```\r\n\r\nThis is an example of the string that cause the issue:\r\n\r\nhttps://github.com/freebsd/freebsd-doc-translate/blob/main/documentation/content/es/articles/serial-uart/_index.po#L38-L52\r\n\r\n```\r\n#. 
type: Plain text\r\n#: documentation/content/en/articles/serial-uart/_index.adoc:48\r\nmsgid \"'''\"\r\nmsgstr \"'''\"\r\n```\r\n\r\npostgres be stuck in selects.\r\n\r\nDo you know if there is something we can do here?\r\n\r\nRegards.\r\n\r\n### I already tried\r\n\r\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\r\n- [X] I've searched for similar issues in this repository.\r\n\r\n### Steps to reproduce the behavior\r\n\r\nGo to any string like this:\r\n\r\n```\r\n#. type: Plain text\r\n#: documentation/content/en/articles/serial-uart/_index.adoc:48\r\nmsgid \"'''\"\r\nmsgstr \"'''\"\r\n```\r\n\r\n### Expected behavior\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Exception traceback\r\n\r\n```pytb\r\nOnly this:\r\n\r\n\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n```\r\n\r\n\r\n### How do you run Weblate?\r\n\r\nweblate.org service\r\n\r\n### Weblate versions\r\n\r\n`4.15.1`\r\nWe have updated docker containers from `4.10.1`.\r\n\r\n### Weblate deploy checks\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n\"\"\"Database specific code to extend Django.\"\"\"\n\nfrom django.db import connection, models\nfrom django.db.models import Case, IntegerField, Sum, When\nfrom django.db.models.lookups import PatternLookup\n\nESCAPED = frozenset(\".\\\\+*?[^]$(){}=!<>|:-\")\n\nPG_TRGM = \"CREATE INDEX {0}_{1}_fulltext ON trans_{0} USING GIN ({1} gin_trgm_ops {2})\"\nPG_DROP = \"DROP INDEX {0}_{1}_fulltext\"\n\nMY_FTX = \"CREATE FULLTEXT INDEX {0}_{1}_fulltext ON trans_{0}({1})\"\nMY_DROP = \"ALTER TABLE trans_{0} DROP INDEX {0}_{1}_fulltext\"\n\n\ndef conditional_sum(value=1, **cond):\n \"\"\"Wrapper to generate SUM on boolean/enum values.\"\"\"\n return Sum(Case(When(then=value, **cond), default=0, output_field=IntegerField()))\n\n\ndef using_postgresql():\n return connection.vendor == \"postgresql\"\n\n\ndef adjust_similarity_threshold(value: float):\n \"\"\"\n Adjusts pg_trgm.similarity_threshold for the % operator.\n\n Ideally we would use directly similarity() in the search, but that doesn't seem\n to use index, while using % does.\n \"\"\"\n if not using_postgresql():\n return\n with connection.cursor() as cursor:\n # The SELECT has to be executed first as othervise the trgm extension\n # might not yet be loaded and GUC setting not possible.\n if not hasattr(connection, \"weblate_similarity\"):\n cursor.execute(\"SELECT show_limit()\")\n connection.weblate_similarity = cursor.fetchone()[0]\n # Change setting only for reasonably big difference\n if abs(connection.weblate_similarity - value) > 0.01:\n cursor.execute(\"SELECT set_limit(%s)\", [value])\n connection.weblate_similarity = value\n\n\nclass PostgreSQLSearchLookup(PatternLookup):\n lookup_name = \"search\"\n param_pattern = \"%s\"\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} %% {rhs} = true\", params\n\n\nclass MySQLSearchLookup(models.Lookup):\n lookup_name = \"search\"\n\n def as_sql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, 
rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)\", params\n\n\nclass PostgreSQLSubstringLookup(PatternLookup):\n \"\"\"\n Case insensitive substring lookup.\n\n This is essentially same as icontains in Django, but utilizes ILIKE\n operator which can use pg_trgm index.\n \"\"\"\n\n lookup_name = \"substring\"\n\n def as_sql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} ILIKE {rhs}\", params\n\n\nclass PostgreSQLILikeLookup(PostgreSQLSubstringLookup):\n \"\"\"\n Case insensitive string lookup.\n\n This is essentially same as iexact in Django, but utilizes ILIKE\n operator which can use pg_trgm index.\n \"\"\"\n\n lookup_name = \"ilike\"\n param_pattern = \"%s\"\n\n\ndef re_escape(pattern):\n \"\"\"Escape for use in database regexp match.\n\n This is based on re.escape, but that one escapes too much.\n \"\"\"\n string = list(pattern)\n for i, char in enumerate(pattern):\n if char == \"\\000\":\n string[i] = \"\\\\000\"\n elif char in ESCAPED:\n string[i] = \"\\\\\" + char\n return \"\".join(string)\n", "path": "weblate/utils/db.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n\"\"\"Database specific code to extend Django.\"\"\"\n\nfrom django.db import connection, models\nfrom django.db.models import Case, IntegerField, Sum, When\nfrom django.db.models.lookups import IContains, IExact, PatternLookup\n\nESCAPED = frozenset(\".\\\\+*?[^]$(){}=!<>|:-\")\n\nPG_TRGM = \"CREATE INDEX {0}_{1}_fulltext ON trans_{0} USING GIN ({1} gin_trgm_ops {2})\"\nPG_DROP = \"DROP INDEX {0}_{1}_fulltext\"\n\nMY_FTX = \"CREATE FULLTEXT INDEX {0}_{1}_fulltext ON trans_{0}({1})\"\nMY_DROP = \"ALTER TABLE trans_{0} DROP INDEX {0}_{1}_fulltext\"\n\n\ndef conditional_sum(value=1, **cond):\n \"\"\"Wrapper to generate SUM on boolean/enum values.\"\"\"\n return Sum(Case(When(then=value, **cond), default=0, output_field=IntegerField()))\n\n\ndef using_postgresql():\n return connection.vendor == \"postgresql\"\n\n\ndef adjust_similarity_threshold(value: float):\n \"\"\"\n Adjusts pg_trgm.similarity_threshold for the % operator.\n\n Ideally we would use directly similarity() in the search, but that doesn't seem\n to use index, while using % does.\n \"\"\"\n if not using_postgresql():\n return\n with connection.cursor() as cursor:\n # The SELECT has to be executed first as othervise the trgm extension\n # might not yet be loaded and GUC setting not possible.\n if not hasattr(connection, \"weblate_similarity\"):\n cursor.execute(\"SELECT show_limit()\")\n connection.weblate_similarity = cursor.fetchone()[0]\n # Change setting only for reasonably big difference\n if abs(connection.weblate_similarity - value) > 0.01:\n cursor.execute(\"SELECT set_limit(%s)\", [value])\n connection.weblate_similarity = value\n\n\nclass PostgreSQLFallbackLookup(PatternLookup):\n def __init__(self, lhs, rhs):\n self.orig_lhs = lhs\n self.orig_rhs = rhs\n super().__init__(lhs, rhs)\n\n def needs_fallback(self):\n return isinstance(self.orig_rhs, str) and not any(\n char.isalnum() for char in self.orig_rhs\n )\n\n\nclass PostgreSQLSearchLookup(PostgreSQLFallbackLookup):\n lookup_name = \"search\"\n param_pattern = \"%s\"\n\n def as_sql(self, compiler, connection):\n if self.needs_fallback():\n 
return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} %% {rhs} = true\", params\n\n\nclass MySQLSearchLookup(models.Lookup):\n lookup_name = \"search\"\n\n def as_sql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)\", params\n\n\nclass PostgreSQLSubstringLookup(PostgreSQLFallbackLookup):\n \"\"\"\n Case insensitive substring lookup.\n\n This is essentially same as icontains in Django, but utilizes ILIKE\n operator which can use pg_trgm index.\n \"\"\"\n\n lookup_name = \"substring\"\n\n def as_sql(self, compiler, connection):\n if self.needs_fallback():\n return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} ILIKE {rhs}\", params\n\n\nclass PostgreSQLILikeLookup(PostgreSQLSubstringLookup):\n \"\"\"\n Case insensitive string lookup.\n\n This is essentially same as iexact in Django, but utilizes ILIKE\n operator which can use pg_trgm index.\n \"\"\"\n\n lookup_name = \"ilike\"\n param_pattern = \"%s\"\n\n def as_sql(self, compiler, connection):\n if self.needs_fallback():\n return IExact(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n return super().as_sql(compiler, connection)\n\n\ndef re_escape(pattern):\n \"\"\"Escape for use in database regexp match.\n\n This is based on re.escape, but that one escapes too much.\n \"\"\"\n string = list(pattern)\n for i, char in enumerate(pattern):\n if char == \"\\000\":\n string[i] = \"\\\\000\"\n elif char in ESCAPED:\n string[i] = \"\\\\\" + char\n return \"\".join(string)\n", "path": "weblate/utils/db.py"}]} | 2,034 | 655 |
gh_patches_debug_39507 | rasdani/github-patches | git_diff | Nitrate__Nitrate-1106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop RPM package build completely
Major reasons and considerations:
- reduce the effort needed to maintain the builds
- make it easier to pin the dependencies
- make it clear how to install and distribute via container images
AC:
- [x] Remove from CI
- [ ] Remove the Fedora Copr project
- [x] Refactor the Containerfile to build images directly from the source tree
- [x] Update README and documentation to remove the content about RPM packages
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `container/init.py`
Content:
```
1 #!/usr/bin/python3
2
3 import logging
4 import os
5 import time
6
7 logging.basicConfig(
8 level=logging.INFO,
9 format='%(asctime)s %(levelname)s %(name)s: %(message)s'
10 )
11 logger = logging.getLogger('entrypoint')
12
13 import django
14 django.setup()
15
16 from django.contrib.auth.models import User
17 from django.core.management import call_command
18 from django.db import connection
19
20
21 def create_superuser():
22 username = os.environ.get('NITRATE_SUPERUSER_USERNAME')
23 password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')
24 email = os.environ.get('NITRATE_SUPERUSER_EMAIL')
25
26 if not (username and password and email):
27 logger.info(
28 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '
29 'Skip creating a superuser.'
30 )
31 return
32
33 try:
34 if User.objects.filter(username=username, email=email, is_superuser=True).exists():
35 logger.info('Superuser %s has been created.', username)
36 return
37 except: # noqa
38 pass
39
40 try:
41 User.objects.create_superuser(username, email=email, password=password)
42 logger.info('Superuser %s is created successfully.', username)
43 except Exception as e:
44 logger.warning('Failed to create superuser %s: %s', username, e)
45 logger.warning('Please check if the database is initialized properly.')
46
47
48 def set_default_permissions():
49 if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):
50 try:
51 call_command('setdefaultperms')
52 logger.info('Default groups are created and permissions are set to groups properly.')
53 except Exception as e:
54 logger.warning('Failed to run command setdefaultperms: %s', e)
55 logger.warning('Please check if the database is initialized properly.')
56 else:
57 logger.info(
58 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. '
59 'Skip creating default groups and granting permissions to specific group.'
60 )
61
62
63 def migrate_db():
64 if os.environ.get('NITRATE_MIGRATE_DB'):
65 try:
66 call_command('migrate')
67 logger.info('Database is migrated successfully.')
68 except Exception as e:
69 logger.warning('Failed to migrate the database: %s', e)
70 else:
71 logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')
72
73
74 def wait_for_db():
75 while 1:
76 try:
77 connection.cursor()
78 except: # noqa
79 logger.debug('Failed to connect to database. Sleep for a while and try again ...')
80 time.sleep(0.5)
81 else:
82 break
83
84
85 if __name__ == '__main__':
86 wait_for_db()
87 migrate_db()
88 create_superuser()
89 set_default_permissions()
90
```
Path: `contrib/scripts/make-release.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import re
4 import argparse
5 import subprocess
6 from pathlib import Path
7
8 from datetime import datetime
9 from typing import Tuple
10 from pygit2 import Commit, Repository
11
12
13 def extract_short_log(commit: Commit) -> Tuple[str, None or str]:
14 lines = commit.message.split('\n')
15 subject = lines[0]
16 match = re.search(r'\((#\d+)\)$', subject)
17 return subject, match.groups()[0] if match else None
18
19
20 def generate_changelog(args: argparse.Namespace):
21 repo: Repository = Repository(args.repo or '.')
22 if args.since_version:
23 release_tag = repo.revparse_single(args.since_version)
24 else:
25 release_tag = repo.revparse_single(repo.describe().split('-')[0])
26
27 walker = repo.walk(repo.head.target)
28 walker.hide(release_tag.id)
29 logs = []
30 found_issue_keys = []
31
32 for commit in walker:
33 subject, issue_key = extract_short_log(commit)
34 if issue_key is not None:
35 found_issue_keys.append(issue_key)
36 subject = subject.replace(issue_key, f'`{issue_key}`_')
37 logs.append(f'* {subject}')
38
39 logs.append('')
40 found_issue_keys.sort()
41 for item in found_issue_keys:
42 logs.append(f'.. _{item}: https://github.com/Nitrate/Nitrate/issues/{item[1:]}')
43
44 return '\n'.join(logs)
45
46
47 def validate_version(value):
48 if value.startswith('v'):
49 raise argparse.ArgumentTypeError('Version should not be prefixed with v.')
50 return value
51
52
53 parser = argparse.ArgumentParser()
54 parser.add_argument('--repo', help='Path to git repository.')
55 parser.add_argument('--since-version', required=False,
56 type=validate_version,
57 help='Collect commits since this version.')
58 parser.add_argument('new_version', metavar='NEW_VERSION',
59 type=validate_version,
60 help='The version to be released.')
61
62 args = parser.parse_args()
63 new_version = args.new_version
64
65 Path('VERSION.txt').unlink()
66 Path('VERSION.txt').write_text(new_version, "utf-8")
67
68 template = Path('contrib/scripts/release-notes.tmpl.rst').read_text("utf-8")
69 Path(f'docs/source/releases/{new_version}.rst').write_text(
70 template.format(
71 new_version=new_version,
72 doc_ref=new_version,
73 title_marker=len(new_version) * '=',
74 change_logs=generate_changelog(args),
75 release_date=datetime.now().strftime('%b %d, %Y')
76 ),
77 "utf-8",
78 )
79
80 readme_md = Path('container/README.md')
81 content = readme_md.read_text("utf-8")
82 readme_md.unlink()
83 readme_md.write_text(
84 re.sub(r'quay.io/nitrate/nitrate:\d+\.\d+(\.\d+)?',
85 f'quay.io/nitrate/nitrate:{new_version}',
86 content),
87 "utf-8",
88 )
89
90 subprocess.check_call([
91 'rpmdev-bumpspec',
92 '-n', new_version,
93 '-c', f'Built for version {new_version}',
94 'python-nitrate-tcms.spec'
95 ])
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/container/init.py b/container/init.py
deleted file mode 100755
--- a/container/init.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python3
-
-import logging
-import os
-import time
-
-logging.basicConfig(
- level=logging.INFO,
- format='%(asctime)s %(levelname)s %(name)s: %(message)s'
-)
-logger = logging.getLogger('entrypoint')
-
-import django
-django.setup()
-
-from django.contrib.auth.models import User
-from django.core.management import call_command
-from django.db import connection
-
-
-def create_superuser():
- username = os.environ.get('NITRATE_SUPERUSER_USERNAME')
- password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')
- email = os.environ.get('NITRATE_SUPERUSER_EMAIL')
-
- if not (username and password and email):
- logger.info(
- 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '
- 'Skip creating a superuser.'
- )
- return
-
- try:
- if User.objects.filter(username=username, email=email, is_superuser=True).exists():
- logger.info('Superuser %s has been created.', username)
- return
- except: # noqa
- pass
-
- try:
- User.objects.create_superuser(username, email=email, password=password)
- logger.info('Superuser %s is created successfully.', username)
- except Exception as e:
- logger.warning('Failed to create superuser %s: %s', username, e)
- logger.warning('Please check if the database is initialized properly.')
-
-
-def set_default_permissions():
- if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):
- try:
- call_command('setdefaultperms')
- logger.info('Default groups are created and permissions are set to groups properly.')
- except Exception as e:
- logger.warning('Failed to run command setdefaultperms: %s', e)
- logger.warning('Please check if the database is initialized properly.')
- else:
- logger.info(
- 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. '
- 'Skip creating default groups and granting permissions to specific group.'
- )
-
-
-def migrate_db():
- if os.environ.get('NITRATE_MIGRATE_DB'):
- try:
- call_command('migrate')
- logger.info('Database is migrated successfully.')
- except Exception as e:
- logger.warning('Failed to migrate the database: %s', e)
- else:
- logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')
-
-
-def wait_for_db():
- while 1:
- try:
- connection.cursor()
- except: # noqa
- logger.debug('Failed to connect to database. Sleep for a while and try again ...')
- time.sleep(0.5)
- else:
- break
-
-
-if __name__ == '__main__':
- wait_for_db()
- migrate_db()
- create_superuser()
- set_default_permissions()
diff --git a/contrib/scripts/make-release.py b/contrib/scripts/make-release.py
--- a/contrib/scripts/make-release.py
+++ b/contrib/scripts/make-release.py
@@ -2,7 +2,6 @@
import re
import argparse
-import subprocess
from pathlib import Path
from datetime import datetime
@@ -76,20 +75,3 @@
),
"utf-8",
)
-
-readme_md = Path('container/README.md')
-content = readme_md.read_text("utf-8")
-readme_md.unlink()
-readme_md.write_text(
- re.sub(r'quay.io/nitrate/nitrate:\d+\.\d+(\.\d+)?',
- f'quay.io/nitrate/nitrate:{new_version}',
- content),
- "utf-8",
-)
-
-subprocess.check_call([
- 'rpmdev-bumpspec',
- '-n', new_version,
- '-c', f'Built for version {new_version}',
- 'python-nitrate-tcms.spec'
-])
| {"golden_diff": "diff --git a/container/init.py b/container/init.py\ndeleted file mode 100755\n--- a/container/init.py\n+++ /dev/null\n@@ -1,89 +0,0 @@\n-#!/usr/bin/python3\n-\n-import logging\n-import os\n-import time\n-\n-logging.basicConfig(\n- level=logging.INFO,\n- format='%(asctime)s %(levelname)s %(name)s: %(message)s'\n-)\n-logger = logging.getLogger('entrypoint')\n-\n-import django\n-django.setup()\n-\n-from django.contrib.auth.models import User\n-from django.core.management import call_command\n-from django.db import connection\n-\n-\n-def create_superuser():\n- username = os.environ.get('NITRATE_SUPERUSER_USERNAME')\n- password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')\n- email = os.environ.get('NITRATE_SUPERUSER_EMAIL')\n-\n- if not (username and password and email):\n- logger.info(\n- 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '\n- 'Skip creating a superuser.'\n- )\n- return\n-\n- try:\n- if User.objects.filter(username=username, email=email, is_superuser=True).exists():\n- logger.info('Superuser %s has been created.', username)\n- return\n- except: # noqa\n- pass\n-\n- try:\n- User.objects.create_superuser(username, email=email, password=password)\n- logger.info('Superuser %s is created successfully.', username)\n- except Exception as e:\n- logger.warning('Failed to create superuser %s: %s', username, e)\n- logger.warning('Please check if the database is initialized properly.')\n-\n-\n-def set_default_permissions():\n- if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):\n- try:\n- call_command('setdefaultperms')\n- logger.info('Default groups are created and permissions are set to groups properly.')\n- except Exception as e:\n- logger.warning('Failed to run command setdefaultperms: %s', e)\n- logger.warning('Please check if the database is initialized properly.')\n- else:\n- logger.info(\n- 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. '\n- 'Skip creating default groups and granting permissions to specific group.'\n- )\n-\n-\n-def migrate_db():\n- if os.environ.get('NITRATE_MIGRATE_DB'):\n- try:\n- call_command('migrate')\n- logger.info('Database is migrated successfully.')\n- except Exception as e:\n- logger.warning('Failed to migrate the database: %s', e)\n- else:\n- logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')\n-\n-\n-def wait_for_db():\n- while 1:\n- try:\n- connection.cursor()\n- except: # noqa\n- logger.debug('Failed to connect to database. 
Sleep for a while and try again ...')\n- time.sleep(0.5)\n- else:\n- break\n-\n-\n-if __name__ == '__main__':\n- wait_for_db()\n- migrate_db()\n- create_superuser()\n- set_default_permissions()\ndiff --git a/contrib/scripts/make-release.py b/contrib/scripts/make-release.py\n--- a/contrib/scripts/make-release.py\n+++ b/contrib/scripts/make-release.py\n@@ -2,7 +2,6 @@\n \n import re\n import argparse\n-import subprocess\n from pathlib import Path\n \n from datetime import datetime\n@@ -76,20 +75,3 @@\n ),\n \"utf-8\",\n )\n-\n-readme_md = Path('container/README.md')\n-content = readme_md.read_text(\"utf-8\")\n-readme_md.unlink()\n-readme_md.write_text(\n- re.sub(r'quay.io/nitrate/nitrate:\\d+\\.\\d+(\\.\\d+)?',\n- f'quay.io/nitrate/nitrate:{new_version}',\n- content),\n- \"utf-8\",\n-)\n-\n-subprocess.check_call([\n- 'rpmdev-bumpspec',\n- '-n', new_version,\n- '-c', f'Built for version {new_version}',\n- 'python-nitrate-tcms.spec'\n-])\n", "issue": "Drop RPM package build completely\nMajor reason and consideration:\r\n\r\n- reduce the effort to maintain the builds\r\n- easy to pin the dependencies\r\n- make it clear to install and distribute via container images\r\n\r\nAC:\r\n\r\n- [x] Remove from CI\r\n- [ ] Remove the Fedora Copr project\r\n- [x] Refactor the Containerfile to build images directly from the source tree\r\n- [x] Update README and documentation to remove the content about RPM packages\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport logging\nimport os\nimport time\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(name)s: %(message)s'\n)\nlogger = logging.getLogger('entrypoint')\n\nimport django\ndjango.setup()\n\nfrom django.contrib.auth.models import User\nfrom django.core.management import call_command\nfrom django.db import connection\n\n\ndef create_superuser():\n username = os.environ.get('NITRATE_SUPERUSER_USERNAME')\n password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')\n email = os.environ.get('NITRATE_SUPERUSER_EMAIL')\n\n if not (username and password and email):\n logger.info(\n 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '\n 'Skip creating a superuser.'\n )\n return\n\n try:\n if User.objects.filter(username=username, email=email, is_superuser=True).exists():\n logger.info('Superuser %s has been created.', username)\n return\n except: # noqa\n pass\n\n try:\n User.objects.create_superuser(username, email=email, password=password)\n logger.info('Superuser %s is created successfully.', username)\n except Exception as e:\n logger.warning('Failed to create superuser %s: %s', username, e)\n logger.warning('Please check if the database is initialized properly.')\n\n\ndef set_default_permissions():\n if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):\n try:\n call_command('setdefaultperms')\n logger.info('Default groups are created and permissions are set to groups properly.')\n except Exception as e:\n logger.warning('Failed to run command setdefaultperms: %s', e)\n logger.warning('Please check if the database is initialized properly.')\n else:\n logger.info(\n 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. 
'\n 'Skip creating default groups and granting permissions to specific group.'\n )\n\n\ndef migrate_db():\n if os.environ.get('NITRATE_MIGRATE_DB'):\n try:\n call_command('migrate')\n logger.info('Database is migrated successfully.')\n except Exception as e:\n logger.warning('Failed to migrate the database: %s', e)\n else:\n logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')\n\n\ndef wait_for_db():\n while 1:\n try:\n connection.cursor()\n except: # noqa\n logger.debug('Failed to connect to database. Sleep for a while and try again ...')\n time.sleep(0.5)\n else:\n break\n\n\nif __name__ == '__main__':\n wait_for_db()\n migrate_db()\n create_superuser()\n set_default_permissions()\n", "path": "container/init.py"}, {"content": "#!/usr/bin/env python3\n\nimport re\nimport argparse\nimport subprocess\nfrom pathlib import Path\n\nfrom datetime import datetime\nfrom typing import Tuple\nfrom pygit2 import Commit, Repository\n\n\ndef extract_short_log(commit: Commit) -> Tuple[str, None or str]:\n lines = commit.message.split('\\n')\n subject = lines[0]\n match = re.search(r'\\((#\\d+)\\)$', subject)\n return subject, match.groups()[0] if match else None\n\n\ndef generate_changelog(args: argparse.Namespace):\n repo: Repository = Repository(args.repo or '.')\n if args.since_version:\n release_tag = repo.revparse_single(args.since_version)\n else:\n release_tag = repo.revparse_single(repo.describe().split('-')[0])\n\n walker = repo.walk(repo.head.target)\n walker.hide(release_tag.id)\n logs = []\n found_issue_keys = []\n\n for commit in walker:\n subject, issue_key = extract_short_log(commit)\n if issue_key is not None:\n found_issue_keys.append(issue_key)\n subject = subject.replace(issue_key, f'`{issue_key}`_')\n logs.append(f'* {subject}')\n\n logs.append('')\n found_issue_keys.sort()\n for item in found_issue_keys:\n logs.append(f'.. 
_{item}: https://github.com/Nitrate/Nitrate/issues/{item[1:]}')\n\n return '\\n'.join(logs)\n\n\ndef validate_version(value):\n if value.startswith('v'):\n raise argparse.ArgumentTypeError('Version should not be prefixed with v.')\n return value\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--repo', help='Path to git repository.')\nparser.add_argument('--since-version', required=False,\n type=validate_version,\n help='Collect commits since this version.')\nparser.add_argument('new_version', metavar='NEW_VERSION',\n type=validate_version,\n help='The version to be released.')\n\nargs = parser.parse_args()\nnew_version = args.new_version\n\nPath('VERSION.txt').unlink()\nPath('VERSION.txt').write_text(new_version, \"utf-8\")\n\ntemplate = Path('contrib/scripts/release-notes.tmpl.rst').read_text(\"utf-8\")\nPath(f'docs/source/releases/{new_version}.rst').write_text(\n template.format(\n new_version=new_version,\n doc_ref=new_version,\n title_marker=len(new_version) * '=',\n change_logs=generate_changelog(args),\n release_date=datetime.now().strftime('%b %d, %Y')\n ),\n \"utf-8\",\n)\n\nreadme_md = Path('container/README.md')\ncontent = readme_md.read_text(\"utf-8\")\nreadme_md.unlink()\nreadme_md.write_text(\n re.sub(r'quay.io/nitrate/nitrate:\\d+\\.\\d+(\\.\\d+)?',\n f'quay.io/nitrate/nitrate:{new_version}',\n content),\n \"utf-8\",\n)\n\nsubprocess.check_call([\n 'rpmdev-bumpspec',\n '-n', new_version,\n '-c', f'Built for version {new_version}',\n 'python-nitrate-tcms.spec'\n])\n", "path": "contrib/scripts/make-release.py"}], "after_files": [{"content": null, "path": "container/init.py"}, {"content": "#!/usr/bin/env python3\n\nimport re\nimport argparse\nfrom pathlib import Path\n\nfrom datetime import datetime\nfrom typing import Tuple\nfrom pygit2 import Commit, Repository\n\n\ndef extract_short_log(commit: Commit) -> Tuple[str, None or str]:\n lines = commit.message.split('\\n')\n subject = lines[0]\n match = re.search(r'\\((#\\d+)\\)$', subject)\n return subject, match.groups()[0] if match else None\n\n\ndef generate_changelog(args: argparse.Namespace):\n repo: Repository = Repository(args.repo or '.')\n if args.since_version:\n release_tag = repo.revparse_single(args.since_version)\n else:\n release_tag = repo.revparse_single(repo.describe().split('-')[0])\n\n walker = repo.walk(repo.head.target)\n walker.hide(release_tag.id)\n logs = []\n found_issue_keys = []\n\n for commit in walker:\n subject, issue_key = extract_short_log(commit)\n if issue_key is not None:\n found_issue_keys.append(issue_key)\n subject = subject.replace(issue_key, f'`{issue_key}`_')\n logs.append(f'* {subject}')\n\n logs.append('')\n found_issue_keys.sort()\n for item in found_issue_keys:\n logs.append(f'.. 
_{item}: https://github.com/Nitrate/Nitrate/issues/{item[1:]}')\n\n return '\\n'.join(logs)\n\n\ndef validate_version(value):\n if value.startswith('v'):\n raise argparse.ArgumentTypeError('Version should not be prefixed with v.')\n return value\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--repo', help='Path to git repository.')\nparser.add_argument('--since-version', required=False,\n type=validate_version,\n help='Collect commits since this version.')\nparser.add_argument('new_version', metavar='NEW_VERSION',\n type=validate_version,\n help='The version to be released.')\n\nargs = parser.parse_args()\nnew_version = args.new_version\n\nPath('VERSION.txt').unlink()\nPath('VERSION.txt').write_text(new_version, \"utf-8\")\n\ntemplate = Path('contrib/scripts/release-notes.tmpl.rst').read_text(\"utf-8\")\nPath(f'docs/source/releases/{new_version}.rst').write_text(\n template.format(\n new_version=new_version,\n doc_ref=new_version,\n title_marker=len(new_version) * '=',\n change_logs=generate_changelog(args),\n release_date=datetime.now().strftime('%b %d, %Y')\n ),\n \"utf-8\",\n)\n", "path": "contrib/scripts/make-release.py"}]} | 1,989 | 931 |
gh_patches_debug_18654 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move dev dependencies from setup.py to Pipfile
The dev extra dependencies in setup.py are no longer required, because we don't install the package with setuptools for local development. Instead, we could move all dependencies in the `extras_require` section to the Pipfile, which would have the advantage that new dev dependencies can be installed with `pipenv install <new-dependency>`.
--- END ISSUE ---
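As a rough sketch of what the move could look like, the packages currently listed under `extras_require['dev']` in the setup.py below would become a `[dev-packages]` section in the Pipfile, along these lines (version pins are assumptions; names are copied verbatim from setup.py):

```toml
[dev-packages]
django-compressor = "*"
django-compressor-toolkit = "*"
packaging = "*"
pylint = "*"
pylint-django = "*"
pylint_runner = "*"
sphinx = "*"
sphinxcontrib-django = "*"
sphinx_rtd_theme = "*"
coverage = "*"
django_coverage_plugin = "*"
```

New dev dependencies would then typically be added with `pipenv install --dev <new-dependency>`, which records them in this section automatically.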
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 """ Setup.py """
3
4 import os
5 import sys
6
7 from setuptools import find_packages, setup
8
9 # Add source directory to PATH variable to enable import of version number
10 sys.path.append(os.path.abspath('src'))
11 # pylint: disable=wrong-import-position
12 from backend.settings import VERSION
13
14 setup(
15 name='integreat_cms',
16 version=VERSION,
17 packages=find_packages('src'),
18 package_dir={'': 'src'},
19 include_package_data=True,
20 scripts=['src/integreat-cms-cli'],
21 data_files=[
22 (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])
23 for root, _, files in os.walk('src/cms/templates/')
24 ] + [
25 (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])
26 for root, _, files in os.walk('src/cms/static/')
27 ] + [
28 ('usr/lib/systemd/system/', ['systemd/[email protected]'])
29 ],
30 install_requires=[
31 'cffi',
32 'Django~=2.2.13',
33 'django-cors-headers',
34 'django-filer',
35 'django-mptt',
36 'django-widget-tweaks',
37 'idna',
38 'lxml',
39 'psycopg2-binary',
40 'python-dateutil',
41 'requests',
42 'rules',
43 'six',
44 'webauthn',
45 ],
46 extras_require={
47 'dev': [
48 'django-compressor',
49 'django-compressor-toolkit',
50 'packaging',
51 'pylint',
52 'pylint-django',
53 'pylint_runner',
54 'sphinx',
55 'sphinxcontrib-django',
56 'sphinx_rtd_theme',
57 'coverage',
58 'django_coverage_plugin',
59 ]
60 },
61 author='Integreat App Project',
62 author_email='[email protected]',
63 description='Content Management System for the Integreat App',
64 license='GPL-2.0-or-later',
65 keywords='Django Integreat CMS',
66 url='http://github.com/Integreat/',
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Intended Audience :: Developers',
70 'Programming Language :: Python :: 3.7',
71 ]
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
from backend.settings import VERSION
setup(
- name='integreat_cms',
+ name='integreat-cms',
version=VERSION,
packages=find_packages('src'),
package_dir={'': 'src'},
@@ -43,21 +43,6 @@
'six',
'webauthn',
],
- extras_require={
- 'dev': [
- 'django-compressor',
- 'django-compressor-toolkit',
- 'packaging',
- 'pylint',
- 'pylint-django',
- 'pylint_runner',
- 'sphinx',
- 'sphinxcontrib-django',
- 'sphinx_rtd_theme',
- 'coverage',
- 'django_coverage_plugin',
- ]
- },
author='Integreat App Project',
author_email='[email protected]',
description='Content Management System for the Integreat App',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n from backend.settings import VERSION\n \n setup(\n- name='integreat_cms',\n+ name='integreat-cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n@@ -43,21 +43,6 @@\n 'six',\n 'webauthn',\n ],\n- extras_require={\n- 'dev': [\n- 'django-compressor',\n- 'django-compressor-toolkit',\n- 'packaging',\n- 'pylint',\n- 'pylint-django',\n- 'pylint_runner',\n- 'sphinx',\n- 'sphinxcontrib-django',\n- 'sphinx_rtd_theme',\n- 'coverage',\n- 'django_coverage_plugin',\n- ]\n- },\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n", "issue": "Move dev dependencies from setup.py to Pipfile\nThe dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.\nMove dev dependencies from setup.py to Pipfile\nThe dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\" Setup.py \"\"\"\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Add source directory to PATH variable to enable import of version number\nsys.path.append(os.path.abspath('src'))\n# pylint: disable=wrong-import-position\nfrom backend.settings import VERSION\n\nsetup(\n name='integreat_cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n scripts=['src/integreat-cms-cli'],\n data_files=[\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/templates/')\n ] + [\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/static/')\n ] + [\n ('usr/lib/systemd/system/', ['systemd/[email protected]'])\n ],\n install_requires=[\n 'cffi',\n 'Django~=2.2.13',\n 'django-cors-headers',\n 'django-filer',\n 'django-mptt',\n 'django-widget-tweaks',\n 'idna',\n 'lxml',\n 'psycopg2-binary',\n 'python-dateutil',\n 'requests',\n 'rules',\n 'six',\n 'webauthn',\n ],\n extras_require={\n 'dev': [\n 'django-compressor',\n 'django-compressor-toolkit',\n 'packaging',\n 'pylint',\n 'pylint-django',\n 'pylint_runner',\n 'sphinx',\n 'sphinxcontrib-django',\n 'sphinx_rtd_theme',\n 'coverage',\n 'django_coverage_plugin',\n ]\n },\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n license='GPL-2.0-or-later',\n keywords='Django Integreat CMS',\n url='http://github.com/Integreat/',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.7',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\" Setup.py \"\"\"\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Add source directory to PATH variable to enable import of version 
number\nsys.path.append(os.path.abspath('src'))\n# pylint: disable=wrong-import-position\nfrom backend.settings import VERSION\n\nsetup(\n name='integreat-cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n scripts=['src/integreat-cms-cli'],\n data_files=[\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/templates/')\n ] + [\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/static/')\n ] + [\n ('usr/lib/systemd/system/', ['systemd/[email protected]'])\n ],\n install_requires=[\n 'cffi',\n 'Django~=2.2.13',\n 'django-cors-headers',\n 'django-filer',\n 'django-mptt',\n 'django-widget-tweaks',\n 'idna',\n 'lxml',\n 'psycopg2-binary',\n 'python-dateutil',\n 'requests',\n 'rules',\n 'six',\n 'webauthn',\n ],\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n license='GPL-2.0-or-later',\n keywords='Django Integreat CMS',\n url='http://github.com/Integreat/',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.7',\n ]\n)\n", "path": "setup.py"}]} | 1,055 | 235 |
gh_patches_debug_9950 | rasdani/github-patches | git_diff | ManimCommunity__manim-684 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add (opengraph) metadata to documentation
Link previews for the documentation are currently not available due to missing opengraph metadata.
Also, a description meta tag should be added.
--- END ISSUE ---
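A minimal sketch of how this can be wired up in `docs/source/conf.py` with the sphinxext-opengraph extension is shown below; the site URL, site name and image are placeholders based on the project's public pages, and a page description meta tag would still need to be handled separately (for example via the docutils `meta` directive in individual pages):

```python
# docs/source/conf.py (sketch only; values are placeholders)
extensions = [
    # ... existing extensions ...
    "sphinxext.opengraph",
]

# Open Graph metadata used by link previews
ogp_site_url = "https://docs.manim.community/"
ogp_site_name = "Manim Community | Documentation"
ogp_image = "https://www.manim.community/logo.png"
```

sphinxext-opengraph can also derive `og:description` from page content, which covers part of the description requirement.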
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import subprocess
15 import sys
16 from distutils.sysconfig import get_python_lib
17 from pathlib import Path
18
19 sys.path.insert(0, os.path.abspath("."))
20
21
22 if os.environ.get("READTHEDOCS") == "True":
23 site_path = get_python_lib()
24 # bindings for pangocffi, cairocffi, pangocairocffi need to be generated
25 subprocess.run(["python", "pangocffi/ffi_build.py"], cwd=site_path)
26 subprocess.run(["python", "cairocffi/ffi_build.py"], cwd=site_path)
27 subprocess.run(["python", "pangocairocffi/ffi_build.py"], cwd=site_path)
28 # we need to add ffmpeg to the path
29 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries")
30 # the included binary is named ffmpeg-linux..., create a symlink
31 [ffmpeg_bin] = [
32 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-")
33 ]
34 os.symlink(
35 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg")
36 )
37 os.environ["PATH"] += os.pathsep + ffmpeg_path
38
39
40 # -- Project information -----------------------------------------------------
41
42 project = "Manim"
43 copyright = "2020, The Manim Community Dev Team"
44 author = "The Manim Community Dev Team"
45
46
47 # -- General configuration ---------------------------------------------------
48
49 # Add any Sphinx extension module names here, as strings. They can be
50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
51 # ones.
52 extensions = [
53 "sphinx.ext.autodoc",
54 "recommonmark",
55 "sphinx_copybutton",
56 "sphinx.ext.napoleon",
57 "sphinx.ext.autosummary",
58 "sphinx.ext.doctest",
59 "manim_directive",
60 ]
61
62 # Automatically generate stub pages when using the .. autosummary directive
63 autosummary_generate = True
64
65 # controls whether functions documented by the autofunction directive
66 # appear with their full module names
67 add_module_names = False
68
69 # Add any paths that contain templates here, relative to this directory.
70 templates_path = ["_templates"]
71
72 # List of patterns, relative to source directory, that match files and
73 # directories to ignore when looking for source files.
74 # This pattern also affects html_static_path and html_extra_path.
75 exclude_patterns = []
76
77
78 # -- Options for HTML output -------------------------------------------------
79
80 # The theme to use for HTML and HTML Help pages. See the documentation for
81 # a list of builtin themes.
82 #
83 import guzzle_sphinx_theme
84
85 html_theme_path = guzzle_sphinx_theme.html_theme_path()
86 html_theme = "guzzle_sphinx_theme"
87 html_favicon = str(Path("_static/favicon.ico"))
88
89 # There's a standing issue with Sphinx's new-style sidebars. This is a
90 # workaround. Taken from
91 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826
92 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
93
94 # Register the theme as an extension to generate a sitemap.xml
95 extensions.append("guzzle_sphinx_theme")
96
97 # Add any paths that contain custom static files (such as style sheets) here,
98 # relative to this directory. They are copied after the builtin static files,
99 # so a file named "default.css" will overwrite the builtin "default.css".
100 html_static_path = ["_static"]
101
102 # This specifies any additional css files that will override the theme's
103 html_css_files = ["custom.css"]
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -56,6 +56,7 @@
"sphinx.ext.napoleon",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
+ "sphinxext.opengraph",
"manim_directive",
]
@@ -101,3 +102,8 @@
# This specifies any additional css files that will override the theme's
html_css_files = ["custom.css"]
+
+# opengraph settings
+ogp_image = "https://www.manim.community/logo.png"
+ogp_site_name = "Manim Community | Documentation"
+ogp_site_url = "https://docs.manim.community/"
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -56,6 +56,7 @@\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n+ \"sphinxext.opengraph\",\n \"manim_directive\",\n ]\n \n@@ -101,3 +102,8 @@\n \n # This specifies any additional css files that will override the theme's\n html_css_files = [\"custom.css\"]\n+\n+# opengraph settings\n+ogp_image = \"https://www.manim.community/logo.png\"\n+ogp_site_name = \"Manim Community | Documentation\"\n+ogp_site_url = \"https://docs.manim.community/\"\n", "issue": "Add (opengraph) metadata to documentation\nPreviews to links to the documentation are currently not available due to missing opengraph metadata.\r\n\r\nAlso, a description meta tag should be added.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinxext.opengraph\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. autosummary directive\nautosummary_generate = True\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n", "path": "docs/source/conf.py"}]} | 1,395 | 171 |
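The golden diff above registers `sphinxext.opengraph` as a Sphinx extension and sets the `ogp_*` options, which is what produces the link previews and description metadata requested in the issue. A minimal sketch of the patched `docs/source/conf.py` fragment follows; it assumes the `sphinxext-opengraph` package is installed (`pip install sphinxext-opengraph`), and the URLs are taken from the diff itself.

```python
# Sketch of the patched Sphinx configuration fragment (docs/source/conf.py).
extensions = [
    "sphinx.ext.autodoc",
    "recommonmark",
    "sphinx_copybutton",
    "sphinx.ext.napoleon",
    "sphinx.ext.autosummary",
    "sphinx.ext.doctest",
    "sphinxext.opengraph",  # emits OpenGraph <meta> tags for link previews
    "manim_directive",
]

# OpenGraph settings read by sphinxext-opengraph
ogp_image = "https://www.manim.community/logo.png"
ogp_site_name = "Manim Community | Documentation"
ogp_site_url = "https://docs.manim.community/"
```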
gh_patches_debug_11488 | rasdani/github-patches | git_diff | pytorch__vision-355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
utils.save_image fails when passing list of images
utils.save_image fails when passing in a list of images, as the code tries to call .cpu on the list.
Passing in a list should be possible according to the function's documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/utils.py`
Content:
```
1 import torch
2 import math
3 irange = range
4
5
6 def make_grid(tensor, nrow=8, padding=2,
7 normalize=False, range=None, scale_each=False, pad_value=0):
8 """Make a grid of images.
9
10 Args:
11 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
12 or a list of images all of the same size.
13 nrow (int, optional): Number of images displayed in each row of the grid.
14 The Final grid size is (B / nrow, nrow). Default is 8.
15 padding (int, optional): amount of padding. Default is 2.
16 normalize (bool, optional): If True, shift the image to the range (0, 1),
17 by subtracting the minimum and dividing by the maximum pixel value.
18 range (tuple, optional): tuple (min, max) where min and max are numbers,
19 then these numbers are used to normalize the image. By default, min and max
20 are computed from the tensor.
21 scale_each (bool, optional): If True, scale each image in the batch of
22 images separately rather than the (min, max) over all images.
23 pad_value (float, optional): Value for the padded pixels.
24
25 Example:
26 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
27
28 """
29 if not (torch.is_tensor(tensor) or
30 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
31 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
32
33 # if list of tensors, convert to a 4D mini-batch Tensor
34 if isinstance(tensor, list):
35 tensor = torch.stack(tensor, dim=0)
36
37 if tensor.dim() == 2: # single image H x W
38 tensor = tensor.view(1, tensor.size(0), tensor.size(1))
39 if tensor.dim() == 3: # single image
40 if tensor.size(0) == 1: # if single-channel, convert to 3-channel
41 tensor = torch.cat((tensor, tensor, tensor), 0)
42 return tensor
43 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
44 tensor = torch.cat((tensor, tensor, tensor), 1)
45
46 if normalize is True:
47 tensor = tensor.clone() # avoid modifying tensor in-place
48 if range is not None:
49 assert isinstance(range, tuple), \
50 "range has to be a tuple (min, max) if specified. min and max are numbers"
51
52 def norm_ip(img, min, max):
53 img.clamp_(min=min, max=max)
54 img.add_(-min).div_(max - min)
55
56 def norm_range(t, range):
57 if range is not None:
58 norm_ip(t, range[0], range[1])
59 else:
60 norm_ip(t, t.min(), t.max())
61
62 if scale_each is True:
63 for t in tensor: # loop over mini-batch dimension
64 norm_range(t, range)
65 else:
66 norm_range(tensor, range)
67
68 # make the mini-batch of images into a grid
69 nmaps = tensor.size(0)
70 xmaps = min(nrow, nmaps)
71 ymaps = int(math.ceil(float(nmaps) / xmaps))
72 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
73 grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)
74 k = 0
75 for y in irange(ymaps):
76 for x in irange(xmaps):
77 if k >= nmaps:
78 break
79 grid.narrow(1, y * height + padding, height - padding)\
80 .narrow(2, x * width + padding, width - padding)\
81 .copy_(tensor[k])
82 k = k + 1
83 return grid
84
85
86 def save_image(tensor, filename, nrow=8, padding=2,
87 normalize=False, range=None, scale_each=False, pad_value=0):
88 """Save a given Tensor into an image file.
89
90 Args:
91 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
92 saves the tensor as a grid of images by calling ``make_grid``.
93 **kwargs: Other arguments are documented in ``make_grid``.
94 """
95 from PIL import Image
96 tensor = tensor.cpu()
97 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
98 normalize=normalize, range=range, scale_each=scale_each)
99 ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
100 im = Image.fromarray(ndarr)
101 im.save(filename)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/utils.py b/torchvision/utils.py
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -93,9 +93,8 @@
**kwargs: Other arguments are documented in ``make_grid``.
"""
from PIL import Image
- tensor = tensor.cpu()
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
- ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
+ ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
im = Image.fromarray(ndarr)
im.save(filename)
| {"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -93,9 +93,8 @@\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n- tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n- ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()\n+ ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "issue": "utils.save_image fails when passing list of images\nutils.save_image fails when passing in a list of images, as the code tries to call .cpu on the list. \r\nPassing in a list should be possible according to the function's documentation.\n", "before_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n return tensor\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}], "after_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. 
By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n return tensor\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}]} | 1,615 | 198 |
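The fix above removes the premature `tensor.cpu()` call, which fails when `tensor` is a Python list, and moves the device transfer onto the grid produced by `make_grid` (which already accepts lists). A small usage sketch, assuming a torchvision build that contains this patch:

```python
import torch
from torchvision.utils import save_image

# A list of same-sized (C x H x W) image tensors; make_grid stacks them internally.
images = [torch.rand(3, 64, 64) for _ in range(8)]

# With the patch, .cpu() is no longer called on the list itself, so this works;
# GPU tensors are moved to the CPU only after the grid has been assembled.
save_image(images, "grid.png", nrow=4, padding=2)
```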
gh_patches_debug_35355 | rasdani/github-patches | git_diff | scikit-image__scikit-image-2134 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`min_size` is not strictly conformed in the implementation of felzenszwalb
## Description
With `min_size` specified, there are still some segments whose size is less than it. I don't know if it is an inherent flaw of the algorithm.
## Way to reproduce
```
>>> I = skimage.io.imread('dragonbaby.jpg')
>>> fz = felzenszwalb(I, scale=300, sigma=0.8, min_size=80)
>>> (fz==9).sum()
1
```

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/segmentation/_felzenszwalb.py`
Content:
```
1 import numpy as np
2
3 from .._shared.utils import warn
4 from ._felzenszwalb_cy import _felzenszwalb_grey
5
6
7 def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):
8 """Computes Felsenszwalb's efficient graph based image segmentation.
9
10 Produces an oversegmentation of a multichannel (i.e. RGB) image
11 using a fast, minimum spanning tree based clustering on the image grid.
12 The parameter ``scale`` sets an observation level. Higher scale means
13 less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,
14 used for smoothing the image prior to segmentation.
15
16 The number of produced segments as well as their size can only be
17 controlled indirectly through ``scale``. Segment size within an image can
18 vary greatly depending on local contrast.
19
20 For RGB images, the algorithm computes a separate segmentation for each
21 channel and then combines these. The combined segmentation is the
22 intersection of the separate segmentations on the color channels.
23
24 Parameters
25 ----------
26 image : (width, height, 3) or (width, height) ndarray
27 Input image.
28 scale : float
29 Free parameter. Higher means larger clusters.
30 sigma : float
31 Width of Gaussian kernel used in preprocessing.
32 min_size : int
33 Minimum component size. Enforced using postprocessing.
34
35 Returns
36 -------
37 segment_mask : (width, height) ndarray
38 Integer mask indicating segment labels.
39
40 References
41 ----------
42 .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and
43 Huttenlocher, D.P. International Journal of Computer Vision, 2004
44
45 Examples
46 --------
47 >>> from skimage.segmentation import felzenszwalb
48 >>> from skimage.data import coffee
49 >>> img = coffee()
50 >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
51 """
52
53 if image.ndim == 2:
54 # assume single channel image
55 return _felzenszwalb_grey(image, scale=scale, sigma=sigma,
56 min_size=min_size)
57
58 elif image.ndim != 3:
59 raise ValueError("Felzenswalb segmentation can only operate on RGB and"
60 " grey images, but input array of ndim %d given."
61 % image.ndim)
62
63 # assume we got 2d image with multiple channels
64 n_channels = image.shape[2]
65 if n_channels != 3:
66 warn("Got image with %d channels. Is that really what you"
67 " wanted?" % image.shape[2])
68 segmentations = []
69 # compute quickshift for each channel
70 for c in range(n_channels):
71 channel = np.ascontiguousarray(image[:, :, c])
72 s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,
73 min_size=min_size)
74 segmentations.append(s)
75
76 # put pixels in same segment only if in the same segment in all images
77 # we do this by combining the channels to one number
78 n0 = segmentations[0].max() + 1
79 n1 = segmentations[1].max() + 1
80 segmentation = (segmentations[0] + segmentations[1] * n0
81 + segmentations[2] * n0 * n1)
82 # make segment labels consecutive numbers starting at 0
83 labels = np.unique(segmentation, return_inverse=True)[1]
84 return labels.reshape(image.shape[:2])
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/segmentation/_felzenszwalb.py b/skimage/segmentation/_felzenszwalb.py
--- a/skimage/segmentation/_felzenszwalb.py
+++ b/skimage/segmentation/_felzenszwalb.py
@@ -1,7 +1,7 @@
import numpy as np
from .._shared.utils import warn
-from ._felzenszwalb_cy import _felzenszwalb_grey
+from ._felzenszwalb_cy import _felzenszwalb_cython
def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):
@@ -17,9 +17,8 @@
controlled indirectly through ``scale``. Segment size within an image can
vary greatly depending on local contrast.
- For RGB images, the algorithm computes a separate segmentation for each
- channel and then combines these. The combined segmentation is the
- intersection of the separate segmentations on the color channels.
+ For RGB images, the algorithm uses the euclidean distance between pixels in
+ color space.
Parameters
----------
@@ -50,35 +49,6 @@
>>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
"""
- if image.ndim == 2:
- # assume single channel image
- return _felzenszwalb_grey(image, scale=scale, sigma=sigma,
- min_size=min_size)
-
- elif image.ndim != 3:
- raise ValueError("Felzenswalb segmentation can only operate on RGB and"
- " grey images, but input array of ndim %d given."
- % image.ndim)
-
- # assume we got 2d image with multiple channels
- n_channels = image.shape[2]
- if n_channels != 3:
- warn("Got image with %d channels. Is that really what you"
- " wanted?" % image.shape[2])
- segmentations = []
- # compute quickshift for each channel
- for c in range(n_channels):
- channel = np.ascontiguousarray(image[:, :, c])
- s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,
- min_size=min_size)
- segmentations.append(s)
-
- # put pixels in same segment only if in the same segment in all images
- # we do this by combining the channels to one number
- n0 = segmentations[0].max() + 1
- n1 = segmentations[1].max() + 1
- segmentation = (segmentations[0] + segmentations[1] * n0
- + segmentations[2] * n0 * n1)
- # make segment labels consecutive numbers starting at 0
- labels = np.unique(segmentation, return_inverse=True)[1]
- return labels.reshape(image.shape[:2])
+ image = np.atleast_3d(image)
+ return _felzenszwalb_cython(image, scale=scale, sigma=sigma,
+ min_size=min_size)
| {"golden_diff": "diff --git a/skimage/segmentation/_felzenszwalb.py b/skimage/segmentation/_felzenszwalb.py\n--- a/skimage/segmentation/_felzenszwalb.py\n+++ b/skimage/segmentation/_felzenszwalb.py\n@@ -1,7 +1,7 @@\n import numpy as np\n \n from .._shared.utils import warn\n-from ._felzenszwalb_cy import _felzenszwalb_grey\n+from ._felzenszwalb_cy import _felzenszwalb_cython\n \n \n def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n@@ -17,9 +17,8 @@\n controlled indirectly through ``scale``. Segment size within an image can\n vary greatly depending on local contrast.\n \n- For RGB images, the algorithm computes a separate segmentation for each\n- channel and then combines these. The combined segmentation is the\n- intersection of the separate segmentations on the color channels.\n+ For RGB images, the algorithm uses the euclidean distance between pixels in\n+ color space.\n \n Parameters\n ----------\n@@ -50,35 +49,6 @@\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n \n- if image.ndim == 2:\n- # assume single channel image\n- return _felzenszwalb_grey(image, scale=scale, sigma=sigma,\n- min_size=min_size)\n-\n- elif image.ndim != 3:\n- raise ValueError(\"Felzenswalb segmentation can only operate on RGB and\"\n- \" grey images, but input array of ndim %d given.\"\n- % image.ndim)\n-\n- # assume we got 2d image with multiple channels\n- n_channels = image.shape[2]\n- if n_channels != 3:\n- warn(\"Got image with %d channels. Is that really what you\"\n- \" wanted?\" % image.shape[2])\n- segmentations = []\n- # compute quickshift for each channel\n- for c in range(n_channels):\n- channel = np.ascontiguousarray(image[:, :, c])\n- s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,\n- min_size=min_size)\n- segmentations.append(s)\n-\n- # put pixels in same segment only if in the same segment in all images\n- # we do this by combining the channels to one number\n- n0 = segmentations[0].max() + 1\n- n1 = segmentations[1].max() + 1\n- segmentation = (segmentations[0] + segmentations[1] * n0\n- + segmentations[2] * n0 * n1)\n- # make segment labels consecutive numbers starting at 0\n- labels = np.unique(segmentation, return_inverse=True)[1]\n- return labels.reshape(image.shape[:2])\n+ image = np.atleast_3d(image)\n+ return _felzenszwalb_cython(image, scale=scale, sigma=sigma,\n+ min_size=min_size)\n", "issue": "`min_size` is not strictly conformed in the implementation of felzenszwalb\n## Description\n\nWith `min_size` specified, there're still some segments with sizes that less than it. I don't know if it is an inherent flaw of the algorithm.\n## Way to reproduce\n\n```\n>>> I = skimage.io.imread('dragonbaby.jpg')\n>>> fz = felzenszwalb(I, scale=300, sigma=0.8, min_size=80)\n>>> (fz==9).sum()\n1\n```\n\n\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .._shared.utils import warn\nfrom ._felzenszwalb_cy import _felzenszwalb_grey\n\n\ndef felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n \"\"\"Computes Felsenszwalb's efficient graph based image segmentation.\n\n Produces an oversegmentation of a multichannel (i.e. RGB) image\n using a fast, minimum spanning tree based clustering on the image grid.\n The parameter ``scale`` sets an observation level. Higher scale means\n less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,\n used for smoothing the image prior to segmentation.\n\n The number of produced segments as well as their size can only be\n controlled indirectly through ``scale``. 
Segment size within an image can\n vary greatly depending on local contrast.\n\n For RGB images, the algorithm computes a separate segmentation for each\n channel and then combines these. The combined segmentation is the\n intersection of the separate segmentations on the color channels.\n\n Parameters\n ----------\n image : (width, height, 3) or (width, height) ndarray\n Input image.\n scale : float\n Free parameter. Higher means larger clusters.\n sigma : float\n Width of Gaussian kernel used in preprocessing.\n min_size : int\n Minimum component size. Enforced using postprocessing.\n\n Returns\n -------\n segment_mask : (width, height) ndarray\n Integer mask indicating segment labels.\n\n References\n ----------\n .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and\n Huttenlocher, D.P. International Journal of Computer Vision, 2004\n\n Examples\n --------\n >>> from skimage.segmentation import felzenszwalb\n >>> from skimage.data import coffee\n >>> img = coffee()\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n\n if image.ndim == 2:\n # assume single channel image\n return _felzenszwalb_grey(image, scale=scale, sigma=sigma,\n min_size=min_size)\n\n elif image.ndim != 3:\n raise ValueError(\"Felzenswalb segmentation can only operate on RGB and\"\n \" grey images, but input array of ndim %d given.\"\n % image.ndim)\n\n # assume we got 2d image with multiple channels\n n_channels = image.shape[2]\n if n_channels != 3:\n warn(\"Got image with %d channels. Is that really what you\"\n \" wanted?\" % image.shape[2])\n segmentations = []\n # compute quickshift for each channel\n for c in range(n_channels):\n channel = np.ascontiguousarray(image[:, :, c])\n s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,\n min_size=min_size)\n segmentations.append(s)\n\n # put pixels in same segment only if in the same segment in all images\n # we do this by combining the channels to one number\n n0 = segmentations[0].max() + 1\n n1 = segmentations[1].max() + 1\n segmentation = (segmentations[0] + segmentations[1] * n0\n + segmentations[2] * n0 * n1)\n # make segment labels consecutive numbers starting at 0\n labels = np.unique(segmentation, return_inverse=True)[1]\n return labels.reshape(image.shape[:2])\n", "path": "skimage/segmentation/_felzenszwalb.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .._shared.utils import warn\nfrom ._felzenszwalb_cy import _felzenszwalb_cython\n\n\ndef felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n \"\"\"Computes Felsenszwalb's efficient graph based image segmentation.\n\n Produces an oversegmentation of a multichannel (i.e. RGB) image\n using a fast, minimum spanning tree based clustering on the image grid.\n The parameter ``scale`` sets an observation level. Higher scale means\n less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,\n used for smoothing the image prior to segmentation.\n\n The number of produced segments as well as their size can only be\n controlled indirectly through ``scale``. Segment size within an image can\n vary greatly depending on local contrast.\n\n For RGB images, the algorithm uses the euclidean distance between pixels in\n color space.\n\n Parameters\n ----------\n image : (width, height, 3) or (width, height) ndarray\n Input image.\n scale : float\n Free parameter. Higher means larger clusters.\n sigma : float\n Width of Gaussian kernel used in preprocessing.\n min_size : int\n Minimum component size. 
Enforced using postprocessing.\n\n Returns\n -------\n segment_mask : (width, height) ndarray\n Integer mask indicating segment labels.\n\n References\n ----------\n .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and\n Huttenlocher, D.P. International Journal of Computer Vision, 2004\n\n Examples\n --------\n >>> from skimage.segmentation import felzenszwalb\n >>> from skimage.data import coffee\n >>> img = coffee()\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n\n image = np.atleast_3d(image)\n return _felzenszwalb_cython(image, scale=scale, sigma=sigma,\n min_size=min_size)\n", "path": "skimage/segmentation/_felzenszwalb.py"}]} | 1,388 | 711 |
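The patch above replaces the per-channel `_felzenszwalb_grey` calls, whose channel-wise intersection could yield arbitrarily small segments, with a single `_felzenszwalb_cython` call on the image promoted to 3-D, so the `min_size` post-processing acts on the final labelling. A quick check of that behaviour, using a bundled sample image as a stand-in for the `dragonbaby.jpg` from the report and assuming a scikit-image build with the fix:

```python
import numpy as np
from skimage import data
from skimage.segmentation import felzenszwalb

img = data.astronaut()  # stand-in RGB image
segments = felzenszwalb(img, scale=300, sigma=0.8, min_size=80)

# With the fix, the smallest segment should respect min_size.
sizes = np.bincount(segments.ravel())
print("segments:", segments.max() + 1, "| smallest segment:", sizes.min(), "pixels")
```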
gh_patches_debug_248 | rasdani/github-patches | git_diff | statsmodels__statsmodels-3976 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The compat modules should use absolute imports
The [statsmodels.compat.collections](https://github.com/statsmodels/statsmodels/blob/a88830efc3a99cfbe0ebc9fbfd77820fe748fc59/statsmodels/compat/collections.py#L7) module imports its standard-library namesake without requesting absolute imports. While this seems to work in many cases, it causes a problem for packages that override `__import__`. See enlnt/pyq#18.
Please consider adding
```python
from __future__ import absolute_import
```
to the compat modules.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/compat/collections.py`
Content:
```
1 '''backported compatibility functions for Python's collections
2
3 '''
4
5 try:
6 #python >= 2.7
7 from collections import OrderedDict
8 except ImportError:
9 #http://code.activestate.com/recipes/576693/
10 #author: Raymond Hettinger
11 from .ordereddict import OrderedDict
12
13 try:
14 #python >= 2.7
15 from collections import Counter
16 except ImportError:
17 #http://code.activestate.com/recipes/576611/
18 #author: Raymond Hettinger
19 from .counter import Counter
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/statsmodels/compat/collections.py b/statsmodels/compat/collections.py
--- a/statsmodels/compat/collections.py
+++ b/statsmodels/compat/collections.py
@@ -1,6 +1,7 @@
'''backported compatibility functions for Python's collections
'''
+from __future__ import absolute_import
try:
#python >= 2.7
| {"golden_diff": "diff --git a/statsmodels/compat/collections.py b/statsmodels/compat/collections.py\n--- a/statsmodels/compat/collections.py\n+++ b/statsmodels/compat/collections.py\n@@ -1,6 +1,7 @@\n '''backported compatibility functions for Python's collections\n \n '''\n+from __future__ import absolute_import\n \n try:\n #python >= 2.7\n", "issue": "The compat modules should use absolute imports\nThe [statsmodels.compat.collections](https://github.com/statsmodels/statsmodels/blob/a88830efc3a99cfbe0ebc9fbfd77820fe748fc59/statsmodels/compat/collections.py#L7) imports the namesake standard library module without requesting absolute imports. While it seems to work in many cases, it causes a problem to packages that override `__import__`. See enlnt/pyq#18.\r\n\r\nPlease consider adding\r\n\r\n```python\r\nfrom __future__ import absolute_import\r\n```\r\nto the compat modules.\n", "before_files": [{"content": "'''backported compatibility functions for Python's collections\n\n'''\n\ntry:\n #python >= 2.7\n from collections import OrderedDict\nexcept ImportError:\n #http://code.activestate.com/recipes/576693/\n #author: Raymond Hettinger\n from .ordereddict import OrderedDict\n\ntry:\n #python >= 2.7\n from collections import Counter\nexcept ImportError:\n #http://code.activestate.com/recipes/576611/\n #author: Raymond Hettinger\n from .counter import Counter\n", "path": "statsmodels/compat/collections.py"}], "after_files": [{"content": "'''backported compatibility functions for Python's collections\n\n'''\nfrom __future__ import absolute_import\n\ntry:\n #python >= 2.7\n from collections import OrderedDict\nexcept ImportError:\n #http://code.activestate.com/recipes/576693/\n #author: Raymond Hettinger\n from .ordereddict import OrderedDict\n\ntry:\n #python >= 2.7\n from collections import Counter\nexcept ImportError:\n #http://code.activestate.com/recipes/576611/\n #author: Raymond Hettinger\n from .counter import Counter\n", "path": "statsmodels/compat/collections.py"}]} | 550 | 81 |
gh_patches_debug_2646 | rasdani/github-patches | git_diff | jupyter__docker-stacks-1964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] - Healthcheck fails when using proxy
### What docker image(s) are you using?
base-notebook
### Host OS system and architecture running docker image
Windows 11 as host and linux/amd64 for docker
### What Docker command are you running?
docker compose up with the following dockerfile:
```Dockerfile
version: '3.4'
services:
datamining:
container_name: xxxx
image: xxxx
build:
context: .
dockerfile: ./Dockerfile
ports:
- "8888:8888"
volumes:
- xxxx:/home/jovyan/work
environment:
- DOCKER_STACKS_JUPYTER_CMD=lab
restart: on-failure
```
### How to Reproduce the problem?
Precondition is that the machine has to operate in a corporate environment using the companies proxy.
Start the container as above.
Check the state of the container with ```docker container ls```
The container is marked as unhealthy.
### Command output
```bash session
abcdefghijk "tini -g -- start-no…" x hours ago Up x hours (unhealthy) 0.0.0.0:8888->8888/tcp xxxx
```
### Expected behavior
```abcdedfghi abcdefghijk "tini -g -- start-no…" x hours ago Up x hours (healthy) 0.0.0.0:8888->8888/tcp xxxx```
### Actual behavior
After investigating the issue, I found that docker_healthcheck.py does not run successfully, giving the following error message:
```
Traceback (most recent call last):
File "/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py", line 790, in urlopen
response = self._make_request(
^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py", line 536, in _make_request
response = conn.getresponse()
^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/connection.py", line 461, in getresponse
httplib_response = super().getresponse()
^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/http/client.py", line 1378, in getresponse
response.begin()
File "/opt/conda/lib/python3.11/http/client.py", line 318, in begin
version, status, reason = self._read_status()
^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/http/client.py", line 287, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
The above exception was the direct cause of the following exception:
urllib3.exceptions.ProxyError: ('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response'))
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.11/site-packages/requests/adapters.py", line 486, in send
resp = conn.urlopen(
^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py", line 844, in urlopen
retries = retries.increment(
^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/util/retry.py", line 515, in increment
raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/etc/jupyter/docker_healthcheck.py", line 19, in <module>
r = requests.get(url, verify=False) # request without SSL verification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/api.py", line 73, in get
return request("get", url, params=params, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/api.py", line 59, in request
return session.request(method=method, url=url, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/sessions.py", line 589, in request
resp = self.send(prep, **send_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/sessions.py", line 703, in send
r = adapter.send(request, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/adapters.py", line 513, in send
raise ProxyError(e, request=request)
requests.exceptions.ProxyError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))
```
### Anything else?
After investigating the issue further I came to the conclusion that the proxy was the problem, so I applied the following fix to ```docker_healthcheck.py```:
```python
proxies = {
"http": None,
"https": None,
}
r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification
```
Now the healthcheck works!
### Latest Docker version
- [X] I've updated my Docker version to the latest available, and the issue still persists
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `base-notebook/docker_healthcheck.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4 import json
5 import os
6 from pathlib import Path
7
8 import requests
9
10 # A number of operations below deliberately don't check for possible errors
11 # As this is a healthcheck, it should succeed or raise an exception on error
12
13 runtime_dir = Path("/home/") / os.environ["NB_USER"] / ".local/share/jupyter/runtime/"
14 json_file = next(runtime_dir.glob("*server-*.json"))
15
16 url = json.loads(json_file.read_bytes())["url"]
17 url = url + "api"
18
19 r = requests.get(url, verify=False) # request without SSL verification
20 r.raise_for_status()
21 print(r.content)
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/base-notebook/docker_healthcheck.py b/base-notebook/docker_healthcheck.py
--- a/base-notebook/docker_healthcheck.py
+++ b/base-notebook/docker_healthcheck.py
@@ -16,6 +16,11 @@
url = json.loads(json_file.read_bytes())["url"]
url = url + "api"
-r = requests.get(url, verify=False) # request without SSL verification
+proxies = {
+ "http": "",
+ "https": "",
+}
+
+r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification
r.raise_for_status()
print(r.content)
| {"golden_diff": "diff --git a/base-notebook/docker_healthcheck.py b/base-notebook/docker_healthcheck.py\n--- a/base-notebook/docker_healthcheck.py\n+++ b/base-notebook/docker_healthcheck.py\n@@ -16,6 +16,11 @@\n url = json.loads(json_file.read_bytes())[\"url\"]\n url = url + \"api\"\n \n-r = requests.get(url, verify=False) # request without SSL verification\n+proxies = {\n+ \"http\": \"\",\n+ \"https\": \"\",\n+}\n+\n+r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification\n r.raise_for_status()\n print(r.content)\n", "issue": "[BUG] - Healthcheck fails when using proxy\n### What docker image(s) are you using?\r\n\r\nbase-notebook\r\n\r\n### Host OS system and architecture running docker image\r\n\r\nWindows 11 as host and linux/amd64 for docker\r\n\r\n### What Docker command are you running?\r\n\r\ndocker compose up with the following dockerfile:\r\n\r\n```Dockerfile\r\nversion: '3.4'\r\n\r\nservices:\r\n datamining:\r\n container_name: xxxx\r\n image: xxxx\r\n build:\r\n context: .\r\n dockerfile: ./Dockerfile\r\n ports:\r\n - \"8888:8888\"\r\n volumes:\r\n - xxxx:/home/jovyan/work\r\n environment:\r\n - DOCKER_STACKS_JUPYTER_CMD=lab\r\n restart: on-failure\r\n```\r\n\r\n### How to Reproduce the problem?\r\n\r\nPrecondition is that the machine has to operate in a corporate environment using the companies proxy.\r\nStart the container as above.\r\nCheck the state of the container with ```docker container ls```\r\nThe container is marked as unhealthy.\r\n\r\n### Command output\r\n\r\n```bash session\r\nabcdefghijk \"tini -g -- start-no\u2026\" x hours ago Up x hours (unhealthy) 0.0.0.0:8888->8888/tcp xxxx\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\n```abcdedfghi abcdefghijk \"tini -g -- start-no\u2026\" x hours ago Up x hours (healthy) 0.0.0.0:8888->8888/tcp xxxx```\r\n\r\n### Actual behavior\r\n\r\nAfter investigating the issue the problem is that docker_healthcheck.py does not run successfully giving the following error message:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 790, in urlopen\r\n response = self._make_request(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 536, in _make_request\r\n response = conn.getresponse()\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/connection.py\", line 461, in getresponse\r\n httplib_response = super().getresponse()\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/http/client.py\", line 1378, in getresponse\r\n response.begin()\r\n File \"/opt/conda/lib/python3.11/http/client.py\", line 318, in begin\r\n version, status, reason = self._read_status()\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/http/client.py\", line 287, in _read_status\r\n raise RemoteDisconnected(\"Remote end closed connection without\"\r\nhttp.client.RemoteDisconnected: Remote end closed connection without response\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nurllib3.exceptions.ProxyError: ('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response'))\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\r\n resp = conn.urlopen(\r\n ^^^^^^^^^^^^^\r\n File 
\"/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 844, in urlopen\r\n retries = retries.increment(\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/util/retry.py\", line 515, in increment\r\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/etc/jupyter/docker_healthcheck.py\", line 19, in <module>\r\n r = requests.get(url, verify=False) # request without SSL verification\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/api.py\", line 73, in get\r\n return request(\"get\", url, params=params, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/api.py\", line 59, in request\r\n return session.request(method=method, url=url, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\r\n resp = self.send(prep, **send_kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\r\n r = adapter.send(request, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/adapters.py\", line 513, in send\r\n raise ProxyError(e, request=request)\r\nrequests.exceptions.ProxyError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))\r\n```\r\n\r\n### Anything else?\r\n\r\nAfter investigating the issue further I came to the conclusion that using the proxy will be the problem. 
So I applied the following fix to ```docker_healthcheck.py```:\r\n```python\r\nproxies = {\r\n \"http\": None,\r\n \"https\": None,\r\n}\r\n\r\nr = requests.get(url, proxies=proxies, verify=False) # request without SSL verification\r\n```\r\nNow the healthcheck works!\r\n\r\n### Latest Docker version\r\n\r\n- [X] I've updated my Docker version to the latest available, and the issue still persists\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport json\nimport os\nfrom pathlib import Path\n\nimport requests\n\n# A number of operations below deliberately don't check for possible errors\n# As this is a healthcheck, it should succeed or raise an exception on error\n\nruntime_dir = Path(\"/home/\") / os.environ[\"NB_USER\"] / \".local/share/jupyter/runtime/\"\njson_file = next(runtime_dir.glob(\"*server-*.json\"))\n\nurl = json.loads(json_file.read_bytes())[\"url\"]\nurl = url + \"api\"\n\nr = requests.get(url, verify=False) # request without SSL verification\nr.raise_for_status()\nprint(r.content)\n", "path": "base-notebook/docker_healthcheck.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport json\nimport os\nfrom pathlib import Path\n\nimport requests\n\n# A number of operations below deliberately don't check for possible errors\n# As this is a healthcheck, it should succeed or raise an exception on error\n\nruntime_dir = Path(\"/home/\") / os.environ[\"NB_USER\"] / \".local/share/jupyter/runtime/\"\njson_file = next(runtime_dir.glob(\"*server-*.json\"))\n\nurl = json.loads(json_file.read_bytes())[\"url\"]\nurl = url + \"api\"\n\nproxies = {\n \"http\": \"\",\n \"https\": \"\",\n}\n\nr = requests.get(url, proxies=proxies, verify=False) # request without SSL verification\nr.raise_for_status()\nprint(r.content)\n", "path": "base-notebook/docker_healthcheck.py"}]} | 1,896 | 139 |
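`requests` honours `HTTP_PROXY`/`HTTPS_PROXY` from the environment, so the in-container healthcheck request to the local Jupyter Server was being routed through the corporate proxy and failing. The merged fix passes an explicit `proxies` mapping with empty strings (the reporter's workaround used `None` values) so the request goes direct. A minimal sketch of the same idea against a hypothetical local endpoint:

```python
import requests

url = "http://127.0.0.1:8888/api"  # hypothetical local Jupyter Server endpoint

# Empty-string entries override any proxy configuration inherited from the environment.
proxies = {"http": "", "https": ""}

r = requests.get(url, proxies=proxies, verify=False)  # no TLS verification, as in the healthcheck
r.raise_for_status()
print(r.content)

# A Session that ignores environment settings has the same effect:
#   s = requests.Session(); s.trust_env = False; r = s.get(url, verify=False)
```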
gh_patches_debug_26103 | rasdani/github-patches | git_diff | pytorch__PiPPy-528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Buck run device error
buck run reported the following error:
```
[trainer1]:RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0! (when checking argument for argument weight in method wrapper__native_layer_norm)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pippy/utils.py`
Content:
```
1 # Copyright (c) Meta Platforms, Inc. and affiliates
2 import os
3 import socket
4 import logging
5
6 # Pinning process to a separate GPU if not yet done by launch script
7 # Notes:
8 # 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main
9 # thread
10 # 2. Must be done before `import torch` at which point CUDA context may be created
11 cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')
12 if (cuda_devices_str is None # not set
13 or len(cuda_devices_str.split(',')) > 1): # or set to all devices
14 # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information
15 local_rank_str = os.getenv('LOCAL_RANK')
16 if local_rank_str is not None:
17 os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str
18 print(f"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}")
19
20 import torch
21 import torch.multiprocessing as mp
22 import torch.distributed.rpc as rpc
23
24
25 VERBOSE = bool(int(os.environ.get('VERBOSE', False)))
26
27 if VERBOSE:
28 logging.getLogger().setLevel(logging.DEBUG)
29
30
31 def has_efa() -> bool:
32 try:
33 import subprocess
34 return subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"],
35 stdout=subprocess.DEVNULL,
36 stderr=subprocess.DEVNULL).returncode == 0
37 except FileNotFoundError:
38 return False
39 except PermissionError:
40 return False
41
42
43 def tp_transports():
44 return ["shm", "uv"] if has_efa() else None
45
46
47 def run_pippy(run_master, args, *extra_args):
48 if not hasattr(args, 'world_size'):
49 assert hasattr(args, 'pp_group_size')
50 args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1
51 else:
52 if not hasattr(args, 'dp_group_size'):
53 args.pp_group_size = args.pp_group_size if hasattr(args, 'pp_group_size') else args.world_size
54 assert args.world_size % args.pp_group_size == 0
55 args.dp_group_size = args.world_size // args.pp_group_size
56 elif not hasattr(args, 'pp_group_size'):
57 args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1
58 assert args.world_size % args.dp_group_size == 0
59 args.pp_group_size = args.world_size // args.dp_group_size
60 else:
61 pass
62 # TODO: doesn't work for PiPPyTrainingArguments
63 # assert args.world_size == args.dp_group_size * args.pp_group_size
64
65 actual_world_size = args.dp_group_size * args.pp_group_size
66 print(f'[PiPPy] World size: {actual_world_size}, '
67 f'DP group size: {args.dp_group_size}, '
68 f'PP group size: {args.pp_group_size}')
69
70 if args.rank == -1:
71 mp.spawn(run_worker, args=(run_master, args, *extra_args), nprocs=actual_world_size, join=True)
72 elif args.rank < actual_world_size:
73 run_worker(args.rank, run_master, args, *extra_args)
74 else:
75 print("I'm unused, exiting")
76
77
78 def run_worker(rank, run_master, args, *extra_args):
79 args.rank = rank
80
81 os.environ['MASTER_ADDR'] = args.master_addr
82 os.environ['MASTER_PORT'] = args.master_port
83
84 actual_world_size = args.dp_group_size * args.pp_group_size
85
86 # TODO: Move to training args, blocked by: cannot pickle 'TensorPipeRpcBackendOptions' object
87 # Exclude IB for metadata transport due to lack of EFA support on AWS
88 options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=512,
89 rpc_timeout=1800,
90 _transports=tp_transports())
91 if args.cuda:
92 n_devs = torch.cuda.device_count()
93 if n_devs > 0:
94 dev_id = rank % n_devs
95 for i in range(actual_world_size):
96 options.set_device_map(f"worker{i}", {dev_id: i % n_devs})
97 # Does not seem effective for RPC device pinning. TODO
98 # options.set_devices([f'cuda:{dev_id}'])
99 else:
100 args.cuda = 0
101 print('Warning: no CUDA device found. Running on CPU instead.')
102
103 args.device = f'cuda:{dev_id}' if args.cuda else 'cpu'
104 print(f"rank = {rank} host/pid/device = "
105 f"{socket.gethostname()}/{os.getpid()}/{args.device}")
106
107 # Init DDP process group
108 backend = "nccl" if args.cuda else "gloo"
109 torch.distributed.init_process_group(backend=backend, rank=rank, world_size=actual_world_size)
110
111 rpc.init_rpc(
112 f"worker{rank}",
113 rank=rank,
114 world_size=actual_world_size,
115 rpc_backend_options=options
116 )
117
118 global dp_pg_per_pp_rank
119 dp_ranks_per_pp_rank = torch.arange(actual_world_size).reshape(args.pp_group_size,
120 args.dp_group_size).tolist()
121 dp_pg_per_pp_rank = [torch.distributed.new_group(ranks) for ranks in dp_ranks_per_pp_rank]
122
123 pp_ranks_per_dp_group = [[i * args.dp_group_size + rank for i in range(args.pp_group_size)]
124 for rank in range(args.dp_group_size)]
125
126 args.driver_group = torch.distributed.new_group(list(range(args.dp_group_size)))
127
128 global exclude_master
129 exclude_master = args.exclude_master if hasattr(args, 'exclude_master') else 0
130
131 if rank >= 0 and rank // args.dp_group_size == 0:
132 args.driver_index = rank
133 args.local_driver_index = os.getenv('LOCAL_RANK', rank)
134 run_master(pp_ranks_per_dp_group[rank], args, *extra_args)
135 rpc.shutdown()
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pippy/utils.py b/pippy/utils.py
--- a/pippy/utils.py
+++ b/pippy/utils.py
@@ -8,14 +8,17 @@
# 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main
# thread
# 2. Must be done before `import torch` at which point CUDA context may be created
-cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')
-if (cuda_devices_str is None # not set
- or len(cuda_devices_str.split(',')) > 1): # or set to all devices
- # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information
- local_rank_str = os.getenv('LOCAL_RANK')
- if local_rank_str is not None:
- os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str
- print(f"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}")
+# 3. Currently this is enabled by default (as long as #1 is not implemented in RPC). Users may set `PIPPY_PIN_DEVICE` to
+# 0 to disable the pinning
+if os.getenv('PIPPY_PIN_DEVICE', '1') == '1':
+ cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')
+ if (cuda_devices_str is None # not set
+ or len(cuda_devices_str.split(',')) > 1): # or set to all devices
+ # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information
+ local_rank_str = os.getenv('LOCAL_RANK')
+ if local_rank_str is not None:
+ os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str
+ print(f"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}")
import torch
import torch.multiprocessing as mp
| {"golden_diff": "diff --git a/pippy/utils.py b/pippy/utils.py\n--- a/pippy/utils.py\n+++ b/pippy/utils.py\n@@ -8,14 +8,17 @@\n # 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main\n # thread\n # 2. Must be done before `import torch` at which point CUDA context may be created\n-cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\n-if (cuda_devices_str is None # not set\n- or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n- # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n- local_rank_str = os.getenv('LOCAL_RANK')\n- if local_rank_str is not None:\n- os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n- print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n+# 3. Currently this is enabled by default (as long as #1 is not implemented in RPC). Users may set `PIPPY_PIN_DEVICE` to\n+# 0 to disable the pinning\n+if os.getenv('PIPPY_PIN_DEVICE', '1') == '1':\n+ cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\n+ if (cuda_devices_str is None # not set\n+ or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n+ # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n+ local_rank_str = os.getenv('LOCAL_RANK')\n+ if local_rank_str is not None:\n+ os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n+ print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n \n import torch\n import torch.multiprocessing as mp\n", "issue": "Buck run device error\nbuck run reported the following error:\r\n```\r\n[trainer1]:RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0! (when checking argument for argument weight in method wrapper__native_layer_norm)\r\n```\n", "before_files": [{"content": "# Copyright (c) Meta Platforms, Inc. and affiliates\nimport os\nimport socket\nimport logging\n\n# Pinning process to a separate GPU if not yet done by launch script\n# Notes:\n# 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main\n# thread\n# 2. 
Must be done before `import torch` at which point CUDA context may be created\ncuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\nif (cuda_devices_str is None # not set\n or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n local_rank_str = os.getenv('LOCAL_RANK')\n if local_rank_str is not None:\n os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.distributed.rpc as rpc\n\n\nVERBOSE = bool(int(os.environ.get('VERBOSE', False)))\n\nif VERBOSE:\n logging.getLogger().setLevel(logging.DEBUG)\n\n\ndef has_efa() -> bool:\n try:\n import subprocess\n return subprocess.run([\"fi_info\", \"-p\", \"efa\", \"-t\", \"FI_EP_RDM\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n except FileNotFoundError:\n return False\n except PermissionError:\n return False\n\n\ndef tp_transports():\n return [\"shm\", \"uv\"] if has_efa() else None\n\n\ndef run_pippy(run_master, args, *extra_args):\n if not hasattr(args, 'world_size'):\n assert hasattr(args, 'pp_group_size')\n args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1\n else:\n if not hasattr(args, 'dp_group_size'):\n args.pp_group_size = args.pp_group_size if hasattr(args, 'pp_group_size') else args.world_size\n assert args.world_size % args.pp_group_size == 0\n args.dp_group_size = args.world_size // args.pp_group_size\n elif not hasattr(args, 'pp_group_size'):\n args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1\n assert args.world_size % args.dp_group_size == 0\n args.pp_group_size = args.world_size // args.dp_group_size\n else:\n pass\n # TODO: doesn't work for PiPPyTrainingArguments\n # assert args.world_size == args.dp_group_size * args.pp_group_size\n\n actual_world_size = args.dp_group_size * args.pp_group_size\n print(f'[PiPPy] World size: {actual_world_size}, '\n f'DP group size: {args.dp_group_size}, '\n f'PP group size: {args.pp_group_size}')\n\n if args.rank == -1:\n mp.spawn(run_worker, args=(run_master, args, *extra_args), nprocs=actual_world_size, join=True)\n elif args.rank < actual_world_size:\n run_worker(args.rank, run_master, args, *extra_args)\n else:\n print(\"I'm unused, exiting\")\n\n\ndef run_worker(rank, run_master, args, *extra_args):\n args.rank = rank\n\n os.environ['MASTER_ADDR'] = args.master_addr\n os.environ['MASTER_PORT'] = args.master_port\n\n actual_world_size = args.dp_group_size * args.pp_group_size\n\n # TODO: Move to training args, blocked by: cannot pickle 'TensorPipeRpcBackendOptions' object\n # Exclude IB for metadata transport due to lack of EFA support on AWS\n options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=512,\n rpc_timeout=1800,\n _transports=tp_transports())\n if args.cuda:\n n_devs = torch.cuda.device_count()\n if n_devs > 0:\n dev_id = rank % n_devs\n for i in range(actual_world_size):\n options.set_device_map(f\"worker{i}\", {dev_id: i % n_devs})\n # Does not seem effective for RPC device pinning. TODO\n # options.set_devices([f'cuda:{dev_id}'])\n else:\n args.cuda = 0\n print('Warning: no CUDA device found. 
Running on CPU instead.')\n\n args.device = f'cuda:{dev_id}' if args.cuda else 'cpu'\n print(f\"rank = {rank} host/pid/device = \"\n f\"{socket.gethostname()}/{os.getpid()}/{args.device}\")\n\n # Init DDP process group\n backend = \"nccl\" if args.cuda else \"gloo\"\n torch.distributed.init_process_group(backend=backend, rank=rank, world_size=actual_world_size)\n\n rpc.init_rpc(\n f\"worker{rank}\",\n rank=rank,\n world_size=actual_world_size,\n rpc_backend_options=options\n )\n\n global dp_pg_per_pp_rank\n dp_ranks_per_pp_rank = torch.arange(actual_world_size).reshape(args.pp_group_size,\n args.dp_group_size).tolist()\n dp_pg_per_pp_rank = [torch.distributed.new_group(ranks) for ranks in dp_ranks_per_pp_rank]\n\n pp_ranks_per_dp_group = [[i * args.dp_group_size + rank for i in range(args.pp_group_size)]\n for rank in range(args.dp_group_size)]\n\n args.driver_group = torch.distributed.new_group(list(range(args.dp_group_size)))\n\n global exclude_master\n exclude_master = args.exclude_master if hasattr(args, 'exclude_master') else 0\n\n if rank >= 0 and rank // args.dp_group_size == 0:\n args.driver_index = rank\n args.local_driver_index = os.getenv('LOCAL_RANK', rank)\n run_master(pp_ranks_per_dp_group[rank], args, *extra_args)\n rpc.shutdown()\n", "path": "pippy/utils.py"}], "after_files": [{"content": "# Copyright (c) Meta Platforms, Inc. and affiliates\nimport os\nimport socket\nimport logging\n\n# Pinning process to a separate GPU if not yet done by launch script\n# Notes:\n# 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main\n# thread\n# 2. Must be done before `import torch` at which point CUDA context may be created\n# 3. Currently this is enabled by default (as long as #1 is not implemented in RPC). 
Users may set `PIPPY_PIN_DEVICE` to\n# 0 to disable the pinning\nif os.getenv('PIPPY_PIN_DEVICE', '1') == '1':\n cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\n if (cuda_devices_str is None # not set\n or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n local_rank_str = os.getenv('LOCAL_RANK')\n if local_rank_str is not None:\n os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.distributed.rpc as rpc\n\n\nVERBOSE = bool(int(os.environ.get('VERBOSE', False)))\n\nif VERBOSE:\n logging.getLogger().setLevel(logging.DEBUG)\n\n\ndef has_efa() -> bool:\n try:\n import subprocess\n return subprocess.run([\"fi_info\", \"-p\", \"efa\", \"-t\", \"FI_EP_RDM\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n except FileNotFoundError:\n return False\n except PermissionError:\n return False\n\n\ndef tp_transports():\n return [\"shm\", \"uv\"] if has_efa() else None\n\n\ndef run_pippy(run_master, args, *extra_args):\n if not hasattr(args, 'world_size'):\n assert hasattr(args, 'pp_group_size')\n args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1\n else:\n if not hasattr(args, 'dp_group_size'):\n args.pp_group_size = args.pp_group_size if hasattr(args, 'pp_group_size') else args.world_size\n assert args.world_size % args.pp_group_size == 0\n args.dp_group_size = args.world_size // args.pp_group_size\n elif not hasattr(args, 'pp_group_size'):\n args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1\n assert args.world_size % args.dp_group_size == 0\n args.pp_group_size = args.world_size // args.dp_group_size\n else:\n pass\n # TODO: doesn't work for PiPPyTrainingArguments\n # assert args.world_size == args.dp_group_size * args.pp_group_size\n\n actual_world_size = args.dp_group_size * args.pp_group_size\n print(f'[PiPPy] World size: {actual_world_size}, '\n f'DP group size: {args.dp_group_size}, '\n f'PP group size: {args.pp_group_size}')\n\n if args.rank == -1:\n mp.spawn(run_worker, args=(run_master, args, *extra_args), nprocs=actual_world_size, join=True)\n elif args.rank < actual_world_size:\n run_worker(args.rank, run_master, args, *extra_args)\n else:\n print(\"I'm unused, exiting\")\n\n\ndef run_worker(rank, run_master, args, *extra_args):\n args.rank = rank\n\n os.environ['MASTER_ADDR'] = args.master_addr\n os.environ['MASTER_PORT'] = args.master_port\n\n actual_world_size = args.dp_group_size * args.pp_group_size\n\n # TODO: Move to training args, blocked by: cannot pickle 'TensorPipeRpcBackendOptions' object\n # Exclude IB for metadata transport due to lack of EFA support on AWS\n options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=512,\n rpc_timeout=1800,\n _transports=tp_transports())\n if args.cuda:\n n_devs = torch.cuda.device_count()\n if n_devs > 0:\n dev_id = rank % n_devs\n for i in range(actual_world_size):\n options.set_device_map(f\"worker{i}\", {dev_id: i % n_devs})\n # Does not seem effective for RPC device pinning. TODO\n # options.set_devices([f'cuda:{dev_id}'])\n else:\n args.cuda = 0\n print('Warning: no CUDA device found. 
Running on CPU instead.')\n\n args.device = f'cuda:{dev_id}' if args.cuda else 'cpu'\n print(f\"rank = {rank} host/pid/device = \"\n f\"{socket.gethostname()}/{os.getpid()}/{args.device}\")\n\n # Init DDP process group\n backend = \"nccl\" if args.cuda else \"gloo\"\n torch.distributed.init_process_group(backend=backend, rank=rank, world_size=actual_world_size)\n\n rpc.init_rpc(\n f\"worker{rank}\",\n rank=rank,\n world_size=actual_world_size,\n rpc_backend_options=options\n )\n\n global dp_pg_per_pp_rank\n dp_ranks_per_pp_rank = torch.arange(actual_world_size).reshape(args.pp_group_size,\n args.dp_group_size).tolist()\n dp_pg_per_pp_rank = [torch.distributed.new_group(ranks) for ranks in dp_ranks_per_pp_rank]\n\n pp_ranks_per_dp_group = [[i * args.dp_group_size + rank for i in range(args.pp_group_size)]\n for rank in range(args.dp_group_size)]\n\n args.driver_group = torch.distributed.new_group(list(range(args.dp_group_size)))\n\n global exclude_master\n exclude_master = args.exclude_master if hasattr(args, 'exclude_master') else 0\n\n if rank >= 0 and rank // args.dp_group_size == 0:\n args.driver_index = rank\n args.local_driver_index = os.getenv('LOCAL_RANK', rank)\n run_master(pp_ranks_per_dp_group[rank], args, *extra_args)\n rpc.shutdown()\n", "path": "pippy/utils.py"}]} | 1,928 | 418 |
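
Editor's note on the record above: the accepted patch wraps the `CUDA_VISIBLE_DEVICES` pinning in an opt-out environment switch. A minimal sketch of just that gating logic, lifted out of `pippy/utils.py` so it runs on its own:

```python
import os

# Pinning is on by default; setting PIPPY_PIN_DEVICE=0 disables it.
if os.getenv("PIPPY_PIN_DEVICE", "1") == "1":
    cuda_devices_str = os.getenv("CUDA_VISIBLE_DEVICES")
    # Only pin when the variable is unset or exposes more than one device.
    if cuda_devices_str is None or len(cuda_devices_str.split(",")) > 1:
        local_rank_str = os.getenv("LOCAL_RANK")  # set by launchers such as torchrun
        if local_rank_str is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = local_rank_str
            print(f"Pinning local process {local_rank_str} to gpu {os.environ['CUDA_VISIBLE_DEVICES']}")

# `import torch` must happen only after this point, before any CUDA context exists.
```
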
gh_patches_debug_22807 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-907 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double in mapping throws E7001 error
*cfn-lint version: cfn-lint 0.20.1*
*Description of issue.*
When a mapping value is a double (e.g. 1.1), it returns the error `E7001:Mapping [map] has invalid property at [property]`
Examples:
With double value:

Changed to Int:

Example CFT: [environment.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3179852/environment.yaml.txt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/mappings/Configuration.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import six
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21
22 class Configuration(CloudFormationLintRule):
23 """Check if Mappings are configured correctly"""
24 id = 'E7001'
25 shortdesc = 'Mappings are appropriately configured'
26 description = 'Check if Mappings are properly configured'
27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'
28 tags = ['mappings']
29
30 def match(self, cfn):
31 """Check CloudFormation Parameters"""
32
33 matches = []
34
35 mappings = cfn.template.get('Mappings', {})
36 if mappings:
37 for mapname, mapobj in mappings.items():
38 if not isinstance(mapobj, dict):
39 message = 'Mapping {0} has invalid property'
40 matches.append(RuleMatch(
41 ['Mappings', mapname],
42 message.format(mapname)
43 ))
44 else:
45 for firstkey in mapobj:
46 firstkeyobj = mapobj[firstkey]
47 if not isinstance(firstkeyobj, dict):
48 message = 'Mapping {0} has invalid property at {1}'
49 matches.append(RuleMatch(
50 ['Mappings', mapname, firstkey],
51 message.format(mapname, firstkeyobj)
52 ))
53 else:
54 for secondkey in firstkeyobj:
55 if not isinstance(
56 firstkeyobj[secondkey],
57 (six.string_types, list, six.integer_types)):
58 message = 'Mapping {0} has invalid property at {1}'
59 matches.append(RuleMatch(
60 ['Mappings', mapname, firstkey, secondkey],
61 message.format(mapname, secondkey)
62 ))
63
64 return matches
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py
--- a/src/cfnlint/rules/mappings/Configuration.py
+++ b/src/cfnlint/rules/mappings/Configuration.py
@@ -32,6 +32,8 @@
matches = []
+ valid_map_types = (six.string_types, list, six.integer_types, float)
+
mappings = cfn.template.get('Mappings', {})
if mappings:
for mapname, mapobj in mappings.items():
@@ -53,8 +55,7 @@
else:
for secondkey in firstkeyobj:
if not isinstance(
- firstkeyobj[secondkey],
- (six.string_types, list, six.integer_types)):
+ firstkeyobj[secondkey], valid_map_types):
message = 'Mapping {0} has invalid property at {1}'
matches.append(RuleMatch(
['Mappings', mapname, firstkey, secondkey],
| {"golden_diff": "diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py\n--- a/src/cfnlint/rules/mappings/Configuration.py\n+++ b/src/cfnlint/rules/mappings/Configuration.py\n@@ -32,6 +32,8 @@\n \n matches = []\n \n+ valid_map_types = (six.string_types, list, six.integer_types, float)\n+\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n@@ -53,8 +55,7 @@\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n- firstkeyobj[secondkey],\n- (six.string_types, list, six.integer_types)):\n+ firstkeyobj[secondkey], valid_map_types):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n", "issue": "Double in mapping thrown E7001 error\n*cfn-lint version: cfn-lint 0.20.1*\r\n\r\n*Description of issue.*\r\nWhen a mapping value is a double (ex. 1.1) it returns the error `E7001:Mapping [map] has invalid property at [property]`\r\n\r\nExamples:\r\nWith double value:\r\n\r\n\r\nChanged to Int:\r\n\r\n\r\nExample CFT: [environment.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3179852/environment.yaml.txt)\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n firstkeyobj[secondkey],\n (six.string_types, list, six.integer_types)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n valid_map_types = (six.string_types, list, six.integer_types, float)\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n firstkeyobj[secondkey], valid_map_types):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}]} | 1,197 | 215 |
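
Editor's note on the record above: the essence of the fix is widening the tuple of accepted mapping value types to include `float`. A stand-alone sketch of that check in plain Python 3 (the real rule uses the `six` compatibility types and cfn-lint's `RuleMatch` machinery, both omitted here):

```python
# Types a second-level mapping value may have; the bug was that float was missing.
VALID_MAP_TYPES = (str, list, int, float)


def find_invalid_values(mapping):
    """Return (top_key, second_key) pairs whose value has an unsupported type."""
    invalid = []
    for top_key, inner in mapping.items():
        for second_key, value in inner.items():
            if not isinstance(value, VALID_MAP_TYPES):
                invalid.append((top_key, second_key))
    return invalid


example = {"eu-west-1": {"Cidr": "10.0.0.0/16", "Version": 1.1}}
print(find_invalid_values(example))  # [] once float is accepted
```
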
gh_patches_debug_21935 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify domain invitation script to process "friends of the show" first, then incrementally process others
### Issue description
We have a few domain managers that we'll invite to the registrar first. Let's modify the domain invitation script to send to a defined set of domains first.
We also shouldn't blast thousands of emails out to the internet, but incrementally roll them out.
### Acceptance criteria
- [ ] Invitation script works with a product owner-specified list of domains/contacts (before sending to everyone else)
- [ ] Script slow-rolls invitations out. Could be percentage-based (1/2/5/10/20/45/75/100) or volume-based (a few hundred at a time)
### Additional context
_No response_
### Links to other issues
🔄 Related to PR #1038
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/management/commands/send_domain_invitations.py`
Content:
```
1 """Data migration: Send domain invitations once to existing customers."""
2
3 import logging
4 import copy
5
6 from django.core.management import BaseCommand
7 from registrar.models import TransitionDomain
8 from ...utility.email import send_templated_email, EmailSendingError
9 from typing import List
10
11 logger = logging.getLogger(__name__)
12
13
14 class Command(BaseCommand):
15 help = "Send domain invitations once to existing customers."
16
17 # this array is used to store and process the transition_domains
18 transition_domains: List[str] = []
19 # this array is used to store domains with errors, which are not
20 # sent emails; this array is used to update the succesful
21 # transition_domains to email_sent=True, and also to report
22 # out errors
23 domains_with_errors: List[str] = []
24 # this array is used to store email_context; each item in the array
25 # contains the context for a single email; single emails may be 1
26 # or more transition_domains, as they are grouped by username
27 emails_to_send: List[str] = []
28
29 def add_arguments(self, parser):
30 """Add command line arguments."""
31 parser.add_argument(
32 "-s",
33 "--send_emails",
34 action="store_true",
35 default=False,
36 dest="send_emails",
37 help="Send emails ",
38 )
39
40 def handle(self, **options):
41 """Process the objects in TransitionDomain."""
42
43 logger.info("checking domains and preparing emails")
44 # Get all TransitionDomain objects
45 self.transition_domains = TransitionDomain.objects.filter(
46 email_sent=False,
47 ).order_by("username")
48 logger.info("Found %d transition domains", len(self.transition_domains))
49
50 self.build_emails_to_send_array()
51 logger.info("Prepared %d emails to send", len(self.emails_to_send))
52
53 if options["send_emails"]:
54 logger.info("about to send emails")
55 self.send_emails()
56 logger.info("done sending emails")
57
58 self.update_domains_as_sent()
59
60 logger.info("done sending emails and updating transition_domains")
61 else:
62 logger.info("not sending emails")
63 for email_context in self.emails_to_send:
64 logger.info(
65 "would send email to %s for %s",
66 email_context["email"],
67 email_context["domains"],
68 )
69
70 def build_emails_to_send_array(self):
71 """this method sends emails to distinct usernames"""
72
73 # data structure to hold email context for a single email;
74 # transition_domains ordered by username, a single email_context
75 # may include information from more than one transition_domain
76 email_context = {"email": ""}
77
78 # loop through all transition_domains; group them by username
79 # into emails_to_send_array
80 for transition_domain in self.transition_domains:
81 # attempt to get the domain from domain objects; if there is
82 # an error getting the domain, skip this domain and add it to
83 # domains_with_errors
84 try:
85 # if prior username does not match current username
86 if not email_context["email"] or email_context["email"] != transition_domain.username:
87 # if not first in list of transition_domains
88 if email_context["email"]:
89 # append the email context to the emails_to_send array
90 self.emails_to_send.append(copy.deepcopy(email_context))
91 email_context["domains"] = []
92 email_context["email"] = transition_domain.username
93 email_context["domains"].append(transition_domain.domain_name)
94 except Exception as err:
95 # error condition if domain not in database
96 self.domains_with_errors.append(copy.deepcopy(transition_domain.domain_name))
97 logger.error(f"error retrieving domain {transition_domain.domain_name}: {err}")
98 # if there are at least one more transition domains than errors,
99 # then append one more item
100 if len(self.transition_domains) > len(self.domains_with_errors):
101 self.emails_to_send.append(email_context)
102
103 def send_emails(self):
104 if len(self.emails_to_send) > 0:
105 for email_data in self.emails_to_send:
106 self.send_email(email_data)
107 else:
108 logger.info("no emails to send")
109
110 def send_email(self, email_data):
111 try:
112 send_templated_email(
113 "emails/transition_domain_invitation.txt",
114 "emails/transition_domain_invitation_subject.txt",
115 to_address=email_data["email"],
116 context={
117 "domains": email_data["domains"],
118 },
119 )
120 # success message is logged
121 logger.info(
122 f"email sent successfully to {email_data['email']} for "
123 f"{[domain for domain in email_data['domains']]}"
124 )
125 except EmailSendingError as err:
126 logger.error(
127 f"email did not send successfully to {email_data['email']} "
128 f"for {[domain for domain in email_data['domains']]}"
129 f": {err}"
130 )
131 # if email failed to send, set error in domains_with_errors for each
132 # domain in the email so that transition domain email_sent is not set
133 # to True
134 for domain in email_data["domains"]:
135 self.domains_with_errors.append(domain)
136
137 def update_domains_as_sent(self):
138 """set email_sent to True in all transition_domains which have
139 been processed successfully"""
140 for transition_domain in self.transition_domains:
141 if transition_domain.domain_name not in self.domains_with_errors:
142 transition_domain.email_sent = True
143 transition_domain.save()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/management/commands/send_domain_invitations.py b/src/registrar/management/commands/send_domain_invitations.py
--- a/src/registrar/management/commands/send_domain_invitations.py
+++ b/src/registrar/management/commands/send_domain_invitations.py
@@ -37,14 +37,24 @@
help="Send emails ",
)
+ parser.add_argument("emails", nargs="*", help="Email addresses to send invitations to")
+
def handle(self, **options):
"""Process the objects in TransitionDomain."""
logger.info("checking domains and preparing emails")
- # Get all TransitionDomain objects
- self.transition_domains = TransitionDomain.objects.filter(
- email_sent=False,
- ).order_by("username")
+
+ if options["emails"]:
+ # this option is a list of email addresses
+ self.transition_domains = TransitionDomain.objects.filter(
+ username__in=options["emails"],
+ email_sent=False,
+ ).order_by("username")
+ else:
+ # Get all TransitionDomain objects
+ self.transition_domains = TransitionDomain.objects.filter(
+ email_sent=False,
+ ).order_by("username")
logger.info("Found %d transition domains", len(self.transition_domains))
self.build_emails_to_send_array()
| {"golden_diff": "diff --git a/src/registrar/management/commands/send_domain_invitations.py b/src/registrar/management/commands/send_domain_invitations.py\n--- a/src/registrar/management/commands/send_domain_invitations.py\n+++ b/src/registrar/management/commands/send_domain_invitations.py\n@@ -37,14 +37,24 @@\n help=\"Send emails \",\n )\n \n+ parser.add_argument(\"emails\", nargs=\"*\", help=\"Email addresses to send invitations to\")\n+\n def handle(self, **options):\n \"\"\"Process the objects in TransitionDomain.\"\"\"\n \n logger.info(\"checking domains and preparing emails\")\n- # Get all TransitionDomain objects\n- self.transition_domains = TransitionDomain.objects.filter(\n- email_sent=False,\n- ).order_by(\"username\")\n+\n+ if options[\"emails\"]:\n+ # this option is a list of email addresses\n+ self.transition_domains = TransitionDomain.objects.filter(\n+ username__in=options[\"emails\"],\n+ email_sent=False,\n+ ).order_by(\"username\")\n+ else:\n+ # Get all TransitionDomain objects\n+ self.transition_domains = TransitionDomain.objects.filter(\n+ email_sent=False,\n+ ).order_by(\"username\")\n logger.info(\"Found %d transition domains\", len(self.transition_domains))\n \n self.build_emails_to_send_array()\n", "issue": "Modify domain invitation script to process \"friends of the show\" first, then incrementally process others\n### Issue description\n\nWe have a few domain managers that we'll invite to the registrar first. Let's modify the domain invitation script to send to a defined set of domains first. \r\n\r\nWe also shouldn't blast thousands of emails out to the internet, but incrementally roll them out.\n\n### Acceptance criteria\n\n- [ ] Invitation script works with a product owner-specified list of domains/contacts (before sending to everyone else)\r\n- [ ] Script slow rolls out invitations. 
Could be percentage-based (1/2/5/10/20/45/75/100) or volume-based (a few hundred at a time)\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\n\ud83d\udd04 Related to PR #1038\n", "before_files": [{"content": "\"\"\"Data migration: Send domain invitations once to existing customers.\"\"\"\n\nimport logging\nimport copy\n\nfrom django.core.management import BaseCommand\nfrom registrar.models import TransitionDomain\nfrom ...utility.email import send_templated_email, EmailSendingError\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = \"Send domain invitations once to existing customers.\"\n\n # this array is used to store and process the transition_domains\n transition_domains: List[str] = []\n # this array is used to store domains with errors, which are not\n # sent emails; this array is used to update the succesful\n # transition_domains to email_sent=True, and also to report\n # out errors\n domains_with_errors: List[str] = []\n # this array is used to store email_context; each item in the array\n # contains the context for a single email; single emails may be 1\n # or more transition_domains, as they are grouped by username\n emails_to_send: List[str] = []\n\n def add_arguments(self, parser):\n \"\"\"Add command line arguments.\"\"\"\n parser.add_argument(\n \"-s\",\n \"--send_emails\",\n action=\"store_true\",\n default=False,\n dest=\"send_emails\",\n help=\"Send emails \",\n )\n\n def handle(self, **options):\n \"\"\"Process the objects in TransitionDomain.\"\"\"\n\n logger.info(\"checking domains and preparing emails\")\n # Get all TransitionDomain objects\n self.transition_domains = TransitionDomain.objects.filter(\n email_sent=False,\n ).order_by(\"username\")\n logger.info(\"Found %d transition domains\", len(self.transition_domains))\n\n self.build_emails_to_send_array()\n logger.info(\"Prepared %d emails to send\", len(self.emails_to_send))\n\n if options[\"send_emails\"]:\n logger.info(\"about to send emails\")\n self.send_emails()\n logger.info(\"done sending emails\")\n\n self.update_domains_as_sent()\n\n logger.info(\"done sending emails and updating transition_domains\")\n else:\n logger.info(\"not sending emails\")\n for email_context in self.emails_to_send:\n logger.info(\n \"would send email to %s for %s\",\n email_context[\"email\"],\n email_context[\"domains\"],\n )\n\n def build_emails_to_send_array(self):\n \"\"\"this method sends emails to distinct usernames\"\"\"\n\n # data structure to hold email context for a single email;\n # transition_domains ordered by username, a single email_context\n # may include information from more than one transition_domain\n email_context = {\"email\": \"\"}\n\n # loop through all transition_domains; group them by username\n # into emails_to_send_array\n for transition_domain in self.transition_domains:\n # attempt to get the domain from domain objects; if there is\n # an error getting the domain, skip this domain and add it to\n # domains_with_errors\n try:\n # if prior username does not match current username\n if not email_context[\"email\"] or email_context[\"email\"] != transition_domain.username:\n # if not first in list of transition_domains\n if email_context[\"email\"]:\n # append the email context to the emails_to_send array\n self.emails_to_send.append(copy.deepcopy(email_context))\n email_context[\"domains\"] = []\n email_context[\"email\"] = transition_domain.username\n email_context[\"domains\"].append(transition_domain.domain_name)\n 
except Exception as err:\n # error condition if domain not in database\n self.domains_with_errors.append(copy.deepcopy(transition_domain.domain_name))\n logger.error(f\"error retrieving domain {transition_domain.domain_name}: {err}\")\n # if there are at least one more transition domains than errors,\n # then append one more item\n if len(self.transition_domains) > len(self.domains_with_errors):\n self.emails_to_send.append(email_context)\n\n def send_emails(self):\n if len(self.emails_to_send) > 0:\n for email_data in self.emails_to_send:\n self.send_email(email_data)\n else:\n logger.info(\"no emails to send\")\n\n def send_email(self, email_data):\n try:\n send_templated_email(\n \"emails/transition_domain_invitation.txt\",\n \"emails/transition_domain_invitation_subject.txt\",\n to_address=email_data[\"email\"],\n context={\n \"domains\": email_data[\"domains\"],\n },\n )\n # success message is logged\n logger.info(\n f\"email sent successfully to {email_data['email']} for \"\n f\"{[domain for domain in email_data['domains']]}\"\n )\n except EmailSendingError as err:\n logger.error(\n f\"email did not send successfully to {email_data['email']} \"\n f\"for {[domain for domain in email_data['domains']]}\"\n f\": {err}\"\n )\n # if email failed to send, set error in domains_with_errors for each\n # domain in the email so that transition domain email_sent is not set\n # to True\n for domain in email_data[\"domains\"]:\n self.domains_with_errors.append(domain)\n\n def update_domains_as_sent(self):\n \"\"\"set email_sent to True in all transition_domains which have\n been processed successfully\"\"\"\n for transition_domain in self.transition_domains:\n if transition_domain.domain_name not in self.domains_with_errors:\n transition_domain.email_sent = True\n transition_domain.save()\n", "path": "src/registrar/management/commands/send_domain_invitations.py"}], "after_files": [{"content": "\"\"\"Data migration: Send domain invitations once to existing customers.\"\"\"\n\nimport logging\nimport copy\n\nfrom django.core.management import BaseCommand\nfrom registrar.models import TransitionDomain\nfrom ...utility.email import send_templated_email, EmailSendingError\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = \"Send domain invitations once to existing customers.\"\n\n # this array is used to store and process the transition_domains\n transition_domains: List[str] = []\n # this array is used to store domains with errors, which are not\n # sent emails; this array is used to update the succesful\n # transition_domains to email_sent=True, and also to report\n # out errors\n domains_with_errors: List[str] = []\n # this array is used to store email_context; each item in the array\n # contains the context for a single email; single emails may be 1\n # or more transition_domains, as they are grouped by username\n emails_to_send: List[str] = []\n\n def add_arguments(self, parser):\n \"\"\"Add command line arguments.\"\"\"\n parser.add_argument(\n \"-s\",\n \"--send_emails\",\n action=\"store_true\",\n default=False,\n dest=\"send_emails\",\n help=\"Send emails \",\n )\n\n parser.add_argument(\"emails\", nargs=\"*\", help=\"Email addresses to send invitations to\")\n\n def handle(self, **options):\n \"\"\"Process the objects in TransitionDomain.\"\"\"\n\n logger.info(\"checking domains and preparing emails\")\n\n if options[\"emails\"]:\n # this option is a list of email addresses\n self.transition_domains = TransitionDomain.objects.filter(\n 
username__in=options[\"emails\"],\n email_sent=False,\n ).order_by(\"username\")\n else:\n # Get all TransitionDomain objects\n self.transition_domains = TransitionDomain.objects.filter(\n email_sent=False,\n ).order_by(\"username\")\n logger.info(\"Found %d transition domains\", len(self.transition_domains))\n\n self.build_emails_to_send_array()\n logger.info(\"Prepared %d emails to send\", len(self.emails_to_send))\n\n if options[\"send_emails\"]:\n logger.info(\"about to send emails\")\n self.send_emails()\n logger.info(\"done sending emails\")\n\n self.update_domains_as_sent()\n\n logger.info(\"done sending emails and updating transition_domains\")\n else:\n logger.info(\"not sending emails\")\n for email_context in self.emails_to_send:\n logger.info(\n \"would send email to %s for %s\",\n email_context[\"email\"],\n email_context[\"domains\"],\n )\n\n def build_emails_to_send_array(self):\n \"\"\"this method sends emails to distinct usernames\"\"\"\n\n # data structure to hold email context for a single email;\n # transition_domains ordered by username, a single email_context\n # may include information from more than one transition_domain\n email_context = {\"email\": \"\"}\n\n # loop through all transition_domains; group them by username\n # into emails_to_send_array\n for transition_domain in self.transition_domains:\n # attempt to get the domain from domain objects; if there is\n # an error getting the domain, skip this domain and add it to\n # domains_with_errors\n try:\n # if prior username does not match current username\n if not email_context[\"email\"] or email_context[\"email\"] != transition_domain.username:\n # if not first in list of transition_domains\n if email_context[\"email\"]:\n # append the email context to the emails_to_send array\n self.emails_to_send.append(copy.deepcopy(email_context))\n email_context[\"domains\"] = []\n email_context[\"email\"] = transition_domain.username\n email_context[\"domains\"].append(transition_domain.domain_name)\n except Exception as err:\n # error condition if domain not in database\n self.domains_with_errors.append(copy.deepcopy(transition_domain.domain_name))\n logger.error(f\"error retrieving domain {transition_domain.domain_name}: {err}\")\n # if there are at least one more transition domains than errors,\n # then append one more item\n if len(self.transition_domains) > len(self.domains_with_errors):\n self.emails_to_send.append(email_context)\n\n def send_emails(self):\n if len(self.emails_to_send) > 0:\n for email_data in self.emails_to_send:\n self.send_email(email_data)\n else:\n logger.info(\"no emails to send\")\n\n def send_email(self, email_data):\n try:\n send_templated_email(\n \"emails/transition_domain_invitation.txt\",\n \"emails/transition_domain_invitation_subject.txt\",\n to_address=email_data[\"email\"],\n context={\n \"domains\": email_data[\"domains\"],\n },\n )\n # success message is logged\n logger.info(\n f\"email sent successfully to {email_data['email']} for \"\n f\"{[domain for domain in email_data['domains']]}\"\n )\n except EmailSendingError as err:\n logger.error(\n f\"email did not send successfully to {email_data['email']} \"\n f\"for {[domain for domain in email_data['domains']]}\"\n f\": {err}\"\n )\n # if email failed to send, set error in domains_with_errors for each\n # domain in the email so that transition domain email_sent is not set\n # to True\n for domain in email_data[\"domains\"]:\n self.domains_with_errors.append(domain)\n\n def update_domains_as_sent(self):\n \"\"\"set email_sent to 
True in all transition_domains which have\n been processed successfully\"\"\"\n for transition_domain in self.transition_domains:\n if transition_domain.domain_name not in self.domains_with_errors:\n transition_domain.email_sent = True\n transition_domain.save()\n", "path": "src/registrar/management/commands/send_domain_invitations.py"}]} | 1,915 | 282 |
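
Editor's note on the record above: the acceptance criteria (invite a hand-picked list first, then slow-roll everyone else) reduce to selecting a priority subset and batching the remainder. An illustrative sketch only — the batch size and example addresses are assumptions, not the project's actual behaviour:

```python
def plan_batches(all_emails, priority_emails, batch_size=200):
    """Yield invitation batches: hand-picked addresses first, then the rest in chunks."""
    priority_set = set(priority_emails)
    priority = [e for e in all_emails if e in priority_set]
    remaining = [e for e in all_emails if e not in priority_set]
    if priority:
        yield priority
    for i in range(0, len(remaining), batch_size):
        yield remaining[i:i + batch_size]


emails = [f"manager{i}@example.gov" for i in range(1, 501)]
friends = ["manager3@example.gov", "manager42@example.gov"]
for n, batch in enumerate(plan_batches(emails, friends), start=1):
    print(f"batch {n}: {len(batch)} invitation(s)")
```
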
gh_patches_debug_11456 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SEARCH_PATH for Geotrek DB user
Since Geotrek 0.28, tables and functions have been moved to different schemas, which is a very good point (https://github.com/makinacorpus/Geotrek/releases/tag/v0.28.0).
Schemas are not mentioned in triggers, which is fine too, as Django sets the search path on its connections, so it is not a problem for Geotrek applications.
It becomes a problem when you try to edit or insert data from an external tool (QGIS, Talend, ...).
You have to change the DB user's search_path so that it can find tables and functions outside the public schema.
It could be interesting to do this during Geotrek installation for the Geotrek DB user mentioned in settings:
ALTER USER $geotrek_db_user SET
search_path=public,django,geotrek,gestion,rando,zonage,foncier,tourisme;
Of course, if you are using another user to edit data in external tools, you will have to do it manually the first time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/common/utils/postgresql.py`
Content:
```
1 import re
2 import os
3 import logging
4 import traceback
5 from functools import wraps
6
7 from django.db import connection, models
8 from django.conf import settings
9 from django.db.models import get_app, get_models
10
11
12 logger = logging.getLogger(__name__)
13
14
15 def debug_pg_notices(f):
16
17 @wraps(f)
18 def wrapped(*args, **kwargs):
19 before = len(connection.connection.notices) if connection.connection else 0
20 try:
21 r = f(*args, **kwargs)
22 finally:
23 # Show triggers output
24 allnotices = []
25 current = ''
26 if connection.connection:
27 notices = []
28 for notice in connection.connection.notices[before:]:
29 try:
30 notice, context = notice.split('CONTEXT:', 1)
31 context = re.sub("\s+", " ", context)
32 except ValueError:
33 context = ''
34 notices.append((context, notice))
35 if context != current:
36 allnotices.append(notices)
37 notices = []
38 current = context
39 allnotices.append(notices)
40 current = ''
41 for notices in allnotices:
42 for context, notice in notices:
43 if context != current:
44 if context != '':
45 logger.debug('Context %s...:' % context.strip()[:80])
46 current = context
47 notice = notice.replace('NOTICE: ', '')
48 prefix = '' if context == '' else ' '
49 logger.debug('%s%s' % (prefix, notice.strip()))
50 return r
51
52 return wrapped
53
54
55 def load_sql_files(app_label):
56 """
57 Look for SQL files in Django app, and load them into database.
58 We remove RAISE NOTICE instructions from SQL outside unit testing
59 since they lead to interpolation errors of '%' character in python.
60 """
61 app_dir = os.path.dirname(models.get_app(app_label).__file__)
62 sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
63 if not os.path.exists(sql_dir):
64 logger.debug("No SQL folder for %s" % app_label)
65 return
66
67 r = re.compile(r'^.*\.sql$')
68 sql_files = [os.path.join(sql_dir, f)
69 for f in os.listdir(sql_dir)
70 if r.match(f) is not None]
71 sql_files.sort()
72
73 if len(sql_files) == 0:
74 logger.warning("Empty folder %s" % sql_dir)
75
76 cursor = connection.cursor()
77 for sql_file in sql_files:
78 try:
79 logger.info("Loading initial SQL data from '%s'" % sql_file)
80 f = open(sql_file)
81 sql = f.read()
82 f.close()
83 if not settings.TEST:
84 # Remove RAISE NOTICE (/!\ only one-liners)
85 sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
86 # TODO: this is the ugliest driver hack ever
87 sql = sql.replace('%', '%%')
88
89 # Replace curly braces with settings values
90 pattern = re.compile(r'{{\s*(.*)\s*}}')
91 for m in pattern.finditer(sql):
92 value = getattr(settings, m.group(1))
93 sql = sql.replace(m.group(0), unicode(value))
94 cursor.execute(sql)
95 except Exception as e:
96 logger.critical("Failed to install custom SQL file '%s': %s\n" %
97 (sql_file, e))
98 traceback.print_exc()
99 raise
100
101
102 def move_models_to_schemas(app_label):
103 """
104 Move models tables to PostgreSQL schemas.
105
106 Views, functions and triggers will be moved in Geotrek app SQL files.
107 """
108 app = get_app(app_label)
109 default_schema = settings.DATABASE_SCHEMAS.get('default')
110 app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)
111
112 table_schemas = {}
113 for model in get_models(app):
114 model_name = model._meta.module_name
115 table_name = model._meta.db_table
116 model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
117 table_schemas.setdefault(model_schema, []).append(table_name)
118
119 for m2m_field in model._meta.many_to_many:
120 table_name = m2m_field.db_table
121 if table_name:
122 table_schemas[model_schema].append(table_name)
123
124 cursor = connection.cursor()
125
126 for schema_name in table_schemas.keys():
127 try:
128 sql = "CREATE SCHEMA %s;" % model_schema
129 cursor.execute(sql)
130 logger.info("Created schema %s" % model_schema)
131 except Exception:
132 logger.debug("Schema %s already exists." % model_schema)
133
134 for schema_name, tables in table_schemas.items():
135 for table_name in tables:
136 try:
137 sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
138 cursor.execute(sql)
139 logger.info("Moved %s to schema %s" % (table_name, schema_name))
140 except Exception:
141 logger.debug("Table %s already in schema %s" % (table_name, schema_name))
142
143 # For Django, search_path is set in connection options.
144 # But when accessing the database using QGis or ETL, search_path must be
145 # set database level (for all users, and for this database only).
146 if app_label == 'common':
147 dbname = settings.DATABASES['default']['NAME']
148 search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))
149 sql = "ALTER DATABASE %s SET search_path=%s;" % (dbname, search_path)
150 cursor.execute(sql)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/common/utils/postgresql.py b/geotrek/common/utils/postgresql.py
--- a/geotrek/common/utils/postgresql.py
+++ b/geotrek/common/utils/postgresql.py
@@ -145,6 +145,7 @@
# set database level (for all users, and for this database only).
if app_label == 'common':
dbname = settings.DATABASES['default']['NAME']
+ dbuser = settings.DATABASES['default']['USER']
search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))
- sql = "ALTER DATABASE %s SET search_path=%s;" % (dbname, search_path)
+ sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (dbuser, dbname, search_path)
cursor.execute(sql)
| {"golden_diff": "diff --git a/geotrek/common/utils/postgresql.py b/geotrek/common/utils/postgresql.py\n--- a/geotrek/common/utils/postgresql.py\n+++ b/geotrek/common/utils/postgresql.py\n@@ -145,6 +145,7 @@\n # set database level (for all users, and for this database only).\n if app_label == 'common':\n dbname = settings.DATABASES['default']['NAME']\n+ dbuser = settings.DATABASES['default']['USER']\n search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))\n- sql = \"ALTER DATABASE %s SET search_path=%s;\" % (dbname, search_path)\n+ sql = \"ALTER ROLE %s IN DATABASE %s SET search_path=%s;\" % (dbuser, dbname, search_path)\n cursor.execute(sql)\n", "issue": "SEARCH_PATH for Geotrek DB user\nSince Geotrek 0.28, tables and functions have be moved to different schemas, which is a very good point (https://github.com/makinacorpus/Geotrek/releases/tag/v0.28.0).\n\nSchemas are not mentionned in triggers which is OK too, as Django is doing it in his connexions so it is not a problem for GEOTREK applications.\n\nIt gets a problem when you try to edit or insert a data from an external tool (QGIS, Talend...). \nYou have to change the db_user search_path so that he can find tables and functions not only in public schemas.\n\nIt could be interesting to do it during GEOTREK installation for the Geotrek DB user mentionned in settings : \n\nALTER USER $geotrek_db_user SET \nsearch_path=public,django,geotrek,gestion,rando,zonage,foncier,tourisme; \n\nOf course if you are using another user to edit datas in external tools, you will have to do it manually the first time. \n\n", "before_files": [{"content": "import re\nimport os\nimport logging\nimport traceback\nfrom functools import wraps\n\nfrom django.db import connection, models\nfrom django.conf import settings\nfrom django.db.models import get_app, get_models\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef debug_pg_notices(f):\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n before = len(connection.connection.notices) if connection.connection else 0\n try:\n r = f(*args, **kwargs)\n finally:\n # Show triggers output\n allnotices = []\n current = ''\n if connection.connection:\n notices = []\n for notice in connection.connection.notices[before:]:\n try:\n notice, context = notice.split('CONTEXT:', 1)\n context = re.sub(\"\\s+\", \" \", context)\n except ValueError:\n context = ''\n notices.append((context, notice))\n if context != current:\n allnotices.append(notices)\n notices = []\n current = context\n allnotices.append(notices)\n current = ''\n for notices in allnotices:\n for context, notice in notices:\n if context != current:\n if context != '':\n logger.debug('Context %s...:' % context.strip()[:80])\n current = context\n notice = notice.replace('NOTICE: ', '')\n prefix = '' if context == '' else ' '\n logger.debug('%s%s' % (prefix, notice.strip()))\n return r\n\n return wrapped\n\n\ndef load_sql_files(app_label):\n \"\"\"\n Look for SQL files in Django app, and load them into database.\n We remove RAISE NOTICE instructions from SQL outside unit testing\n since they lead to interpolation errors of '%' character in python.\n \"\"\"\n app_dir = os.path.dirname(models.get_app(app_label).__file__)\n sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))\n if not os.path.exists(sql_dir):\n logger.debug(\"No SQL folder for %s\" % app_label)\n return\n\n r = re.compile(r'^.*\\.sql$')\n sql_files = [os.path.join(sql_dir, f)\n for f in os.listdir(sql_dir)\n if r.match(f) is not None]\n sql_files.sort()\n\n if len(sql_files) == 
0:\n logger.warning(\"Empty folder %s\" % sql_dir)\n\n cursor = connection.cursor()\n for sql_file in sql_files:\n try:\n logger.info(\"Loading initial SQL data from '%s'\" % sql_file)\n f = open(sql_file)\n sql = f.read()\n f.close()\n if not settings.TEST:\n # Remove RAISE NOTICE (/!\\ only one-liners)\n sql = re.sub(r\"\\n.*RAISE NOTICE.*\\n\", \"\\n\", sql)\n # TODO: this is the ugliest driver hack ever\n sql = sql.replace('%', '%%')\n\n # Replace curly braces with settings values\n pattern = re.compile(r'{{\\s*(.*)\\s*}}')\n for m in pattern.finditer(sql):\n value = getattr(settings, m.group(1))\n sql = sql.replace(m.group(0), unicode(value))\n cursor.execute(sql)\n except Exception as e:\n logger.critical(\"Failed to install custom SQL file '%s': %s\\n\" %\n (sql_file, e))\n traceback.print_exc()\n raise\n\n\ndef move_models_to_schemas(app_label):\n \"\"\"\n Move models tables to PostgreSQL schemas.\n\n Views, functions and triggers will be moved in Geotrek app SQL files.\n \"\"\"\n app = get_app(app_label)\n default_schema = settings.DATABASE_SCHEMAS.get('default')\n app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)\n\n table_schemas = {}\n for model in get_models(app):\n model_name = model._meta.module_name\n table_name = model._meta.db_table\n model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)\n table_schemas.setdefault(model_schema, []).append(table_name)\n\n for m2m_field in model._meta.many_to_many:\n table_name = m2m_field.db_table\n if table_name:\n table_schemas[model_schema].append(table_name)\n\n cursor = connection.cursor()\n\n for schema_name in table_schemas.keys():\n try:\n sql = \"CREATE SCHEMA %s;\" % model_schema\n cursor.execute(sql)\n logger.info(\"Created schema %s\" % model_schema)\n except Exception:\n logger.debug(\"Schema %s already exists.\" % model_schema)\n\n for schema_name, tables in table_schemas.items():\n for table_name in tables:\n try:\n sql = \"ALTER TABLE %s SET SCHEMA %s;\" % (table_name, schema_name)\n cursor.execute(sql)\n logger.info(\"Moved %s to schema %s\" % (table_name, schema_name))\n except Exception:\n logger.debug(\"Table %s already in schema %s\" % (table_name, schema_name))\n\n # For Django, search_path is set in connection options.\n # But when accessing the database using QGis or ETL, search_path must be\n # set database level (for all users, and for this database only).\n if app_label == 'common':\n dbname = settings.DATABASES['default']['NAME']\n search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))\n sql = \"ALTER DATABASE %s SET search_path=%s;\" % (dbname, search_path)\n cursor.execute(sql)\n", "path": "geotrek/common/utils/postgresql.py"}], "after_files": [{"content": "import re\nimport os\nimport logging\nimport traceback\nfrom functools import wraps\n\nfrom django.db import connection, models\nfrom django.conf import settings\nfrom django.db.models import get_app, get_models\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef debug_pg_notices(f):\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n before = len(connection.connection.notices) if connection.connection else 0\n try:\n r = f(*args, **kwargs)\n finally:\n # Show triggers output\n allnotices = []\n current = ''\n if connection.connection:\n notices = []\n for notice in connection.connection.notices[before:]:\n try:\n notice, context = notice.split('CONTEXT:', 1)\n context = re.sub(\"\\s+\", \" \", context)\n except ValueError:\n context = ''\n notices.append((context, notice))\n if context != current:\n 
allnotices.append(notices)\n notices = []\n current = context\n allnotices.append(notices)\n current = ''\n for notices in allnotices:\n for context, notice in notices:\n if context != current:\n if context != '':\n logger.debug('Context %s...:' % context.strip()[:80])\n current = context\n notice = notice.replace('NOTICE: ', '')\n prefix = '' if context == '' else ' '\n logger.debug('%s%s' % (prefix, notice.strip()))\n return r\n\n return wrapped\n\n\ndef load_sql_files(app_label):\n \"\"\"\n Look for SQL files in Django app, and load them into database.\n We remove RAISE NOTICE instructions from SQL outside unit testing\n since they lead to interpolation errors of '%' character in python.\n \"\"\"\n app_dir = os.path.dirname(models.get_app(app_label).__file__)\n sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))\n if not os.path.exists(sql_dir):\n logger.debug(\"No SQL folder for %s\" % app_label)\n return\n\n r = re.compile(r'^.*\\.sql$')\n sql_files = [os.path.join(sql_dir, f)\n for f in os.listdir(sql_dir)\n if r.match(f) is not None]\n sql_files.sort()\n\n if len(sql_files) == 0:\n logger.warning(\"Empty folder %s\" % sql_dir)\n\n cursor = connection.cursor()\n for sql_file in sql_files:\n try:\n logger.info(\"Loading initial SQL data from '%s'\" % sql_file)\n f = open(sql_file)\n sql = f.read()\n f.close()\n if not settings.TEST:\n # Remove RAISE NOTICE (/!\\ only one-liners)\n sql = re.sub(r\"\\n.*RAISE NOTICE.*\\n\", \"\\n\", sql)\n # TODO: this is the ugliest driver hack ever\n sql = sql.replace('%', '%%')\n\n # Replace curly braces with settings values\n pattern = re.compile(r'{{\\s*(.*)\\s*}}')\n for m in pattern.finditer(sql):\n value = getattr(settings, m.group(1))\n sql = sql.replace(m.group(0), unicode(value))\n cursor.execute(sql)\n except Exception as e:\n logger.critical(\"Failed to install custom SQL file '%s': %s\\n\" %\n (sql_file, e))\n traceback.print_exc()\n raise\n\n\ndef move_models_to_schemas(app_label):\n \"\"\"\n Move models tables to PostgreSQL schemas.\n\n Views, functions and triggers will be moved in Geotrek app SQL files.\n \"\"\"\n app = get_app(app_label)\n default_schema = settings.DATABASE_SCHEMAS.get('default')\n app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)\n\n table_schemas = {}\n for model in get_models(app):\n model_name = model._meta.module_name\n table_name = model._meta.db_table\n model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)\n table_schemas.setdefault(model_schema, []).append(table_name)\n\n for m2m_field in model._meta.many_to_many:\n table_name = m2m_field.db_table\n if table_name:\n table_schemas[model_schema].append(table_name)\n\n cursor = connection.cursor()\n\n for schema_name in table_schemas.keys():\n try:\n sql = \"CREATE SCHEMA %s;\" % model_schema\n cursor.execute(sql)\n logger.info(\"Created schema %s\" % model_schema)\n except Exception:\n logger.debug(\"Schema %s already exists.\" % model_schema)\n\n for schema_name, tables in table_schemas.items():\n for table_name in tables:\n try:\n sql = \"ALTER TABLE %s SET SCHEMA %s;\" % (table_name, schema_name)\n cursor.execute(sql)\n logger.info(\"Moved %s to schema %s\" % (table_name, schema_name))\n except Exception:\n logger.debug(\"Table %s already in schema %s\" % (table_name, schema_name))\n\n # For Django, search_path is set in connection options.\n # But when accessing the database using QGis or ETL, search_path must be\n # set database level (for all users, and for this database only).\n if app_label == 'common':\n dbname = 
settings.DATABASES['default']['NAME']\n dbuser = settings.DATABASES['default']['USER']\n search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))\n sql = \"ALTER ROLE %s IN DATABASE %s SET search_path=%s;\" % (dbuser, dbname, search_path)\n cursor.execute(sql)\n", "path": "geotrek/common/utils/postgresql.py"}]} | 2,037 | 185 |
gh_patches_debug_29592 | rasdani/github-patches | git_diff | e-valuation__EvaP-1484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Locked questionnaires failing in editor form
#1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form.
Steps to reproduce:
1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation.
2. Enable the evaluation for editor review.
3. As editor, open the evaluation form and try to save it. Saving will fail with an error for the field "General questionnaires" ("This field is required.").
The locked questionnaire should count as a selected questionnaire and the form should be saved.
A test should be added for this use case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/contributor/forms.py`
Content:
```
1 from datetime import datetime, timedelta
2 import logging
3
4 from django import forms
5 from django.conf import settings
6 from django.db.models import Q
7 from django.forms.widgets import CheckboxSelectMultiple
8 from django.utils.translation import gettext_lazy as _
9 from evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField
10 from evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile
11 from evap.evaluation.tools import date_to_datetime
12 from evap.staff.forms import ContributionForm
13
14 logger = logging.getLogger(__name__)
15
16
17 class EvaluationForm(forms.ModelForm):
18 general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
19 course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
20 name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False)
21 name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False)
22
23 class Meta:
24 model = Evaluation
25 fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')
26
27 def __init__(self, *args, **kwargs):
28 super().__init__(*args, **kwargs)
29
30 self.fields['name_de_field'].initial = self.instance.full_name_de
31 self.fields['name_en_field'].initial = self.instance.full_name_en
32
33 self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(
34 Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()
35
36 self.fields['vote_start_datetime'].localize = True
37 self.fields['vote_end_date'].localize = True
38
39 if self.instance.general_contribution:
40 self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]
41
42 if not self.instance.allow_editors_to_edit:
43 for field in self._meta.fields:
44 self.fields[field].disabled = True
45
46 def clean(self):
47 super().clean()
48
49 vote_start_datetime = self.cleaned_data.get('vote_start_datetime')
50 vote_end_date = self.cleaned_data.get('vote_end_date')
51 if vote_start_datetime and vote_end_date:
52 if vote_start_datetime.date() > vote_end_date:
53 self.add_error("vote_start_datetime", "")
54 self.add_error("vote_end_date", _("The first day of evaluation must be before the last one."))
55
56 def clean_vote_end_date(self):
57 vote_end_date = self.cleaned_data.get('vote_end_date')
58
59 # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.
60 # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.
61 if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():
62 raise forms.ValidationError(_("The last day of evaluation must be in the future."))
63 return vote_end_date
64
65 def clean_general_questionnaires(self):
66 # Ensure all locked questionnaires still have the same status (included or not)
67 locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)
68
69 not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]
70 locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]
71
72 return not_locked + locked
73
74 def save(self, *args, **kw):
75 evaluation = super().save(*args, **kw)
76 evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))
77 return evaluation
78
79
80 class EditorContributionForm(ContributionForm):
81 def __init__(self, *args, **kwargs):
82 super().__init__(*args, **kwargs)
83
84 existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None
85
86 self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(
87 Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()
88 self.fields['contributor'].queryset = UserProfile.objects.filter(
89 (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)
90 )
91
92
93 class DelegatesForm(forms.ModelForm):
94 delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),
95 required=False)
96
97 class Meta:
98 model = UserProfile
99 fields = ('delegates',)
100 field_classes = {
101 'delegates': UserModelMultipleChoiceField,
102 }
103
104 def __init__(self, *args, **kwargs):
105 super().__init__(*args, **kwargs)
106
107 def save(self, *args, **kw):
108 super().save(*args, **kw)
109 logger.info('User "{}" edited the settings.'.format(self.instance.email))
110
111
112 class DelegateSelectionForm(forms.Form):
113 delegate_to = UserModelChoiceField(label=_("Delegate to"),
114 queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py
--- a/evap/contributor/forms.py
+++ b/evap/contributor/forms.py
@@ -15,7 +15,7 @@
class EvaluationForm(forms.ModelForm):
- general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
+ general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False)
name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False)
@@ -64,10 +64,14 @@
def clean_general_questionnaires(self):
# Ensure all locked questionnaires still have the same status (included or not)
- locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)
+ not_locked = []
+ if self.cleaned_data.get('general_questionnaires'):
+ not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False))
- not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]
- locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]
+ locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))
+
+ if not not_locked + locked:
+ self.add_error("general_questionnaires", _("At least one questionnaire must be selected."))
return not_locked + locked
| {"golden_diff": "diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py\n--- a/evap/contributor/forms.py\n+++ b/evap/contributor/forms.py\n@@ -15,7 +15,7 @@\n \n \n class EvaluationForm(forms.ModelForm):\n- general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n+ general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n@@ -64,10 +64,14 @@\n \n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n- locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)\n+ not_locked = []\n+ if self.cleaned_data.get('general_questionnaires'):\n+ not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False))\n \n- not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]\n- locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]\n+ locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))\n+\n+ if not not_locked + locked:\n+ self.add_error(\"general_questionnaires\", _(\"At least one questionnaire must be selected.\"))\n \n return not_locked + locked\n", "issue": "Locked questionnaires failing in editor form\n#1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form.\r\n\r\nSteps to reproduce:\r\n1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation.\r\n2. Enable the evaluation for editor review.\r\n3. As editor, open the evaluation form and try to save it. 
Saving will fail with an error for the field \"General questionnaires\" (\"This field is required.\").\r\n\r\nThe locked questionnaire should count as a selected questionnaire and the form should be saved.\r\nA test should be added for this use case.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nimport logging\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom django.utils.translation import gettext_lazy as _\nfrom evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField\nfrom evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile\nfrom evap.evaluation.tools import date_to_datetime\nfrom evap.staff.forms import ContributionForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass EvaluationForm(forms.ModelForm):\n general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n\n class Meta:\n model = Evaluation\n fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['name_de_field'].initial = self.instance.full_name_de\n self.fields['name_en_field'].initial = self.instance.full_name_en\n\n self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()\n\n self.fields['vote_start_datetime'].localize = True\n self.fields['vote_end_date'].localize = True\n\n if self.instance.general_contribution:\n self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n if not self.instance.allow_editors_to_edit:\n for field in self._meta.fields:\n self.fields[field].disabled = True\n\n def clean(self):\n super().clean()\n\n vote_start_datetime = self.cleaned_data.get('vote_start_datetime')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_datetime and vote_end_date:\n if vote_start_datetime.date() > vote_end_date:\n self.add_error(\"vote_start_datetime\", \"\")\n self.add_error(\"vote_end_date\", _(\"The first day of evaluation must be before the last one.\"))\n\n def clean_vote_end_date(self):\n vote_end_date = self.cleaned_data.get('vote_end_date')\n\n # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.\n # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.\n if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():\n raise forms.ValidationError(_(\"The last day of evaluation must be in the future.\"))\n return vote_end_date\n\n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)\n\n not_locked = [q for q in 
self.cleaned_data.get('general_questionnaires') if q not in locked_qs]\n locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]\n\n return not_locked + locked\n\n def save(self, *args, **kw):\n evaluation = super().save(*args, **kw)\n evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))\n return evaluation\n\n\nclass EditorContributionForm(ContributionForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None\n\n self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()\n self.fields['contributor'].queryset = UserProfile.objects.filter(\n (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)\n )\n\n\nclass DelegatesForm(forms.ModelForm):\n delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),\n required=False)\n\n class Meta:\n model = UserProfile\n fields = ('delegates',)\n field_classes = {\n 'delegates': UserModelMultipleChoiceField,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n logger.info('User \"{}\" edited the settings.'.format(self.instance.email))\n\n\nclass DelegateSelectionForm(forms.Form):\n delegate_to = UserModelChoiceField(label=_(\"Delegate to\"),\n queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))\n", "path": "evap/contributor/forms.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nimport logging\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom django.utils.translation import gettext_lazy as _\nfrom evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField\nfrom evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile\nfrom evap.evaluation.tools import date_to_datetime\nfrom evap.staff.forms import ContributionForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass EvaluationForm(forms.ModelForm):\n general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n\n class Meta:\n model = Evaluation\n fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['name_de_field'].initial = self.instance.full_name_de\n self.fields['name_en_field'].initial = self.instance.full_name_en\n\n self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()\n\n self.fields['vote_start_datetime'].localize = True\n self.fields['vote_end_date'].localize = 
True\n\n if self.instance.general_contribution:\n self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n if not self.instance.allow_editors_to_edit:\n for field in self._meta.fields:\n self.fields[field].disabled = True\n\n def clean(self):\n super().clean()\n\n vote_start_datetime = self.cleaned_data.get('vote_start_datetime')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_datetime and vote_end_date:\n if vote_start_datetime.date() > vote_end_date:\n self.add_error(\"vote_start_datetime\", \"\")\n self.add_error(\"vote_end_date\", _(\"The first day of evaluation must be before the last one.\"))\n\n def clean_vote_end_date(self):\n vote_end_date = self.cleaned_data.get('vote_end_date')\n\n # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.\n # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.\n if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():\n raise forms.ValidationError(_(\"The last day of evaluation must be in the future.\"))\n return vote_end_date\n\n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n not_locked = []\n if self.cleaned_data.get('general_questionnaires'):\n not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False))\n\n locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))\n\n if not not_locked + locked:\n self.add_error(\"general_questionnaires\", _(\"At least one questionnaire must be selected.\"))\n\n return not_locked + locked\n\n def save(self, *args, **kw):\n evaluation = super().save(*args, **kw)\n evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))\n return evaluation\n\n\nclass EditorContributionForm(ContributionForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None\n\n self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()\n self.fields['contributor'].queryset = UserProfile.objects.filter(\n (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)\n )\n\n\nclass DelegatesForm(forms.ModelForm):\n delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),\n required=False)\n\n class Meta:\n model = UserProfile\n fields = ('delegates',)\n field_classes = {\n 'delegates': UserModelMultipleChoiceField,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n logger.info('User \"{}\" edited the settings.'.format(self.instance.email))\n\n\nclass DelegateSelectionForm(forms.Form):\n delegate_to = UserModelChoiceField(label=_(\"Delegate to\"),\n queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))\n", "path": "evap/contributor/forms.py"}]} | 1,768 | 384 |
gh_patches_debug_25270 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2635 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
[DOC]: the sphinx theme is too old
### 📚 The doc issue
As stated in #2579 , we want to use Read the Docs to host our documentation. In this way, tutorials and API documentations will be visited from a single entry. This issue will mainly discuss the appearance of the RTD website. Ideally, we should use Tailwind for style consistency. However, it can take some time to implement a tailwind-based theme, therefore, we should use an existing theme which looks more modern first.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 import datetime
10 # If extensions (or modules to document with autodoc) are in another directory,
11 # add these directories to sys.path here. If the directory is relative to the
12 # documentation root, use os.path.abspath to make it absolute, like shown here.
13 #
14 import os
15 import sys
16
17 sys.path.insert(0, os.path.abspath('..'))
18
19 # -- Project information -----------------------------------------------------
20
21 project = 'Colossal-AI'
22 copyright = f'{datetime.datetime.now().year}, HPC-AI Tech'
23 author = 'HPC-AI Technology Inc.'
24
25 # The full version, including alpha/beta/rc tags
26 release = '0.0.1'
27
28
29 # -- General configuration ---------------------------------------------------
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = [
35 'sphinx.ext.autodoc',
36 'sphinx.ext.mathjax',
37 'sphinx.ext.napoleon',
38 'sphinx.ext.linkcode',
39 'myst_parser',
40 ]
41
42 # Disable docstring inheritance
43 autodoc_inherit_docstrings = False
44
45 # Disable displaying type annotations, these can be very verbose
46 autodoc_typehints = 'none'
47
48 # Enable overriding of function signatures in the first line of the docstring.
49 autodoc_docstring_signature = True
50 autodoc_default_options = {
51 'member-order': 'bysource',
52 }
53
54 # Add any paths that contain templates here, relative to this directory.
55 templates_path = ['_templates']
56
57 # List of patterns, relative to source directory, that match files and
58 # directories to ignore when looking for source files.
59 # This pattern also affects html_static_path and html_extra_path.
60 exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']
61
62 # -- Options for HTML output -------------------------------------------------
63
64 # The theme to use for HTML and HTML Help pages. See the documentation for
65 # a list of builtin themes.
66 #
67 html_theme = 'sphinx_rtd_theme'
68 html_show_sourcelink = False
69 html_theme_options = {
70 'navigation_depth': 3,
71 }
72
73 html_context = {
74 'display_github': False,
75 'github_user': 'hpcaitech',
76 'github_repo': 'ColossalAI',
77 # 'github_version': 'master/docs/',
78 }
79
80 # Add any paths that contain custom static files (such as style sheets) here,
81 # relative to this directory. They are copied after the builtin static files,
82 # so a file named "default.css" will overwrite the builtin "default.css".
83 html_static_path = ['_static']
84
85 html_css_files = [
86 'css/rtd_theme.css',
87 ]
88
89 # -- Extension configuration -------------------------------------------------
90 source_suffix = ['.rst', '.md', '.MD']
91
92 import inspect
93 import colossalai
94 def linkcode_resolve(domain, info):
95 """
96 Determine the URL corresponding to Python object
97 """
98 if domain != 'py':
99 return None
100
101 modname = info['module']
102 fullname = info['fullname']
103
104 submod = sys.modules.get(modname)
105 if submod is None:
106 return None
107
108 obj = submod
109 for part in fullname.split('.'):
110 try:
111 obj = getattr(obj, part)
112 except Exception:
113 return None
114
115 try:
116 fn = inspect.getsourcefile(obj)
117 except Exception:
118 fn = None
119 if not fn:
120 return None
121
122 try:
123 source, lineno = inspect.findsource(obj)
124 except Exception:
125 lineno = None
126
127 if lineno:
128 linespec = "#L%d" % (lineno + 1)
129 else:
130 linespec = ""
131
132 fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__))
133
134 github = "https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}"
135 return github.format(fn, linespec)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -23,8 +23,7 @@
author = 'HPC-AI Technology Inc.'
# The full version, including alpha/beta/rc tags
-release = '0.0.1'
-
+# release = '0.0.1'
# -- General configuration ---------------------------------------------------
@@ -64,14 +63,14 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'sphinx_rtd_theme'
+html_theme = 'sphinx_book_theme'
html_show_sourcelink = False
html_theme_options = {
'navigation_depth': 3,
}
html_context = {
- 'display_github': False,
+ 'display_github': True,
'github_user': 'hpcaitech',
'github_repo': 'ColossalAI',
# 'github_version': 'master/docs/',
@@ -90,7 +89,10 @@
source_suffix = ['.rst', '.md', '.MD']
import inspect
+
import colossalai
+
+
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -23,8 +23,7 @@\n author = 'HPC-AI Technology Inc.'\n \n # The full version, including alpha/beta/rc tags\n-release = '0.0.1'\n-\n+# release = '0.0.1'\n \n # -- General configuration ---------------------------------------------------\n \n@@ -64,14 +63,14 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'sphinx_rtd_theme'\n+html_theme = 'sphinx_book_theme'\n html_show_sourcelink = False\n html_theme_options = {\n 'navigation_depth': 3,\n }\n \n html_context = {\n- 'display_github': False,\n+ 'display_github': True,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n@@ -90,7 +89,10 @@\n source_suffix = ['.rst', '.md', '.MD']\n \n import inspect\n+\n import colossalai\n+\n+\n def linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[DOC]: the sphinx theme is too old\n### \ud83d\udcda The doc issue\n\nAs stated in #2579 , we want to use Read the Docs to host our documentation. In this way, tutorials and API documentations will be visited from a single entry. This issue will mainly discuss the appearance of the RTD website. Ideally, we should use Tailwind for style consistency. However, it can take some time to implement a tailwind-based theme, therefore, we should use an existing theme which looks more modern first.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Colossal-AI'\ncopyright = f'{datetime.datetime.now().year}, HPC-AI Tech'\nauthor = 'HPC-AI Technology Inc.'\n\n# The full version, including alpha/beta/rc tags\nrelease = '0.0.1'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.linkcode',\n 'myst_parser',\n]\n\n# Disable docstring inheritance\nautodoc_inherit_docstrings = False\n\n# Disable displaying type annotations, these can be very verbose\nautodoc_typehints = 'none'\n\n# Enable overriding of function signatures in the first line of the docstring.\nautodoc_docstring_signature = True\nautodoc_default_options = {\n 'member-order': 'bysource',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_show_sourcelink = False\nhtml_theme_options = {\n 'navigation_depth': 3,\n}\n\nhtml_context = {\n 'display_github': False,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/rtd_theme.css',\n]\n\n# -- Extension configuration -------------------------------------------------\nsource_suffix = ['.rst', '.md', '.MD']\n\nimport inspect\nimport colossalai\ndef linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n \"\"\"\n if domain != 'py':\n return None\n\n modname = info['module']\n fullname = info['fullname']\n\n submod = sys.modules.get(modname)\n if submod is None:\n return None\n\n obj = submod\n for part in fullname.split('.'):\n try:\n obj = getattr(obj, part)\n except Exception:\n return None\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n return None\n\n try:\n source, lineno = inspect.findsource(obj)\n except Exception:\n lineno = None\n\n if lineno:\n linespec = \"#L%d\" % (lineno + 1)\n else:\n linespec = \"\"\n\n fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__))\n\n github = \"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}\"\n return github.format(fn, linespec)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Colossal-AI'\ncopyright = f'{datetime.datetime.now().year}, HPC-AI Tech'\nauthor = 'HPC-AI Technology Inc.'\n\n# The full version, including alpha/beta/rc tags\n# release = '0.0.1'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.linkcode',\n 'myst_parser',\n]\n\n# Disable docstring inheritance\nautodoc_inherit_docstrings = False\n\n# Disable displaying type annotations, these can be very verbose\nautodoc_typehints = 'none'\n\n# Enable overriding of function signatures in the first line of the docstring.\nautodoc_docstring_signature = True\nautodoc_default_options = {\n 'member-order': 'bysource',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_book_theme'\nhtml_show_sourcelink = False\nhtml_theme_options = {\n 'navigation_depth': 3,\n}\n\nhtml_context = {\n 'display_github': True,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/rtd_theme.css',\n]\n\n# -- Extension configuration -------------------------------------------------\nsource_suffix = ['.rst', '.md', '.MD']\n\nimport inspect\n\nimport colossalai\n\n\ndef linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n \"\"\"\n if domain != 'py':\n return None\n\n modname = info['module']\n fullname = info['fullname']\n\n submod = sys.modules.get(modname)\n if submod is None:\n return None\n\n obj = submod\n for part in fullname.split('.'):\n try:\n obj = getattr(obj, part)\n except Exception:\n return None\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n return None\n\n try:\n source, lineno = inspect.findsource(obj)\n except Exception:\n lineno = None\n\n if lineno:\n linespec = \"#L%d\" % (lineno + 1)\n else:\n linespec = \"\"\n\n fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__))\n\n github = \"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}\"\n return github.format(fn, linespec)\n", "path": "docs/conf.py"}]} | 1,589 | 279 |
gh_patches_debug_25916 | rasdani/github-patches | git_diff | nf-core__tools-381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
problem with nfcore_cache.sqlite within /tmp
Hi all,
I think it would be a nice idea to have the nfcore_cache.sqlite within a subfolder in /tmp, because if two users use the program at the same time the privileges prevent them from using the tool.
For example, I cannot even use nf-core --help.
Luca
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/utils.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Common utility functions for the nf-core python package.
4 """
5
6 import datetime
7 import json
8 import logging
9 import os
10 import subprocess
11 import tempfile
12
13 def fetch_wf_config(wf_path, wf=None):
14 """Uses Nextflow to retrieve the the configuration variables
15 from a Nextflow workflow.
16
17 Args:
18 wf_path (str): Nextflow workflow file system path.
19
20 Returns:
21 dict: Workflow configuration settings.
22 """
23
24 config = dict()
25 cache_fn = None
26 cache_basedir = None
27 cache_path = None
28
29 # Build a cache directory if we can
30 if os.path.isdir(os.path.join(os.getenv("HOME"), '.nextflow')):
31 cache_basedir = os.path.join(os.getenv("HOME"), '.nextflow', 'nf-core')
32 if not os.path.isdir(cache_basedir):
33 os.mkdir(cache_basedir)
34
35 # If we're given a workflow object with a commit, see if we have a cached copy
36 if cache_basedir and wf and wf.full_name and wf.commit_sha:
37 cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha)
38 cache_path = os.path.join(cache_basedir, cache_fn)
39 if os.path.isfile(cache_path):
40 logging.debug("Found a config cache, loading: {}".format(cache_path))
41 with open(cache_path, 'r') as fh:
42 config = json.load(fh)
43 return config
44
45
46 # Call `nextflow config` and pipe stderr to /dev/null
47 try:
48 with open(os.devnull, 'w') as devnull:
49 nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull)
50 except OSError as e:
51 if e.errno == os.errno.ENOENT:
52 raise AssertionError("It looks like Nextflow is not installed. It is required for most nf-core functions.")
53 except subprocess.CalledProcessError as e:
54 raise AssertionError("`nextflow config` returned non-zero error code: %s,\n %s", e.returncode, e.output)
55 else:
56 for l in nfconfig_raw.splitlines():
57 ul = l.decode('utf-8')
58 k, v = ul.split(' = ', 1)
59 config[k] = v
60
61 # If we can, save a cached copy
62 if cache_path:
63 logging.debug("Saving config cache: {}".format(cache_path))
64 with open(cache_path, 'w') as fh:
65 json.dump(config, fh, indent=4)
66
67 return config
68
69
70 def setup_requests_cachedir():
71 """Sets up local caching for faster remote HTTP requests.
72
73 Caching directory will be generated by tempfile.gettempdir() under
74 a nfcore_cache subdir.
75 """
76 # Only import it if we need it
77 import requests_cache
78
79 cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')
80 if not os.path.exists(cachedir):
81 os.mkdir(cachedir)
82 requests_cache.install_cache(
83 os.path.join(cachedir, 'nfcore_cache'),
84 expire_after=datetime.timedelta(hours=1),
85 backend='sqlite',
86 )
87 # Make world-writeable so that multi-user installations work
88 os.chmod(cachedir, 0o777)
89 os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nf_core/utils.py b/nf_core/utils.py
--- a/nf_core/utils.py
+++ b/nf_core/utils.py
@@ -8,7 +8,6 @@
import logging
import os
import subprocess
-import tempfile
def fetch_wf_config(wf_path, wf=None):
"""Uses Nextflow to retrieve the the configuration variables
@@ -70,20 +69,18 @@
def setup_requests_cachedir():
"""Sets up local caching for faster remote HTTP requests.
- Caching directory will be generated by tempfile.gettempdir() under
- a nfcore_cache subdir.
+ Caching directory will be set up in the user's home directory under
+ a .nfcore_cache subdir.
"""
# Only import it if we need it
import requests_cache
+
- cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')
+ cachedir = os.path.join(os.getenv("HOME"), os.path.join('.nfcore', 'cache'))
if not os.path.exists(cachedir):
- os.mkdir(cachedir)
+ os.makedirs(cachedir)
requests_cache.install_cache(
- os.path.join(cachedir, 'nfcore_cache'),
+ os.path.join(cachedir, 'github_info'),
expire_after=datetime.timedelta(hours=1),
backend='sqlite',
)
- # Make world-writeable so that multi-user installations work
- os.chmod(cachedir, 0o777)
- os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)
| {"golden_diff": "diff --git a/nf_core/utils.py b/nf_core/utils.py\n--- a/nf_core/utils.py\n+++ b/nf_core/utils.py\n@@ -8,7 +8,6 @@\n import logging\n import os\n import subprocess\n-import tempfile\n \n def fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n@@ -70,20 +69,18 @@\n def setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n \n- Caching directory will be generated by tempfile.gettempdir() under\n- a nfcore_cache subdir.\n+ Caching directory will be set up in the user's home directory under\n+ a .nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n+ \n \n- cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')\n+ cachedir = os.path.join(os.getenv(\"HOME\"), os.path.join('.nfcore', 'cache'))\n if not os.path.exists(cachedir):\n- os.mkdir(cachedir)\n+ os.makedirs(cachedir)\n requests_cache.install_cache(\n- os.path.join(cachedir, 'nfcore_cache'),\n+ os.path.join(cachedir, 'github_info'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n- # Make world-writeable so that multi-user installations work\n- os.chmod(cachedir, 0o777)\n- os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)\n", "issue": "problem with nfcore_cache.sqlite within /tmp\nHi all,\r\nI think will be a nice idea to have the nfcore_cache.sqlite within a subfolder in tmp because if two users use the program at the same time the privileges will prevent to use the tool.\r\n\r\nFor example I cannot even use nf-core --help \r\n\r\nLuca\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommon utility functions for the nf-core python package.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport os\nimport subprocess\nimport tempfile\n\ndef fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n from a Nextflow workflow.\n\n Args:\n wf_path (str): Nextflow workflow file system path.\n\n Returns:\n dict: Workflow configuration settings.\n \"\"\"\n\n config = dict()\n cache_fn = None\n cache_basedir = None\n cache_path = None\n\n # Build a cache directory if we can\n if os.path.isdir(os.path.join(os.getenv(\"HOME\"), '.nextflow')):\n cache_basedir = os.path.join(os.getenv(\"HOME\"), '.nextflow', 'nf-core')\n if not os.path.isdir(cache_basedir):\n os.mkdir(cache_basedir)\n\n # If we're given a workflow object with a commit, see if we have a cached copy\n if cache_basedir and wf and wf.full_name and wf.commit_sha:\n cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha)\n cache_path = os.path.join(cache_basedir, cache_fn)\n if os.path.isfile(cache_path):\n logging.debug(\"Found a config cache, loading: {}\".format(cache_path))\n with open(cache_path, 'r') as fh:\n config = json.load(fh)\n return config\n\n\n # Call `nextflow config` and pipe stderr to /dev/null\n try:\n with open(os.devnull, 'w') as devnull:\n nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull)\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n raise AssertionError(\"It looks like Nextflow is not installed. 
It is required for most nf-core functions.\")\n except subprocess.CalledProcessError as e:\n raise AssertionError(\"`nextflow config` returned non-zero error code: %s,\\n %s\", e.returncode, e.output)\n else:\n for l in nfconfig_raw.splitlines():\n ul = l.decode('utf-8')\n k, v = ul.split(' = ', 1)\n config[k] = v\n\n # If we can, save a cached copy\n if cache_path:\n logging.debug(\"Saving config cache: {}\".format(cache_path))\n with open(cache_path, 'w') as fh:\n json.dump(config, fh, indent=4)\n\n return config\n\n\ndef setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n\n Caching directory will be generated by tempfile.gettempdir() under\n a nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n\n cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')\n if not os.path.exists(cachedir):\n os.mkdir(cachedir)\n requests_cache.install_cache(\n os.path.join(cachedir, 'nfcore_cache'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n # Make world-writeable so that multi-user installations work\n os.chmod(cachedir, 0o777)\n os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)\n", "path": "nf_core/utils.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommon utility functions for the nf-core python package.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport os\nimport subprocess\n\ndef fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n from a Nextflow workflow.\n\n Args:\n wf_path (str): Nextflow workflow file system path.\n\n Returns:\n dict: Workflow configuration settings.\n \"\"\"\n\n config = dict()\n cache_fn = None\n cache_basedir = None\n cache_path = None\n\n # Build a cache directory if we can\n if os.path.isdir(os.path.join(os.getenv(\"HOME\"), '.nextflow')):\n cache_basedir = os.path.join(os.getenv(\"HOME\"), '.nextflow', 'nf-core')\n if not os.path.isdir(cache_basedir):\n os.mkdir(cache_basedir)\n\n # If we're given a workflow object with a commit, see if we have a cached copy\n if cache_basedir and wf and wf.full_name and wf.commit_sha:\n cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha)\n cache_path = os.path.join(cache_basedir, cache_fn)\n if os.path.isfile(cache_path):\n logging.debug(\"Found a config cache, loading: {}\".format(cache_path))\n with open(cache_path, 'r') as fh:\n config = json.load(fh)\n return config\n\n\n # Call `nextflow config` and pipe stderr to /dev/null\n try:\n with open(os.devnull, 'w') as devnull:\n nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull)\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n raise AssertionError(\"It looks like Nextflow is not installed. 
It is required for most nf-core functions.\")\n except subprocess.CalledProcessError as e:\n raise AssertionError(\"`nextflow config` returned non-zero error code: %s,\\n %s\", e.returncode, e.output)\n else:\n for l in nfconfig_raw.splitlines():\n ul = l.decode('utf-8')\n k, v = ul.split(' = ', 1)\n config[k] = v\n\n # If we can, save a cached copy\n if cache_path:\n logging.debug(\"Saving config cache: {}\".format(cache_path))\n with open(cache_path, 'w') as fh:\n json.dump(config, fh, indent=4)\n\n return config\n\n\ndef setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n\n Caching directory will be set up in the user's home directory under\n a .nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n \n\n cachedir = os.path.join(os.getenv(\"HOME\"), os.path.join('.nfcore', 'cache'))\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n requests_cache.install_cache(\n os.path.join(cachedir, 'github_info'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n", "path": "nf_core/utils.py"}]} | 1,235 | 352 |
gh_patches_debug_26270 | rasdani/github-patches | git_diff | e-valuation__EvaP-2036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Translations in Javascript and Typescript
When writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and accessing them via Javascript then.
Instead of doing this, we want to have a unified approach where the use-site can just write `trans("The server is not responding.")` or so. There are two possible approaches:
1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = "English" | "German"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present, probably return the English string and emit a warning? This dictionary would be defined in a script tag in a HTML file, something like (possibly with an implementation that doesn't repeat the strings a little less):
```html
<script type="text/javascript">
window.translationDictionary = {
"de": {
{% language 'de' %}
"The server is not responding": "{% trans 'The server is not responding' %}",
{% endlanguage %}
}
};
</script>
```
2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix.
I think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. in the future and go with the Django versions directly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/development/management/commands/translate.py`
Content:
```
1 from django.core.management import call_command
2 from django.core.management.base import BaseCommand
3
4
5 class Command(BaseCommand):
6 args = ""
7 help = 'Execute "makemessages --locale=de --ignore=node_modules/*"'
8
9 def handle(self, *args, **options):
10 self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"')
11 call_command("makemessages", "--locale=de", "--ignore=node_modules/*")
12
```
Path: `evap/urls.py`
Content:
```
1 import django.contrib.auth.views
2 from django.conf import settings
3 from django.urls import include, path
4
5 urlpatterns = [
6 path("", include('evap.evaluation.urls')),
7 path("staff/", include('evap.staff.urls')),
8 path("results/", include('evap.results.urls')),
9 path("student/", include('evap.student.urls')),
10 path("contributor/", include('evap.contributor.urls')),
11 path("rewards/", include('evap.rewards.urls')),
12 path("grades/", include('evap.grades.urls')),
13
14 path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"),
15 path("oidc/", include('mozilla_django_oidc.urls')),
16 ]
17
18 if settings.DEBUG:
19 urlpatterns += [path('development/', include('evap.development.urls'))]
20
21 if settings.ENABLE_DEBUG_TOOLBAR:
22 # pylint does not correctly evaluate this if, so it will raise an import-error on
23 # GitHub actions and a useless-suppression on a vagrant setup. Ignore both cases.
24 import debug_toolbar # pylint: disable=import-error, useless-suppression
25 urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py
--- a/evap/development/management/commands/translate.py
+++ b/evap/development/management/commands/translate.py
@@ -9,3 +9,11 @@
def handle(self, *args, **options):
self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"')
call_command("makemessages", "--locale=de", "--ignore=node_modules/*")
+ call_command(
+ "makemessages",
+ "--domain=djangojs",
+ "--extension=js,ts",
+ "--locale=de",
+ "--ignore=node_modules/*",
+ "--ignore=evap/static/js/*.min.js",
+ )
diff --git a/evap/urls.py b/evap/urls.py
--- a/evap/urls.py
+++ b/evap/urls.py
@@ -1,6 +1,9 @@
import django.contrib.auth.views
from django.conf import settings
from django.urls import include, path
+from django.views.i18n import JavaScriptCatalog
+
+from evap.middleware import no_login_required
urlpatterns = [
path("", include('evap.evaluation.urls')),
@@ -13,6 +16,8 @@
path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"),
path("oidc/", include('mozilla_django_oidc.urls')),
+
+ path("catalog.js", no_login_required(JavaScriptCatalog.as_view()), name="javascript-catalog"),
]
if settings.DEBUG:
| {"golden_diff": "diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py\n--- a/evap/development/management/commands/translate.py\n+++ b/evap/development/management/commands/translate.py\n@@ -9,3 +9,11 @@\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n+ call_command(\n+ \"makemessages\",\n+ \"--domain=djangojs\",\n+ \"--extension=js,ts\",\n+ \"--locale=de\",\n+ \"--ignore=node_modules/*\",\n+ \"--ignore=evap/static/js/*.min.js\",\n+ )\ndiff --git a/evap/urls.py b/evap/urls.py\n--- a/evap/urls.py\n+++ b/evap/urls.py\n@@ -1,6 +1,9 @@\n import django.contrib.auth.views\n from django.conf import settings\n from django.urls import include, path\n+from django.views.i18n import JavaScriptCatalog\n+\n+from evap.middleware import no_login_required\n \n urlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n@@ -13,6 +16,8 @@\n \n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n+\n+ path(\"catalog.js\", no_login_required(JavaScriptCatalog.as_view()), name=\"javascript-catalog\"),\n ]\n \n if settings.DEBUG:\n", "issue": "Translations in Javascript and Typescript\nWhen writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and accessing them via Javascript then.\r\n\r\nInstead of doing this, we want to have a unified approach where the use-site can just write `trans(\"The server is not responding.\")` or so. There are two possible approaches:\r\n\r\n1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = \"English\" | \"German\"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present, probably return the English string and emit a warning? This dictionary would be defined in a script tag in a HTML file, something like (possibly with an implementation that doesn't repeat the strings a little less):\r\n```html\r\n<script type=\"text/javascript\">\r\n window.translationDictionary = {\r\n \"de\": {\r\n {% language 'de' %}\r\n \"The server is not responding\": \"{% trans 'The server is not responding' %}\",\r\n {% endlanguage %}\r\n }\r\n };\r\n</script>\r\n```\r\n2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix.\r\n\r\nI think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. 
in the future and go with the Django versions directly.\n", "before_files": [{"content": "from django.core.management import call_command\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = 'Execute \"makemessages --locale=de --ignore=node_modules/*\"'\n\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n", "path": "evap/development/management/commands/translate.py"}, {"content": "import django.contrib.auth.views\nfrom django.conf import settings\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n path(\"staff/\", include('evap.staff.urls')),\n path(\"results/\", include('evap.results.urls')),\n path(\"student/\", include('evap.student.urls')),\n path(\"contributor/\", include('evap.contributor.urls')),\n path(\"rewards/\", include('evap.rewards.urls')),\n path(\"grades/\", include('evap.grades.urls')),\n\n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += [path('development/', include('evap.development.urls'))]\n\n if settings.ENABLE_DEBUG_TOOLBAR:\n # pylint does not correctly evaluate this if, so it will raise an import-error on\n # GitHub actions and a useless-suppression on a vagrant setup. Ignore both cases.\n import debug_toolbar # pylint: disable=import-error, useless-suppression\n urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]\n", "path": "evap/urls.py"}], "after_files": [{"content": "from django.core.management import call_command\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = 'Execute \"makemessages --locale=de --ignore=node_modules/*\"'\n\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n call_command(\n \"makemessages\",\n \"--domain=djangojs\",\n \"--extension=js,ts\",\n \"--locale=de\",\n \"--ignore=node_modules/*\",\n \"--ignore=evap/static/js/*.min.js\",\n )\n", "path": "evap/development/management/commands/translate.py"}, {"content": "import django.contrib.auth.views\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom django.views.i18n import JavaScriptCatalog\n\nfrom evap.middleware import no_login_required\n\nurlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n path(\"staff/\", include('evap.staff.urls')),\n path(\"results/\", include('evap.results.urls')),\n path(\"student/\", include('evap.student.urls')),\n path(\"contributor/\", include('evap.contributor.urls')),\n path(\"rewards/\", include('evap.rewards.urls')),\n path(\"grades/\", include('evap.grades.urls')),\n\n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n\n path(\"catalog.js\", no_login_required(JavaScriptCatalog.as_view()), name=\"javascript-catalog\"),\n]\n\nif settings.DEBUG:\n urlpatterns += [path('development/', include('evap.development.urls'))]\n\n if settings.ENABLE_DEBUG_TOOLBAR:\n # pylint does not correctly evaluate this if, so it will raise an import-error on\n # GitHub 
actions and a useless-suppression on a vagrant setup. Ignore both cases.\n import debug_toolbar # pylint: disable=import-error, useless-suppression\n urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]\n", "path": "evap/urls.py"}]} | 1,103 | 369 |
gh_patches_debug_1560 | rasdani/github-patches | git_diff | NVIDIA__TransformerEngine-813 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`warnings.simplefilter('default')` in global scope causes excessive DeprecationWarnings
https://github.com/NVIDIA/TransformerEngine/blob/f85553ea369da15fd726ab279818e415be48a228/transformer_engine/common/utils.py#L9
Importing the `transformer_engine.common.utils` resets the warning filters to default settings using `warnings.simplefilter('default')` in the global scope. This results in the console being flooded with DeprecationWarnings, which are normally ignored by Python by default.
Would it be possible to move setting the warning filter config to a more controlled scope in this module?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `transformer_engine/common/utils.py`
Content:
```
1 # Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 #
3 # See LICENSE for license information.
4 """The utilities for Transformer Engine"""
5 import inspect
6 import warnings
7 from enum import Enum
8
9 warnings.simplefilter('default')
10
11
12 class DeprecatedEnum: # pylint: disable=too-few-public-methods
13 """DeprecatedEnum"""
14
15 def __init__(self, enum_cls, msg):
16 self.enum_cls = enum_cls
17 self.msg = msg
18
19 def __iter__(self):
20 return iter(list(self.enum_cls.__members__.values()))
21
22 def __getattr__(self, name):
23 if name in self.enum_cls.__members__:
24 warnings.warn(self.msg, DeprecationWarning)
25 return self.enum_cls.__members__[name]
26 raise AttributeError(f"{self.enum_cls} does not contain {name}")
27
28
29 def deprecate_wrapper(obj, msg):
30 """Deprecate wrapper"""
31 if inspect.isclass(obj):
32 if issubclass(obj, Enum):
33 return DeprecatedEnum(obj, msg)
34
35 class DeprecatedCls(obj): # pylint: disable=too-few-public-methods
36 """DeprecatedCls"""
37
38 def __init__(self, *args, **kwargs):
39 warnings.warn(msg, DeprecationWarning)
40 super().__init__(*args, **kwargs)
41
42 return DeprecatedCls
43
44 if inspect.isfunction(obj):
45
46 def deprecated(*args, **kwargs):
47 warnings.warn(msg, DeprecationWarning)
48 return obj(*args, **kwargs)
49
50 return deprecated
51
52 raise NotImplementedError(
53 f"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.")
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/transformer_engine/common/utils.py b/transformer_engine/common/utils.py
--- a/transformer_engine/common/utils.py
+++ b/transformer_engine/common/utils.py
@@ -6,7 +6,8 @@
import warnings
from enum import Enum
-warnings.simplefilter('default')
+warnings.filterwarnings(
+ "module", category=DeprecationWarning, module="transformer_engine.common.utils")
class DeprecatedEnum: # pylint: disable=too-few-public-methods
| {"golden_diff": "diff --git a/transformer_engine/common/utils.py b/transformer_engine/common/utils.py\n--- a/transformer_engine/common/utils.py\n+++ b/transformer_engine/common/utils.py\n@@ -6,7 +6,8 @@\n import warnings\n from enum import Enum\n \n-warnings.simplefilter('default')\n+warnings.filterwarnings(\n+ \"module\", category=DeprecationWarning, module=\"transformer_engine.common.utils\")\n \n \n class DeprecatedEnum: # pylint: disable=too-few-public-methods\n", "issue": "`warnings.simplefilter('default')` in global scope causes excessive DeprecationWarnings\nhttps://github.com/NVIDIA/TransformerEngine/blob/f85553ea369da15fd726ab279818e415be48a228/transformer_engine/common/utils.py#L9\r\n\r\nImporting the `transformer_engine.common.utils` resets the warning filters to default settings using `warnings.simplefilter('default')` in the global scope. This results in the console being flooded with DeprecationWarnings, which are normally ignored by Python by default.\r\n\r\nWould it be possible to move setting the warning filter config to a more controlled scope in this module?\n", "before_files": [{"content": "# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# See LICENSE for license information.\n\"\"\"The utilities for Transformer Engine\"\"\"\nimport inspect\nimport warnings\nfrom enum import Enum\n\nwarnings.simplefilter('default')\n\n\nclass DeprecatedEnum: # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedEnum\"\"\"\n\n def __init__(self, enum_cls, msg):\n self.enum_cls = enum_cls\n self.msg = msg\n\n def __iter__(self):\n return iter(list(self.enum_cls.__members__.values()))\n\n def __getattr__(self, name):\n if name in self.enum_cls.__members__:\n warnings.warn(self.msg, DeprecationWarning)\n return self.enum_cls.__members__[name]\n raise AttributeError(f\"{self.enum_cls} does not contain {name}\")\n\n\ndef deprecate_wrapper(obj, msg):\n \"\"\"Deprecate wrapper\"\"\"\n if inspect.isclass(obj):\n if issubclass(obj, Enum):\n return DeprecatedEnum(obj, msg)\n\n class DeprecatedCls(obj): # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedCls\"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n super().__init__(*args, **kwargs)\n\n return DeprecatedCls\n\n if inspect.isfunction(obj):\n\n def deprecated(*args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n return obj(*args, **kwargs)\n\n return deprecated\n\n raise NotImplementedError(\n f\"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.\")\n", "path": "transformer_engine/common/utils.py"}], "after_files": [{"content": "# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved.\n#\n# See LICENSE for license information.\n\"\"\"The utilities for Transformer Engine\"\"\"\nimport inspect\nimport warnings\nfrom enum import Enum\n\nwarnings.filterwarnings(\n \"module\", category=DeprecationWarning, module=\"transformer_engine.common.utils\")\n\n\nclass DeprecatedEnum: # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedEnum\"\"\"\n\n def __init__(self, enum_cls, msg):\n self.enum_cls = enum_cls\n self.msg = msg\n\n def __iter__(self):\n return iter(list(self.enum_cls.__members__.values()))\n\n def __getattr__(self, name):\n if name in self.enum_cls.__members__:\n warnings.warn(self.msg, DeprecationWarning)\n return self.enum_cls.__members__[name]\n raise AttributeError(f\"{self.enum_cls} does not contain {name}\")\n\n\ndef deprecate_wrapper(obj, msg):\n \"\"\"Deprecate wrapper\"\"\"\n if inspect.isclass(obj):\n if issubclass(obj, Enum):\n return DeprecatedEnum(obj, msg)\n\n class DeprecatedCls(obj): # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedCls\"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n super().__init__(*args, **kwargs)\n\n return DeprecatedCls\n\n if inspect.isfunction(obj):\n\n def deprecated(*args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n return obj(*args, **kwargs)\n\n return deprecated\n\n raise NotImplementedError(\n f\"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.\")\n", "path": "transformer_engine/common/utils.py"}]} | 868 | 108 |
gh_patches_debug_76 | rasdani/github-patches | git_diff | streamlit__streamlit-2570 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
URL markup does not get generated as a link
# Summary
URLs used to generate an anchor tag automatically in markup. Now it does not
# Steps to reproduce
Code snippet:
```
st.write(f"""
As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome!
---
### Connect With Us
- We can be found at https://streamlit.io and https://twitter.com/streamlit
- Come by
[the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions,
post awesome apps, or just say hi!
""")
```
## Expected behavior:
[0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py)

## Actual behavior:
[0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py)

## Is this a regression?
Yes as of 0.74
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/setup.py`
Content:
```
1 import os
2 import setuptools
3 import sys
4
5 from setuptools.command.install import install
6
7 try:
8 from pipenv.project import Project
9 from pipenv.utils import convert_deps_to_pip
10 except:
11 exit_msg = (
12 "pipenv is required to package Streamlit. Please install pipenv and try again"
13 )
14 sys.exit(exit_msg)
15
16 VERSION = "0.74.0" # PEP-440
17
18 NAME = "streamlit"
19
20 DESCRIPTION = "The fastest way to build data apps in Python"
21
22 LONG_DESCRIPTION = (
23 "Streamlit's open-source app framework is the easiest way "
24 "for data scientists and machine learning engineers to "
25 "create beautiful, performant apps in only a few hours! "
26 "All in pure Python. All for free."
27 )
28
29 pipfile = Project(chdir=False).parsed_pipfile
30
31 packages = pipfile["packages"].copy()
32 requirements = convert_deps_to_pip(packages, r=False)
33
34
35 class VerifyVersionCommand(install):
36 """Custom command to verify that the git tag matches our version"""
37
38 description = "verify that the git tag matches our version"
39
40 def run(self):
41 tag = os.getenv("CIRCLE_TAG")
42
43 if tag != VERSION:
44 info = "Git tag: {0} does not match the version of this app: {1}".format(
45 tag, VERSION
46 )
47 sys.exit(info)
48
49
50 setuptools.setup(
51 name=NAME,
52 version=VERSION,
53 description=DESCRIPTION,
54 long_description=LONG_DESCRIPTION,
55 url="https://streamlit.io",
56 author="Streamlit Inc",
57 author_email="[email protected]",
58 python_requires=">=3.6",
59 license="Apache 2",
60 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
61 # Requirements
62 install_requires=requirements,
63 zip_safe=False, # install source files not egg
64 include_package_data=True, # copy html and friends
65 entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]},
66 # For Windows so that streamlit * commands work ie.
67 # - streamlit version
68 # - streamlit hello
69 scripts=["bin/streamlit.cmd"],
70 cmdclass={
71 "verify": VerifyVersionCommand,
72 },
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -13,7 +13,7 @@
)
sys.exit(exit_msg)
-VERSION = "0.74.0" # PEP-440
+VERSION = "0.74.1" # PEP-440
NAME = "streamlit"
| {"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -13,7 +13,7 @@\n )\n sys.exit(exit_msg)\n \n-VERSION = \"0.74.0\" # PEP-440\n+VERSION = \"0.74.1\" # PEP-440\n \n NAME = \"streamlit\"\n", "issue": "URL markup does not get generated as a link\n# Summary\r\nURLs used to generate an anchor tag automatically in markup. Now it does not\r\n\r\n\r\n# Steps to reproduce\r\nCode snippet:\r\n\r\n```\r\nst.write(f\"\"\"\r\n As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome!\r\n\r\n ---\r\n\r\n ### Connect With Us\r\n\r\n - We can be found at https://streamlit.io and https://twitter.com/streamlit\r\n - Come by\r\n [the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions,\r\n post awesome apps, or just say hi!\r\n \"\"\")\r\n```\r\n\r\n## Expected behavior:\r\n[0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py)\r\n\r\n\r\n\r\n## Actual behavior:\r\n[0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py)\r\n\r\n\r\n\r\n## Is this a regression?\r\nYes as of 0.74\r\n\n", "before_files": [{"content": "import os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.74.0\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}], "after_files": [{"content": "import os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. 
Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.74.1\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]} | 1,260 | 91 |
gh_patches_debug_19874 | rasdani/github-patches | git_diff | saleor__saleor-10283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
checkoutCreate mutation issue - { "code": "REQUIRED", "field": "country", "message": "This field cannot be blank." }
### What I'm trying to achieve
try to add checkout step with createCheckout mutation.
### Steps to reproduce the problem
<!-- Adding logs from the console, as well as query/response help us fix the bug faster -->
1. use docker-platform, deploy with docker compose, delete saleor folder and replace it by saleor folder with 3.1 branch clone
2. use playgraphl to test mutation request (checkoutCreate)
### What I expected to happen
i make test on saleor demo site : https://demo.saleor.io/graphql/
```bash
mutation CheckoutCreate {
checkoutCreate(
input: { channel: "default-channel", email: "[email protected]", lines: [] }
)
{ errors {
code
field
message
}
checkout {
id
token
created
}
}
}
```
result on : https://demo.saleor.io/graphql/
```bash
{
"data": {
"checkoutCreate": {
"errors": [],
"checkout": {
"id": "Q2hlY2tvdXQ6MDQ2MmQwMzQtZGJmYi00MTg1LWExZTMtMWUwYTU2YWMxYjJi",
"token": "0462d034-dbfb-4185-a1e3-1e0a56ac1b2b",
"created": "2021-09-17T13:17:33.994853+00:00"
}
}
}
}
```
# this is fine for me but ....
When i try the samething on my local machine (deploy with docker compose)
i get this:
```bash
{
"data": {
"checkoutCreate": {
"errors": [
{
"code": "REQUIRED",
"field": "country",
"message": "This field cannot be blank."
}
],
"checkout": null
}
}
}
```
i want to get checkoutID and token and the system ask me to add some country field.....
**System information**
<!-- Provide the version of Saleor or whether you're using it from the `master` branch. If using Saleor Dashboard or Storefront, provide their versions too. -->
Saleor version:
- [ ] dev (current master)
- [ X] 3.0
- [ ] 2.11
- [ ] 2.10
Operating system:
- [ ] Windows
- [ X] Linux
- [ ] MacOS
- [ ] Other
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/channel/migrations/0001_initial.py`
Content:
```
1 # Generated by Django 3.0.6 on 2020-06-16 07:54
2
3 from django.conf import settings
4 from django.db import migrations, models
5 from django.db.models.signals import post_migrate
6 from django.apps import apps as registry
7
8
9 def assing_permissions(apps, schema_editor):
10 def on_migrations_complete(sender=None, **kwargs):
11 Group = apps.get_model("auth", "Group")
12 Permission = apps.get_model("auth", "Permission")
13 ContentType = apps.get_model("contenttypes", "ContentType")
14
15 ct, _ = ContentType.objects.get_or_create(app_label="channel", model="channel")
16 manage_channels, _ = Permission.objects.get_or_create(
17 name="Manage channels.", content_type=ct, codename="manage_channels"
18 )
19
20 for group in Group.objects.iterator():
21 group.permissions.add(manage_channels)
22
23 sender = registry.get_app_config("channel")
24 post_migrate.connect(on_migrations_complete, weak=False, sender=sender)
25
26
27 def get_default_currency(Checkout, Order, Product, ShippingMethod, Voucher):
28 latest_product = Product.objects.order_by("-pk").first()
29 if latest_product:
30 return latest_product.currency
31 latest_voucher = Voucher.objects.order_by("-pk").first()
32 if latest_voucher:
33 return latest_voucher.currency
34 latest_shipping_method = ShippingMethod.objects.order_by("-pk").first()
35 if latest_shipping_method:
36 return latest_shipping_method.currency
37 latest_order = Order.objects.order_by("-pk").first()
38 if latest_order:
39 return latest_order.currency
40 latest_checkout = Checkout.objects.order_by("-pk").first()
41 if latest_checkout:
42 return latest_checkout.currency
43 return None
44
45
46 def create_default_channel(apps, schema_editor):
47 Channel = apps.get_model("channel", "Channel")
48 Checkout = apps.get_model("checkout", "Checkout")
49 Order = apps.get_model("order", "Order")
50 Product = apps.get_model("product", "Product")
51 ShippingMethod = apps.get_model("shipping", "ShippingMethod")
52 Voucher = apps.get_model("discount", "Voucher")
53
54 default_currency = get_default_currency(
55 Checkout, Order, Product, ShippingMethod, Voucher
56 )
57 if default_currency:
58 Channel.objects.create(
59 name="Default channel",
60 slug=settings.DEFAULT_CHANNEL_SLUG,
61 currency_code=default_currency,
62 is_active=True,
63 )
64
65
66 class Migration(migrations.Migration):
67
68 initial = True
69
70 dependencies = [
71 ("checkout", "0025_auto_20200221_0257"),
72 ("discount", "0019_auto_20200217_0350"),
73 ("order", "0084_auto_20200522_0522"),
74 ("product", "0118_populate_product_variant_price"),
75 ("shipping", "0018_default_zones_countries"),
76 ]
77
78 operations = [
79 migrations.CreateModel(
80 name="Channel",
81 fields=[
82 (
83 "id",
84 models.AutoField(
85 auto_created=True,
86 primary_key=True,
87 serialize=False,
88 verbose_name="ID",
89 ),
90 ),
91 ("name", models.CharField(max_length=250)),
92 ("slug", models.SlugField(max_length=255, unique=True)),
93 ("is_active", models.BooleanField(default=False)),
94 (
95 "currency_code",
96 models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH),
97 ),
98 ],
99 options={
100 "ordering": ("slug",),
101 "permissions": (("manage_channels", "Manage channels."),),
102 },
103 ),
104 migrations.RunPython(create_default_channel, migrations.RunPython.noop),
105 migrations.RunPython(assing_permissions, migrations.RunPython.noop),
106 ]
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/channel/migrations/0001_initial.py b/saleor/channel/migrations/0001_initial.py
--- a/saleor/channel/migrations/0001_initial.py
+++ b/saleor/channel/migrations/0001_initial.py
@@ -1,5 +1,6 @@
# Generated by Django 3.0.6 on 2020-06-16 07:54
+import os
from django.conf import settings
from django.db import migrations, models
from django.db.models.signals import post_migrate
@@ -54,12 +55,14 @@
default_currency = get_default_currency(
Checkout, Order, Product, ShippingMethod, Voucher
)
+ default_country = os.environ.get("DEFAULT_COUNTRY", "US")
if default_currency:
Channel.objects.create(
name="Default channel",
slug=settings.DEFAULT_CHANNEL_SLUG,
currency_code=default_currency,
is_active=True,
+ default_country=default_country,
)
| {"golden_diff": "diff --git a/saleor/channel/migrations/0001_initial.py b/saleor/channel/migrations/0001_initial.py\n--- a/saleor/channel/migrations/0001_initial.py\n+++ b/saleor/channel/migrations/0001_initial.py\n@@ -1,5 +1,6 @@\n # Generated by Django 3.0.6 on 2020-06-16 07:54\n \n+import os\n from django.conf import settings\n from django.db import migrations, models\n from django.db.models.signals import post_migrate\n@@ -54,12 +55,14 @@\n default_currency = get_default_currency(\n Checkout, Order, Product, ShippingMethod, Voucher\n )\n+ default_country = os.environ.get(\"DEFAULT_COUNTRY\", \"US\")\n if default_currency:\n Channel.objects.create(\n name=\"Default channel\",\n slug=settings.DEFAULT_CHANNEL_SLUG,\n currency_code=default_currency,\n is_active=True,\n+ default_country=default_country,\n )\n", "issue": "checkoutCreate mutation issue - { \"code\": \"REQUIRED\", \"field\": \"country\", \"message\": \"This field cannot be blank.\" }\n### What I'm trying to achieve\r\ntry to add checkout step with createCheckout mutation.\r\n\r\n### Steps to reproduce the problem\r\n<!-- Adding logs from the console, as well as query/response help us fix the bug faster -->\r\n1. use docker-platform, deploy with docker compose, delete saleor folder and replace it by saleor folder with 3.1 branch clone\r\n2. use playgraphl to test mutation request (checkoutCreate)\r\n\r\n### What I expected to happen\r\ni make test on saleor demo site : https://demo.saleor.io/graphql/\r\n\r\n```bash\r\nmutation CheckoutCreate {\r\n checkoutCreate(\r\n input: { channel: \"default-channel\", email: \"[email protected]\", lines: [] }\r\n ) \r\n { errors {\r\n code\r\n field\r\n message\r\n }\r\n checkout {\r\n id\r\n token\r\n created\r\n \r\n }\r\n }\r\n}\r\n\r\n```\r\nresult on : https://demo.saleor.io/graphql/\r\n```bash\r\n{\r\n \"data\": {\r\n \"checkoutCreate\": {\r\n \"errors\": [],\r\n \"checkout\": {\r\n \"id\": \"Q2hlY2tvdXQ6MDQ2MmQwMzQtZGJmYi00MTg1LWExZTMtMWUwYTU2YWMxYjJi\",\r\n \"token\": \"0462d034-dbfb-4185-a1e3-1e0a56ac1b2b\",\r\n \"created\": \"2021-09-17T13:17:33.994853+00:00\"\r\n }\r\n }\r\n }\r\n}\r\n\r\n\r\n```\r\n\r\n# this is fine for me but ....\r\nWhen i try the samething on my local machine (deploy with docker compose)\r\ni get this:\r\n\r\n```bash\r\n\r\n{\r\n \"data\": {\r\n \"checkoutCreate\": {\r\n \"errors\": [\r\n {\r\n \"code\": \"REQUIRED\",\r\n \"field\": \"country\",\r\n \"message\": \"This field cannot be blank.\"\r\n }\r\n ],\r\n \"checkout\": null\r\n }\r\n }\r\n}\r\n\r\n```\r\ni want to get checkoutID and token and the system ask me to add some country field.....\r\n\r\n**System information**\r\n<!-- Provide the version of Saleor or whether you're using it from the `master` branch. If using Saleor Dashboard or Storefront, provide their versions too. 
-->\r\nSaleor version:\r\n- [ ] dev (current master)\r\n- [ X] 3.0\r\n- [ ] 2.11\r\n- [ ] 2.10\r\n\r\nOperating system:\r\n- [ ] Windows\r\n- [ X] Linux\r\n- [ ] MacOS\r\n- [ ] Other\r\n\n", "before_files": [{"content": "# Generated by Django 3.0.6 on 2020-06-16 07:54\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nfrom django.db.models.signals import post_migrate\nfrom django.apps import apps as registry\n\n\ndef assing_permissions(apps, schema_editor):\n def on_migrations_complete(sender=None, **kwargs):\n Group = apps.get_model(\"auth\", \"Group\")\n Permission = apps.get_model(\"auth\", \"Permission\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n ct, _ = ContentType.objects.get_or_create(app_label=\"channel\", model=\"channel\")\n manage_channels, _ = Permission.objects.get_or_create(\n name=\"Manage channels.\", content_type=ct, codename=\"manage_channels\"\n )\n\n for group in Group.objects.iterator():\n group.permissions.add(manage_channels)\n\n sender = registry.get_app_config(\"channel\")\n post_migrate.connect(on_migrations_complete, weak=False, sender=sender)\n\n\ndef get_default_currency(Checkout, Order, Product, ShippingMethod, Voucher):\n latest_product = Product.objects.order_by(\"-pk\").first()\n if latest_product:\n return latest_product.currency\n latest_voucher = Voucher.objects.order_by(\"-pk\").first()\n if latest_voucher:\n return latest_voucher.currency\n latest_shipping_method = ShippingMethod.objects.order_by(\"-pk\").first()\n if latest_shipping_method:\n return latest_shipping_method.currency\n latest_order = Order.objects.order_by(\"-pk\").first()\n if latest_order:\n return latest_order.currency\n latest_checkout = Checkout.objects.order_by(\"-pk\").first()\n if latest_checkout:\n return latest_checkout.currency\n return None\n\n\ndef create_default_channel(apps, schema_editor):\n Channel = apps.get_model(\"channel\", \"Channel\")\n Checkout = apps.get_model(\"checkout\", \"Checkout\")\n Order = apps.get_model(\"order\", \"Order\")\n Product = apps.get_model(\"product\", \"Product\")\n ShippingMethod = apps.get_model(\"shipping\", \"ShippingMethod\")\n Voucher = apps.get_model(\"discount\", \"Voucher\")\n\n default_currency = get_default_currency(\n Checkout, Order, Product, ShippingMethod, Voucher\n )\n if default_currency:\n Channel.objects.create(\n name=\"Default channel\",\n slug=settings.DEFAULT_CHANNEL_SLUG,\n currency_code=default_currency,\n is_active=True,\n )\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n (\"checkout\", \"0025_auto_20200221_0257\"),\n (\"discount\", \"0019_auto_20200217_0350\"),\n (\"order\", \"0084_auto_20200522_0522\"),\n (\"product\", \"0118_populate_product_variant_price\"),\n (\"shipping\", \"0018_default_zones_countries\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Channel\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"name\", models.CharField(max_length=250)),\n (\"slug\", models.SlugField(max_length=255, unique=True)),\n (\"is_active\", models.BooleanField(default=False)),\n (\n \"currency_code\",\n models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH),\n ),\n ],\n options={\n \"ordering\": (\"slug\",),\n \"permissions\": ((\"manage_channels\", \"Manage channels.\"),),\n },\n ),\n migrations.RunPython(create_default_channel, migrations.RunPython.noop),\n migrations.RunPython(assing_permissions, 
migrations.RunPython.noop),\n ]\n", "path": "saleor/channel/migrations/0001_initial.py"}], "after_files": [{"content": "# Generated by Django 3.0.6 on 2020-06-16 07:54\n\nimport os\nfrom django.conf import settings\nfrom django.db import migrations, models\nfrom django.db.models.signals import post_migrate\nfrom django.apps import apps as registry\n\n\ndef assing_permissions(apps, schema_editor):\n def on_migrations_complete(sender=None, **kwargs):\n Group = apps.get_model(\"auth\", \"Group\")\n Permission = apps.get_model(\"auth\", \"Permission\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n ct, _ = ContentType.objects.get_or_create(app_label=\"channel\", model=\"channel\")\n manage_channels, _ = Permission.objects.get_or_create(\n name=\"Manage channels.\", content_type=ct, codename=\"manage_channels\"\n )\n\n for group in Group.objects.iterator():\n group.permissions.add(manage_channels)\n\n sender = registry.get_app_config(\"channel\")\n post_migrate.connect(on_migrations_complete, weak=False, sender=sender)\n\n\ndef get_default_currency(Checkout, Order, Product, ShippingMethod, Voucher):\n latest_product = Product.objects.order_by(\"-pk\").first()\n if latest_product:\n return latest_product.currency\n latest_voucher = Voucher.objects.order_by(\"-pk\").first()\n if latest_voucher:\n return latest_voucher.currency\n latest_shipping_method = ShippingMethod.objects.order_by(\"-pk\").first()\n if latest_shipping_method:\n return latest_shipping_method.currency\n latest_order = Order.objects.order_by(\"-pk\").first()\n if latest_order:\n return latest_order.currency\n latest_checkout = Checkout.objects.order_by(\"-pk\").first()\n if latest_checkout:\n return latest_checkout.currency\n return None\n\n\ndef create_default_channel(apps, schema_editor):\n Channel = apps.get_model(\"channel\", \"Channel\")\n Checkout = apps.get_model(\"checkout\", \"Checkout\")\n Order = apps.get_model(\"order\", \"Order\")\n Product = apps.get_model(\"product\", \"Product\")\n ShippingMethod = apps.get_model(\"shipping\", \"ShippingMethod\")\n Voucher = apps.get_model(\"discount\", \"Voucher\")\n\n default_currency = get_default_currency(\n Checkout, Order, Product, ShippingMethod, Voucher\n )\n default_country = os.environ.get(\"DEFAULT_COUNTRY\", \"US\")\n if default_currency:\n Channel.objects.create(\n name=\"Default channel\",\n slug=settings.DEFAULT_CHANNEL_SLUG,\n currency_code=default_currency,\n is_active=True,\n default_country=default_country,\n )\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n (\"checkout\", \"0025_auto_20200221_0257\"),\n (\"discount\", \"0019_auto_20200217_0350\"),\n (\"order\", \"0084_auto_20200522_0522\"),\n (\"product\", \"0118_populate_product_variant_price\"),\n (\"shipping\", \"0018_default_zones_countries\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Channel\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"name\", models.CharField(max_length=250)),\n (\"slug\", models.SlugField(max_length=255, unique=True)),\n (\"is_active\", models.BooleanField(default=False)),\n (\n \"currency_code\",\n models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH),\n ),\n ],\n options={\n \"ordering\": (\"slug\",),\n \"permissions\": ((\"manage_channels\", \"Manage channels.\"),),\n },\n ),\n migrations.RunPython(create_default_channel, migrations.RunPython.noop),\n migrations.RunPython(assing_permissions, 
migrations.RunPython.noop),\n ]\n", "path": "saleor/channel/migrations/0001_initial.py"}]} | 1,905 | 227 |
gh_patches_debug_37257 | rasdani/github-patches | git_diff | svthalia__concrexit-3722 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lock admin panel behind 2FA
### What?
<!-- A clear and concise high-level description of what you want to happen. -->
lock the admin panel behind the 2FA functionality
### Why?
<!-- A clear and concise motivation why we should consider implementing this. -->
Admin panel has sensitive data so it should be protected. So requiring 2FA makes sense.
### How?
<!-- Optionally some guidance, ideas, context. -->
Probably nice to have a decorator to be able to lock other things of the site behind 2FA in the future.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/thaliawebsite/admin.py`
Content:
```
1 """Settings for the admin site."""
2 from django.contrib import admin
3 from django.utils.translation import gettext_lazy as _
4
5 admin.site.site_header = _("Thalia administration")
6 admin.site.site_title = _("Thalia")
7
```
Path: `website/thaliawebsite/views.py`
Content:
```
1 """General views for the website."""
2
3 from django.contrib.admin.views.decorators import staff_member_required
4 from django.contrib.auth.views import LogoutView as BaseLogoutView
5 from django.contrib.auth.views import PasswordResetView
6 from django.core.exceptions import PermissionDenied
7 from django.http import HttpResponse, HttpResponseForbidden
8 from django.shortcuts import redirect
9 from django.utils.decorators import method_decorator
10 from django.views.generic import ListView, TemplateView
11 from django.views.generic.base import View
12
13 from django_ratelimit.decorators import ratelimit
14 from two_factor.views import LoginView
15
16
17 class IndexView(TemplateView):
18 template_name = "index.html"
19
20
21 @method_decorator(staff_member_required, "dispatch")
22 class TestCrashView(View):
23 """Test view to intentionally crash to test the error handling."""
24
25 def dispatch(self, request, *args, **kwargs) -> HttpResponse:
26 if not request.user.is_superuser:
27 return HttpResponseForbidden("This is not for you")
28 raise Exception("Test exception")
29
30
31 class PagedView(ListView):
32 """A ListView with automatic pagination."""
33
34 def get_context_data(self, **kwargs) -> dict:
35 context = super().get_context_data(**kwargs)
36 page = context["page_obj"].number
37 paginator = context["paginator"]
38
39 # Show the two pages before and after the current page
40 page_range_start = max(1, page - 2)
41 page_range_stop = min(page + 3, paginator.num_pages + 1)
42
43 # Add extra pages if we show less than 5 pages
44 page_range_start = min(page_range_start, page_range_stop - 5)
45 page_range_start = max(1, page_range_start)
46
47 # Add extra pages if we still show less than 5 pages
48 page_range_stop = max(page_range_stop, page_range_start + 5)
49 page_range_stop = min(page_range_stop, paginator.num_pages + 1)
50
51 page_range = range(page_range_start, page_range_stop)
52
53 querydict = self.request.GET.copy()
54
55 if "page" in querydict:
56 del querydict["page"]
57
58 context.update(
59 {
60 "page_range": page_range,
61 "base_url": f"{self.request.path}?{querydict.urlencode()}&"
62 if querydict
63 else f"{self.request.path}?",
64 }
65 )
66
67 return context
68
69
70 class RateLimitedPasswordResetView(PasswordResetView):
71 @method_decorator(ratelimit(key="ip", rate="5/h"))
72 def post(self, request, *args, **kwargs):
73 return super().post(request, *args, **kwargs)
74
75
76 class RateLimitedLoginView(LoginView):
77 @method_decorator(ratelimit(key="ip", rate="30/h"))
78 @method_decorator(ratelimit(key="post:username", rate="30/h"))
79 def post(self, request, *args, **kwargs):
80 return super().post(request, *args, **kwargs)
81
82
83 class LogoutView(BaseLogoutView):
84 # Allow GET logout still (this was deprecated in Django 5.0).
85 http_method_names = ["get", "post", "options"]
86
87 def get(self, request, *args, **kwargs):
88 return self.post(request, *args, **kwargs)
89
90
91 def rate_limited_view(request, *args, **kwargs):
92 return HttpResponse("You are rate limited", status=429)
93
94
95 def admin_unauthorized_view(request):
96 if not request.member:
97 url = "/user/account/login"
98 args = request.META.get("QUERY_STRING", "")
99 if args:
100 url = f"{url}?{args}"
101 return redirect(url)
102 elif not request.member.is_staff and not request.member.is_superuser:
103 raise PermissionDenied("You are not allowed to access the administration page.")
104 else:
105 return redirect(request.GET.get("next", "/"))
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/thaliawebsite/admin.py b/website/thaliawebsite/admin.py
--- a/website/thaliawebsite/admin.py
+++ b/website/thaliawebsite/admin.py
@@ -1,6 +1,17 @@
"""Settings for the admin site."""
+
+from django.conf import settings
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
-admin.site.site_header = _("Thalia administration")
-admin.site.site_title = _("Thalia")
+from django_otp import user_has_device
+
+
+class ThaliaAdminSite(admin.AdminSite):
+ site_header = _("Thalia administration")
+ site_title = _("Thalia")
+
+ def has_permission(self, request):
+ return super().has_permission(request) and (
+ settings.DEBUG or user_has_device(request.user)
+ )
diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py
--- a/website/thaliawebsite/views.py
+++ b/website/thaliawebsite/views.py
@@ -1,5 +1,6 @@
"""General views for the website."""
+from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.views import LogoutView as BaseLogoutView
from django.contrib.auth.views import PasswordResetView
@@ -10,6 +11,7 @@
from django.views.generic import ListView, TemplateView
from django.views.generic.base import View
+from django_otp import user_has_device
from django_ratelimit.decorators import ratelimit
from two_factor.views import LoginView
@@ -58,9 +60,11 @@
context.update(
{
"page_range": page_range,
- "base_url": f"{self.request.path}?{querydict.urlencode()}&"
- if querydict
- else f"{self.request.path}?",
+ "base_url": (
+ f"{self.request.path}?{querydict.urlencode()}&"
+ if querydict
+ else f"{self.request.path}?"
+ ),
}
)
@@ -101,5 +105,11 @@
return redirect(url)
elif not request.member.is_staff and not request.member.is_superuser:
raise PermissionDenied("You are not allowed to access the administration page.")
+ elif not user_has_device(request.member):
+ messages.error(
+ request,
+ "You need to set up two-factor authentication to access the administration page.",
+ )
+ return redirect("two_factor:setup")
else:
return redirect(request.GET.get("next", "/"))
| {"golden_diff": "diff --git a/website/thaliawebsite/admin.py b/website/thaliawebsite/admin.py\n--- a/website/thaliawebsite/admin.py\n+++ b/website/thaliawebsite/admin.py\n@@ -1,6 +1,17 @@\n \"\"\"Settings for the admin site.\"\"\"\n+\n+from django.conf import settings\n from django.contrib import admin\n from django.utils.translation import gettext_lazy as _\n \n-admin.site.site_header = _(\"Thalia administration\")\n-admin.site.site_title = _(\"Thalia\")\n+from django_otp import user_has_device\n+\n+\n+class ThaliaAdminSite(admin.AdminSite):\n+ site_header = _(\"Thalia administration\")\n+ site_title = _(\"Thalia\")\n+\n+ def has_permission(self, request):\n+ return super().has_permission(request) and (\n+ settings.DEBUG or user_has_device(request.user)\n+ )\ndiff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py\n--- a/website/thaliawebsite/views.py\n+++ b/website/thaliawebsite/views.py\n@@ -1,5 +1,6 @@\n \"\"\"General views for the website.\"\"\"\n \n+from django.contrib import messages\n from django.contrib.admin.views.decorators import staff_member_required\n from django.contrib.auth.views import LogoutView as BaseLogoutView\n from django.contrib.auth.views import PasswordResetView\n@@ -10,6 +11,7 @@\n from django.views.generic import ListView, TemplateView\n from django.views.generic.base import View\n \n+from django_otp import user_has_device\n from django_ratelimit.decorators import ratelimit\n from two_factor.views import LoginView\n \n@@ -58,9 +60,11 @@\n context.update(\n {\n \"page_range\": page_range,\n- \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n- if querydict\n- else f\"{self.request.path}?\",\n+ \"base_url\": (\n+ f\"{self.request.path}?{querydict.urlencode()}&\"\n+ if querydict\n+ else f\"{self.request.path}?\"\n+ ),\n }\n )\n \n@@ -101,5 +105,11 @@\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n+ elif not user_has_device(request.member):\n+ messages.error(\n+ request,\n+ \"You need to set up two-factor authentication to access the administration page.\",\n+ )\n+ return redirect(\"two_factor:setup\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "issue": "Lock admin panel behind 2FA\n### What?\r\n<!-- A clear and concise high-level description of what you want to happen. -->\r\nlock the admin panel behind the 2FA functionality\r\n\r\n### Why?\r\n<!-- A clear and concise motivation why we should consider implementing this. -->\r\nAdmin panel has sensitive data so it should be protected. So requiring 2FA makes sense.\r\n\r\n### How?\r\n<!-- Optionally some guidance, ideas, context. 
-->\r\nProbably nice to have a decorator to be able to lock other things of the site behind 2FA in the future.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Settings for the admin site.\"\"\"\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nadmin.site.site_header = _(\"Thalia administration\")\nadmin.site.site_title = _(\"Thalia\")\n", "path": "website/thaliawebsite/admin.py"}, {"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LogoutView as BaseLogoutView\nfrom django.contrib.auth.views import PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\nfrom two_factor.views import LoginView\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n querydict = self.request.GET.copy()\n\n if \"page\" in querydict:\n del querydict[\"page\"]\n\n context.update(\n {\n \"page_range\": page_range,\n \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n if querydict\n else f\"{self.request.path}?\",\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass LogoutView(BaseLogoutView):\n # Allow GET logout still (this was deprecated in Django 5.0).\n http_method_names = [\"get\", \"post\", \"options\"]\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef admin_unauthorized_view(request):\n if not request.member:\n url = 
\"/user/account/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}], "after_files": [{"content": "\"\"\"Settings for the admin site.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_otp import user_has_device\n\n\nclass ThaliaAdminSite(admin.AdminSite):\n site_header = _(\"Thalia administration\")\n site_title = _(\"Thalia\")\n\n def has_permission(self, request):\n return super().has_permission(request) and (\n settings.DEBUG or user_has_device(request.user)\n )\n", "path": "website/thaliawebsite/admin.py"}, {"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LogoutView as BaseLogoutView\nfrom django.contrib.auth.views import PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_otp import user_has_device\nfrom django_ratelimit.decorators import ratelimit\nfrom two_factor.views import LoginView\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n querydict = self.request.GET.copy()\n\n if \"page\" in querydict:\n del querydict[\"page\"]\n\n context.update(\n {\n \"page_range\": page_range,\n \"base_url\": (\n f\"{self.request.path}?{querydict.urlencode()}&\"\n if querydict\n else f\"{self.request.path}?\"\n ),\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n 
@method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass LogoutView(BaseLogoutView):\n # Allow GET logout still (this was deprecated in Django 5.0).\n http_method_names = [\"get\", \"post\", \"options\"]\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef admin_unauthorized_view(request):\n if not request.member:\n url = \"/user/account/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n elif not user_has_device(request.member):\n messages.error(\n request,\n \"You need to set up two-factor authentication to access the administration page.\",\n )\n return redirect(\"two_factor:setup\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}]} | 1,473 | 565 |
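The golden diff for this record gates the Django admin behind django-otp's `user_has_device` check and sends members without a device to the `two_factor:setup` flow. The condensed sketch below shows only the admin-site half of that pattern; the library calls come from the diff above, while the class name is arbitrary and the snippet is an illustration, not the project's actual module.

```
from django.conf import settings
from django.contrib import admin
from django_otp import user_has_device


class TwoFactorAdminSite(admin.AdminSite):
    """Admin site that also requires a registered OTP device."""

    def has_permission(self, request):
        # Usual is_active/is_staff check first; DEBUG keeps local development
        # usable without enrolling a device.
        return super().has_permission(request) and (
            settings.DEBUG or user_has_device(request.user)
        )
```

The matching view-level change in the diff redirects members without a device to `two_factor:setup` with an error message, so the admin stays unreachable until two-factor authentication is configured.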
gh_patches_debug_170 | rasdani/github-patches | git_diff | pydantic__pydantic-4418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
V1.10 release
To do/decide:
* [x] #2557 - **merged**
* [x] #2745 - needs some tweaks, but we need to decide if it's a good idea before V2
* [x] #2190 - **deferred**
* [x] cherry pick stuff from v1.9 branch, maybe just history #4350
* [x] #3346
* [x] #3593 - **deferred**
* [x] #3946
* [x] #4028 - **API will change in v2**
* [x] #4354
* [x] #4216
* [x] #4191
* [x] #3941 - revert or fix
* [x] #4339
* [x] #4356
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/version.py`
Content:
```
1 __all__ = 'compiled', 'VERSION', 'version_info'
2
3 VERSION = '1.9.2'
4
5 try:
6 import cython # type: ignore
7 except ImportError:
8 compiled: bool = False
9 else: # pragma: no cover
10 try:
11 compiled = cython.compiled
12 except AttributeError:
13 compiled = False
14
15
16 def version_info() -> str:
17 import platform
18 import sys
19 from importlib import import_module
20 from pathlib import Path
21
22 optional_deps = []
23 for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):
24 try:
25 import_module(p.replace('-', '_'))
26 except ImportError:
27 continue
28 optional_deps.append(p)
29
30 info = {
31 'pydantic version': VERSION,
32 'pydantic compiled': compiled,
33 'install path': Path(__file__).resolve().parent,
34 'python version': sys.version,
35 'platform': platform.platform(),
36 'optional deps. installed': optional_deps,
37 }
38 return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/version.py b/pydantic/version.py
--- a/pydantic/version.py
+++ b/pydantic/version.py
@@ -1,6 +1,6 @@
__all__ = 'compiled', 'VERSION', 'version_info'
-VERSION = '1.9.2'
+VERSION = '1.10.0a1'
try:
import cython # type: ignore
| {"golden_diff": "diff --git a/pydantic/version.py b/pydantic/version.py\n--- a/pydantic/version.py\n+++ b/pydantic/version.py\n@@ -1,6 +1,6 @@\n __all__ = 'compiled', 'VERSION', 'version_info'\n \n-VERSION = '1.9.2'\n+VERSION = '1.10.0a1'\n \n try:\n import cython # type: ignore\n", "issue": "V1.10 release\nTo do/decide:\r\n* [x] #2557 - **merged**\r\n* [x] #2745 - needs some tweaks, but we need to decide if it's a good idea before V2\r\n* [x] #2190 - **deferred**\r\n* [x] cherry pick stuff from v1.9 branch, maybe just history #4350\r\n* [x] #3346\r\n* [x] #3593 - **deferred**\r\n* [x] #3946\r\n* [x] #4028 - **API will change in v2**\r\n* [x] #4354\r\n* [x] #4216\r\n* [x] #4191\r\n* [x] #3941 - revert or fix\r\n* [x] #4339\r\n* [x] #4356\n", "before_files": [{"content": "__all__ = 'compiled', 'VERSION', 'version_info'\n\nVERSION = '1.9.2'\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n\ndef version_info() -> str:\n import platform\n import sys\n from importlib import import_module\n from pathlib import Path\n\n optional_deps = []\n for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):\n try:\n import_module(p.replace('-', '_'))\n except ImportError:\n continue\n optional_deps.append(p)\n\n info = {\n 'pydantic version': VERSION,\n 'pydantic compiled': compiled,\n 'install path': Path(__file__).resolve().parent,\n 'python version': sys.version,\n 'platform': platform.platform(),\n 'optional deps. installed': optional_deps,\n }\n return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for k, v in info.items())\n", "path": "pydantic/version.py"}], "after_files": [{"content": "__all__ = 'compiled', 'VERSION', 'version_info'\n\nVERSION = '1.10.0a1'\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n\ndef version_info() -> str:\n import platform\n import sys\n from importlib import import_module\n from pathlib import Path\n\n optional_deps = []\n for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):\n try:\n import_module(p.replace('-', '_'))\n except ImportError:\n continue\n optional_deps.append(p)\n\n info = {\n 'pydantic version': VERSION,\n 'pydantic compiled': compiled,\n 'install path': Path(__file__).resolve().parent,\n 'python version': sys.version,\n 'platform': platform.platform(),\n 'optional deps. installed': optional_deps,\n }\n return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for k, v in info.items())\n", "path": "pydantic/version.py"}]} | 792 | 93 |
gh_patches_debug_554 | rasdani/github-patches | git_diff | scikit-image__scikit-image-353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Please add info on how to run the skimage unit tests at the end of the installation instructions
I couldn't find instructions how to run the skimage unit tests.
First I tried
```
python -c 'import skimage; skimage.test()'
```
which ran 287 tests and gave 16 errors, all the same:
```
ImportError: cannot import name BytesIO
```
Then I tried
```
nosetests --exe skimage
```
which ran 490 tests, no error.
Full output is here: https://gist.github.com/3832077
Apparently it is important to not use `skimage.test()`, but `nosetests` instead?
Could you please add this info somewhere, the first place I would have looked is at the end of http://skimage.org/docs/dev/install.html ( or make "nosetests" or "run tests" in the sphinx search find the appropriate command to run).
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/__init__.py`
Content:
```
1 """Image Processing SciKit (Toolbox for SciPy)
2
3 ``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image
4 processing and computer vision.
5
6 The main package of ``skimage`` only provides a few utilities for converting
7 between image data types; for most features, you need to import one of the
8 following subpackages:
9
10 Subpackages
11 -----------
12 color
13 Color space conversion.
14 data
15 Test images and example data.
16 draw
17 Image drawing primitives (lines, text, etc.).
18 exposure
19 Image intensity adjustment (e.g., histogram equalization).
20 feature
21 Feature detection (e.g. texture analysis, corners, etc.).
22 filter
23 Sharpening, edge finding, denoising, etc.
24 graph
25 Graph-theoretic operations, e.g. dynamic programming (shortest paths).
26 io
27 Reading, saving, and displaying images and video.
28 measure
29 Measurement of image properties, e.g., similarity and contours.
30 morphology
31 Morphological operations, e.g. opening or skeletonization.
32 segmentation
33 Splitting an image into self-similar regions.
34 transform
35 Geometric and other transforms, e.g. rotation or the Radon transform.
36 util
37 Generic utilities.
38
39 Utility Functions
40 -----------------
41 get_log
42 Returns the ``skimage`` log. Use this to print debug output.
43 img_as_float
44 Convert an image to floating point format, with values in [0, 1].
45 img_as_uint
46 Convert an image to unsigned integer format, with values in [0, 65535].
47 img_as_int
48 Convert an image to signed integer format, with values in [-32768, 32767].
49 img_as_ubyte
50 Convert an image to unsigned byte format, with values in [0, 255].
51
52 """
53
54 import os.path as _osp
55
56 pkg_dir = _osp.abspath(_osp.dirname(__file__))
57 data_dir = _osp.join(pkg_dir, 'data')
58
59 try:
60 from .version import version as __version__
61 except ImportError:
62 __version__ = "unbuilt-dev"
63
64
65 def _setup_test(verbose=False):
66 import functools
67
68 args = ['', '--exe', '-w', pkg_dir]
69 if verbose:
70 args.extend(['-v', '-s'])
71
72 try:
73 import nose as _nose
74 except ImportError:
75 def broken_test_func():
76 """This would invoke the skimage test suite, but nose couldn't be
77 imported so the test suite can not run.
78 """
79 raise ImportError("Could not load nose. Unit tests not available.")
80 return broken_test_func
81 else:
82 f = functools.partial(_nose.run, 'skimage', argv=args)
83 f.__doc__ = 'Invoke the skimage test suite.'
84 return f
85
86
87 test = _setup_test()
88 test_verbose = _setup_test(verbose=True)
89
90
91 def get_log(name=None):
92 """Return a console logger.
93
94 Output may be sent to the logger using the `debug`, `info`, `warning`,
95 `error` and `critical` methods.
96
97 Parameters
98 ----------
99 name : str
100 Name of the log.
101
102 References
103 ----------
104 .. [1] Logging facility for Python,
105 http://docs.python.org/library/logging.html
106
107 """
108 import logging
109
110 if name is None:
111 name = 'skimage'
112 else:
113 name = 'skimage.' + name
114
115 log = logging.getLogger(name)
116 return log
117
118
119 def _setup_log():
120 """Configure root logger.
121
122 """
123 import logging
124 import sys
125
126 formatter = logging.Formatter(
127 '%(name)s: %(levelname)s: %(message)s'
128 )
129
130 try:
131 handler = logging.StreamHandler(stream=sys.stdout)
132 except TypeError:
133 handler = logging.StreamHandler(strm=sys.stdout)
134 handler.setFormatter(formatter)
135
136 log = get_log()
137 log.addHandler(handler)
138 log.setLevel(logging.WARNING)
139 log.propagate = False
140
141 _setup_log()
142
143 from .util.dtype import *
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/__init__.py b/skimage/__init__.py
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -65,7 +65,7 @@
def _setup_test(verbose=False):
import functools
- args = ['', '--exe', '-w', pkg_dir]
+ args = ['', pkg_dir, '--exe']
if verbose:
args.extend(['-v', '-s'])
| {"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -65,7 +65,7 @@\n def _setup_test(verbose=False):\n import functools\n \n- args = ['', '--exe', '-w', pkg_dir]\n+ args = ['', pkg_dir, '--exe']\n if verbose:\n args.extend(['-v', '-s'])\n", "issue": "Please add info how to run the skimage unit tests at the end of the installation instructions\nI couldn't find instructions how to run the skimage unit tests.\n\nFirst I tried\n\n```\npython -c 'import skimage; skimage.test()\n```\n\nwhich ran 287 tests and gave 16 errors, all the same:\n\n```\nImportError: cannot import name BytesIO\n```\n\nThen I tried\n\n```\nnosetests --exe skimage\n```\n\nwhich ran 490 tests, no error.\n\nFull output is here: https://gist.github.com/3832077\n\nApparently it is important to not use `skimage.test()`, but `nosetests` instead?\nCould you please add this info somewhere, the first place I would have looked is at the end of http://skimage.org/docs/dev/install.html ( or make \"nosetests\" or \"run tests\" in the sphinx search find the appropriate command to run).\n\nThanks!\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', '--exe', '-w', pkg_dir]\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n def broken_test_func():\n \"\"\"This would invoke the skimage test suite, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Unit tests not available.\")\n return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\n\ntest = _setup_test()\ntest_verbose = _setup_test(verbose=True)\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n handler.setFormatter(formatter)\n\n log = get_log()\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n log.propagate = False\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}], "after_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', pkg_dir, '--exe']\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n def broken_test_func():\n \"\"\"This would invoke the skimage test suite, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Unit tests not available.\")\n return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\n\ntest = _setup_test()\ntest_verbose = _setup_test(verbose=True)\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n handler.setFormatter(formatter)\n\n log = get_log()\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n log.propagate = False\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]} | 1,658 | 102 |
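The fix in this record reorders the nose arguments so the package directory is passed as the test target (`['', pkg_dir, '--exe']`) rather than via `-w`, which appears to be what aligns `skimage.test()` with the `nosetests --exe skimage` invocation from the issue. The sketch below traces the resulting call path; it is based directly on `_setup_test()` in the file above rather than any new API.

```
import functools

import nose
import skimage

args = ['', skimage.pkg_dir, '--exe']   # package path is now the test target
test = functools.partial(nose.run, 'skimage', argv=args)
test()   # roughly what `nosetests --exe <pkg_dir>` from the issue report does
```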
gh_patches_debug_36057 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-1581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change order of technical description and list of functions in documentation
Three modules, [`kernels`](https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html), [`grouping`](https://pennylane.readthedocs.io/en/latest/code/qml_grouping.html), and [`qaoa`](https://pennylane.readthedocs.io/en/latest/code/qml_qaoa.html) have their module documentation ordered such that there is first a lengthy description of the theory, and the actual list of functions comes after. We should update the docs of these modules so that the functions appear *first*, and the technical details come afterwards (as was recently discussed in #1160). This will improve readability of the documentation and make it easier to find the details of a desired function.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/qaoa/__init__.py`
Content:
```
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 This module contains functionality to construct QAOA workflows in PennyLane.
16 """
17
18 from .mixers import *
19 from .cost import *
20 from .layers import *
21 import pennylane.qaoa.cycle
22
```
Path: `pennylane/kernels/__init__.py`
Content:
```
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 This subpackage defines functions that relate to quantum kernel methods.
16 On one hand this includes functions to call a quantum kernel systematically
17 on training and test datasets to obtain the *kernel matrix*.
18 On the other hand it provides postprocessing methods for those kernel
19 matrices which can be used to mitigate device noise and sampling errors.
20
21 Given a kernel
22
23 .. math ::
24
25 k: \mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}, \quad
26 (x_1, x_2)\mapsto k(x_1, x_2)
27
28 the kernel matrix of :math:`k` on a training dataset
29 :math:`\{(x_1, y_1),\cdots (x_n, y_n)\}` with :math:`x_i\in\mathbb{R}^d`
30 and :math:`y_i\in\{-1, 1\}` is defined as
31
32 .. math ::
33
34 K_{ij} = k(x_i, x_j).
35
36 For valid kernels, this is a real symmetric positive semi-definite matrix.
37 We also define the *ideal kernel matrix* for the training dataset which
38 perfectly predicts whether two points have identical labels or not:
39
40 .. math ::
41
42 K^\ast_{ij} = y_i y_j
43
44 We can measure the similarity between :math:`K` and :math:`K^\ast`,
45 through the *kernel polarity* which can be expressed as the Frobenius inner
46 product between the two matrices:
47
48 .. math ::
49
50 \operatorname{P}(k) = \langle K^\ast, K \rangle_F = \sum_{i,j=1}^n y_i y_j k(x_i, x_j)
51
52 Additionally, there is the *kernel-target alignment*, which is the normalized
53 counterpart to the kernel polarity:
54
55 .. math ::
56
57 \operatorname{TA}(k) &= \frac{P(k)}{\lVert K^\ast \rVert_F\;\lVert K \rVert_F}\\
58 \lVert K\rVert_F &= \sqrt{\sum_{i,j=1}^n k(x_i, x_j)^2}\\
59 \lVert K^\ast\rVert_F &= \sqrt{\sum_{i,j=1}^n (y_iy_j)^2}
60
61 For datasets with different numbers of training points per class the labels are rescaled
62 by the number of datapoints in the respective class to avoid that kernel polarity and
63 kernel-target alignment are dominated by the properties of the kernel for just a single class.
64
65 Given a callable kernel function, all these quantities can readily be computed
66 using the methods in this module.
67 """
68 from .cost_functions import (
69 polarity,
70 target_alignment,
71 )
72 from .postprocessing import (
73 threshold_matrix,
74 displace_matrix,
75 flip_matrix,
76 closest_psd_matrix,
77 mitigate_depolarizing_noise,
78 )
79 from .utils import (
80 kernel_matrix,
81 square_kernel_matrix,
82 )
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pennylane/kernels/__init__.py b/pennylane/kernels/__init__.py
--- a/pennylane/kernels/__init__.py
+++ b/pennylane/kernels/__init__.py
@@ -13,58 +13,8 @@
# limitations under the License.
r"""
This subpackage defines functions that relate to quantum kernel methods.
-On one hand this includes functions to call a quantum kernel systematically
-on training and test datasets to obtain the *kernel matrix*.
-On the other hand it provides postprocessing methods for those kernel
-matrices which can be used to mitigate device noise and sampling errors.
-
-Given a kernel
-
-.. math ::
-
- k: \mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}, \quad
- (x_1, x_2)\mapsto k(x_1, x_2)
-
-the kernel matrix of :math:`k` on a training dataset
-:math:`\{(x_1, y_1),\cdots (x_n, y_n)\}` with :math:`x_i\in\mathbb{R}^d`
-and :math:`y_i\in\{-1, 1\}` is defined as
-
-.. math ::
-
- K_{ij} = k(x_i, x_j).
-
-For valid kernels, this is a real symmetric positive semi-definite matrix.
-We also define the *ideal kernel matrix* for the training dataset which
-perfectly predicts whether two points have identical labels or not:
-
-.. math ::
-
- K^\ast_{ij} = y_i y_j
-
-We can measure the similarity between :math:`K` and :math:`K^\ast`,
-through the *kernel polarity* which can be expressed as the Frobenius inner
-product between the two matrices:
-
-.. math ::
-
- \operatorname{P}(k) = \langle K^\ast, K \rangle_F = \sum_{i,j=1}^n y_i y_j k(x_i, x_j)
-
-Additionally, there is the *kernel-target alignment*, which is the normalized
-counterpart to the kernel polarity:
-
-.. math ::
-
- \operatorname{TA}(k) &= \frac{P(k)}{\lVert K^\ast \rVert_F\;\lVert K \rVert_F}\\
- \lVert K\rVert_F &= \sqrt{\sum_{i,j=1}^n k(x_i, x_j)^2}\\
- \lVert K^\ast\rVert_F &= \sqrt{\sum_{i,j=1}^n (y_iy_j)^2}
-
-For datasets with different numbers of training points per class the labels are rescaled
-by the number of datapoints in the respective class to avoid that kernel polarity and
-kernel-target alignment are dominated by the properties of the kernel for just a single class.
-
-Given a callable kernel function, all these quantities can readily be computed
-using the methods in this module.
"""
+
from .cost_functions import (
polarity,
target_alignment,
diff --git a/pennylane/qaoa/__init__.py b/pennylane/qaoa/__init__.py
--- a/pennylane/qaoa/__init__.py
+++ b/pennylane/qaoa/__init__.py
@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
-This module contains functionality to construct QAOA workflows in PennyLane.
+This module provides a collection of methods that help in the construction of
+QAOA workflows.
"""
+import pennylane.qaoa.cycle
from .mixers import *
from .cost import *
from .layers import *
-import pennylane.qaoa.cycle
| {"golden_diff": "diff --git a/pennylane/kernels/__init__.py b/pennylane/kernels/__init__.py\n--- a/pennylane/kernels/__init__.py\n+++ b/pennylane/kernels/__init__.py\n@@ -13,58 +13,8 @@\n # limitations under the License.\n r\"\"\"\n This subpackage defines functions that relate to quantum kernel methods.\n-On one hand this includes functions to call a quantum kernel systematically\n-on training and test datasets to obtain the *kernel matrix*.\n-On the other hand it provides postprocessing methods for those kernel\n-matrices which can be used to mitigate device noise and sampling errors.\n-\n-Given a kernel\n-\n-.. math ::\n-\n- k: \\mathbb{R}^d \\times \\mathbb{R}^d \\to \\mathbb{R}, \\quad\n- (x_1, x_2)\\mapsto k(x_1, x_2)\n-\n-the kernel matrix of :math:`k` on a training dataset\n-:math:`\\{(x_1, y_1),\\cdots (x_n, y_n)\\}` with :math:`x_i\\in\\mathbb{R}^d`\n-and :math:`y_i\\in\\{-1, 1\\}` is defined as\n-\n-.. math ::\n-\n- K_{ij} = k(x_i, x_j).\n-\n-For valid kernels, this is a real symmetric positive semi-definite matrix.\n-We also define the *ideal kernel matrix* for the training dataset which\n-perfectly predicts whether two points have identical labels or not:\n-\n-.. math ::\n-\n- K^\\ast_{ij} = y_i y_j\n-\n-We can measure the similarity between :math:`K` and :math:`K^\\ast`,\n-through the *kernel polarity* which can be expressed as the Frobenius inner\n-product between the two matrices:\n-\n-.. math ::\n-\n- \\operatorname{P}(k) = \\langle K^\\ast, K \\rangle_F = \\sum_{i,j=1}^n y_i y_j k(x_i, x_j)\n-\n-Additionally, there is the *kernel-target alignment*, which is the normalized\n-counterpart to the kernel polarity:\n-\n-.. math ::\n-\n- \\operatorname{TA}(k) &= \\frac{P(k)}{\\lVert K^\\ast \\rVert_F\\;\\lVert K \\rVert_F}\\\\\n- \\lVert K\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n k(x_i, x_j)^2}\\\\\n- \\lVert K^\\ast\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n (y_iy_j)^2}\n-\n-For datasets with different numbers of training points per class the labels are rescaled\n-by the number of datapoints in the respective class to avoid that kernel polarity and\n-kernel-target alignment are dominated by the properties of the kernel for just a single class.\n-\n-Given a callable kernel function, all these quantities can readily be computed\n-using the methods in this module.\n \"\"\"\n+\n from .cost_functions import (\n polarity,\n target_alignment,\ndiff --git a/pennylane/qaoa/__init__.py b/pennylane/qaoa/__init__.py\n--- a/pennylane/qaoa/__init__.py\n+++ b/pennylane/qaoa/__init__.py\n@@ -12,10 +12,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n r\"\"\"\n-This module contains functionality to construct QAOA workflows in PennyLane.\n+This module provides a collection of methods that help in the construction of\n+QAOA workflows.\n \"\"\"\n \n+import pennylane.qaoa.cycle\n from .mixers import *\n from .cost import *\n from .layers import *\n-import pennylane.qaoa.cycle\n", "issue": "Change order of technical description and list of functions in documentation\nThree modules, [`kernels`](https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html), [`grouping`](https://pennylane.readthedocs.io/en/latest/code/qml_grouping.html), and [`qaoa`](https://pennylane.readthedocs.io/en/latest/code/qml_qaoa.html) have their module documentation ordered such that there is first a lengthy description of the theory, and the actual list of functions comes after. 
We should update the docs of these modules so that the functions appear *first*, and the technical details come afterwards (as was recently discussed in #1160). This will improve readability of the documentation and make it easier to find the details of a desired function.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis module contains functionality to construct QAOA workflows in PennyLane.\n\"\"\"\n\nfrom .mixers import *\nfrom .cost import *\nfrom .layers import *\nimport pennylane.qaoa.cycle\n", "path": "pennylane/qaoa/__init__.py"}, {"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis subpackage defines functions that relate to quantum kernel methods.\nOn one hand this includes functions to call a quantum kernel systematically\non training and test datasets to obtain the *kernel matrix*.\nOn the other hand it provides postprocessing methods for those kernel\nmatrices which can be used to mitigate device noise and sampling errors.\n\nGiven a kernel\n\n.. math ::\n\n k: \\mathbb{R}^d \\times \\mathbb{R}^d \\to \\mathbb{R}, \\quad\n (x_1, x_2)\\mapsto k(x_1, x_2)\n\nthe kernel matrix of :math:`k` on a training dataset\n:math:`\\{(x_1, y_1),\\cdots (x_n, y_n)\\}` with :math:`x_i\\in\\mathbb{R}^d`\nand :math:`y_i\\in\\{-1, 1\\}` is defined as\n\n.. math ::\n\n K_{ij} = k(x_i, x_j).\n\nFor valid kernels, this is a real symmetric positive semi-definite matrix.\nWe also define the *ideal kernel matrix* for the training dataset which\nperfectly predicts whether two points have identical labels or not:\n\n.. math ::\n\n K^\\ast_{ij} = y_i y_j\n\nWe can measure the similarity between :math:`K` and :math:`K^\\ast`,\nthrough the *kernel polarity* which can be expressed as the Frobenius inner\nproduct between the two matrices:\n\n.. math ::\n\n \\operatorname{P}(k) = \\langle K^\\ast, K \\rangle_F = \\sum_{i,j=1}^n y_i y_j k(x_i, x_j)\n\nAdditionally, there is the *kernel-target alignment*, which is the normalized\ncounterpart to the kernel polarity:\n\n.. 
math ::\n\n \\operatorname{TA}(k) &= \\frac{P(k)}{\\lVert K^\\ast \\rVert_F\\;\\lVert K \\rVert_F}\\\\\n \\lVert K\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n k(x_i, x_j)^2}\\\\\n \\lVert K^\\ast\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n (y_iy_j)^2}\n\nFor datasets with different numbers of training points per class the labels are rescaled\nby the number of datapoints in the respective class to avoid that kernel polarity and\nkernel-target alignment are dominated by the properties of the kernel for just a single class.\n\nGiven a callable kernel function, all these quantities can readily be computed\nusing the methods in this module.\n\"\"\"\nfrom .cost_functions import (\n polarity,\n target_alignment,\n)\nfrom .postprocessing import (\n threshold_matrix,\n displace_matrix,\n flip_matrix,\n closest_psd_matrix,\n mitigate_depolarizing_noise,\n)\nfrom .utils import (\n kernel_matrix,\n square_kernel_matrix,\n)\n", "path": "pennylane/kernels/__init__.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis module provides a collection of methods that help in the construction of\nQAOA workflows.\n\"\"\"\n\nimport pennylane.qaoa.cycle\nfrom .mixers import *\nfrom .cost import *\nfrom .layers import *\n", "path": "pennylane/qaoa/__init__.py"}, {"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis subpackage defines functions that relate to quantum kernel methods.\n\"\"\"\n\nfrom .cost_functions import (\n polarity,\n target_alignment,\n)\nfrom .postprocessing import (\n threshold_matrix,\n displace_matrix,\n flip_matrix,\n closest_psd_matrix,\n mitigate_depolarizing_noise,\n)\nfrom .utils import (\n kernel_matrix,\n square_kernel_matrix,\n)\n", "path": "pennylane/kernels/__init__.py"}]} | 1,603 | 848 |
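The kernels docstring this patch trims defines the kernel matrix K_ij = k(x_i, x_j), the ideal matrix K*_ij = y_i y_j, the polarity as the Frobenius inner product of the two, and the normalized target alignment. The toy NumPy check below reproduces those formulas; the RBF kernel and data are made up for illustration, the class-rebalancing mentioned in the docstring is omitted, and the real implementations are the `polarity` and `target_alignment` functions listed in the module.

```
import numpy as np

def k(x1, x2, gamma=1.0):
    # Toy RBF kernel standing in for a quantum kernel.
    return np.exp(-gamma * np.sum((x1 - x2) ** 2))

X = np.array([[0.0, 0.1], [0.9, 1.0], [1.1, 0.8]])
y = np.array([1, -1, -1])

K = np.array([[k(a, b) for b in X] for a in X])   # K_ij = k(x_i, x_j)
K_ideal = np.outer(y, y)                          # K*_ij = y_i * y_j

polarity = np.sum(K_ideal * K)                    # Frobenius inner product <K*, K>
target_alignment = polarity / (np.linalg.norm(K_ideal) * np.linalg.norm(K))
print(polarity, target_alignment)
```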
gh_patches_debug_34754 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1531 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add organisation filter for maps API resources
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/project_update_location.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import ProjectUpdateLocation
9 from ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer
10 from ..viewsets import BaseRSRViewSet
11
12
13 class ProjectUpdateLocationViewSet(BaseRSRViewSet):
14 """
15 API endpoint that allows organisation locations to be viewed or edited.
16 """
17 queryset = ProjectUpdateLocation.objects.all()
18 serializer_class = ProjectUpdateLocationSerializer
19
20
21 class MapProjectUpdateLocationViewSet(BaseRSRViewSet):
22
23 """Returns a resource tailored for generating a map of update locations.
24
25 Allowed parameters are:
26 limit (default 100 / max 500), and
27 location_target__project (filter on project ID)
28 """
29
30 filter_fields = ('location_target__project', )
31 max_paginate_by = 500
32 paginate_by = 100
33 queryset = ProjectUpdateLocation.objects.select_related(
34 'location_target',
35 'location_target__project').only(
36 'id', 'latitude', 'longitude',
37 'location_target__id', 'location_target__project', 'location_target__title',
38 'location_target__photo', 'location_target__video')
39 serializer_class = MapProjectUpdateLocationSerializer
40
```
Path: `akvo/rest/views/organisation_location.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3 See more details in the license.txt file located at the root folder of the Akvo RSR module.
4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
5 """
6
7 from akvo.rsr.models import OrganisationLocation
8 from ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer
9 from ..viewsets import BaseRSRViewSet
10
11
12 class OrganisationLocationViewSet(BaseRSRViewSet):
13 """
14 API endpoint that allows organisation locations to be viewed or edited.
15 """
16 queryset = OrganisationLocation.objects.all()
17 serializer_class = OrganisationLocationSerializer
18
19
20 class MapOrganisationLocationViewSet(BaseRSRViewSet):
21
22 """Returns a resource tailored for generating a map of organisation locations.
23
24 Allowed parameters are:
25 limit (default 100 / max 500),
26 location_target (filter on organisation ID), and
27 country (filter on country ID)
28 """
29
30 filter_fields = ('location_target', 'country')
31 max_paginate_by = 500
32 paginate_by = 100
33 queryset = OrganisationLocation.objects.select_related(
34 'location_target', 'country').only(
35 'id', 'latitude', 'longitude',
36 'location_target__id', 'location_target__name',
37 'location_target__logo',
38 'country')
39 serializer_class = MapOrganisationLocationSerializer
40
```
Path: `akvo/rest/views/project_location.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3 See more details in the license.txt file located at the root folder of the Akvo RSR module.
4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
5 """
6
7 from akvo.rsr.models import ProjectLocation
8 from ..serializers import ProjectLocationSerializer, MapProjectLocationSerializer
9 from ..viewsets import BaseRSRViewSet
10
11
12 class ProjectLocationViewSet(BaseRSRViewSet):
13 """
14 """
15 queryset = ProjectLocation.objects.all()
16 serializer_class = ProjectLocationSerializer
17 filter_fields = ('location_target', 'country', )
18
19
20 class MapProjectLocationViewSet(BaseRSRViewSet):
21
22 """Returns a resource tailored for generating a map of project locations.
23
24 Allowed parameters are:
25 limit (default 100 / max 500),
26 location_target (filter on project ID), and
27 country (filter on country ID)
28 """
29
30 filter_fields = ('location_target', 'country')
31 max_paginate_by = 500
32 paginate_by = 100
33 queryset = ProjectLocation.objects.select_related(
34 'location_target', 'country').only(
35 'id', 'latitude', 'longitude',
36 'location_target__id', 'location_target__title',
37 'location_target__current_image',
38 'country')
39 serializer_class = MapProjectLocationSerializer
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/organisation_location.py b/akvo/rest/views/organisation_location.py
--- a/akvo/rest/views/organisation_location.py
+++ b/akvo/rest/views/organisation_location.py
@@ -22,9 +22,9 @@
"""Returns a resource tailored for generating a map of organisation locations.
Allowed parameters are:
- limit (default 100 / max 500),
- location_target (filter on organisation ID), and
- country (filter on country ID)
+ __limit__ (default 100 / max 500),
+ __location_target__ (filter on organisation ID), and
+ __country__ (filter on country ID)
"""
filter_fields = ('location_target', 'country')
diff --git a/akvo/rest/views/project_location.py b/akvo/rest/views/project_location.py
--- a/akvo/rest/views/project_location.py
+++ b/akvo/rest/views/project_location.py
@@ -22,12 +22,17 @@
"""Returns a resource tailored for generating a map of project locations.
Allowed parameters are:
- limit (default 100 / max 500),
- location_target (filter on project ID), and
- country (filter on country ID)
+ __limit__ (default 100 / max 500),
+ __location_target__ (filter on project ID),
+ __location_target\__partners__ (filter on organisation ID), and
+ __country__ (filter on country ID)
"""
- filter_fields = ('location_target', 'country')
+ filter_fields = (
+ 'location_target',
+ 'location_target__partners',
+ 'country'
+ )
max_paginate_by = 500
paginate_by = 100
queryset = ProjectLocation.objects.select_related(
diff --git a/akvo/rest/views/project_update_location.py b/akvo/rest/views/project_update_location.py
--- a/akvo/rest/views/project_update_location.py
+++ b/akvo/rest/views/project_update_location.py
@@ -23,11 +23,18 @@
"""Returns a resource tailored for generating a map of update locations.
Allowed parameters are:
- limit (default 100 / max 500), and
- location_target__project (filter on project ID)
+ __limit__ (default 100 / max 500),
+ __location_target\__project__ (filter on project ID),
+ __location_target\__project\__partners__
+ (filter on organisation ID of the projects' organisations),
+ __location_target\__user\__employers__ (filter on organisation ID of the users' organisations)
"""
- filter_fields = ('location_target__project', )
+ filter_fields = (
+ 'location_target__project',
+ 'location_target__project__partners',
+ 'location_target__user__employers'
+ )
max_paginate_by = 500
paginate_by = 100
queryset = ProjectUpdateLocation.objects.select_related(
| {"golden_diff": "diff --git a/akvo/rest/views/organisation_location.py b/akvo/rest/views/organisation_location.py\n--- a/akvo/rest/views/organisation_location.py\n+++ b/akvo/rest/views/organisation_location.py\n@@ -22,9 +22,9 @@\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500),\n- location_target (filter on organisation ID), and\n- country (filter on country ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target__ (filter on organisation ID), and\n+ __country__ (filter on country ID)\n \"\"\"\n \n filter_fields = ('location_target', 'country')\ndiff --git a/akvo/rest/views/project_location.py b/akvo/rest/views/project_location.py\n--- a/akvo/rest/views/project_location.py\n+++ b/akvo/rest/views/project_location.py\n@@ -22,12 +22,17 @@\n \"\"\"Returns a resource tailored for generating a map of project locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500),\n- location_target (filter on project ID), and\n- country (filter on country ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target__ (filter on project ID),\n+ __location_target\\__partners__ (filter on organisation ID), and\n+ __country__ (filter on country ID)\n \"\"\"\n \n- filter_fields = ('location_target', 'country')\n+ filter_fields = (\n+ 'location_target',\n+ 'location_target__partners',\n+ 'country'\n+ )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\ndiff --git a/akvo/rest/views/project_update_location.py b/akvo/rest/views/project_update_location.py\n--- a/akvo/rest/views/project_update_location.py\n+++ b/akvo/rest/views/project_update_location.py\n@@ -23,11 +23,18 @@\n \"\"\"Returns a resource tailored for generating a map of update locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500), and\n- location_target__project (filter on project ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target\\__project__ (filter on project ID),\n+ __location_target\\__project\\__partners__\n+ (filter on organisation ID of the projects' organisations),\n+ __location_target\\__user\\__employers__ (filter on organisation ID of the users' organisations)\n \"\"\"\n \n- filter_fields = ('location_target__project', )\n+ filter_fields = (\n+ 'location_target__project',\n+ 'location_target__project__partners',\n+ 'location_target__user__employers'\n+ )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n", "issue": "Add organisation filter for maps API resources\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectUpdateLocation\nfrom ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectUpdateLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = ProjectUpdateLocation.objects.all()\n serializer_class = ProjectUpdateLocationSerializer\n\n\nclass MapProjectUpdateLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of update locations.\n\n Allowed parameters are:\n limit (default 100 
/ max 500), and\n location_target__project (filter on project ID)\n \"\"\"\n\n filter_fields = ('location_target__project', )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n 'location_target',\n 'location_target__project').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__project', 'location_target__title',\n 'location_target__photo', 'location_target__video')\n serializer_class = MapProjectUpdateLocationSerializer\n", "path": "akvo/rest/views/project_update_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import OrganisationLocation\nfrom ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass OrganisationLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = OrganisationLocation.objects.all()\n serializer_class = OrganisationLocationSerializer\n\n\nclass MapOrganisationLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n\n Allowed parameters are:\n limit (default 100 / max 500),\n location_target (filter on organisation ID), and\n country (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = OrganisationLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__name',\n 'location_target__logo',\n 'country')\n serializer_class = MapOrganisationLocationSerializer\n", "path": "akvo/rest/views/organisation_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import ProjectLocation\nfrom ..serializers import ProjectLocationSerializer, MapProjectLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectLocationViewSet(BaseRSRViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectLocation.objects.all()\n serializer_class = ProjectLocationSerializer\n filter_fields = ('location_target', 'country', )\n\n\nclass MapProjectLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of project locations.\n\n Allowed parameters are:\n limit (default 100 / max 500),\n location_target (filter on project ID), and\n country (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__title',\n 'location_target__current_image',\n 'country')\n serializer_class = MapProjectLocationSerializer\n", "path": "akvo/rest/views/project_location.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more 
details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectUpdateLocation\nfrom ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectUpdateLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = ProjectUpdateLocation.objects.all()\n serializer_class = ProjectUpdateLocationSerializer\n\n\nclass MapProjectUpdateLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of update locations.\n\n Allowed parameters are:\n __limit__ (default 100 / max 500),\n __location_target\\__project__ (filter on project ID),\n __location_target\\__project\\__partners__\n (filter on organisation ID of the projects' organisations),\n __location_target\\__user\\__employers__ (filter on organisation ID of the users' organisations)\n \"\"\"\n\n filter_fields = (\n 'location_target__project',\n 'location_target__project__partners',\n 'location_target__user__employers'\n )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n 'location_target',\n 'location_target__project').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__project', 'location_target__title',\n 'location_target__photo', 'location_target__video')\n serializer_class = MapProjectUpdateLocationSerializer\n", "path": "akvo/rest/views/project_update_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import OrganisationLocation\nfrom ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass OrganisationLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = OrganisationLocation.objects.all()\n serializer_class = OrganisationLocationSerializer\n\n\nclass MapOrganisationLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n\n Allowed parameters are:\n __limit__ (default 100 / max 500),\n __location_target__ (filter on organisation ID), and\n __country__ (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = OrganisationLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__name',\n 'location_target__logo',\n 'country')\n serializer_class = MapOrganisationLocationSerializer\n", "path": "akvo/rest/views/organisation_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import ProjectLocation\nfrom ..serializers import ProjectLocationSerializer, 
MapProjectLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectLocationViewSet(BaseRSRViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectLocation.objects.all()\n serializer_class = ProjectLocationSerializer\n filter_fields = ('location_target', 'country', )\n\n\nclass MapProjectLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of project locations.\n\n Allowed parameters are:\n __limit__ (default 100 / max 500),\n __location_target__ (filter on project ID),\n __location_target\\__partners__ (filter on organisation ID), and\n __country__ (filter on country ID)\n \"\"\"\n\n filter_fields = (\n 'location_target',\n 'location_target__partners',\n 'country'\n )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__title',\n 'location_target__current_image',\n 'country')\n serializer_class = MapProjectLocationSerializer\n", "path": "akvo/rest/views/project_location.py"}]} | 1,465 | 696 |
gh_patches_debug_10696 | rasdani/github-patches | git_diff | Kinto__kinto-1138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enforce the permission endpoint when the admin plugin is included.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/__init__.py`
Content:
```
1 import pkg_resources
2 import logging
3
4 import kinto.core
5 from pyramid.config import Configurator
6 from pyramid.settings import asbool
7 from pyramid.security import Authenticated, Everyone
8
9 from kinto.authorization import RouteFactory
10
11
12 # Module version, as defined in PEP-0396.
13 __version__ = pkg_resources.get_distribution(__package__).version
14
15 # Implemented HTTP API Version
16 HTTP_API_VERSION = '1.16'
17
18 # Main kinto logger
19 logger = logging.getLogger(__name__)
20
21
22 DEFAULT_SETTINGS = {
23 'flush_endpoint_enabled': False,
24 'retry_after_seconds': 3,
25 'cache_backend': 'kinto.core.cache.memory',
26 'permission_backend': 'kinto.core.permission.memory',
27 'storage_backend': 'kinto.core.storage.memory',
28 'project_docs': 'https://kinto.readthedocs.io/',
29 'bucket_create_principals': Authenticated,
30 'permissions_read_principals': Everyone,
31 'multiauth.authorization_policy': (
32 'kinto.authorization.AuthorizationPolicy'),
33 'experimental_collection_schema_validation': False,
34 'experimental_permissions_endpoint': False,
35 'http_api_version': HTTP_API_VERSION,
36 'bucket_id_generator': 'kinto.views.NameGenerator',
37 'collection_id_generator': 'kinto.views.NameGenerator',
38 'group_id_generator': 'kinto.views.NameGenerator',
39 'record_id_generator': 'kinto.views.RelaxedUUID'
40 }
41
42
43 def main(global_config, config=None, **settings):
44 if not config:
45 config = Configurator(settings=settings, root_factory=RouteFactory)
46
47 # Force project name, since it determines settings prefix.
48 config.add_settings({'kinto.project_name': 'kinto'})
49
50 kinto.core.initialize(config,
51 version=__version__,
52 default_settings=DEFAULT_SETTINGS)
53
54 settings = config.get_settings()
55
56 # Expose capability
57 schema_enabled = asbool(
58 settings['experimental_collection_schema_validation']
59 )
60 if schema_enabled:
61 config.add_api_capability(
62 "schema",
63 description="Validates collection records with JSON schemas.",
64 url="https://kinto.readthedocs.io/en/latest/api/1.x/"
65 "collections.html#collection-json-schema")
66
67 # Scan Kinto views.
68 kwargs = {}
69
70 flush_enabled = asbool(settings['flush_endpoint_enabled'])
71 if flush_enabled:
72 config.add_api_capability(
73 "flush_endpoint",
74 description="The __flush__ endpoint can be used to remove all "
75 "data from all backends.",
76 url="https://kinto.readthedocs.io/en/latest/configuration/"
77 "settings.html#activating-the-flush-endpoint")
78 else:
79 kwargs['ignore'] = ['kinto.views.flush']
80
81 # Permissions endpoint enabled if permission backend is setup.
82 permissions_endpoint_enabled = (
83 asbool(settings['experimental_permissions_endpoint']) and
84 hasattr(config.registry, 'permission'))
85 if permissions_endpoint_enabled:
86 config.add_api_capability(
87 "permissions_endpoint",
88 description="The permissions endpoint can be used to list all "
89 "user objects permissions.",
90 url="https://kinto.readthedocs.io/en/latest/configuration/"
91 "settings.html#activating-the-permissions-endpoint")
92 else:
93 kwargs.setdefault('ignore', []).append('kinto.views.permissions')
94
95 config.scan("kinto.views", **kwargs)
96
97 app = config.make_wsgi_app()
98
99 # Install middleware (no-op if disabled)
100 return kinto.core.install_middlewares(app, settings)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/__init__.py b/kinto/__init__.py
--- a/kinto/__init__.py
+++ b/kinto/__init__.py
@@ -79,8 +79,9 @@
kwargs['ignore'] = ['kinto.views.flush']
# Permissions endpoint enabled if permission backend is setup.
+ is_admin_enabled = 'kinto.plugins.admin' in settings['includes']
permissions_endpoint_enabled = (
- asbool(settings['experimental_permissions_endpoint']) and
+ (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and
hasattr(config.registry, 'permission'))
if permissions_endpoint_enabled:
config.add_api_capability(
| {"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -79,8 +79,9 @@\n kwargs['ignore'] = ['kinto.views.flush']\n \n # Permissions endpoint enabled if permission backend is setup.\n+ is_admin_enabled = 'kinto.plugins.admin' in settings['includes']\n permissions_endpoint_enabled = (\n- asbool(settings['experimental_permissions_endpoint']) and\n+ (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n", "issue": "Enforce the permission endpoint when the admin plugin is included.\n\nEnforce the permission endpoint when the admin plugin is included.\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated, Everyone\n\nfrom kinto.authorization import RouteFactory\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.16'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'permissions_read_principals': Everyone,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': False,\n 'experimental_permissions_endpoint': False,\n 'http_api_version': HTTP_API_VERSION,\n 'bucket_id_generator': 'kinto.views.NameGenerator',\n 'collection_id_generator': 'kinto.views.NameGenerator',\n 'group_id_generator': 'kinto.views.NameGenerator',\n 'record_id_generator': 'kinto.views.RelaxedUUID'\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings['flush_endpoint_enabled'])\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = ['kinto.views.flush']\n\n # Permissions endpoint enabled if permission backend is setup.\n permissions_endpoint_enabled = (\n asbool(settings['experimental_permissions_endpoint']) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n 
\"permissions_endpoint\",\n description=\"The permissions endpoint can be used to list all \"\n \"user objects permissions.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-permissions-endpoint\")\n else:\n kwargs.setdefault('ignore', []).append('kinto.views.permissions')\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (no-op if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}], "after_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated, Everyone\n\nfrom kinto.authorization import RouteFactory\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.16'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'permissions_read_principals': Everyone,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': False,\n 'experimental_permissions_endpoint': False,\n 'http_api_version': HTTP_API_VERSION,\n 'bucket_id_generator': 'kinto.views.NameGenerator',\n 'collection_id_generator': 'kinto.views.NameGenerator',\n 'group_id_generator': 'kinto.views.NameGenerator',\n 'record_id_generator': 'kinto.views.RelaxedUUID'\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings['flush_endpoint_enabled'])\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = ['kinto.views.flush']\n\n # Permissions endpoint enabled if permission backend is setup.\n is_admin_enabled = 'kinto.plugins.admin' in settings['includes']\n permissions_endpoint_enabled = (\n (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n \"permissions_endpoint\",\n description=\"The permissions endpoint can be used to list all \"\n \"user objects permissions.\",\n 
url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-permissions-endpoint\")\n else:\n kwargs.setdefault('ignore', []).append('kinto.views.permissions')\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (no-op if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}]} | 1,212 | 144 |
gh_patches_debug_31308 | rasdani/github-patches | git_diff | dask__distributed-4984 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop down tile to reveal "secret" dashboards
We're accumulating a lot of _secret_ dashboard pages https://github.com/dask/distributed/blob/c2557938e6c4175534031cba5ca5ac9d2cdc95f7/distributed/dashboard/scheduler.py#L82-L119
although most are not easily accessible from the UI. Most of the pages are not useful for the ordinary user and are only relevant for specific edge cases or debugging. Hence, it makes sense that they are not promoted as top-level dashboard pages.
However, at least for debugging purposes, I would really appreciate it if these pages were a bit easier to navigate. In particular I'm looking for a way which doesn't require me to know the exact endpoint for an individual plot and type it into my browser.
I would propose to add a drop down menu / button which can be used to browse all _hidden_ dashboard pages.
Disclaimer: I can't implement this. I barely know what bokeh is.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/dashboard/scheduler.py`
Content:
```
1 from urllib.parse import urljoin
2
3 from tornado import web
4 from tornado.ioloop import IOLoop
5
6 try:
7 import numpy as np
8 except ImportError:
9 np = False
10
11 from .components.nvml import gpu_doc # noqa: 1708
12 from .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc
13 from .components.scheduler import (
14 AggregateAction,
15 BandwidthTypes,
16 BandwidthWorkers,
17 ComputePerKey,
18 CurrentLoad,
19 MemoryByKey,
20 NBytes,
21 NBytesCluster,
22 Occupancy,
23 SystemMonitor,
24 TaskGraph,
25 TaskGroupGraph,
26 TaskProgress,
27 TaskStream,
28 WorkerTable,
29 events_doc,
30 graph_doc,
31 individual_doc,
32 individual_profile_doc,
33 individual_profile_server_doc,
34 profile_doc,
35 profile_server_doc,
36 status_doc,
37 stealing_doc,
38 systemmonitor_doc,
39 tasks_doc,
40 tg_graph_doc,
41 workers_doc,
42 )
43 from .core import BokehApplication
44 from .worker import counters_doc
45
46 template_variables = {
47 "pages": [
48 "status",
49 "workers",
50 "tasks",
51 "system",
52 "profile",
53 "graph",
54 "groups",
55 "info",
56 ]
57 }
58
59 if NVML_ENABLED:
60 template_variables["pages"].insert(4, "gpu")
61
62
63 def connect(application, http_server, scheduler, prefix=""):
64 bokeh_app = BokehApplication(
65 applications, scheduler, prefix=prefix, template_variables=template_variables
66 )
67 application.add_application(bokeh_app)
68 bokeh_app.initialize(IOLoop.current())
69
70 bokeh_app.add_handlers(
71 r".*",
72 [
73 (
74 r"/",
75 web.RedirectHandler,
76 {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
77 )
78 ],
79 )
80
81
82 applications = {
83 "/system": systemmonitor_doc,
84 "/stealing": stealing_doc,
85 "/workers": workers_doc,
86 "/events": events_doc,
87 "/counters": counters_doc,
88 "/tasks": tasks_doc,
89 "/status": status_doc,
90 "/profile": profile_doc,
91 "/profile-server": profile_server_doc,
92 "/graph": graph_doc,
93 "/groups": tg_graph_doc,
94 "/gpu": gpu_doc,
95 "/individual-task-stream": individual_doc(
96 TaskStream, 100, n_rectangles=1000, clear_interval="10s"
97 ),
98 "/individual-progress": individual_doc(TaskProgress, 100, height=160),
99 "/individual-graph": individual_doc(TaskGraph, 200),
100 "/individual-groups": individual_doc(TaskGroupGraph, 200),
101 "/individual-nbytes": individual_doc(NBytes, 100),
102 "/individual-nbytes-cluster": individual_doc(NBytesCluster, 100),
103 "/individual-cpu": individual_doc(CurrentLoad, 100, fig_attr="cpu_figure"),
104 "/individual-nprocessing": individual_doc(
105 CurrentLoad, 100, fig_attr="processing_figure"
106 ),
107 "/individual-occupancy": individual_doc(Occupancy, 100),
108 "/individual-workers": individual_doc(WorkerTable, 500),
109 "/individual-bandwidth-types": individual_doc(BandwidthTypes, 500),
110 "/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500),
111 "/individual-memory-by-key": individual_doc(MemoryByKey, 500),
112 "/individual-compute-time-per-key": individual_doc(ComputePerKey, 500),
113 "/individual-aggregate-time-per-action": individual_doc(AggregateAction, 500),
114 "/individual-scheduler-system": individual_doc(SystemMonitor, 500),
115 "/individual-profile": individual_profile_doc,
116 "/individual-profile-server": individual_profile_server_doc,
117 "/individual-gpu-memory": gpu_memory_doc,
118 "/individual-gpu-utilization": gpu_utilization_doc,
119 }
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py
--- a/distributed/dashboard/scheduler.py
+++ b/distributed/dashboard/scheduler.py
@@ -43,42 +43,6 @@
from .core import BokehApplication
from .worker import counters_doc
-template_variables = {
- "pages": [
- "status",
- "workers",
- "tasks",
- "system",
- "profile",
- "graph",
- "groups",
- "info",
- ]
-}
-
-if NVML_ENABLED:
- template_variables["pages"].insert(4, "gpu")
-
-
-def connect(application, http_server, scheduler, prefix=""):
- bokeh_app = BokehApplication(
- applications, scheduler, prefix=prefix, template_variables=template_variables
- )
- application.add_application(bokeh_app)
- bokeh_app.initialize(IOLoop.current())
-
- bokeh_app.add_handlers(
- r".*",
- [
- (
- r"/",
- web.RedirectHandler,
- {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
- )
- ],
- )
-
-
applications = {
"/system": systemmonitor_doc,
"/stealing": stealing_doc,
@@ -117,3 +81,40 @@
"/individual-gpu-memory": gpu_memory_doc,
"/individual-gpu-utilization": gpu_utilization_doc,
}
+
+
+template_variables = {
+ "pages": [
+ "status",
+ "workers",
+ "tasks",
+ "system",
+ "profile",
+ "graph",
+ "groups",
+ "info",
+ ],
+ "plots": [x.replace("/", "") for x in applications if "individual" in x],
+}
+
+if NVML_ENABLED:
+ template_variables["pages"].insert(4, "gpu")
+
+
+def connect(application, http_server, scheduler, prefix=""):
+ bokeh_app = BokehApplication(
+ applications, scheduler, prefix=prefix, template_variables=template_variables
+ )
+ application.add_application(bokeh_app)
+ bokeh_app.initialize(IOLoop.current())
+
+ bokeh_app.add_handlers(
+ r".*",
+ [
+ (
+ r"/",
+ web.RedirectHandler,
+ {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
+ )
+ ],
+ )
| {"golden_diff": "diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py\n--- a/distributed/dashboard/scheduler.py\n+++ b/distributed/dashboard/scheduler.py\n@@ -43,42 +43,6 @@\n from .core import BokehApplication\n from .worker import counters_doc\n \n-template_variables = {\n- \"pages\": [\n- \"status\",\n- \"workers\",\n- \"tasks\",\n- \"system\",\n- \"profile\",\n- \"graph\",\n- \"groups\",\n- \"info\",\n- ]\n-}\n-\n-if NVML_ENABLED:\n- template_variables[\"pages\"].insert(4, \"gpu\")\n-\n-\n-def connect(application, http_server, scheduler, prefix=\"\"):\n- bokeh_app = BokehApplication(\n- applications, scheduler, prefix=prefix, template_variables=template_variables\n- )\n- application.add_application(bokeh_app)\n- bokeh_app.initialize(IOLoop.current())\n-\n- bokeh_app.add_handlers(\n- r\".*\",\n- [\n- (\n- r\"/\",\n- web.RedirectHandler,\n- {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n- )\n- ],\n- )\n-\n-\n applications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n@@ -117,3 +81,40 @@\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n }\n+\n+\n+template_variables = {\n+ \"pages\": [\n+ \"status\",\n+ \"workers\",\n+ \"tasks\",\n+ \"system\",\n+ \"profile\",\n+ \"graph\",\n+ \"groups\",\n+ \"info\",\n+ ],\n+ \"plots\": [x.replace(\"/\", \"\") for x in applications if \"individual\" in x],\n+}\n+\n+if NVML_ENABLED:\n+ template_variables[\"pages\"].insert(4, \"gpu\")\n+\n+\n+def connect(application, http_server, scheduler, prefix=\"\"):\n+ bokeh_app = BokehApplication(\n+ applications, scheduler, prefix=prefix, template_variables=template_variables\n+ )\n+ application.add_application(bokeh_app)\n+ bokeh_app.initialize(IOLoop.current())\n+\n+ bokeh_app.add_handlers(\n+ r\".*\",\n+ [\n+ (\n+ r\"/\",\n+ web.RedirectHandler,\n+ {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n+ )\n+ ],\n+ )\n", "issue": "Drop down tile to reveal \"secret\" dashboards\nWe're accumulating a lot of _secret_ dashboard pages https://github.com/dask/distributed/blob/c2557938e6c4175534031cba5ca5ac9d2cdc95f7/distributed/dashboard/scheduler.py#L82-L119\r\nalthough most are not easily accessible from the UI. Most of the pages are not useful for the ordinary user and are only relevant for specific edge cases or debugging. hence, it makes sense that they are not promoted as a top-level dashboard page.\r\n\r\nHowever, at least for debugging purposes, I would really appreciate if these pages were a bit easier to navigate. In particular I'm looking for a way which doesn't require me to know the exact endpoint for an individual plot and requires me to type it into my browser.\r\n\r\nI would propose to add a drop down menu / button which can be used to browse all _hidden_ dashboard pages.\r\n\r\nDisclaimer: I can't implement this. 
I barely know what bokeh is.\n", "before_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskGroupGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n tg_graph_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\ntemplate_variables = {\n \"pages\": [\n \"status\",\n \"workers\",\n \"tasks\",\n \"system\",\n \"profile\",\n \"graph\",\n \"groups\",\n \"info\",\n ]\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/groups\": tg_graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-groups\": individual_doc(TaskGroupGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n", "path": "distributed/dashboard/scheduler.py"}], "after_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import 
IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskGroupGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n tg_graph_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/groups\": tg_graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-groups\": individual_doc(TaskGroupGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n\n\ntemplate_variables = {\n \"pages\": [\n \"status\",\n \"workers\",\n \"tasks\",\n \"system\",\n \"profile\",\n \"graph\",\n \"groups\",\n \"info\",\n ],\n \"plots\": [x.replace(\"/\", \"\") for x in applications if \"individual\" in x],\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n", "path": "distributed/dashboard/scheduler.py"}]} | 1,592 | 548 |
gh_patches_debug_28060 | rasdani/github-patches | git_diff | dynaconf__dynaconf-131 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
YAML.load without a loader is deprecated for security purposes
We've started seeing the following warning:
```
lib/python3.6/site-packages/dynaconf/loaders/base.py:95: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
```
See here: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/loaders/yaml_loader.py`
Content:
```
1 # coding: utf-8
2 import io
3 from pathlib import Path
4 from dynaconf import default_settings
5 from dynaconf.loaders.base import BaseLoader
6 from dynaconf.constants import YAML_EXTENSIONS
7 from dynaconf.utils import object_merge
8 try:
9 import yaml
10 except ImportError as e: # pragma: no cover
11 yaml = None
12
13
14 def load(obj, env=None, silent=True, key=None, filename=None):
15 """
16 Reads and loads in to "obj" a single key or all keys from source file.
17
18 :param obj: the settings instance
19 :param env: settings current env default='development'
20 :param silent: if errors should raise
21 :param key: if defined load a single key, else load all in env
22 :param filename: Optional custom filename to load
23 :return: None
24 """
25 if yaml is None: # pragma: no cover
26 BaseLoader.warn_not_installed(obj, 'yaml')
27 return
28
29 loader = BaseLoader(
30 obj=obj,
31 env=env,
32 identifier='yaml',
33 extensions=YAML_EXTENSIONS,
34 file_reader=yaml.load,
35 string_reader=yaml.load
36 )
37 loader.load(filename=filename, key=key, silent=silent)
38
39
40 def write(settings_path, settings_data, merge=True):
41 """Write data to a settings file.
42
43 :param settings_path: the filepath
44 :param settings_data: a dictionary with data
45 :param merge: boolean if existing file should be merged with new data
46 """
47 settings_path = Path(settings_path)
48 if settings_path.exists() and merge: # pragma: no cover
49 object_merge(
50 yaml.load(
51 io.open(
52 str(settings_path),
53 encoding=default_settings.ENCODING_FOR_DYNACONF
54 )
55 ),
56 settings_data
57 )
58
59 yaml.dump(
60 settings_data,
61 io.open(
62 str(settings_path), 'w',
63 encoding=default_settings.ENCODING_FOR_DYNACONF
64 )
65 )
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py
--- a/dynaconf/loaders/yaml_loader.py
+++ b/dynaconf/loaders/yaml_loader.py
@@ -1,10 +1,13 @@
# coding: utf-8
import io
+import os
from pathlib import Path
+from warnings import warn
from dynaconf import default_settings
from dynaconf.loaders.base import BaseLoader
from dynaconf.constants import YAML_EXTENSIONS
from dynaconf.utils import object_merge
+
try:
import yaml
except ImportError as e: # pragma: no cover
@@ -26,13 +29,25 @@
BaseLoader.warn_not_installed(obj, 'yaml')
return
+ # Resolve the loaders
+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
+ # Possible values are `safe_load, full_load, unsafe_load, load`
+ yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load')
+ yaml_reader = getattr(yaml, yaml_loader_name, yaml.load)
+ if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover
+ warn(
+ "yaml.unsafe_load is deprecated."
+ " Please read https://msg.pyyaml.org/load for full details."
+ " Try to use full_load or safe_load."
+ )
+
loader = BaseLoader(
obj=obj,
env=env,
identifier='yaml',
extensions=YAML_EXTENSIONS,
- file_reader=yaml.load,
- string_reader=yaml.load
+ file_reader=yaml_reader,
+ string_reader=yaml_reader
)
loader.load(filename=filename, key=key, silent=silent)
| {"golden_diff": "diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py\n--- a/dynaconf/loaders/yaml_loader.py\n+++ b/dynaconf/loaders/yaml_loader.py\n@@ -1,10 +1,13 @@\n # coding: utf-8\n import io\n+import os\n from pathlib import Path\n+from warnings import warn\n from dynaconf import default_settings\n from dynaconf.loaders.base import BaseLoader\n from dynaconf.constants import YAML_EXTENSIONS\n from dynaconf.utils import object_merge\n+\n try:\n import yaml\n except ImportError as e: # pragma: no cover\n@@ -26,13 +29,25 @@\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n \n+ # Resolve the loaders\n+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n+ # Possible values are `safe_load, full_load, unsafe_load, load`\n+ yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load')\n+ yaml_reader = getattr(yaml, yaml_loader_name, yaml.load)\n+ if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover\n+ warn(\n+ \"yaml.unsafe_load is deprecated.\"\n+ \" Please read https://msg.pyyaml.org/load for full details.\"\n+ \" Try to use full_load or safe_load.\"\n+ )\n+\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n- file_reader=yaml.load,\n- string_reader=yaml.load\n+ file_reader=yaml_reader,\n+ string_reader=yaml_reader\n )\n loader.load(filename=filename, key=key, silent=silent)\n", "issue": "YAML.load without a loader is deprecated for security purposes\nWe've started seeing the following warning:\r\n```\r\nlib/python3.6/site-packages/dynaconf/loaders/base.py:95: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.\r\n```\r\n\r\nSee here: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n", "before_files": [{"content": "# coding: utf-8\nimport io\nfrom pathlib import Path\nfrom dynaconf import default_settings\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.utils import object_merge\ntry:\n import yaml\nexcept ImportError as e: # pragma: no cover\n yaml = None\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n if yaml is None: # pragma: no cover\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n file_reader=yaml.load,\n string_reader=yaml.load\n )\n loader.load(filename=filename, key=key, silent=silent)\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n object_merge(\n yaml.load(\n io.open(\n str(settings_path),\n encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n ),\n settings_data\n )\n\n yaml.dump(\n settings_data,\n io.open(\n str(settings_path), 'w',\n 
encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}], "after_files": [{"content": "# coding: utf-8\nimport io\nimport os\nfrom pathlib import Path\nfrom warnings import warn\nfrom dynaconf import default_settings\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.utils import object_merge\n\ntry:\n import yaml\nexcept ImportError as e: # pragma: no cover\n yaml = None\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n if yaml is None: # pragma: no cover\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Possible values are `safe_load, full_load, unsafe_load, load`\n yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load')\n yaml_reader = getattr(yaml, yaml_loader_name, yaml.load)\n if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover\n warn(\n \"yaml.unsafe_load is deprecated.\"\n \" Please read https://msg.pyyaml.org/load for full details.\"\n \" Try to use full_load or safe_load.\"\n )\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n file_reader=yaml_reader,\n string_reader=yaml_reader\n )\n loader.load(filename=filename, key=key, silent=silent)\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n object_merge(\n yaml.load(\n io.open(\n str(settings_path),\n encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n ),\n settings_data\n )\n\n yaml.dump(\n settings_data,\n io.open(\n str(settings_path), 'w',\n encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}]} | 925 | 401 |
gh_patches_debug_41208 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3751 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Audit log disaggregation categories and labels
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_dimension_name.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimensionName
9
10 from ..serializers import IndicatorDimensionNameSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionNameViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')
18 serializer_class = IndicatorDimensionNameSerializer
19 project_relation = 'project__'
20
```
Path: `akvo/rest/views/indicator_dimension_value.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimensionValue
9
10 from ..serializers import IndicatorDimensionValueSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionValueViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimensionValue.objects.all()
18 serializer_class = IndicatorDimensionValueSerializer
19 project_relation = 'name__project__'
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/indicator_dimension_name.py b/akvo/rest/views/indicator_dimension_name.py
--- a/akvo/rest/views/indicator_dimension_name.py
+++ b/akvo/rest/views/indicator_dimension_name.py
@@ -5,6 +5,8 @@
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
+from django.contrib.contenttypes.models import ContentType
from akvo.rsr.models import IndicatorDimensionName
from ..serializers import IndicatorDimensionNameSerializer
@@ -17,3 +19,31 @@
queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')
serializer_class = IndicatorDimensionNameSerializer
project_relation = 'project__'
+
+ def create(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs)
+ self._log_action(ADDITION, response.data, str(request.data))
+ return response
+
+ def update(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs)
+ self._log_action(CHANGE, response.data, str(request.data))
+ return response
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ data = {'id': instance.id, 'name': instance.name}
+ response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs)
+ self._log_action(DELETION, data)
+ return response
+
+ def _log_action(self, action_flag, instance, message=''):
+ user = self.request.user
+ LogEntry.objects.log_action(
+ user_id=user.pk,
+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk,
+ object_id=instance['id'],
+ object_repr=str(instance),
+ action_flag=action_flag,
+ change_message=message
+ )
diff --git a/akvo/rest/views/indicator_dimension_value.py b/akvo/rest/views/indicator_dimension_value.py
--- a/akvo/rest/views/indicator_dimension_value.py
+++ b/akvo/rest/views/indicator_dimension_value.py
@@ -5,6 +5,8 @@
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
+from django.contrib.contenttypes.models import ContentType
from akvo.rsr.models import IndicatorDimensionValue
from ..serializers import IndicatorDimensionValueSerializer
@@ -17,3 +19,31 @@
queryset = IndicatorDimensionValue.objects.all()
serializer_class = IndicatorDimensionValueSerializer
project_relation = 'name__project__'
+
+ def create(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs)
+ self._log_action(ADDITION, response.data, str(request.data))
+ return response
+
+ def update(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs)
+ self._log_action(CHANGE, response.data, str(request.data))
+ return response
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ data = {'id': instance.id, 'value': instance.value}
+ response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs)
+ self._log_action(DELETION, data)
+ return response
+
+ def _log_action(self, action_flag, instance, message=''):
+ user = self.request.user
+ LogEntry.objects.log_action(
+ user_id=user.pk,
+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk,
+ object_id=instance['id'],
+ object_repr=str(instance),
+ action_flag=action_flag,
+ change_message=message
+ )
| {"golden_diff": "diff --git a/akvo/rest/views/indicator_dimension_name.py b/akvo/rest/views/indicator_dimension_name.py\n--- a/akvo/rest/views/indicator_dimension_name.py\n+++ b/akvo/rest/views/indicator_dimension_name.py\n@@ -5,6 +5,8 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\n+from django.contrib.contenttypes.models import ContentType\n from akvo.rsr.models import IndicatorDimensionName\n \n from ..serializers import IndicatorDimensionNameSerializer\n@@ -17,3 +19,31 @@\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n+\n+ def create(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs)\n+ self._log_action(ADDITION, response.data, str(request.data))\n+ return response\n+\n+ def update(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs)\n+ self._log_action(CHANGE, response.data, str(request.data))\n+ return response\n+\n+ def destroy(self, request, *args, **kwargs):\n+ instance = self.get_object()\n+ data = {'id': instance.id, 'name': instance.name}\n+ response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs)\n+ self._log_action(DELETION, data)\n+ return response\n+\n+ def _log_action(self, action_flag, instance, message=''):\n+ user = self.request.user\n+ LogEntry.objects.log_action(\n+ user_id=user.pk,\n+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk,\n+ object_id=instance['id'],\n+ object_repr=str(instance),\n+ action_flag=action_flag,\n+ change_message=message\n+ )\ndiff --git a/akvo/rest/views/indicator_dimension_value.py b/akvo/rest/views/indicator_dimension_value.py\n--- a/akvo/rest/views/indicator_dimension_value.py\n+++ b/akvo/rest/views/indicator_dimension_value.py\n@@ -5,6 +5,8 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\n+from django.contrib.contenttypes.models import ContentType\n from akvo.rsr.models import IndicatorDimensionValue\n \n from ..serializers import IndicatorDimensionValueSerializer\n@@ -17,3 +19,31 @@\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n+\n+ def create(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs)\n+ self._log_action(ADDITION, response.data, str(request.data))\n+ return response\n+\n+ def update(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs)\n+ self._log_action(CHANGE, response.data, str(request.data))\n+ return response\n+\n+ def destroy(self, request, *args, **kwargs):\n+ instance = self.get_object()\n+ data = {'id': instance.id, 'value': instance.value}\n+ response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs)\n+ self._log_action(DELETION, data)\n+ return response\n+\n+ def _log_action(self, action_flag, instance, message=''):\n+ user = self.request.user\n+ LogEntry.objects.log_action(\n+ user_id=user.pk,\n+ 
content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk,\n+ object_id=instance['id'],\n+ object_repr=str(instance),\n+ action_flag=action_flag,\n+ change_message=message\n+ )\n", "issue": "Audit log disaggregation categories and labels\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimensionName\n\nfrom ..serializers import IndicatorDimensionNameSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionNameViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n", "path": "akvo/rest/views/indicator_dimension_name.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimensionValue\n\nfrom ..serializers import IndicatorDimensionValueSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionValueViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n", "path": "akvo/rest/views/indicator_dimension_value.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\nfrom django.contrib.contenttypes.models import ContentType\nfrom akvo.rsr.models import IndicatorDimensionName\n\nfrom ..serializers import IndicatorDimensionNameSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionNameViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n\n def create(self, request, *args, **kwargs):\n response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs)\n self._log_action(ADDITION, response.data, str(request.data))\n return response\n\n def update(self, request, *args, **kwargs):\n response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs)\n self._log_action(CHANGE, response.data, str(request.data))\n return response\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n data = {'id': instance.id, 'name': instance.name}\n response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs)\n self._log_action(DELETION, data)\n return response\n\n def _log_action(self, action_flag, instance, message=''):\n user = self.request.user\n LogEntry.objects.log_action(\n user_id=user.pk,\n 
content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk,\n object_id=instance['id'],\n object_repr=str(instance),\n action_flag=action_flag,\n change_message=message\n )\n", "path": "akvo/rest/views/indicator_dimension_name.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\nfrom django.contrib.contenttypes.models import ContentType\nfrom akvo.rsr.models import IndicatorDimensionValue\n\nfrom ..serializers import IndicatorDimensionValueSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionValueViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n\n def create(self, request, *args, **kwargs):\n response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs)\n self._log_action(ADDITION, response.data, str(request.data))\n return response\n\n def update(self, request, *args, **kwargs):\n response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs)\n self._log_action(CHANGE, response.data, str(request.data))\n return response\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n data = {'id': instance.id, 'value': instance.value}\n response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs)\n self._log_action(DELETION, data)\n return response\n\n def _log_action(self, action_flag, instance, message=''):\n user = self.request.user\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk,\n object_id=instance['id'],\n object_repr=str(instance),\n action_flag=action_flag,\n change_message=message\n )\n", "path": "akvo/rest/views/indicator_dimension_value.py"}]} | 646 | 925 |
gh_patches_debug_61680 | rasdani/github-patches | git_diff | joke2k__faker-48 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Capital O missing an umlaut
Hello, I noticed in faker/Providers/De_de/internet.py in the _to_ascii method, the capital O is missing an umlaut.
It should be: ('Ö', 'Oe')
Currently:
replacements = (
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('O', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
('ß', 'ss')
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/de_DE/internet.py`
Content:
```
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from ..internet import Provider as InternetProvider
4
5 import re
6
7
8 class Provider(InternetProvider):
9
10 free_email_domains = (
11 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com',
12 'aol.de', 'gmx.de'
13 )
14 tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de')
15
16 @staticmethod
17 def _to_ascii(string):
18 replacements = (
19 ('ä', 'ae'), ('Ä', 'Ae'),
20 ('ö', 'oe'), ('O', 'Oe'),
21 ('ü', 'ue'), ('Ü', 'Ue'),
22 ('ß', 'ss')
23 )
24 for search, replace in replacements:
25 string = string.replace(search, replace)
26
27 return string
28
29 def user_name(self):
30 pattern = self.random_element(self.user_name_formats)
31 return self._to_ascii(
32 self.bothify(self.generator.parse(pattern)
33 ).lower())
34
35 def domain_word(self):
36 company = self.generator.format('company')
37 company_elements = company.split(' ')
38 company = self._to_ascii(company_elements.pop(0))
39 return re.sub(r'\W', '', company).lower()
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/de_DE/internet.py b/faker/providers/de_DE/internet.py
--- a/faker/providers/de_DE/internet.py
+++ b/faker/providers/de_DE/internet.py
@@ -17,7 +17,7 @@
def _to_ascii(string):
replacements = (
('ä', 'ae'), ('Ä', 'Ae'),
- ('ö', 'oe'), ('O', 'Oe'),
+ ('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
('ß', 'ss')
)
| {"golden_diff": "diff --git a/faker/providers/de_DE/internet.py b/faker/providers/de_DE/internet.py\n--- a/faker/providers/de_DE/internet.py\n+++ b/faker/providers/de_DE/internet.py\n@@ -17,7 +17,7 @@\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n- ('\u00f6', 'oe'), ('O', 'Oe'),\n+ ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n", "issue": "Capital O missing an umlaut\nHello, I noticed in faker/Providers/De_de/internet.py in the _to_ascii method, the capital O is missing an umlaut. \n\nIt should be: ('\u00d6', 'Oe') \n\nCurrently:\nreplacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('O', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom ..internet import Provider as InternetProvider\n\nimport re\n\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com',\n 'aol.de', 'gmx.de'\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de')\n\n @staticmethod\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('O', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n for search, replace in replacements:\n string = string.replace(search, replace)\n\n return string\n\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n return self._to_ascii(\n self.bothify(self.generator.parse(pattern)\n ).lower())\n\n def domain_word(self):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return re.sub(r'\\W', '', company).lower()\n", "path": "faker/providers/de_DE/internet.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom ..internet import Provider as InternetProvider\n\nimport re\n\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com',\n 'aol.de', 'gmx.de'\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de')\n\n @staticmethod\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n for search, replace in replacements:\n string = string.replace(search, replace)\n\n return string\n\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n return self._to_ascii(\n self.bothify(self.generator.parse(pattern)\n ).lower())\n\n def domain_word(self):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return re.sub(r'\\W', '', company).lower()\n", "path": "faker/providers/de_DE/internet.py"}]} | 722 | 134 |
gh_patches_debug_5954 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issues with installation process that connects an existing DB
- [x] Tester Marius reports (server credentials in Upwork)
- [ ] It seems that even if you select existing database, it still tries to start a docker container for the database, creating a conflict?
- [x] Tester Mohammad reports an error as well ([details here](https://docs.google.com/document/d/15m9eZFocAsU1V9inLKxC6i_KQxMdu28snRrBPOrf5Hk/edit))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/install.py`
Content:
```
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.types import install
6
7
8 def install_mathesar(
9 database_name, username, password, hostname, port, skip_confirm
10 ):
11 """Create database and install Mathesar on it."""
12 user_db_engine = engine.create_future_engine(
13 username, password, hostname, database_name, port,
14 connect_args={"connect_timeout": 10}
15 )
16 try:
17 user_db_engine.connect()
18 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
19 install.install_mathesar_on_database(user_db_engine)
20 user_db_engine.dispose()
21 except OperationalError:
22 database_created = _create_database(
23 database_name=database_name,
24 hostname=hostname,
25 username=username,
26 password=password,
27 port=port,
28 skip_confirm=skip_confirm
29 )
30 if database_created:
31 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
32 install.install_mathesar_on_database(user_db_engine)
33 user_db_engine.dispose()
34 else:
35 print(f"Skipping installing on DB with key {database_name}.")
36
37
38 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
39 if skip_confirm is True:
40 create_database = "y"
41 else:
42 create_database = input(
43 f"Create a new Database called {database_name}? (y/n) > "
44 )
45 if create_database.lower() in ["y", "yes"]:
46 # We need to connect to an existing database inorder to create a new Database.
47 # So we use the default Database `postgres` that comes with postgres.
48 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
49 root_database = "postgres"
50 root_db_engine = engine.create_future_engine(
51 username, password, hostname, root_database, port,
52 connect_args={"connect_timeout": 10}
53 )
54 with root_db_engine.connect() as conn:
55 conn.execution_options(isolation_level="AUTOCOMMIT")
56 conn.execute(text(f"CREATE DATABASE {database_name}"))
57 root_db_engine.dispose()
58 print(f"Created DB is {database_name}.")
59 return True
60 else:
61 print(f"Database {database_name} not created!")
62 return False
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -53,7 +53,7 @@
)
with root_db_engine.connect() as conn:
conn.execution_options(isolation_level="AUTOCOMMIT")
- conn.execute(text(f"CREATE DATABASE {database_name}"))
+ conn.execute(text(f'CREATE DATABASE "{database_name}"'))
root_db_engine.dispose()
print(f"Created DB is {database_name}.")
return True
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -53,7 +53,7 @@\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n", "issue": "Issues with installation process that connects an existing DB\n- [x] Tester Marius reports (server credentials in Upwork)\r\n - [ ] It seems that even if you select existing database, it still tries to start a docker container for the database, creating a conflict?\r\n- [x] Tester Mohammad reports an error as well ([details here](https://docs.google.com/document/d/15m9eZFocAsU1V9inLKxC6i_KQxMdu28snRrBPOrf5Hk/edit))\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? 
(y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}], "after_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 986 | 111 |
gh_patches_debug_31675 | rasdani/github-patches | git_diff | pyload__pyload-1369 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uplea plugin out of date
Hi,
any download from uplea.com fails:
pyLoad reports success on downloading but actually only the HTML page giving access to download is downloaded...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/UpleaCom.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import re
4
5 from urlparse import urljoin
6
7 from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
8
9
10 class UpleaCom(XFSHoster):
11 __name__ = "UpleaCom"
12 __type__ = "hoster"
13 __version__ = "0.06"
14
15 __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
16
17 __description__ = """Uplea.com hoster plugin"""
18 __license__ = "GPLv3"
19 __authors__ = [("Redleon", None)]
20
21
22 NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
23 SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_])</span>'
24
25 OFFLINE_PATTERN = r'>You followed an invalid or expired link'
26
27 LINK_PATTERN = r'"(http?://\w+\.uplea\.com/anonym/.*?)"'
28
29 WAIT_PATTERN = r'timeText:([\d.]+),'
30 STEP_PATTERN = r'<a href="(/step/.+)">'
31
32
33 def setup(self):
34 self.multiDL = False
35 self.chunkLimit = 1
36 self.resumeDownload = True
37
38
39 def handleFree(self, pyfile):
40 m = re.search(self.STEP_PATTERN, self.html)
41 if m is None:
42 self.error(_("STEP_PATTERN not found"))
43
44 self.html = self.load(urljoin("http://uplea.com/", m.group(1)))
45
46 m = re.search(self.WAIT_PATTERN, self.html)
47 if m:
48 self.wait(m.group(1), True)
49 self.retry()
50
51 m = re.search(self.LINK_PATTERN, self.html)
52 if m is None:
53 self.error(_("LINK_PATTERN not found"))
54
55 self.link = m.group(1)
56 self.wait(15)
57
58
59 getInfo = create_getInfo(UpleaCom)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py
--- a/module/plugins/hoster/UpleaCom.py
+++ b/module/plugins/hoster/UpleaCom.py
@@ -10,23 +10,26 @@
class UpleaCom(XFSHoster):
__name__ = "UpleaCom"
__type__ = "hoster"
- __version__ = "0.06"
+ __version__ = "0.07"
__pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
__description__ = """Uplea.com hoster plugin"""
__license__ = "GPLv3"
- __authors__ = [("Redleon", None)]
+ __authors__ = [("Redleon", None),
+ ("GammaC0de", None)]
NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
- SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_])</span>'
+ SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
+ SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]
OFFLINE_PATTERN = r'>You followed an invalid or expired link'
+ PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'
- LINK_PATTERN = r'"(http?://\w+\.uplea\.com/anonym/.*?)"'
+ LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
- WAIT_PATTERN = r'timeText:([\d.]+),'
+ WAIT_PATTERN = r'timeText: ?([\d.]+),'
STEP_PATTERN = r'<a href="(/step/.+)">'
@@ -45,9 +48,14 @@
m = re.search(self.WAIT_PATTERN, self.html)
if m:
+ self.logDebug(_("Waiting %s seconds") % m.group(1))
self.wait(m.group(1), True)
self.retry()
+ m = re.search(self.PREMIUM_PATTERN, self.html)
+ if m:
+ self.error(_("This URL requires a premium account"))
+
m = re.search(self.LINK_PATTERN, self.html)
if m is None:
self.error(_("LINK_PATTERN not found"))
| {"golden_diff": "diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py\n--- a/module/plugins/hoster/UpleaCom.py\n+++ b/module/plugins/hoster/UpleaCom.py\n@@ -10,23 +10,26 @@\n class UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.06\"\n+ __version__ = \"0.07\"\n \n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n \n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n- __authors__ = [(\"Redleon\", None)]\n+ __authors__ = [(\"Redleon\", None),\n+ (\"GammaC0de\", None)]\n \n \n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n- SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_])</span>'\n+ SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n+ SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n \n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n+ PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n \n- LINK_PATTERN = r'\"(http?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n+ LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n \n- WAIT_PATTERN = r'timeText:([\\d.]+),'\n+ WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n \n \n@@ -45,9 +48,14 @@\n \n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n+ self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n \n+ m = re.search(self.PREMIUM_PATTERN, self.html)\n+ if m:\n+ self.error(_(\"This URL requires a premium account\"))\n+\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n", "issue": "Uplea plugin out of date\nHi,\nany download from uplea.com fails:\npyLoad reports success on downloading but actually only the HTML page giving acces to download is downloaded...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.06\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_])</span>'\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n\n LINK_PATTERN = r'\"(http?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n\n WAIT_PATTERN = r'timeText:([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n 
__name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.07\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None),\n (\"GammaC0de\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n\n LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n\n WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.PREMIUM_PATTERN, self.html)\n if m:\n self.error(_(\"This URL requires a premium account\"))\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}]} | 864 | 582 |
gh_patches_debug_1520 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-316 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wendy's
e.g. https://locations.wendys.com/jamestown-ny-3438
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/wendys.py`
Content:
```
1 import scrapy
2 import re
3 import json
4 from locations.items import GeojsonPointItem
5
6 DAY_MAPPING = {
7 'Monday': 'Mo',
8 'Tuesday': 'Tu',
9 'Wednesday': 'We',
10 'Thursday': 'Th',
11 'Friday': 'Fr',
12 'Saturday': 'Sa',
13 'Sunday': 'Su'
14 }
15
16
17 class WendysSpider(scrapy.Spider):
18
19 name = "wendys"
20 allowed_domains = ["locations.wendys.com"]
21 download_delay = 0
22 download_timeout = 30
23 start_urls = (
24 'https://locations.wendys.com',
25 )
26
27 def handle_error(self, failure):
28 self.log("Request failed: %s" % failure.request)
29 def parse_day(self, day):
30 return DAY_MAPPING[day.strip()]
31 def parse_times(self, times):
32 hours_to = [x.strip() for x in times.split('-')]
33 cleaned_times = []
34
35 for hour in hours_to:
36 if re.search('pm$', hour):
37 hour = re.sub('pm', '', hour).strip()
38 hour_min = hour.split(":")
39 if int(hour_min[0]) < 12:
40 hour_min[0] = str(12 + int(hour_min[0]))
41 cleaned_times.append(":".join(hour_min))
42
43 if re.search('am$', hour):
44 hour = re.sub('am', '', hour).strip()
45 hour_min = hour.split(":")
46 if len(hour_min[0]) <2:
47 hour_min[0] = hour_min[0].zfill(2)
48 else:
49 hour_min[0] = str(int(hour_min[0]))
50
51 cleaned_times.append(":".join(hour_min))
52 return "-".join(cleaned_times)
53
54 def parse_hours(self, lis):
55 hours = []
56 for li in lis:
57 day = li.xpath('./span[@class="day"]/text()').extract()[1]
58 times = li.xpath('./span[2]/text()').extract_first()
59 if times and day:
60 parsed_time = self.parse_times(times)
61 parsed_day = self.parse_day(day)
62 hours.append(parsed_day + ' ' + parsed_time)
63
64 return "; ".join(hours)
65 def parse_stores(self, response):
66 page_content = response.body_as_unicode()
67 json_content = re.findall('li.data.results =[^;]+' , page_content)
68 if len(json_content)>0:
69 json_content = json_content[0].replace('li.data.results =' ,'')
70 json_data = json.loads(json_content)
71 properties = {
72 'addr_full': json_data[0]['address'],
73 'phone':json_data[0]['phone'],
74 'city': json_data[0]['city'],
75 'state':json_data[0]['state'],
76 'postcode': json_data[0]['postal'],
77 'ref': json_data[0]['id'],
78 'website': response.url,
79 'lat': json_data[0]['lat'],
80 'lon': json_data[0]['lon'],
81 }
82 hours = self.parse_hours(response.xpath('//div[@class="hours"]/ol/li'))
83 if hours:
84 properties['opening_hours'] = hours
85
86 yield GeojsonPointItem(**properties)
87
88 def parse_city_stores(self, response):
89 stores = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/ul/li/a/@href').extract()
90 for store in stores:
91 if store:
92 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error)
93
94 def parse_state(self, response):
95 city_urls = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/div[@class="col"]/ul/li/a/@href').extract()
96 for path in city_urls:
97 yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error)
98
99 def parse(self, response):
100 urls = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/div[@class="col"]/ul/li/a/@href').extract()
101 for path in urls:
102 yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py
--- a/locations/spiders/wendys.py
+++ b/locations/spiders/wendys.py
@@ -18,7 +18,7 @@
name = "wendys"
allowed_domains = ["locations.wendys.com"]
- download_delay = 0
+ download_delay = 0.5
download_timeout = 30
start_urls = (
'https://locations.wendys.com',
| {"golden_diff": "diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py\n--- a/locations/spiders/wendys.py\n+++ b/locations/spiders/wendys.py\n@@ -18,7 +18,7 @@\n \n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n- download_delay = 0\n+ download_delay = 0.5\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n", "issue": "Wendy's\ne.g. https://locations.wendys.com/jamestown-ny-3438\n", "before_files": [{"content": "import scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass WendysSpider(scrapy.Spider):\n\n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n download_delay = 0\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n )\n\n def handle_error(self, failure):\n self.log(\"Request failed: %s\" % failure.request)\n def parse_day(self, day):\n return DAY_MAPPING[day.strip()]\n def parse_times(self, times):\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath('./span[@class=\"day\"]/text()').extract()[1]\n times = li.xpath('./span[2]/text()').extract_first()\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n def parse_stores(self, response):\n page_content = response.body_as_unicode()\n json_content = re.findall('li.data.results =[^;]+' , page_content)\n if len(json_content)>0:\n json_content = json_content[0].replace('li.data.results =' ,'')\n json_data = json.loads(json_content)\n properties = {\n 'addr_full': json_data[0]['address'],\n 'phone':json_data[0]['phone'],\n 'city': json_data[0]['city'],\n 'state':json_data[0]['state'],\n 'postcode': json_data[0]['postal'],\n 'ref': json_data[0]['id'],\n 'website': response.url,\n 'lat': json_data[0]['lat'],\n 'lon': json_data[0]['lon'],\n }\n hours = self.parse_hours(response.xpath('//div[@class=\"hours\"]/ol/li'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_city_stores(self, response):\n stores = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/ul/li/a/@href').extract()\n for store in stores:\n if store:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error)\n\n def parse_state(self, response):\n city_urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in city_urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 
col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error)\n", "path": "locations/spiders/wendys.py"}], "after_files": [{"content": "import scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass WendysSpider(scrapy.Spider):\n\n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n download_delay = 0.5\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n )\n\n def handle_error(self, failure):\n self.log(\"Request failed: %s\" % failure.request)\n def parse_day(self, day):\n return DAY_MAPPING[day.strip()]\n def parse_times(self, times):\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath('./span[@class=\"day\"]/text()').extract()[1]\n times = li.xpath('./span[2]/text()').extract_first()\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n def parse_stores(self, response):\n page_content = response.body_as_unicode()\n json_content = re.findall('li.data.results =[^;]+' , page_content)\n if len(json_content)>0:\n json_content = json_content[0].replace('li.data.results =' ,'')\n json_data = json.loads(json_content)\n properties = {\n 'addr_full': json_data[0]['address'],\n 'phone':json_data[0]['phone'],\n 'city': json_data[0]['city'],\n 'state':json_data[0]['state'],\n 'postcode': json_data[0]['postal'],\n 'ref': json_data[0]['id'],\n 'website': response.url,\n 'lat': json_data[0]['lat'],\n 'lon': json_data[0]['lon'],\n }\n hours = self.parse_hours(response.xpath('//div[@class=\"hours\"]/ol/li'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_city_stores(self, response):\n stores = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/ul/li/a/@href').extract()\n for store in stores:\n if store:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error)\n\n def parse_state(self, response):\n city_urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in city_urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error)\n", "path": "locations/spiders/wendys.py"}]} 
| 1,411 | 116 |
gh_patches_debug_6762 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-4770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nv-sd CI test failure
The Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7073374056 failed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepspeed/model_implementations/diffusers/unet.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 import torch
7 from ..features.cuda_graph import CUDAGraph
8
9
10 class DSUNet(CUDAGraph, torch.nn.Module):
11
12 def __init__(self, unet, enable_cuda_graph=True):
13 super().__init__(enable_cuda_graph=enable_cuda_graph)
14 self.unet = unet
15 # SD pipeline accesses this attribute
16 self.in_channels = unet.in_channels
17 self.device = self.unet.device
18 self.dtype = self.unet.dtype
19 self.config = self.unet.config
20 self.fwd_count = 0
21 self.unet.requires_grad_(requires_grad=False)
22 self.unet.to(memory_format=torch.channels_last)
23 self.cuda_graph_created = False
24
25 def _graph_replay(self, *inputs, **kwargs):
26 for i in range(len(inputs)):
27 if torch.is_tensor(inputs[i]):
28 self.static_inputs[i].copy_(inputs[i])
29 for k in kwargs:
30 if torch.is_tensor(kwargs[k]):
31 self.static_kwargs[k].copy_(kwargs[k])
32 self._cuda_graphs.replay()
33 return self.static_output
34
35 def forward(self, *inputs, **kwargs):
36 if self.enable_cuda_graph:
37 if self.cuda_graph_created:
38 outputs = self._graph_replay(*inputs, **kwargs)
39 else:
40 self._create_cuda_graph(*inputs, **kwargs)
41 outputs = self._graph_replay(*inputs, **kwargs)
42 return outputs
43 else:
44 return self._forward(*inputs, **kwargs)
45
46 def _create_cuda_graph(self, *inputs, **kwargs):
47 # warmup to create the workspace and cublas handle
48 cuda_stream = torch.cuda.Stream()
49 cuda_stream.wait_stream(torch.cuda.current_stream())
50 with torch.cuda.stream(cuda_stream):
51 for i in range(3):
52 ret = self._forward(*inputs, **kwargs)
53 torch.cuda.current_stream().wait_stream(cuda_stream)
54
55 # create cuda_graph and assign static_inputs and static_outputs
56 self._cuda_graphs = torch.cuda.CUDAGraph()
57 self.static_inputs = inputs
58 self.static_kwargs = kwargs
59
60 with torch.cuda.graph(self._cuda_graphs):
61 self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
62
63 self.cuda_graph_created = True
64
65 def _forward(self,
66 sample,
67 timestamp,
68 encoder_hidden_states,
69 return_dict=True,
70 cross_attention_kwargs=None,
71 timestep_cond=None):
72 if cross_attention_kwargs:
73 return self.unet(sample,
74 timestamp,
75 encoder_hidden_states,
76 return_dict,
77 cross_attention_kwargs=cross_attention_kwargs)
78 else:
79 return self.unet(sample, timestamp, encoder_hidden_states, return_dict)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py
--- a/deepspeed/model_implementations/diffusers/unet.py
+++ b/deepspeed/model_implementations/diffusers/unet.py
@@ -68,7 +68,8 @@
encoder_hidden_states,
return_dict=True,
cross_attention_kwargs=None,
- timestep_cond=None):
+ timestep_cond=None,
+ added_cond_kwargs=None):
if cross_attention_kwargs:
return self.unet(sample,
timestamp,
| {"golden_diff": "diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py\n--- a/deepspeed/model_implementations/diffusers/unet.py\n+++ b/deepspeed/model_implementations/diffusers/unet.py\n@@ -68,7 +68,8 @@\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n- timestep_cond=None):\n+ timestep_cond=None,\n+ added_cond_kwargs=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n", "issue": "nv-sd CI test failure\nThe Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7073374056 failed.\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\nfrom ..features.cuda_graph import CUDAGraph\n\n\nclass DSUNet(CUDAGraph, torch.nn.Module):\n\n def __init__(self, unet, enable_cuda_graph=True):\n super().__init__(enable_cuda_graph=enable_cuda_graph)\n self.unet = unet\n # SD pipeline accesses this attribute\n self.in_channels = unet.in_channels\n self.device = self.unet.device\n self.dtype = self.unet.dtype\n self.config = self.unet.config\n self.fwd_count = 0\n self.unet.requires_grad_(requires_grad=False)\n self.unet.to(memory_format=torch.channels_last)\n self.cuda_graph_created = False\n\n def _graph_replay(self, *inputs, **kwargs):\n for i in range(len(inputs)):\n if torch.is_tensor(inputs[i]):\n self.static_inputs[i].copy_(inputs[i])\n for k in kwargs:\n if torch.is_tensor(kwargs[k]):\n self.static_kwargs[k].copy_(kwargs[k])\n self._cuda_graphs.replay()\n return self.static_output\n\n def forward(self, *inputs, **kwargs):\n if self.enable_cuda_graph:\n if self.cuda_graph_created:\n outputs = self._graph_replay(*inputs, **kwargs)\n else:\n self._create_cuda_graph(*inputs, **kwargs)\n outputs = self._graph_replay(*inputs, **kwargs)\n return outputs\n else:\n return self._forward(*inputs, **kwargs)\n\n def _create_cuda_graph(self, *inputs, **kwargs):\n # warmup to create the workspace and cublas handle\n cuda_stream = torch.cuda.Stream()\n cuda_stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(cuda_stream):\n for i in range(3):\n ret = self._forward(*inputs, **kwargs)\n torch.cuda.current_stream().wait_stream(cuda_stream)\n\n # create cuda_graph and assign static_inputs and static_outputs\n self._cuda_graphs = torch.cuda.CUDAGraph()\n self.static_inputs = inputs\n self.static_kwargs = kwargs\n\n with torch.cuda.graph(self._cuda_graphs):\n self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)\n\n self.cuda_graph_created = True\n\n def _forward(self,\n sample,\n timestamp,\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n timestep_cond=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n encoder_hidden_states,\n return_dict,\n cross_attention_kwargs=cross_attention_kwargs)\n else:\n return self.unet(sample, timestamp, encoder_hidden_states, return_dict)\n", "path": "deepspeed/model_implementations/diffusers/unet.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\nfrom ..features.cuda_graph import CUDAGraph\n\n\nclass DSUNet(CUDAGraph, torch.nn.Module):\n\n def __init__(self, unet, enable_cuda_graph=True):\n super().__init__(enable_cuda_graph=enable_cuda_graph)\n self.unet = unet\n # SD pipeline accesses this attribute\n self.in_channels = unet.in_channels\n self.device = self.unet.device\n 
self.dtype = self.unet.dtype\n self.config = self.unet.config\n self.fwd_count = 0\n self.unet.requires_grad_(requires_grad=False)\n self.unet.to(memory_format=torch.channels_last)\n self.cuda_graph_created = False\n\n def _graph_replay(self, *inputs, **kwargs):\n for i in range(len(inputs)):\n if torch.is_tensor(inputs[i]):\n self.static_inputs[i].copy_(inputs[i])\n for k in kwargs:\n if torch.is_tensor(kwargs[k]):\n self.static_kwargs[k].copy_(kwargs[k])\n self._cuda_graphs.replay()\n return self.static_output\n\n def forward(self, *inputs, **kwargs):\n if self.enable_cuda_graph:\n if self.cuda_graph_created:\n outputs = self._graph_replay(*inputs, **kwargs)\n else:\n self._create_cuda_graph(*inputs, **kwargs)\n outputs = self._graph_replay(*inputs, **kwargs)\n return outputs\n else:\n return self._forward(*inputs, **kwargs)\n\n def _create_cuda_graph(self, *inputs, **kwargs):\n # warmup to create the workspace and cublas handle\n cuda_stream = torch.cuda.Stream()\n cuda_stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(cuda_stream):\n for i in range(3):\n ret = self._forward(*inputs, **kwargs)\n torch.cuda.current_stream().wait_stream(cuda_stream)\n\n # create cuda_graph and assign static_inputs and static_outputs\n self._cuda_graphs = torch.cuda.CUDAGraph()\n self.static_inputs = inputs\n self.static_kwargs = kwargs\n\n with torch.cuda.graph(self._cuda_graphs):\n self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)\n\n self.cuda_graph_created = True\n\n def _forward(self,\n sample,\n timestamp,\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n timestep_cond=None,\n added_cond_kwargs=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n encoder_hidden_states,\n return_dict,\n cross_attention_kwargs=cross_attention_kwargs)\n else:\n return self.unet(sample, timestamp, encoder_hidden_states, return_dict)\n", "path": "deepspeed/model_implementations/diffusers/unet.py"}]} | 1,053 | 127 |
gh_patches_debug_460 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3013 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Twitter asks for authorization even though I've already authorized Gittip
As of #1369 Twitter is now asking me to authorize Gittip even though I've already done so.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/elsewhere/twitter.py`
Content:
```
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from gratipay.elsewhere import PlatformOAuth1
4 from gratipay.elsewhere._extractors import key, not_available
5
6
7 class Twitter(PlatformOAuth1):
8
9 # Platform attributes
10 name = 'twitter'
11 display_name = 'Twitter'
12 account_url = 'https://twitter.com/{user_name}'
13
14 # Auth attributes
15 auth_url = 'https://api.twitter.com'
16
17 # API attributes
18 api_format = 'json'
19 api_url = 'https://api.twitter.com/1.1'
20 api_user_info_path = '/users/show.json?screen_name={user_name}'
21 api_user_self_info_path = '/account/verify_credentials.json'
22 ratelimit_headers_prefix = 'x-rate-limit-'
23
24 # User info extractors
25 x_user_id = key('id')
26 x_user_name = key('screen_name')
27 x_display_name = key('name')
28 x_email = not_available
29 x_avatar_url = key('profile_image_url_https',
30 clean=lambda v: v.replace('_normal.', '.'))
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gratipay/elsewhere/twitter.py b/gratipay/elsewhere/twitter.py
--- a/gratipay/elsewhere/twitter.py
+++ b/gratipay/elsewhere/twitter.py
@@ -13,6 +13,7 @@
# Auth attributes
auth_url = 'https://api.twitter.com'
+ authorize_path = '/oauth/authenticate'
# API attributes
api_format = 'json'
| {"golden_diff": "diff --git a/gratipay/elsewhere/twitter.py b/gratipay/elsewhere/twitter.py\n--- a/gratipay/elsewhere/twitter.py\n+++ b/gratipay/elsewhere/twitter.py\n@@ -13,6 +13,7 @@\n \n # Auth attributes\n auth_url = 'https://api.twitter.com'\n+ authorize_path = '/oauth/authenticate'\n \n # API attributes\n api_format = 'json'\n", "issue": "Twitter asks for authorization even though I've already authorized Gittip\nAs of #1369 Twitter is now asking me to authorize Giitip even though I've already done so.\n\n\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\nTwitter asks for authorization even though I've already authorized Gittip\nAs of #1369 Twitter is now asking me to authorize Giitip even though I've already done so.\n\n\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth1\nfrom gratipay.elsewhere._extractors import key, not_available\n\n\nclass Twitter(PlatformOAuth1):\n\n # Platform attributes\n name = 'twitter'\n display_name = 'Twitter'\n account_url = 'https://twitter.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://api.twitter.com'\n\n # API attributes\n api_format = 'json'\n api_url = 'https://api.twitter.com/1.1'\n api_user_info_path = '/users/show.json?screen_name={user_name}'\n api_user_self_info_path = '/account/verify_credentials.json'\n ratelimit_headers_prefix = 'x-rate-limit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('screen_name')\n x_display_name = key('name')\n x_email = not_available\n x_avatar_url = key('profile_image_url_https',\n clean=lambda v: v.replace('_normal.', '.'))\n", "path": "gratipay/elsewhere/twitter.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth1\nfrom gratipay.elsewhere._extractors import key, not_available\n\n\nclass Twitter(PlatformOAuth1):\n\n # Platform attributes\n name = 'twitter'\n display_name = 'Twitter'\n account_url = 'https://twitter.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://api.twitter.com'\n authorize_path = '/oauth/authenticate'\n\n # API attributes\n api_format = 'json'\n api_url = 'https://api.twitter.com/1.1'\n api_user_info_path = '/users/show.json?screen_name={user_name}'\n api_user_self_info_path = '/account/verify_credentials.json'\n ratelimit_headers_prefix = 'x-rate-limit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('screen_name')\n x_display_name = key('name')\n x_email = not_available\n x_avatar_url = 
key('profile_image_url_https',\n clean=lambda v: v.replace('_normal.', '.'))\n", "path": "gratipay/elsewhere/twitter.py"}]} | 1,069 | 96 |
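The one-line fix above points gratipay's sign-in flow at Twitter's `/oauth/authenticate` endpoint rather than the default `/oauth/authorize` one; the former silently redirects users who have already approved the application, which is the behaviour the issue asks for. A minimal stand-alone sketch of that distinction (the helper name and token value are illustrative, not taken from the gratipay codebase):

```python
AUTH_BASE = "https://api.twitter.com"

def build_redirect_url(oauth_token: str, force_prompt: bool = False) -> str:
    # /oauth/authorize always shows the consent screen, while
    # /oauth/authenticate skips it for users who already granted access;
    # the latter is the endpoint selected by the authorize_path added in the patch.
    path = "/oauth/authorize" if force_prompt else "/oauth/authenticate"
    return f"{AUTH_BASE}{path}?oauth_token={oauth_token}"

print(build_redirect_url("example-request-token"))
```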
gh_patches_debug_16571 | rasdani/github-patches | git_diff | geopandas__geopandas-854 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation Warning with fiona 1.8b1
Using a `debian:buster` Docker image, I installed Fiona with
> pip install git+https://github.com/Toblerity/[email protected]
I got this __warning__ today:
```python
/usr/local/lib/python2.7/dist-packages/geopandas/io/file.py:108: FionaDeprecationWarning: Use fiona.Env() instead.
with fiona.drivers():
No handlers could be found for logger "rasterio._gdal"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/file.py`
Content:
```
1 import os
2
3 import fiona
4 import numpy as np
5 import six
6
7 from geopandas import GeoDataFrame, GeoSeries
8
9 # Adapted from pandas.io.common
10 if six.PY3:
11 from urllib.request import urlopen as _urlopen
12 from urllib.parse import urlparse as parse_url
13 from urllib.parse import uses_relative, uses_netloc, uses_params
14 else:
15 from urllib2 import urlopen as _urlopen
16 from urlparse import urlparse as parse_url
17 from urlparse import uses_relative, uses_netloc, uses_params
18
19 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
20 _VALID_URLS.discard('')
21
22
23 def _is_url(url):
24 """Check to see if *url* has a valid protocol."""
25 try:
26 return parse_url(url).scheme in _VALID_URLS
27 except:
28 return False
29
30
31 def read_file(filename, bbox=None, **kwargs):
32 """
33 Returns a GeoDataFrame from a file or URL.
34
35 Parameters
36 ----------
37 filename: str
38 Either the absolute or relative path to the file or URL to
39 be opened.
40 bbox : tuple | GeoDataFrame or GeoSeries, default None
41 Filter features by given bounding box, GeoSeries, or GeoDataFrame.
42 CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
43 **kwargs:
44 Keyword args to be passed to the `open` or `BytesCollection` method
45 in the fiona library when opening the file. For more information on
46 possible keywords, type:
47 ``import fiona; help(fiona.open)``
48
49 Examples
50 --------
51 >>> df = geopandas.read_file("nybb.shp")
52
53 Returns
54 -------
55 geodataframe : GeoDataFrame
56 """
57 if _is_url(filename):
58 req = _urlopen(filename)
59 path_or_bytes = req.read()
60 reader = fiona.BytesCollection
61 else:
62 path_or_bytes = filename
63 reader = fiona.open
64
65 with reader(path_or_bytes, **kwargs) as features:
66 crs = features.crs
67 if bbox is not None:
68 if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
69 bbox = tuple(bbox.to_crs(crs).total_bounds)
70 assert len(bbox) == 4
71 f_filt = features.filter(bbox=bbox)
72 else:
73 f_filt = features
74
75 columns = list(features.meta["schema"]["properties"]) + ["geometry"]
76 gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
77
78 return gdf
79
80
81 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
82 **kwargs):
83 """
84 Write this GeoDataFrame to an OGR data source
85
86 A dictionary of supported OGR providers is available via:
87 >>> import fiona
88 >>> fiona.supported_drivers
89
90 Parameters
91 ----------
92 df : GeoDataFrame to be written
93 filename : string
94 File path or file handle to write to.
95 driver : string, default 'ESRI Shapefile'
96 The OGR format driver used to write the vector file.
97 schema : dict, default None
98 If specified, the schema dictionary is passed to Fiona to
99 better control how the file is written. If None, GeoPandas
100 will determine the schema based on each column's dtype
101
102 The *kwargs* are passed to fiona.open and can be used to write
103 to multi-layer data, store data within archives (zip files), etc.
104 """
105 if schema is None:
106 schema = infer_schema(df)
107 filename = os.path.abspath(os.path.expanduser(filename))
108 with fiona.drivers():
109 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
110 schema=schema, **kwargs) as colxn:
111 colxn.writerecords(df.iterfeatures())
112
113
114 def infer_schema(df):
115 try:
116 from collections import OrderedDict
117 except ImportError:
118 from ordereddict import OrderedDict
119
120 def convert_type(column, in_type):
121 if in_type == object:
122 return 'str'
123 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
124 if out_type == 'long':
125 out_type = 'int'
126 if out_type == 'bool':
127 raise ValueError('column "{}" is boolean type, '.format(column) +
128 'which is unsupported in file writing. '
129 'Consider casting the column to int type.')
130 return out_type
131
132 properties = OrderedDict([
133 (col, convert_type(col, _type)) for col, _type in
134 zip(df.columns, df.dtypes) if col != df._geometry_column_name
135 ])
136
137 if df.empty:
138 raise ValueError("Cannot write empty DataFrame to file.")
139
140 geom_type = _common_geom_type(df)
141
142 if not geom_type:
143 raise ValueError("Geometry column cannot contain mutiple "
144 "geometry types when writing to file.")
145
146 schema = {'geometry': geom_type, 'properties': properties}
147
148 return schema
149
150
151 def _common_geom_type(df):
152 # Need to check geom_types before we write to file...
153 # Some (most?) providers expect a single geometry type:
154 # Point, LineString, or Polygon
155 geom_types = df.geometry.geom_type.unique()
156
157 from os.path import commonprefix
158 # use reversed geom types and commonprefix to find the common suffix,
159 # then reverse the result to get back to a geom type
160 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1]
161 if not geom_type:
162 return None
163
164 if df.geometry.has_z.any():
165 geom_type = "3D " + geom_type
166
167 return geom_type
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -4,6 +4,11 @@
import numpy as np
import six
+try:
+ from fiona import Env as fiona_env
+except ImportError:
+ from fiona import drivers as fiona_env
+
from geopandas import GeoDataFrame, GeoSeries
# Adapted from pandas.io.common
@@ -105,7 +110,7 @@
if schema is None:
schema = infer_schema(df)
filename = os.path.abspath(os.path.expanduser(filename))
- with fiona.drivers():
+ with fiona_env():
with fiona.open(filename, 'w', driver=driver, crs=df.crs,
schema=schema, **kwargs) as colxn:
colxn.writerecords(df.iterfeatures())
| {"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -4,6 +4,11 @@\n import numpy as np\n import six\n \n+try:\n+ from fiona import Env as fiona_env\n+except ImportError:\n+ from fiona import drivers as fiona_env\n+\n from geopandas import GeoDataFrame, GeoSeries\n \n # Adapted from pandas.io.common\n@@ -105,7 +110,7 @@\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n- with fiona.drivers():\n+ with fiona_env():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n", "issue": "Deprecation Warning with fiona 1.8b1\nusing a `debian:buster` docker image\r\n\r\ninstalled Fiona with \r\n> pip install git+https://github.com/Toblerity/[email protected]\r\n\r\nI got this __warning__ today: \r\n```python\r\n/usr/local/lib/python2.7/dist-packages/geopandas/io/file.py:108: FionaDeprecationWarning: Use fiona.Env() instead.\r\n with fiona.drivers():\r\nNo handlers could be found for logger \"rasterio._gdal\"\r\n```\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. 
For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with reader(path_or_bytes, **kwargs) as features:\n crs = features.crs\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n if out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing. '\n 'Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n geom_type = _common_geom_type(df)\n \n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) 
providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix\n # use reversed geom types and commonprefix to find the common suffix,\n # then reverse the result to get back to a geom type\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1]\n if not geom_type:\n return None\n\n if df.geometry.has_z.any():\n geom_type = \"3D \" + geom_type\n\n return geom_type\n", "path": "geopandas/io/file.py"}], "after_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with reader(path_or_bytes, **kwargs) as features:\n crs = features.crs\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. 
If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona_env():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n if out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing. '\n 'Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n geom_type = _common_geom_type(df)\n \n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix\n # use reversed geom types and commonprefix to find the common suffix,\n # then reverse the result to get back to a geom type\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1]\n if not geom_type:\n return None\n\n if df.geometry.has_z.any():\n geom_type = \"3D \" + geom_type\n\n return geom_type\n", "path": "geopandas/io/file.py"}]} | 2,018 | 203 |
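The patch above wraps file writing in a version-agnostic GDAL environment: it imports `fiona.Env` where available (Fiona >= 1.8) and falls back to the deprecated `fiona.drivers` on older releases, which silences the `FionaDeprecationWarning` from the issue. A minimal sketch of that shim, assuming Fiona is installed (the `write_records` helper is illustrative):

```python
import fiona

try:
    from fiona import Env as fiona_env      # Fiona 1.8+: no deprecation warning
except ImportError:
    from fiona import drivers as fiona_env  # older Fiona releases

def write_records(records, path, schema, crs, driver="ESRI Shapefile"):
    # Anything opened inside the context manager runs with the GDAL
    # environment active, mirroring what geopandas' to_file does after the fix.
    with fiona_env():
        with fiona.open(path, "w", driver=driver, crs=crs, schema=schema) as sink:
            sink.writerecords(records)
```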
gh_patches_debug_24607 | rasdani/github-patches | git_diff | streamlink__streamlink-3185 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tv360.com.tr no playable stream
## Bug Report
- [x] This is a bug report and I have read the contribution guidelines.
### Description
Streamlink can't find a playable stream.
### Expected / Actual behavior
The stream is supposed to be found.
### Reproduction steps / Explicit stream URLs to test
``` 1. streamlink https://www.tv360.com.tr/canli-yayin ```
### Log output
```
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.8.2
[cli][debug] Streamlink: 1.5.0
[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)
[cli][info] Found matching plugin tv360 for URL tv360.com.tr/canli-yayin
error: No playable streams found on this URL: tv360.com.tr/canli-yayin
```
### Additional comments, screenshots, etc.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/tv360.py`
Content:
```
1 from __future__ import print_function
2
3 import re
4
5 from streamlink.plugin import Plugin
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8
9
10 class TV360(Plugin):
11 url_re = re.compile(r"https?://(?:www.)?tv360.com.tr/canli-yayin")
12 hls_re = re.compile(r'''hls.loadSource\(["'](http.*m3u8)["']\)''', re.DOTALL)
13
14 hls_schema = validate.Schema(
15 validate.transform(hls_re.search),
16 validate.any(None, validate.all(validate.get(1)))
17 )
18
19 @classmethod
20 def can_handle_url(cls, url):
21 return cls.url_re.match(url) is not None
22
23 def _get_streams(self):
24 res = self.session.http.get(self.url)
25 hls_url = self.hls_re.search(res.text)
26
27 if hls_url:
28 return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))
29
30
31 __plugin__ = TV360
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/tv360.py b/src/streamlink/plugins/tv360.py
--- a/src/streamlink/plugins/tv360.py
+++ b/src/streamlink/plugins/tv360.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import re
from streamlink.plugin import Plugin
@@ -9,11 +7,11 @@
class TV360(Plugin):
url_re = re.compile(r"https?://(?:www.)?tv360.com.tr/canli-yayin")
- hls_re = re.compile(r'''hls.loadSource\(["'](http.*m3u8)["']\)''', re.DOTALL)
+ hls_re = re.compile(r'''src="(http.*m3u8)"''')
hls_schema = validate.Schema(
validate.transform(hls_re.search),
- validate.any(None, validate.all(validate.get(1)))
+ validate.any(None, validate.all(validate.get(1), validate.url()))
)
@classmethod
@@ -21,11 +19,10 @@
return cls.url_re.match(url) is not None
def _get_streams(self):
- res = self.session.http.get(self.url)
- hls_url = self.hls_re.search(res.text)
+ hls_url = self.session.http.get(self.url, schema=self.hls_schema)
if hls_url:
- return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))
+ return HLSStream.parse_variant_playlist(self.session, hls_url)
__plugin__ = TV360
| {"golden_diff": "diff --git a/src/streamlink/plugins/tv360.py b/src/streamlink/plugins/tv360.py\n--- a/src/streamlink/plugins/tv360.py\n+++ b/src/streamlink/plugins/tv360.py\n@@ -1,5 +1,3 @@\n-from __future__ import print_function\n-\n import re\n \n from streamlink.plugin import Plugin\n@@ -9,11 +7,11 @@\n \n class TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n- hls_re = re.compile(r'''hls.loadSource\\([\"'](http.*m3u8)[\"']\\)''', re.DOTALL)\n+ hls_re = re.compile(r'''src=\"(http.*m3u8)\"''')\n \n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n- validate.any(None, validate.all(validate.get(1)))\n+ validate.any(None, validate.all(validate.get(1), validate.url()))\n )\n \n @classmethod\n@@ -21,11 +19,10 @@\n return cls.url_re.match(url) is not None\n \n def _get_streams(self):\n- res = self.session.http.get(self.url)\n- hls_url = self.hls_re.search(res.text)\n+ hls_url = self.session.http.get(self.url, schema=self.hls_schema)\n \n if hls_url:\n- return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))\n+ return HLSStream.parse_variant_playlist(self.session, hls_url)\n \n \n __plugin__ = TV360\n", "issue": "tv360.com.tr no playable stream\n## Bug Report\r\n- [x] This is a bug report and I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\ncan't find playable stream.\r\n\r\n### Expected / Actual behavior\r\n\r\nstream supposed to be found\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n``` 1. streamlink https://www.tv360.com.tr/canli-yayin ```\r\n\r\n### Log output\r\n\r\n```\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.8.2\r\n[cli][debug] Streamlink: 1.5.0\r\n[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][info] Found matching plugin tv360 for URL tv360.com.tr/canli-yayin\r\nerror: No playable streams found on this URL: tv360.com.tr/canli-yayin\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n\r\n\r\n[Love Streamlink? Please consider supporting our collective. 
Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n hls_re = re.compile(r'''hls.loadSource\\([\"'](http.*m3u8)[\"']\\)''', re.DOTALL)\n\n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n validate.any(None, validate.all(validate.get(1)))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n hls_url = self.hls_re.search(res.text)\n\n if hls_url:\n return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))\n\n\n__plugin__ = TV360\n", "path": "src/streamlink/plugins/tv360.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n hls_re = re.compile(r'''src=\"(http.*m3u8)\"''')\n\n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n validate.any(None, validate.all(validate.get(1), validate.url()))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n hls_url = self.session.http.get(self.url, schema=self.hls_schema)\n\n if hls_url:\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = TV360\n", "path": "src/streamlink/plugins/tv360.py"}]} | 805 | 363 |
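The updated plugin stops searching for an `hls.loadSource(...)` call and instead pulls the manifest URL out of a plain `src="...m3u8"` attribute, validating it before building the variant playlist. A tiny stand-alone illustration of the new regex against a made-up page fragment (the URL is hypothetical):

```python
import re

hls_re = re.compile(r'src="(http.*m3u8)"')  # same pattern as the patched plugin

# Hypothetical page snippet; the real page embeds the manifest URL the same way.
page = '<video><source src="https://example.invalid/live/playlist.m3u8"></video>'

match = hls_re.search(page)
if match:
    print("HLS manifest:", match.group(1))  # fed to HLSStream.parse_variant_playlist
```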
gh_patches_debug_8778 | rasdani/github-patches | git_diff | pytorch__ignite-1330 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docstring of Canberra metric warning
Following this comment
> @sdesrozis could you please investigate why there is a warning here : https://travis-ci.org/github/pytorch/ignite/jobs/730492404#L2924
thanks !
> Doc link is apparently badly rendered : https://pytorch.org/ignite/master/contrib/metrics.html#ignite.contrib.metrics.regression.CanberraMetric
> _Originally posted by @vfdev-5 in https://github.com/pytorch/ignite/pull/1314#issuecomment-699506241_
Namespaces are shared, so references should be unique.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/metrics/regression/canberra_metric.py`
Content:
```
1 from typing import Callable, Union
2
3 import torch
4
5 from ignite.contrib.metrics.regression._base import _BaseRegression
6 from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
7
8
9 class CanberraMetric(_BaseRegression):
10 r"""
11 Calculates the Canberra Metric.
12
13 :math:`\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}`
14
15 where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
16
17 More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_
18
19 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
20 - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
21
22 .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006
23 .. _scikit-learn distance metrics:
24 https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
25
26 """
27
28 def __init__(
29 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
30 ):
31 self._sum_of_errors = None
32 super(CanberraMetric, self).__init__(output_transform, device)
33
34 @reinit__is_reduced
35 def reset(self):
36 self._sum_of_errors = torch.tensor(0.0, device=self._device)
37
38 def _update(self, output):
39 y_pred, y = output
40 errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))
41 self._sum_of_errors += torch.sum(errors).to(self._device)
42
43 @sync_all_reduce("_sum_of_errors")
44 def compute(self):
45 return self._sum_of_errors.item()
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py
--- a/ignite/contrib/metrics/regression/canberra_metric.py
+++ b/ignite/contrib/metrics/regression/canberra_metric.py
@@ -19,7 +19,6 @@
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
- .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006
.. _scikit-learn distance metrics:
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
| {"golden_diff": "diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py\n--- a/ignite/contrib/metrics/regression/canberra_metric.py\n+++ b/ignite/contrib/metrics/regression/canberra_metric.py\n@@ -19,7 +19,6 @@\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n \n- .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n .. _scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n", "issue": "Docstring of Canberra metric warning\nFollowing this comment \r\n\r\n\r\n> @sdesrozis could you please investigate why there is a warning here : https://travis-ci.org/github/pytorch/ignite/jobs/730492404#L2924 \r\nthanks !\r\n> Doc link is apparently badly rendered : https://pytorch.org/ignite/master/contrib/metrics.html#ignite.contrib.metrics.regression.CanberraMetric\r\n> _Originally posted by @vfdev-5 in https://github.com/pytorch/ignite/pull/1314#issuecomment-699506241_\r\n\r\nNamespace are shared so reference should be unique\r\n\n", "before_files": [{"content": "from typing import Callable, Union\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass CanberraMetric(_BaseRegression):\n r\"\"\"\n Calculates the Canberra Metric.\n\n :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{|A_j| + |P_j|}`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n .. _scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n self._sum_of_errors = None\n super(CanberraMetric, self).__init__(output_transform, device)\n\n @reinit__is_reduced\n def reset(self):\n self._sum_of_errors = torch.tensor(0.0, device=self._device)\n\n def _update(self, output):\n y_pred, y = output\n errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))\n self._sum_of_errors += torch.sum(errors).to(self._device)\n\n @sync_all_reduce(\"_sum_of_errors\")\n def compute(self):\n return self._sum_of_errors.item()\n", "path": "ignite/contrib/metrics/regression/canberra_metric.py"}], "after_files": [{"content": "from typing import Callable, Union\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass CanberraMetric(_BaseRegression):\n r\"\"\"\n Calculates the Canberra Metric.\n\n :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{|A_j| + |P_j|}`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n .. 
_scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n self._sum_of_errors = None\n super(CanberraMetric, self).__init__(output_transform, device)\n\n @reinit__is_reduced\n def reset(self):\n self._sum_of_errors = torch.tensor(0.0, device=self._device)\n\n def _update(self, output):\n y_pred, y = output\n errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))\n self._sum_of_errors += torch.sum(errors).to(self._device)\n\n @sync_all_reduce(\"_sum_of_errors\")\n def compute(self):\n return self._sum_of_errors.item()\n", "path": "ignite/contrib/metrics/regression/canberra_metric.py"}]} | 949 | 201 |
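For reference, the formula the docstring keeps describing is CM = sum_j |A_j - P_j| / (|A_j| + |P_j|), accumulated batch by batch in `_update`. A tiny stand-alone sketch of that computation with made-up tensors, assuming torch is installed:

```python
import torch

def canberra(y_pred: torch.Tensor, y: torch.Tensor) -> float:
    # Same elementwise term that CanberraMetric._update sums incrementally.
    errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))
    return torch.sum(errors).item()

print(canberra(torch.tensor([1.5, 2.0, 2.0]), torch.tensor([1.0, 2.0, 3.0])))
```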
gh_patches_debug_35818 | rasdani/github-patches | git_diff | beetbox__beets-1779 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mbsubmit: cleanup and completion
Glad to see a new release has been made!
I'm getting back to work on beets after a few days away from the computer, hopefully bringing issue #1689 to a close eventually. As hinted on the previous discussion, this pull request is intended to take care of the `mbsubmit` plugin cleanup, now that the underlying pieces are in place.
I have modified the behaviour a bit, making the decision of appending the `"Print tracks"` choice depend solely on `task.rec`. The default behaviour is to only append the choice to matches where the recommendation is equal to or lower than `Recommendation.medium`, which hopefully covers the most obvious cases (albums with no matches, albums with weak-ish matches) and the original request by @awesomer, and also avoids polluting the prompt in the cases where the match is strong. A config option has been added that allows the user to modify this setting (extra-picky users might find it useful to always be able to print tracks for fixing spelling mistakes, other users might only want it on albums with no matches, etc.).
Other than that, a configuration option for setting the format string has been added as well - I can't think of a case where this might come in handy currently, but maybe more creative users might find it useful.
A couple of notes:
- currently, the plugin makes no effort to nicely format items that might be lacking some of the required fields. Would it be useful to add some extra checks and fall back to printing the filename (or something more advanced with the help of `fromfilename`, etc.) in those cases?
- there might be some problems with some combinations of options: for example, if the user sets the threshold to `strong` but launches the importer in non-timid mode, the prompt will not actually be displayed. Would a note in the (upcoming) documentation suffice, as handling this case probably requires some changes that seem to be a bit out of the scope of the plugin?
As usual, any comments and input are more than welcome!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/mbsubmit.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Adrian Sampson and Diego Moreda.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Aid in submitting information to MusicBrainz.
17
18 This plugin allows the user to print track information in a format that is
19 parseable by the MusicBrainz track parser. Programmatic submitting is not
20 implemented by MusicBrainz yet.
21 """
22
23 from __future__ import (division, absolute_import, print_function,
24 unicode_literals)
25
26
27 from beets.autotag import Recommendation
28 from beets.importer import action
29 from beets.plugins import BeetsPlugin
30 from beets.ui.commands import PromptChoice
31 from beetsplug.info import print_data
32
33
34 class MBSubmitPlugin(BeetsPlugin):
35 def __init__(self):
36 super(MBSubmitPlugin, self).__init__()
37
38 self.register_listener('before_choose_candidate',
39 self.before_choose_candidate_event)
40
41 def before_choose_candidate_event(self, session, task):
42 if not task.candidates or task.rec == Recommendation.none:
43 return [PromptChoice('p', 'Print tracks', self.print_tracks),
44 PromptChoice('k', 'print tracks and sKip',
45 self.print_tracks_and_skip)]
46
47 # Callbacks for choices.
48 def print_tracks(self, session, task):
49 for i in task.items:
50 print_data(None, i, '$track. $artist - $title ($length)')
51
52 def print_tracks_and_skip(self, session, task):
53 for i in task.items:
54 print_data(None, i, '$track. $artist - $title ($length)')
55 return action.SKIP
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py
--- a/beetsplug/mbsubmit.py
+++ b/beetsplug/mbsubmit.py
@@ -16,8 +16,10 @@
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
-parseable by the MusicBrainz track parser. Programmatic submitting is not
+parseable by the MusicBrainz track parser [1]. Programmatic submitting is not
implemented by MusicBrainz yet.
+
+[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings
"""
from __future__ import (division, absolute_import, print_function,
@@ -25,7 +27,6 @@
from beets.autotag import Recommendation
-from beets.importer import action
from beets.plugins import BeetsPlugin
from beets.ui.commands import PromptChoice
from beetsplug.info import print_data
@@ -35,21 +36,26 @@
def __init__(self):
super(MBSubmitPlugin, self).__init__()
+ self.config.add({
+ 'format': '$track. $title - $artist ($length)',
+ 'threshold': 'medium',
+ })
+
+ # Validate and store threshold.
+ self.threshold = self.config['threshold'].as_choice({
+ 'none': Recommendation.none,
+ 'low': Recommendation.low,
+ 'medium': Recommendation.medium,
+ 'strong': Recommendation.strong
+ })
+
self.register_listener('before_choose_candidate',
self.before_choose_candidate_event)
def before_choose_candidate_event(self, session, task):
- if not task.candidates or task.rec == Recommendation.none:
- return [PromptChoice('p', 'Print tracks', self.print_tracks),
- PromptChoice('k', 'print tracks and sKip',
- self.print_tracks_and_skip)]
+ if task.rec <= self.threshold:
+ return [PromptChoice('p', 'Print tracks', self.print_tracks)]
- # Callbacks for choices.
def print_tracks(self, session, task):
for i in task.items:
- print_data(None, i, '$track. $artist - $title ($length)')
-
- def print_tracks_and_skip(self, session, task):
- for i in task.items:
- print_data(None, i, '$track. $artist - $title ($length)')
- return action.SKIP
+ print_data(None, i, self.config['format'].get())
| {"golden_diff": "diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py\n--- a/beetsplug/mbsubmit.py\n+++ b/beetsplug/mbsubmit.py\n@@ -16,8 +16,10 @@\n \"\"\"Aid in submitting information to MusicBrainz.\n \n This plugin allows the user to print track information in a format that is\n-parseable by the MusicBrainz track parser. Programmatic submitting is not\n+parseable by the MusicBrainz track parser [1]. Programmatic submitting is not\n implemented by MusicBrainz yet.\n+\n+[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n \"\"\"\n \n from __future__ import (division, absolute_import, print_function,\n@@ -25,7 +27,6 @@\n \n \n from beets.autotag import Recommendation\n-from beets.importer import action\n from beets.plugins import BeetsPlugin\n from beets.ui.commands import PromptChoice\n from beetsplug.info import print_data\n@@ -35,21 +36,26 @@\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n \n+ self.config.add({\n+ 'format': '$track. $title - $artist ($length)',\n+ 'threshold': 'medium',\n+ })\n+\n+ # Validate and store threshold.\n+ self.threshold = self.config['threshold'].as_choice({\n+ 'none': Recommendation.none,\n+ 'low': Recommendation.low,\n+ 'medium': Recommendation.medium,\n+ 'strong': Recommendation.strong\n+ })\n+\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n \n def before_choose_candidate_event(self, session, task):\n- if not task.candidates or task.rec == Recommendation.none:\n- return [PromptChoice('p', 'Print tracks', self.print_tracks),\n- PromptChoice('k', 'print tracks and sKip',\n- self.print_tracks_and_skip)]\n+ if task.rec <= self.threshold:\n+ return [PromptChoice('p', 'Print tracks', self.print_tracks)]\n \n- # Callbacks for choices.\n def print_tracks(self, session, task):\n for i in task.items:\n- print_data(None, i, '$track. $artist - $title ($length)')\n-\n- def print_tracks_and_skip(self, session, task):\n- for i in task.items:\n- print_data(None, i, '$track. $artist - $title ($length)')\n- return action.SKIP\n+ print_data(None, i, self.config['format'].get())\n", "issue": "mbsubmit: cleanup and completion\nGlad to see a new release has been made!\n\nI'm getting back to work on beets after a few days away from the computer, hopefully bringing issue #1689 to a close eventually. As hinted on the previous discussion, this pull request is intended to take care of the `mbsubmit` plugin cleanup, now that the underlying pieces are in place.\n\nI have modified a bit the behaviour, making the decision of appending the `\"Print tracks\"` choice depend solely on `task.rec`. The default behaviour is to only append the choice to matches where the recommendation is equal or lower than `Recommendation.medium`, which hopefully covers the most obvious choices (albums with no matches, albums with weak-ish matches) and the original request by @awesomer, and also avoids polluting the prompt in the cases where the match is strong. 
A config option has been added that allows the user to modify this settings (extra-picky users might find it useful to always be able to print tracks for fixing spelling mistakes, other users might only want it on albums with no matches, etc).\n\nOther than that, a configuration option for setting the format string has been added as well - I can't think of a case where this might come in handy currently, but maybe more creative users might find it useful.\n\nA couple of notes:\n- currently, the plugin makes no effort of nicely formatting items that might be lacking some of the required fields. Would it be useful to add some extra checks and fall back to printing the filename (or something more advanced with the help of `fromfilename`, etc) in those cases?\n- there might be some problems on some combination on options: for example, if the user sets the threshold to `strong`, but launches the importer in non-timid mode, the prompt will not actually be displayed. Would a note on the (upcoming) documentation suffice, as handling this case probably requires some changes that seem to be a bit out of the scope of the plugin?\n\nAs usual, any comments and input are more than welcome!\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\"\"\"\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\n\nfrom beets.autotag import Recommendation\nfrom beets.importer import action\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n\n def before_choose_candidate_event(self, session, task):\n if not task.candidates or task.rec == Recommendation.none:\n return [PromptChoice('p', 'Print tracks', self.print_tracks),\n PromptChoice('k', 'print tracks and sKip',\n self.print_tracks_and_skip)]\n\n # Callbacks for choices.\n def print_tracks(self, session, task):\n for i in task.items:\n print_data(None, i, '$track. $artist - $title ($length)')\n\n def print_tracks_and_skip(self, session, task):\n for i in task.items:\n print_data(None, i, '$track. 
$artist - $title ($length)')\n return action.SKIP\n", "path": "beetsplug/mbsubmit.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser [1]. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\n[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n\"\"\"\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\n\nfrom beets.autotag import Recommendation\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n\n self.config.add({\n 'format': '$track. $title - $artist ($length)',\n 'threshold': 'medium',\n })\n\n # Validate and store threshold.\n self.threshold = self.config['threshold'].as_choice({\n 'none': Recommendation.none,\n 'low': Recommendation.low,\n 'medium': Recommendation.medium,\n 'strong': Recommendation.strong\n })\n\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n\n def before_choose_candidate_event(self, session, task):\n if task.rec <= self.threshold:\n return [PromptChoice('p', 'Print tracks', self.print_tracks)]\n\n def print_tracks(self, session, task):\n for i in task.items:\n print_data(None, i, self.config['format'].get())\n", "path": "beetsplug/mbsubmit.py"}]} | 1,253 | 559 |
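The patch gates the extra importer prompt on a configurable recommendation threshold (`task.rec <= self.threshold`). A reduced sketch of that gating logic using a stand-in enum (beets' real `Recommendation` is an ordered enum with the same four members):

```python
from enum import IntEnum

class Recommendation(IntEnum):  # stand-in for beets.autotag.Recommendation
    none = 0
    low = 1
    medium = 2
    strong = 3

threshold = Recommendation.medium  # what config['threshold'].as_choice(...) yields by default

def prompt_choices(rec: Recommendation) -> list:
    # "Print tracks" is offered only for matches at or below the threshold.
    return ["Print tracks"] if rec <= threshold else []

print(prompt_choices(Recommendation.none))    # ['Print tracks']
print(prompt_choices(Recommendation.strong))  # []
```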
gh_patches_debug_27451 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3041 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Systemd_analyze parser is raising lots of exceptions in production
The SystemdAnalyzeBlame parser is throwing a large number of the exception ValueError('too many values to unpack (expected 2)',) in production.
--- END ISSUE ---
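The traceback in this issue comes from the parser's two-name unpacking of each blame line: any entry whose duration spans more than one unit (or any stray error text mixed into the output) has more than two whitespace-separated fields. A minimal reproduction, illustrative only and not part of the repository:
```
line = "1min 30.123s cloud-init.service"   # hypothetical multi-unit blame entry
try:
    time_taken, service = line.split()     # what the original parser does
except ValueError as exc:
    print(exc)                              # too many values to unpack (expected 2)
```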
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/systemd_analyze.py`
Content:
```
1 """
2 SystemdAnalyzeBlame - command ``systemd-analyze blame``
3 =======================================================
4
5 This module parses the output of command ``systemd-analyze blame``.
6 """
7 from insights.specs import Specs
8 from insights import CommandParser, parser
9 from insights.parsers import SkipException
10
11
12 @parser(Specs.systemd_analyze_blame)
13 class SystemdAnalyzeBlame(CommandParser, dict):
14 """Parse the output of ``systemd-analyze blame`` as ``dict``. The time to
15 initialize is converted into seconds.
16
17 Typical output::
18
19 33.080s cloud-init-local.service
20 32.423s unbound-anchor.service
21 2.773s kdump.service
22 1.699s dnf-makecache.service
23 1.304s cloud-init.service
24 1.073s initrd-switch-root.service
25 939ms cloud-config.service
26 872ms tuned.service
27 770ms cloud-final.service
28
29 Examples:
30
31 >>> 'cloud-init-local.service' in output
32 True
33 >>> output.get('cloud-init.service', 0)
34 1.304
35
36 Returns:
37 (dict): With unit-name & time as key-value pair.
38 Ex::
39
40 {'cloud-config.service': 0.939,
41 'cloud-final.service': 0.77,
42 'cloud-init-local.service': 33.08,
43 'cloud-init.service': 1.304,
44 'dnf-makecache.service': 1.699,
45 'initrd-switch-root.service': 1.073,
46 'kdump.service': 2.773,
47 'tuned.service': 0.872,
48 'unbound-anchor.service': 32.423}
49
50 Raises:
51 SkipException: If content is not provided.
52 """
53 def parse_content(self, content):
54 if not content:
55 raise SkipException
56
57 for c in content:
58 time, service = c.split()
59 if time.endswith('ms'):
60 _time = round(float(time.strip('ms')) / 1000, 5)
61 else:
62 _time = round(float(time.strip('ms')), 5)
63
64 self[service] = _time
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py
--- a/insights/parsers/systemd_analyze.py
+++ b/insights/parsers/systemd_analyze.py
@@ -55,10 +55,34 @@
raise SkipException
for c in content:
- time, service = c.split()
- if time.endswith('ms'):
- _time = round(float(time.strip('ms')) / 1000, 5)
- else:
- _time = round(float(time.strip('ms')), 5)
+ cols = c.split()
+ # Check to make sure that the first character of the first
+ # entry is a number. This will hopefully exclude any errors
+ # that are outputted in the file.
+ if cols[0][0].isdigit():
+ # The service should be the last column, so just
+ # remove the last column from the list before looping.
+ service = cols.pop()
+ time = 0
+ for x in cols:
+ # Convert each column to seconds, and add them up.
+ if x.endswith('y'):
+ # Pulled the 31557600 from systemd src.
+ time += int(x.strip('y')) * 31557600
+ elif x.endswith('month'):
+ # Pulled the 2629800 from systemd src.
+ time += int(x.strip('month')) * 2629800
+ elif x.endswith('w'):
+ time += int(x.strip('w')) * 7 * 24 * 60 ** 2
+ elif x.endswith('d'):
+ time += int(x.strip('d')) * 24 * 60 ** 2
+ elif x.endswith('h'):
+ time += int(x.strip('h')) * 60 ** 2
+ elif x.endswith('min'):
+ time += int(x.strip('min')) * 60
+ elif x.endswith('ms'):
+ time += float(x.strip('ms')) / 1000
+ elif x.endswith('s'):
+ time += float(x.strip('s'))
- self[service] = _time
+ self[service] = time
| {"golden_diff": "diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py\n--- a/insights/parsers/systemd_analyze.py\n+++ b/insights/parsers/systemd_analyze.py\n@@ -55,10 +55,34 @@\n raise SkipException\n \n for c in content:\n- time, service = c.split()\n- if time.endswith('ms'):\n- _time = round(float(time.strip('ms')) / 1000, 5)\n- else:\n- _time = round(float(time.strip('ms')), 5)\n+ cols = c.split()\n+ # Check to make sure that the first character of the first\n+ # entry is a number. This will hopefully exclude any errors\n+ # that are outputted in the file.\n+ if cols[0][0].isdigit():\n+ # The service should be the last column, so just\n+ # remove the last column from the list before looping.\n+ service = cols.pop()\n+ time = 0\n+ for x in cols:\n+ # Convert each column to seconds, and add them up.\n+ if x.endswith('y'):\n+ # Pulled the 31557600 from systemd src.\n+ time += int(x.strip('y')) * 31557600\n+ elif x.endswith('month'):\n+ # Pulled the 2629800 from systemd src.\n+ time += int(x.strip('month')) * 2629800\n+ elif x.endswith('w'):\n+ time += int(x.strip('w')) * 7 * 24 * 60 ** 2\n+ elif x.endswith('d'):\n+ time += int(x.strip('d')) * 24 * 60 ** 2\n+ elif x.endswith('h'):\n+ time += int(x.strip('h')) * 60 ** 2\n+ elif x.endswith('min'):\n+ time += int(x.strip('min')) * 60\n+ elif x.endswith('ms'):\n+ time += float(x.strip('ms')) / 1000\n+ elif x.endswith('s'):\n+ time += float(x.strip('s'))\n \n- self[service] = _time\n+ self[service] = time\n", "issue": "Systemd_analyze parser is raising lots of exceptions in production\nThe SystemdAnalyzeBlame parser is throwing a large number of the exception ValueError('too many values to unpack (expected 2)',) in production.\n", "before_files": [{"content": "\"\"\"\nSystemdAnalyzeBlame - command ``systemd-analyze blame``\n=======================================================\n\nThis module parses the output of command ``systemd-analyze blame``.\n\"\"\"\nfrom insights.specs import Specs\nfrom insights import CommandParser, parser\nfrom insights.parsers import SkipException\n\n\n@parser(Specs.systemd_analyze_blame)\nclass SystemdAnalyzeBlame(CommandParser, dict):\n \"\"\"Parse the output of ``systemd-analyze blame`` as ``dict``. 
The time to\n initialize is converted into seconds.\n\n Typical output::\n\n 33.080s cloud-init-local.service\n 32.423s unbound-anchor.service\n 2.773s kdump.service\n 1.699s dnf-makecache.service\n 1.304s cloud-init.service\n 1.073s initrd-switch-root.service\n 939ms cloud-config.service\n 872ms tuned.service\n 770ms cloud-final.service\n\n Examples:\n\n >>> 'cloud-init-local.service' in output\n True\n >>> output.get('cloud-init.service', 0)\n 1.304\n\n Returns:\n (dict): With unit-name & time as key-value pair.\n Ex::\n\n {'cloud-config.service': 0.939,\n 'cloud-final.service': 0.77,\n 'cloud-init-local.service': 33.08,\n 'cloud-init.service': 1.304,\n 'dnf-makecache.service': 1.699,\n 'initrd-switch-root.service': 1.073,\n 'kdump.service': 2.773,\n 'tuned.service': 0.872,\n 'unbound-anchor.service': 32.423}\n\n Raises:\n SkipException: If content is not provided.\n \"\"\"\n def parse_content(self, content):\n if not content:\n raise SkipException\n\n for c in content:\n time, service = c.split()\n if time.endswith('ms'):\n _time = round(float(time.strip('ms')) / 1000, 5)\n else:\n _time = round(float(time.strip('ms')), 5)\n\n self[service] = _time\n", "path": "insights/parsers/systemd_analyze.py"}], "after_files": [{"content": "\"\"\"\nSystemdAnalyzeBlame - command ``systemd-analyze blame``\n=======================================================\n\nThis module parses the output of command ``systemd-analyze blame``.\n\"\"\"\nfrom insights.specs import Specs\nfrom insights import CommandParser, parser\nfrom insights.parsers import SkipException\n\n\n@parser(Specs.systemd_analyze_blame)\nclass SystemdAnalyzeBlame(CommandParser, dict):\n \"\"\"Parse the output of ``systemd-analyze blame`` as ``dict``. The time to\n initialize is converted into seconds.\n\n Typical output::\n\n 33.080s cloud-init-local.service\n 32.423s unbound-anchor.service\n 2.773s kdump.service\n 1.699s dnf-makecache.service\n 1.304s cloud-init.service\n 1.073s initrd-switch-root.service\n 939ms cloud-config.service\n 872ms tuned.service\n 770ms cloud-final.service\n\n Examples:\n\n >>> 'cloud-init-local.service' in output\n True\n >>> output.get('cloud-init.service', 0)\n 1.304\n\n Returns:\n (dict): With unit-name & time as key-value pair.\n Ex::\n\n {'cloud-config.service': 0.939,\n 'cloud-final.service': 0.77,\n 'cloud-init-local.service': 33.08,\n 'cloud-init.service': 1.304,\n 'dnf-makecache.service': 1.699,\n 'initrd-switch-root.service': 1.073,\n 'kdump.service': 2.773,\n 'tuned.service': 0.872,\n 'unbound-anchor.service': 32.423}\n\n Raises:\n SkipException: If content is not provided.\n \"\"\"\n def parse_content(self, content):\n if not content:\n raise SkipException\n\n for c in content:\n cols = c.split()\n # Check to make sure that the first character of the first\n # entry is a number. 
This will hopefully exclude any errors\n # that are outputted in the file.\n if cols[0][0].isdigit():\n # The service should be the last column, so just\n # remove the last column from the list before looping.\n service = cols.pop()\n time = 0\n for x in cols:\n # Convert each column to seconds, and add them up.\n if x.endswith('y'):\n # Pulled the 31557600 from systemd src.\n time += int(x.strip('y')) * 31557600\n elif x.endswith('month'):\n # Pulled the 2629800 from systemd src.\n time += int(x.strip('month')) * 2629800\n elif x.endswith('w'):\n time += int(x.strip('w')) * 7 * 24 * 60 ** 2\n elif x.endswith('d'):\n time += int(x.strip('d')) * 24 * 60 ** 2\n elif x.endswith('h'):\n time += int(x.strip('h')) * 60 ** 2\n elif x.endswith('min'):\n time += int(x.strip('min')) * 60\n elif x.endswith('ms'):\n time += float(x.strip('ms')) / 1000\n elif x.endswith('s'):\n time += float(x.strip('s'))\n\n self[service] = time\n", "path": "insights/parsers/systemd_analyze.py"}]} | 944 | 525 |
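Pulled out of the golden diff above, a self-contained sketch of the unit conversion it introduces; the year and month factors mirror the values the patch credits to the systemd sources, and everything here is an illustration rather than the shipped parser:
```
FACTORS = [
    ("y", 31557600.0), ("month", 2629800.0),
    ("w", 7 * 24 * 60 ** 2), ("d", 24 * 60 ** 2), ("h", 60 ** 2),
    ("min", 60.0), ("ms", 0.001), ("s", 1.0),
]

def blame_line_to_seconds(line):
    """Return (service, seconds) for one blame line, or None for noise lines."""
    cols = line.split()
    if not cols or not cols[0][0].isdigit():
        return None                          # skip error text mixed into the output
    service = cols.pop()                     # service name is the last column
    total = 0.0
    for token in cols:
        for suffix, factor in FACTORS:
            if token.endswith(suffix):
                total += float(token[:-len(suffix)]) * factor
                break
    return service, total

print(blame_line_to_seconds("1min 30.123s cloud-init.service"))  # ~90.123 seconds
print(blame_line_to_seconds("939ms cloud-config.service"))       # ~0.939 seconds
```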
gh_patches_debug_30261 | rasdani/github-patches | git_diff | mozilla__pontoon-2490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename "Deadline"
As part of https://github.com/mozilla/pontoon/pull/1565, we wrote that "Deadline sounds permanent, threatening, and ugly."
Let's replace the word with something else.
Maybe "Due date"?
Rename "Deadline"
As part of https://github.com/mozilla/pontoon/pull/1565, we wrote that "Deadline sounds permanent, threatening, and ugly."
Let's replace the word with something else.
Maybe "Due date"?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/projects/management/commands/send_deadline_notifications.py`
Content:
```
1 import datetime
2
3 from django.contrib.auth.models import User
4 from django.core.management.base import BaseCommand
5 from notifications.signals import notify
6
7 from pontoon.base.models import Project
8
9
10 class Command(BaseCommand):
11 help = "Notify contributors about the approaching project deadline"
12
13 def handle(self, *args, **options):
14 """
15 This command sends deadline reminders to contributors of projects that
16 are due in 7 days. If 2 days before the deadline project still isn't
17 complete for the contributor's locale, notifications are sent again.
18
19 The command is designed to run daily.
20 """
21 for project in Project.objects.available():
22 if project.deadline:
23 days_left = (project.deadline - datetime.date.today()).days
24 if days_left not in (2, 7):
25 continue
26 else:
27 continue
28
29 self.stdout.write(f"Sending deadline notifications for project {project}.")
30
31 is_project_public = project.visibility == Project.Visibility.PUBLIC
32 verb = f"due in {days_left} days"
33 locales = []
34
35 for project_locale in project.project_locale.all():
36 if project_locale.approved_strings < project_locale.total_strings:
37 locales.append(project_locale.locale)
38
39 contributors = (
40 User.objects.filter(
41 translation__entity__resource__project=project,
42 translation__locale__in=locales,
43 profile__project_deadline_notifications=True,
44 ).distinct(),
45 )
46
47 for contributor in contributors:
48 if is_project_public or contributor.is_superuser:
49 notify.send(project, recipient=contributor, verb=verb)
50
51 self.stdout.write(f"Deadline notifications for project {project} sent.")
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/projects/management/commands/send_deadline_notifications.py b/pontoon/projects/management/commands/send_deadline_notifications.py
--- a/pontoon/projects/management/commands/send_deadline_notifications.py
+++ b/pontoon/projects/management/commands/send_deadline_notifications.py
@@ -8,12 +8,12 @@
class Command(BaseCommand):
- help = "Notify contributors about the approaching project deadline"
+ help = "Notify contributors about the approaching project target date"
def handle(self, *args, **options):
"""
- This command sends deadline reminders to contributors of projects that
- are due in 7 days. If 2 days before the deadline project still isn't
+ This command sends target date reminders to contributors of projects that
+ are due in 7 days. If 2 days before the target date project still isn't
complete for the contributor's locale, notifications are sent again.
The command is designed to run daily.
@@ -26,7 +26,9 @@
else:
continue
- self.stdout.write(f"Sending deadline notifications for project {project}.")
+ self.stdout.write(
+ f"Sending target date notifications for project {project}."
+ )
is_project_public = project.visibility == Project.Visibility.PUBLIC
verb = f"due in {days_left} days"
@@ -48,4 +50,4 @@
if is_project_public or contributor.is_superuser:
notify.send(project, recipient=contributor, verb=verb)
- self.stdout.write(f"Deadline notifications for project {project} sent.")
+ self.stdout.write(f"Target date notifications for project {project} sent.")
| {"golden_diff": "diff --git a/pontoon/projects/management/commands/send_deadline_notifications.py b/pontoon/projects/management/commands/send_deadline_notifications.py\n--- a/pontoon/projects/management/commands/send_deadline_notifications.py\n+++ b/pontoon/projects/management/commands/send_deadline_notifications.py\n@@ -8,12 +8,12 @@\n \n \n class Command(BaseCommand):\n- help = \"Notify contributors about the approaching project deadline\"\n+ help = \"Notify contributors about the approaching project target date\"\n \n def handle(self, *args, **options):\n \"\"\"\n- This command sends deadline reminders to contributors of projects that\n- are due in 7 days. If 2 days before the deadline project still isn't\n+ This command sends target date reminders to contributors of projects that\n+ are due in 7 days. If 2 days before the target date project still isn't\n complete for the contributor's locale, notifications are sent again.\n \n The command is designed to run daily.\n@@ -26,7 +26,9 @@\n else:\n continue\n \n- self.stdout.write(f\"Sending deadline notifications for project {project}.\")\n+ self.stdout.write(\n+ f\"Sending target date notifications for project {project}.\"\n+ )\n \n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n@@ -48,4 +50,4 @@\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n \n- self.stdout.write(f\"Deadline notifications for project {project} sent.\")\n+ self.stdout.write(f\"Target date notifications for project {project} sent.\")\n", "issue": "Rename \"Deadline\"\nAs part of https://github.com/mozilla/pontoon/pull/1565, we wrote that \"Deadline sounds permanent, threatening, and ugly.\"\r\n\r\nLet's replace the word with something else.\r\n\r\nMaybe \"Due date\"?\nRename \"Deadline\"\nAs part of https://github.com/mozilla/pontoon/pull/1565, we wrote that \"Deadline sounds permanent, threatening, and ugly.\"\r\n\r\nLet's replace the word with something else.\r\n\r\nMaybe \"Due date\"?\n", "before_files": [{"content": "import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom notifications.signals import notify\n\nfrom pontoon.base.models import Project\n\n\nclass Command(BaseCommand):\n help = \"Notify contributors about the approaching project deadline\"\n\n def handle(self, *args, **options):\n \"\"\"\n This command sends deadline reminders to contributors of projects that\n are due in 7 days. 
If 2 days before the deadline project still isn't\n complete for the contributor's locale, notifications are sent again.\n\n The command is designed to run daily.\n \"\"\"\n for project in Project.objects.available():\n if project.deadline:\n days_left = (project.deadline - datetime.date.today()).days\n if days_left not in (2, 7):\n continue\n else:\n continue\n\n self.stdout.write(f\"Sending deadline notifications for project {project}.\")\n\n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n locales = []\n\n for project_locale in project.project_locale.all():\n if project_locale.approved_strings < project_locale.total_strings:\n locales.append(project_locale.locale)\n\n contributors = (\n User.objects.filter(\n translation__entity__resource__project=project,\n translation__locale__in=locales,\n profile__project_deadline_notifications=True,\n ).distinct(),\n )\n\n for contributor in contributors:\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n\n self.stdout.write(f\"Deadline notifications for project {project} sent.\")\n", "path": "pontoon/projects/management/commands/send_deadline_notifications.py"}], "after_files": [{"content": "import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom notifications.signals import notify\n\nfrom pontoon.base.models import Project\n\n\nclass Command(BaseCommand):\n help = \"Notify contributors about the approaching project target date\"\n\n def handle(self, *args, **options):\n \"\"\"\n This command sends target date reminders to contributors of projects that\n are due in 7 days. If 2 days before the target date project still isn't\n complete for the contributor's locale, notifications are sent again.\n\n The command is designed to run daily.\n \"\"\"\n for project in Project.objects.available():\n if project.deadline:\n days_left = (project.deadline - datetime.date.today()).days\n if days_left not in (2, 7):\n continue\n else:\n continue\n\n self.stdout.write(\n f\"Sending target date notifications for project {project}.\"\n )\n\n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n locales = []\n\n for project_locale in project.project_locale.all():\n if project_locale.approved_strings < project_locale.total_strings:\n locales.append(project_locale.locale)\n\n contributors = (\n User.objects.filter(\n translation__entity__resource__project=project,\n translation__locale__in=locales,\n profile__project_deadline_notifications=True,\n ).distinct(),\n )\n\n for contributor in contributors:\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n\n self.stdout.write(f\"Target date notifications for project {project} sent.\")\n", "path": "pontoon/projects/management/commands/send_deadline_notifications.py"}]} | 814 | 370 |
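A standalone restatement of the reminder window the command's docstring describes (notify at seven days out and again at two); the helper below is hypothetical and independent of Pontoon's models:
```
import datetime

REMINDER_DAYS = (2, 7)   # taken from the command's docstring above

def should_remind(target_date, today=None):
    """True when the daily job should send reminders for this target date."""
    today = today or datetime.date.today()
    return (target_date - today).days in REMINDER_DAYS

print(should_remind(datetime.date(2024, 1, 10), today=datetime.date(2024, 1, 3)))  # True, 7 days out
print(should_remind(datetime.date(2024, 1, 10), today=datetime.date(2024, 1, 8)))  # True, 2 days out
print(should_remind(datetime.date(2024, 1, 10), today=datetime.date(2024, 1, 9)))  # False
```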
gh_patches_debug_51239 | rasdani/github-patches | git_diff | ManimCommunity__manim-3541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reST in ``MoveAlongPath`` not correct
[https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html](https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html)

The ``.. rubric:: Example`` should not be shown in the docs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/animation/movement.py`
Content:
```
1 """Animations related to movement."""
2
3 from __future__ import annotations
4
5 __all__ = [
6 "Homotopy",
7 "SmoothedVectorizedHomotopy",
8 "ComplexHomotopy",
9 "PhaseFlow",
10 "MoveAlongPath",
11 ]
12
13 from typing import TYPE_CHECKING, Any, Callable
14
15 import numpy as np
16
17 from ..animation.animation import Animation
18 from ..utils.rate_functions import linear
19
20 if TYPE_CHECKING:
21 from ..mobject.mobject import Mobject, VMobject
22
23
24 class Homotopy(Animation):
25 """A Homotopy.
26
27 This is an animation transforming the points of a mobject according
28 to the specified transformation function. With the parameter :math:`t`
29 moving from 0 to 1 throughout the animation and :math:`(x, y, z)`
30 describing the coordinates of the point of a mobject,
31 the function passed to the ``homotopy`` keyword argument should
32 transform the tuple :math:`(x, y, z, t)` to :math:`(x', y', z')`,
33 the coordinates the original point is transformed to at time :math:`t`.
34
35 Parameters
36 ----------
37 homotopy
38 A function mapping :math:`(x, y, z, t)` to :math:`(x', y', z')`.
39 mobject
40 The mobject transformed under the given homotopy.
41 run_time
42 The run time of the animation.
43 apply_function_kwargs
44 Keyword arguments propagated to :meth:`.Mobject.apply_function`.
45 kwargs
46 Further keyword arguments passed to the parent class.
47 """
48
49 def __init__(
50 self,
51 homotopy: Callable[[float, float, float, float], tuple[float, float, float]],
52 mobject: Mobject,
53 run_time: float = 3,
54 apply_function_kwargs: dict[str, Any] | None = None,
55 **kwargs,
56 ) -> None:
57 self.homotopy = homotopy
58 self.apply_function_kwargs = (
59 apply_function_kwargs if apply_function_kwargs is not None else {}
60 )
61 super().__init__(mobject, run_time=run_time, **kwargs)
62
63 def function_at_time_t(self, t: float) -> tuple[float, float, float]:
64 return lambda p: self.homotopy(*p, t)
65
66 def interpolate_submobject(
67 self,
68 submobject: Mobject,
69 starting_submobject: Mobject,
70 alpha: float,
71 ) -> None:
72 submobject.points = starting_submobject.points
73 submobject.apply_function(
74 self.function_at_time_t(alpha), **self.apply_function_kwargs
75 )
76
77
78 class SmoothedVectorizedHomotopy(Homotopy):
79 def interpolate_submobject(
80 self,
81 submobject: Mobject,
82 starting_submobject: Mobject,
83 alpha: float,
84 ) -> None:
85 super().interpolate_submobject(submobject, starting_submobject, alpha)
86 submobject.make_smooth()
87
88
89 class ComplexHomotopy(Homotopy):
90 def __init__(
91 self, complex_homotopy: Callable[[complex], float], mobject: Mobject, **kwargs
92 ) -> None:
93 """
94 Complex Homotopy a function Cx[0, 1] to C
95 """
96
97 def homotopy(
98 x: float,
99 y: float,
100 z: float,
101 t: float,
102 ) -> tuple[float, float, float]:
103 c = complex_homotopy(complex(x, y), t)
104 return (c.real, c.imag, z)
105
106 super().__init__(homotopy, mobject, **kwargs)
107
108
109 class PhaseFlow(Animation):
110 def __init__(
111 self,
112 function: Callable[[np.ndarray], np.ndarray],
113 mobject: Mobject,
114 virtual_time: float = 1,
115 suspend_mobject_updating: bool = False,
116 rate_func: Callable[[float], float] = linear,
117 **kwargs,
118 ) -> None:
119 self.virtual_time = virtual_time
120 self.function = function
121 super().__init__(
122 mobject,
123 suspend_mobject_updating=suspend_mobject_updating,
124 rate_func=rate_func,
125 **kwargs,
126 )
127
128 def interpolate_mobject(self, alpha: float) -> None:
129 if hasattr(self, "last_alpha"):
130 dt = self.virtual_time * (
131 self.rate_func(alpha) - self.rate_func(self.last_alpha)
132 )
133 self.mobject.apply_function(lambda p: p + dt * self.function(p))
134 self.last_alpha = alpha
135
136
137 class MoveAlongPath(Animation):
138 """Make one mobject move along the path of another mobject.
139 Example
140 --------
141 .. manim:: MoveAlongPathExample
142
143 class MoveAlongPathExample(Scene):
144 def construct(self):
145 d1 = Dot().set_color(ORANGE)
146 l1 = Line(LEFT, RIGHT)
147 l2 = VMobject()
148 self.add(d1, l1, l2)
149 l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))
150 self.play(MoveAlongPath(d1, l1), rate_func=linear)
151 """
152
153 def __init__(
154 self,
155 mobject: Mobject,
156 path: VMobject,
157 suspend_mobject_updating: bool | None = False,
158 **kwargs,
159 ) -> None:
160 self.path = path
161 super().__init__(
162 mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs
163 )
164
165 def interpolate_mobject(self, alpha: float) -> None:
166 point = self.path.point_from_proportion(self.rate_func(alpha))
167 self.mobject.move_to(point)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/animation/movement.py b/manim/animation/movement.py
--- a/manim/animation/movement.py
+++ b/manim/animation/movement.py
@@ -136,8 +136,7 @@
class MoveAlongPath(Animation):
"""Make one mobject move along the path of another mobject.
- Example
- --------
+
.. manim:: MoveAlongPathExample
class MoveAlongPathExample(Scene):
| {"golden_diff": "diff --git a/manim/animation/movement.py b/manim/animation/movement.py\n--- a/manim/animation/movement.py\n+++ b/manim/animation/movement.py\n@@ -136,8 +136,7 @@\n \n class MoveAlongPath(Animation):\n \"\"\"Make one mobject move along the path of another mobject.\n- Example\n- --------\n+\n .. manim:: MoveAlongPathExample\n \n class MoveAlongPathExample(Scene):\n", "issue": "reST in ``MoveAlongPath`` not correct\n[https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html](https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html)\r\n\r\n\r\n\r\nThe ``..rubric:: Example`` should not be shown in the docs.\n", "before_files": [{"content": "\"\"\"Animations related to movement.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"Homotopy\",\n \"SmoothedVectorizedHomotopy\",\n \"ComplexHomotopy\",\n \"PhaseFlow\",\n \"MoveAlongPath\",\n]\n\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport numpy as np\n\nfrom ..animation.animation import Animation\nfrom ..utils.rate_functions import linear\n\nif TYPE_CHECKING:\n from ..mobject.mobject import Mobject, VMobject\n\n\nclass Homotopy(Animation):\n \"\"\"A Homotopy.\n\n This is an animation transforming the points of a mobject according\n to the specified transformation function. With the parameter :math:`t`\n moving from 0 to 1 throughout the animation and :math:`(x, y, z)`\n describing the coordinates of the point of a mobject,\n the function passed to the ``homotopy`` keyword argument should\n transform the tuple :math:`(x, y, z, t)` to :math:`(x', y', z')`,\n the coordinates the original point is transformed to at time :math:`t`.\n\n Parameters\n ----------\n homotopy\n A function mapping :math:`(x, y, z, t)` to :math:`(x', y', z')`.\n mobject\n The mobject transformed under the given homotopy.\n run_time\n The run time of the animation.\n apply_function_kwargs\n Keyword arguments propagated to :meth:`.Mobject.apply_function`.\n kwargs\n Further keyword arguments passed to the parent class.\n \"\"\"\n\n def __init__(\n self,\n homotopy: Callable[[float, float, float, float], tuple[float, float, float]],\n mobject: Mobject,\n run_time: float = 3,\n apply_function_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n self.homotopy = homotopy\n self.apply_function_kwargs = (\n apply_function_kwargs if apply_function_kwargs is not None else {}\n )\n super().__init__(mobject, run_time=run_time, **kwargs)\n\n def function_at_time_t(self, t: float) -> tuple[float, float, float]:\n return lambda p: self.homotopy(*p, t)\n\n def interpolate_submobject(\n self,\n submobject: Mobject,\n starting_submobject: Mobject,\n alpha: float,\n ) -> None:\n submobject.points = starting_submobject.points\n submobject.apply_function(\n self.function_at_time_t(alpha), **self.apply_function_kwargs\n )\n\n\nclass SmoothedVectorizedHomotopy(Homotopy):\n def interpolate_submobject(\n self,\n submobject: Mobject,\n starting_submobject: Mobject,\n alpha: float,\n ) -> None:\n super().interpolate_submobject(submobject, starting_submobject, alpha)\n submobject.make_smooth()\n\n\nclass ComplexHomotopy(Homotopy):\n def __init__(\n self, complex_homotopy: Callable[[complex], float], mobject: Mobject, **kwargs\n ) -> None:\n \"\"\"\n Complex Homotopy a function Cx[0, 1] to C\n \"\"\"\n\n def homotopy(\n x: float,\n y: float,\n z: float,\n t: float,\n ) -> tuple[float, float, float]:\n c = complex_homotopy(complex(x, y), t)\n return (c.real, c.imag, z)\n\n 
super().__init__(homotopy, mobject, **kwargs)\n\n\nclass PhaseFlow(Animation):\n def __init__(\n self,\n function: Callable[[np.ndarray], np.ndarray],\n mobject: Mobject,\n virtual_time: float = 1,\n suspend_mobject_updating: bool = False,\n rate_func: Callable[[float], float] = linear,\n **kwargs,\n ) -> None:\n self.virtual_time = virtual_time\n self.function = function\n super().__init__(\n mobject,\n suspend_mobject_updating=suspend_mobject_updating,\n rate_func=rate_func,\n **kwargs,\n )\n\n def interpolate_mobject(self, alpha: float) -> None:\n if hasattr(self, \"last_alpha\"):\n dt = self.virtual_time * (\n self.rate_func(alpha) - self.rate_func(self.last_alpha)\n )\n self.mobject.apply_function(lambda p: p + dt * self.function(p))\n self.last_alpha = alpha\n\n\nclass MoveAlongPath(Animation):\n \"\"\"Make one mobject move along the path of another mobject.\n Example\n --------\n .. manim:: MoveAlongPathExample\n\n class MoveAlongPathExample(Scene):\n def construct(self):\n d1 = Dot().set_color(ORANGE)\n l1 = Line(LEFT, RIGHT)\n l2 = VMobject()\n self.add(d1, l1, l2)\n l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))\n self.play(MoveAlongPath(d1, l1), rate_func=linear)\n \"\"\"\n\n def __init__(\n self,\n mobject: Mobject,\n path: VMobject,\n suspend_mobject_updating: bool | None = False,\n **kwargs,\n ) -> None:\n self.path = path\n super().__init__(\n mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs\n )\n\n def interpolate_mobject(self, alpha: float) -> None:\n point = self.path.point_from_proportion(self.rate_func(alpha))\n self.mobject.move_to(point)\n", "path": "manim/animation/movement.py"}], "after_files": [{"content": "\"\"\"Animations related to movement.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"Homotopy\",\n \"SmoothedVectorizedHomotopy\",\n \"ComplexHomotopy\",\n \"PhaseFlow\",\n \"MoveAlongPath\",\n]\n\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport numpy as np\n\nfrom ..animation.animation import Animation\nfrom ..utils.rate_functions import linear\n\nif TYPE_CHECKING:\n from ..mobject.mobject import Mobject, VMobject\n\n\nclass Homotopy(Animation):\n \"\"\"A Homotopy.\n\n This is an animation transforming the points of a mobject according\n to the specified transformation function. 
With the parameter :math:`t`\n moving from 0 to 1 throughout the animation and :math:`(x, y, z)`\n describing the coordinates of the point of a mobject,\n the function passed to the ``homotopy`` keyword argument should\n transform the tuple :math:`(x, y, z, t)` to :math:`(x', y', z')`,\n the coordinates the original point is transformed to at time :math:`t`.\n\n Parameters\n ----------\n homotopy\n A function mapping :math:`(x, y, z, t)` to :math:`(x', y', z')`.\n mobject\n The mobject transformed under the given homotopy.\n run_time\n The run time of the animation.\n apply_function_kwargs\n Keyword arguments propagated to :meth:`.Mobject.apply_function`.\n kwargs\n Further keyword arguments passed to the parent class.\n \"\"\"\n\n def __init__(\n self,\n homotopy: Callable[[float, float, float, float], tuple[float, float, float]],\n mobject: Mobject,\n run_time: float = 3,\n apply_function_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n self.homotopy = homotopy\n self.apply_function_kwargs = (\n apply_function_kwargs if apply_function_kwargs is not None else {}\n )\n super().__init__(mobject, run_time=run_time, **kwargs)\n\n def function_at_time_t(self, t: float) -> tuple[float, float, float]:\n return lambda p: self.homotopy(*p, t)\n\n def interpolate_submobject(\n self,\n submobject: Mobject,\n starting_submobject: Mobject,\n alpha: float,\n ) -> None:\n submobject.points = starting_submobject.points\n submobject.apply_function(\n self.function_at_time_t(alpha), **self.apply_function_kwargs\n )\n\n\nclass SmoothedVectorizedHomotopy(Homotopy):\n def interpolate_submobject(\n self,\n submobject: Mobject,\n starting_submobject: Mobject,\n alpha: float,\n ) -> None:\n super().interpolate_submobject(submobject, starting_submobject, alpha)\n submobject.make_smooth()\n\n\nclass ComplexHomotopy(Homotopy):\n def __init__(\n self, complex_homotopy: Callable[[complex], float], mobject: Mobject, **kwargs\n ) -> None:\n \"\"\"\n Complex Homotopy a function Cx[0, 1] to C\n \"\"\"\n\n def homotopy(\n x: float,\n y: float,\n z: float,\n t: float,\n ) -> tuple[float, float, float]:\n c = complex_homotopy(complex(x, y), t)\n return (c.real, c.imag, z)\n\n super().__init__(homotopy, mobject, **kwargs)\n\n\nclass PhaseFlow(Animation):\n def __init__(\n self,\n function: Callable[[np.ndarray], np.ndarray],\n mobject: Mobject,\n virtual_time: float = 1,\n suspend_mobject_updating: bool = False,\n rate_func: Callable[[float], float] = linear,\n **kwargs,\n ) -> None:\n self.virtual_time = virtual_time\n self.function = function\n super().__init__(\n mobject,\n suspend_mobject_updating=suspend_mobject_updating,\n rate_func=rate_func,\n **kwargs,\n )\n\n def interpolate_mobject(self, alpha: float) -> None:\n if hasattr(self, \"last_alpha\"):\n dt = self.virtual_time * (\n self.rate_func(alpha) - self.rate_func(self.last_alpha)\n )\n self.mobject.apply_function(lambda p: p + dt * self.function(p))\n self.last_alpha = alpha\n\n\nclass MoveAlongPath(Animation):\n \"\"\"Make one mobject move along the path of another mobject.\n\n .. 
manim:: MoveAlongPathExample\n\n class MoveAlongPathExample(Scene):\n def construct(self):\n d1 = Dot().set_color(ORANGE)\n l1 = Line(LEFT, RIGHT)\n l2 = VMobject()\n self.add(d1, l1, l2)\n l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))\n self.play(MoveAlongPath(d1, l1), rate_func=linear)\n \"\"\"\n\n def __init__(\n self,\n mobject: Mobject,\n path: VMobject,\n suspend_mobject_updating: bool | None = False,\n **kwargs,\n ) -> None:\n self.path = path\n super().__init__(\n mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs\n )\n\n def interpolate_mobject(self, alpha: float) -> None:\n point = self.path.point_from_proportion(self.rate_func(alpha))\n self.mobject.move_to(point)\n", "path": "manim/animation/movement.py"}]} | 2,036 | 103 |
gh_patches_debug_563 | rasdani/github-patches | git_diff | pex-tool__pex-910 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.5
On the docket:
+ [x] Kill `Pip.spawn_install_wheel` `overwrite` arg. #907
+ [x] Silence pip warnings about Python 2.7. #908
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.4'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.4'
+__version__ = '2.1.5'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.4'\n+__version__ = '2.1.5'\n", "issue": "Release 2.1.5\nOn the docket:\r\n+ [x] Kill `Pip.spawn_install_wheel` `overwrite` arg. #907\r\n+ [x] Silence pip warnings about Python 2.7. #908\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.4'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.5'\n", "path": "pex/version.py"}]} | 360 | 94 |
gh_patches_debug_31701 | rasdani/github-patches | git_diff | searx__searx-1594 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duden search engine not working anymore
They changed the site layout.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/duden.py`
Content:
```
1 """
2 Duden
3 @website https://www.duden.de
4 @provide-api no
5 @using-api no
6 @results HTML (using search portal)
7 @stable no (HTML can change)
8 @parse url, title, content
9 """
10
11 from lxml import html, etree
12 import re
13 from searx.engines.xpath import extract_text
14 from searx.url_utils import quote
15 from searx import logger
16
17 categories = ['general']
18 paging = True
19 language_support = False
20
21 # search-url
22 base_url = 'https://www.duden.de/'
23 search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'
24
25
26 def request(query, params):
27 '''pre-request callback
28 params<dict>:
29 method : POST/GET
30 headers : {}
31 data : {} # if method == POST
32 url : ''
33 category: 'search category'
34 pageno : 1 # number of the requested page
35 '''
36
37 offset = (params['pageno'] - 1)
38 params['url'] = search_url.format(offset=offset, query=quote(query))
39 return params
40
41
42 def response(resp):
43 '''post-response callback
44 resp: requests response object
45 '''
46 results = []
47
48 dom = html.fromstring(resp.text)
49
50 try:
51 number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
52 '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
53 )
54
55 results.append({'number_of_results': int(number_of_results_string)})
56
57 except:
58 logger.debug("Couldn't read number of results.")
59 pass
60
61 for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
62 try:
63 logger.debug("running for %s" % str(result))
64 link = result.xpath('.//h2/a')[0]
65 url = link.attrib.get('href')
66 title = result.xpath('string(.//h2/a)')
67 content = extract_text(result.xpath('.//p'))
68 # append result
69 results.append({'url': url,
70 'title': title,
71 'content': content})
72 except:
73 logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
74 continue
75
76 return results
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/duden.py b/searx/engines/duden.py
--- a/searx/engines/duden.py
+++ b/searx/engines/duden.py
@@ -11,7 +11,7 @@
from lxml import html, etree
import re
from searx.engines.xpath import extract_text
-from searx.url_utils import quote
+from searx.url_utils import quote, urljoin
from searx import logger
categories = ['general']
@@ -20,7 +20,7 @@
# search-url
base_url = 'https://www.duden.de/'
-search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'
+search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'
def request(query, params):
@@ -35,7 +35,11 @@
'''
offset = (params['pageno'] - 1)
- params['url'] = search_url.format(offset=offset, query=quote(query))
+ if offset == 0:
+ search_url_fmt = base_url + 'suchen/dudenonline/{query}'
+ params['url'] = search_url_fmt.format(query=quote(query))
+ else:
+ params['url'] = search_url.format(offset=offset, query=quote(query))
return params
@@ -58,12 +62,11 @@
logger.debug("Couldn't read number of results.")
pass
- for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
+ for result in dom.xpath('//section[not(contains(@class, "essay"))]'):
try:
- logger.debug("running for %s" % str(result))
- link = result.xpath('.//h2/a')[0]
- url = link.attrib.get('href')
- title = result.xpath('string(.//h2/a)')
+ url = result.xpath('.//h2/a')[0].get('href')
+ url = urljoin(base_url, url)
+ title = result.xpath('string(.//h2/a)').strip()
content = extract_text(result.xpath('.//p'))
# append result
results.append({'url': url,
| {"golden_diff": "diff --git a/searx/engines/duden.py b/searx/engines/duden.py\n--- a/searx/engines/duden.py\n+++ b/searx/engines/duden.py\n@@ -11,7 +11,7 @@\n from lxml import html, etree\n import re\n from searx.engines.xpath import extract_text\n-from searx.url_utils import quote\n+from searx.url_utils import quote, urljoin\n from searx import logger\n \n categories = ['general']\n@@ -20,7 +20,7 @@\n \n # search-url\n base_url = 'https://www.duden.de/'\n-search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'\n+search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'\n \n \n def request(query, params):\n@@ -35,7 +35,11 @@\n '''\n \n offset = (params['pageno'] - 1)\n- params['url'] = search_url.format(offset=offset, query=quote(query))\n+ if offset == 0:\n+ search_url_fmt = base_url + 'suchen/dudenonline/{query}'\n+ params['url'] = search_url_fmt.format(query=quote(query))\n+ else:\n+ params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n \n \n@@ -58,12 +62,11 @@\n logger.debug(\"Couldn't read number of results.\")\n pass\n \n- for result in dom.xpath('//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'):\n+ for result in dom.xpath('//section[not(contains(@class, \"essay\"))]'):\n try:\n- logger.debug(\"running for %s\" % str(result))\n- link = result.xpath('.//h2/a')[0]\n- url = link.attrib.get('href')\n- title = result.xpath('string(.//h2/a)')\n+ url = result.xpath('.//h2/a')[0].get('href')\n+ url = urljoin(base_url, url)\n+ title = result.xpath('string(.//h2/a)').strip()\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n", "issue": "Duden search engine not working anymore\nThey changed the site layout.\n", "before_files": [{"content": "\"\"\"\n Duden\n @website https://www.duden.de\n @provide-api no\n @using-api no\n @results HTML (using search portal)\n @stable no (HTML can change)\n @parse url, title, content\n\"\"\"\n\nfrom lxml import html, etree\nimport re\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import quote\nfrom searx import logger\n\ncategories = ['general']\npaging = True\nlanguage_support = False\n\n# search-url\nbase_url = 'https://www.duden.de/'\nsearch_url = base_url + 'suchen/dudenonline/{query}?page={offset}'\n\n\ndef request(query, params):\n '''pre-request callback\n params<dict>:\n method : POST/GET\n headers : {}\n data : {} # if method == POST\n url : ''\n category: 'search category'\n pageno : 1 # number of the requested page\n '''\n\n offset = (params['pageno'] - 1)\n params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n\n\ndef response(resp):\n '''post-response callback\n resp: requests response object\n '''\n results = []\n\n dom = html.fromstring(resp.text)\n\n try:\n number_of_results_string = re.sub('[^0-9]', '', dom.xpath(\n '//a[@class=\"active\" and contains(@href,\"/suchen/dudenonline\")]/span/text()')[0]\n )\n\n results.append({'number_of_results': int(number_of_results_string)})\n\n except:\n logger.debug(\"Couldn't read number of results.\")\n pass\n\n for result in dom.xpath('//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'):\n try:\n logger.debug(\"running for %s\" % str(result))\n link = result.xpath('.//h2/a')[0]\n url = link.attrib.get('href')\n title = result.xpath('string(.//h2/a)')\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content})\n 
except:\n logger.debug('result parse error in:\\n%s', etree.tostring(result, pretty_print=True))\n continue\n\n return results\n", "path": "searx/engines/duden.py"}], "after_files": [{"content": "\"\"\"\n Duden\n @website https://www.duden.de\n @provide-api no\n @using-api no\n @results HTML (using search portal)\n @stable no (HTML can change)\n @parse url, title, content\n\"\"\"\n\nfrom lxml import html, etree\nimport re\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import quote, urljoin\nfrom searx import logger\n\ncategories = ['general']\npaging = True\nlanguage_support = False\n\n# search-url\nbase_url = 'https://www.duden.de/'\nsearch_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'\n\n\ndef request(query, params):\n '''pre-request callback\n params<dict>:\n method : POST/GET\n headers : {}\n data : {} # if method == POST\n url : ''\n category: 'search category'\n pageno : 1 # number of the requested page\n '''\n\n offset = (params['pageno'] - 1)\n if offset == 0:\n search_url_fmt = base_url + 'suchen/dudenonline/{query}'\n params['url'] = search_url_fmt.format(query=quote(query))\n else:\n params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n\n\ndef response(resp):\n '''post-response callback\n resp: requests response object\n '''\n results = []\n\n dom = html.fromstring(resp.text)\n\n try:\n number_of_results_string = re.sub('[^0-9]', '', dom.xpath(\n '//a[@class=\"active\" and contains(@href,\"/suchen/dudenonline\")]/span/text()')[0]\n )\n\n results.append({'number_of_results': int(number_of_results_string)})\n\n except:\n logger.debug(\"Couldn't read number of results.\")\n pass\n\n for result in dom.xpath('//section[not(contains(@class, \"essay\"))]'):\n try:\n url = result.xpath('.//h2/a')[0].get('href')\n url = urljoin(base_url, url)\n title = result.xpath('string(.//h2/a)').strip()\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content})\n except:\n logger.debug('result parse error in:\\n%s', etree.tostring(result, pretty_print=True))\n continue\n\n return results\n", "path": "searx/engines/duden.py"}]} | 942 | 511 |
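The substance of this fix is the new search-URL scheme (an unpaginated first page, then a `search_api_fulltext` query parameter with a zero-based page offset) and absolute result links. A self-contained sketch of that construction, mirroring the golden diff above but using the standard library directly:
```
from urllib.parse import quote, urljoin

BASE_URL = "https://www.duden.de/"

def duden_search_url(query, pageno=1):
    """Build the search URL the way the patched engine does (sketch only)."""
    if pageno == 1:
        return BASE_URL + "suchen/dudenonline/" + quote(query)
    return (BASE_URL + "suchen/dudenonline/{}?search_api_fulltext=&page={}"
            .format(quote(query), pageno - 1))

def absolute_result_url(relative_href):
    """Result hrefs are site-relative; join them against the base URL."""
    return urljoin(BASE_URL, relative_href)

print(duden_search_url("Haus"))                      # .../suchen/dudenonline/Haus
print(duden_search_url("Haus", pageno=3))            # ...?search_api_fulltext=&page=2
print(absolute_result_url("/rechtschreibung/Haus"))  # https://www.duden.de/rechtschreibung/Haus
```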
gh_patches_debug_43872 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-133 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Harden maintainers linting
As addressed in https://github.com/conda-forge/pyutilib-feedstock/pull/1:
```
Running command: ['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py', './feedstocks_repo/feedstocks']
Traceback (most recent call last):
File "/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py", line 85, in <module>
contributors = data.get('extra', {}).get('recipe-maintainers', [])
AttributeError: 'list' object has no attribute 'get'
CalledProcessError: Command '['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py',
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_smithy/lint_recipe.py`
Content:
```
1 import os
2 import re
3
4 import jinja2
5 import ruamel.yaml
6
7
8 EXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',
9 'test', 'app', 'about', 'extra']
10
11 REQUIREMENTS_ORDER = ['build', 'run']
12
13
14 class NullUndefined(jinja2.Undefined):
15 def __unicode__(self):
16 return unicode(self._undefined_name)
17
18
19 def lintify(meta, recipe_dir=None):
20 lints = []
21 major_sections = list(meta.keys())
22
23 # If the recipe_dir exists (no guarantee within this function) , we can
24 # find the meta.yaml within it.
25 meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')
26
27 # 1: Top level meta.yaml keys should have a specific order.
28 section_order_sorted = sorted(major_sections,
29 key=EXPECTED_SECTION_ORDER.index)
30 if major_sections != section_order_sorted:
31 lints.append('The top level meta keys are in an unexpected order. '
32 'Expecting {}.'.format(section_order_sorted))
33
34 # 2: The about section should have a home, license and summary.
35 for about_item in ['home', 'license', 'summary']:
36 about_section = meta.get('about', {}) or {}
37 # if the section doesn't exist, or is just empty, lint it.
38 if not about_section.get(about_item, ''):
39 lints.append('The {} item is expected in the about section.'
40 ''.format(about_item))
41
42 # 3: The recipe should have some maintainers.
43 extra_section = meta.get('extra', {}) or {}
44 if not extra_section.get('recipe-maintainers', []):
45 lints.append('The recipe could do with some maintainers listed in '
46 'the "extra/recipe-maintainers" section.')
47
48 # 4: The recipe should have some tests.
49 if 'test' not in major_sections:
50 test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',
51 'run_test.pl']
52 a_test_file_exists = (recipe_dir is not None and
53 any(os.path.exists(os.path.join(recipe_dir,
54 test_file))
55 for test_file in test_files))
56 if not a_test_file_exists:
57 lints.append('The recipe must have some tests.')
58
59 # 5: License cannot be 'unknown.'
60 license = meta.get('about', {}).get('license', '').lower()
61 if 'unknown' == license.strip():
62 lints.append('The recipe license cannot be unknown.')
63
64 # 6: Selectors should be in a tidy form.
65 if recipe_dir is not None and os.path.exists(meta_fname):
66 bad_selectors = []
67 # Good selectors look like ".*\s\s#\s[...]"
68 good_selectors_pat = re.compile(r'(.+?)\s{2,}#\s\[(.+)\](?(2).*)$')
69 with open(meta_fname, 'r') as fh:
70 for selector_line in selector_lines(fh):
71 if not good_selectors_pat.match(selector_line):
72 bad_selectors.append(selector_line)
73 if bad_selectors:
74 lints.append('Selectors are suggested to take a '
75 '" # [<selector>]" form.')
76
77 # 7: The build section should have a build number.
78 build_section = meta.get('build', {}) or {}
79 build_number = build_section.get('number', None)
80 if build_number is None:
81 lints.append('The recipe must have a `build/number` section.')
82
83 # 8: The build section should be before the run section in requirements.
84 requirements_section = meta.get('requirements', {}) or {}
85 requirements_order_sorted = sorted(requirements_section,
86 key=REQUIREMENTS_ORDER.index)
87 if requirements_section.keys() != requirements_order_sorted:
88 lints.append('The `requirements/build` section should be defined '
89 'before the `requirements/run` section.')
90
91 # 9: Files downloaded should have a hash.
92 source_section = meta.get('source', {}) or {}
93 if ('url' in source_section and
94 not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):
95 lints.append('When defining a source/url please add a sha256, sha1 '
96 'or md5 checksum (sha256 preferably).')
97
98 return lints
99
100
101 def selector_lines(lines):
102 # Using the same pattern defined in conda-build (metadata.py),
103 # we identify selectors.
104 sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')
105
106 for line in lines:
107 line = line.rstrip()
108 if line.lstrip().startswith('#'):
109 # Don't bother with comment only lines
110 continue
111 m = sel_pat.match(line)
112 if m:
113 m.group(3)
114 yield line
115
116
117 def main(recipe_dir):
118 recipe_dir = os.path.abspath(recipe_dir)
119 recipe_meta = os.path.join(recipe_dir, 'meta.yaml')
120 if not os.path.exists(recipe_dir):
121 raise IOError('Feedstock has no recipe/meta.yaml.')
122
123 env = jinja2.Environment(undefined=NullUndefined)
124
125 with open(recipe_meta, 'r') as fh:
126 content = env.from_string(''.join(fh)).render()
127 meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
128 results = lintify(meta, recipe_dir)
129 return results
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_smithy/lint_recipe.py b/conda_smithy/lint_recipe.py
--- a/conda_smithy/lint_recipe.py
+++ b/conda_smithy/lint_recipe.py
@@ -16,6 +16,15 @@
return unicode(self._undefined_name)
+def get_section(parent, name, lints):
+ section = parent.get(name, {})
+ if not isinstance(section, dict):
+ lints.append('The "{}" section was expected to be a dictionary, but '
+ 'got a {}.'.format(name, type(section).__name__))
+ section = {}
+ return section
+
+
def lintify(meta, recipe_dir=None):
lints = []
major_sections = list(meta.keys())
@@ -24,6 +33,12 @@
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')
+ source_section = get_section(meta, 'source', lints)
+ build_section = get_section(meta, 'build', lints)
+ requirements_section = get_section(meta, 'requirements', lints)
+ about_section = get_section(meta, 'about', lints)
+ extra_section = get_section(meta, 'extra', lints)
+
# 1: Top level meta.yaml keys should have a specific order.
section_order_sorted = sorted(major_sections,
key=EXPECTED_SECTION_ORDER.index)
@@ -33,14 +48,12 @@
# 2: The about section should have a home, license and summary.
for about_item in ['home', 'license', 'summary']:
- about_section = meta.get('about', {}) or {}
# if the section doesn't exist, or is just empty, lint it.
if not about_section.get(about_item, ''):
lints.append('The {} item is expected in the about section.'
''.format(about_item))
# 3: The recipe should have some maintainers.
- extra_section = meta.get('extra', {}) or {}
if not extra_section.get('recipe-maintainers', []):
lints.append('The recipe could do with some maintainers listed in '
'the "extra/recipe-maintainers" section.')
@@ -57,7 +70,7 @@
lints.append('The recipe must have some tests.')
# 5: License cannot be 'unknown.'
- license = meta.get('about', {}).get('license', '').lower()
+ license = about_section.get('license', '').lower()
if 'unknown' == license.strip():
lints.append('The recipe license cannot be unknown.')
@@ -75,13 +88,10 @@
'" # [<selector>]" form.')
# 7: The build section should have a build number.
- build_section = meta.get('build', {}) or {}
- build_number = build_section.get('number', None)
- if build_number is None:
+ if build_section.get('number', None) is None:
lints.append('The recipe must have a `build/number` section.')
# 8: The build section should be before the run section in requirements.
- requirements_section = meta.get('requirements', {}) or {}
requirements_order_sorted = sorted(requirements_section,
key=REQUIREMENTS_ORDER.index)
if requirements_section.keys() != requirements_order_sorted:
@@ -89,7 +99,6 @@
'before the `requirements/run` section.')
# 9: Files downloaded should have a hash.
- source_section = meta.get('source', {}) or {}
if ('url' in source_section and
not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):
lints.append('When defining a source/url please add a sha256, sha1 '
| {"golden_diff": "diff --git a/conda_smithy/lint_recipe.py b/conda_smithy/lint_recipe.py\n--- a/conda_smithy/lint_recipe.py\n+++ b/conda_smithy/lint_recipe.py\n@@ -16,6 +16,15 @@\n return unicode(self._undefined_name)\n \n \n+def get_section(parent, name, lints):\n+ section = parent.get(name, {})\n+ if not isinstance(section, dict):\n+ lints.append('The \"{}\" section was expected to be a dictionary, but '\n+ 'got a {}.'.format(name, type(section).__name__))\n+ section = {}\n+ return section\n+\n+\n def lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n@@ -24,6 +33,12 @@\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n \n+ source_section = get_section(meta, 'source', lints)\n+ build_section = get_section(meta, 'build', lints)\n+ requirements_section = get_section(meta, 'requirements', lints)\n+ about_section = get_section(meta, 'about', lints)\n+ extra_section = get_section(meta, 'extra', lints)\n+\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n@@ -33,14 +48,12 @@\n \n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n- about_section = meta.get('about', {}) or {}\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n \n # 3: The recipe should have some maintainers.\n- extra_section = meta.get('extra', {}) or {}\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n@@ -57,7 +70,7 @@\n lints.append('The recipe must have some tests.')\n \n # 5: License cannot be 'unknown.'\n- license = meta.get('about', {}).get('license', '').lower()\n+ license = about_section.get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n \n@@ -75,13 +88,10 @@\n '\" # [<selector>]\" form.')\n \n # 7: The build section should have a build number.\n- build_section = meta.get('build', {}) or {}\n- build_number = build_section.get('number', None)\n- if build_number is None:\n+ if build_section.get('number', None) is None:\n lints.append('The recipe must have a `build/number` section.')\n \n # 8: The build section should be before the run section in requirements.\n- requirements_section = meta.get('requirements', {}) or {}\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n@@ -89,7 +99,6 @@\n 'before the `requirements/run` section.')\n \n # 9: Files downloaded should have a hash.\n- source_section = meta.get('source', {}) or {}\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n", "issue": "Harden maintainers linting\nAs addressed in https://github.com/conda-forge/pyutilib-feedstock/pull/1:\n\n```\nRunning command: ['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py', './feedstocks_repo/feedstocks']\nTraceback (most recent call last):\n File \"/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py\", line 85, in <module>\n contributors = data.get('extra', {}).get('recipe-maintainers', 
[])\nAttributeError: 'list' object has no attribute 'get'\nCalledProcessError: Command '['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py',\n```\n\n", "before_files": [{"content": "import os\nimport re\n\nimport jinja2\nimport ruamel.yaml\n\n\nEXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',\n 'test', 'app', 'about', 'extra']\n\nREQUIREMENTS_ORDER = ['build', 'run']\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return unicode(self._undefined_name)\n\n\ndef lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n\n # If the recipe_dir exists (no guarantee within this function) , we can\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n if major_sections != section_order_sorted:\n lints.append('The top level meta keys are in an unexpected order. '\n 'Expecting {}.'.format(section_order_sorted))\n\n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n about_section = meta.get('about', {}) or {}\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n\n # 3: The recipe should have some maintainers.\n extra_section = meta.get('extra', {}) or {}\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n\n # 4: The recipe should have some tests.\n if 'test' not in major_sections:\n test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',\n 'run_test.pl']\n a_test_file_exists = (recipe_dir is not None and\n any(os.path.exists(os.path.join(recipe_dir,\n test_file))\n for test_file in test_files))\n if not a_test_file_exists:\n lints.append('The recipe must have some tests.')\n\n # 5: License cannot be 'unknown.'\n license = meta.get('about', {}).get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n\n # 6: Selectors should be in a tidy form.\n if recipe_dir is not None and os.path.exists(meta_fname):\n bad_selectors = []\n # Good selectors look like \".*\\s\\s#\\s[...]\"\n good_selectors_pat = re.compile(r'(.+?)\\s{2,}#\\s\\[(.+)\\](?(2).*)$')\n with open(meta_fname, 'r') as fh:\n for selector_line in selector_lines(fh):\n if not good_selectors_pat.match(selector_line):\n bad_selectors.append(selector_line)\n if bad_selectors:\n lints.append('Selectors are suggested to take a '\n '\" # [<selector>]\" form.')\n\n # 7: The build section should have a build number.\n build_section = meta.get('build', {}) or {}\n build_number = build_section.get('number', None)\n if build_number is None:\n lints.append('The recipe must have a `build/number` section.')\n\n # 8: The build section should be before the run section in requirements.\n requirements_section = meta.get('requirements', {}) or {}\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n lints.append('The `requirements/build` section should be defined '\n 'before the `requirements/run` section.')\n\n # 9: Files downloaded should have a hash.\n source_section = 
meta.get('source', {}) or {}\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n 'or md5 checksum (sha256 preferably).')\n\n return lints\n\n\ndef selector_lines(lines):\n # Using the same pattern defined in conda-build (metadata.py),\n # we identify selectors.\n sel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\n\n for line in lines:\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n m.group(3)\n yield line\n\n\ndef main(recipe_dir):\n recipe_dir = os.path.abspath(recipe_dir)\n recipe_meta = os.path.join(recipe_dir, 'meta.yaml')\n if not os.path.exists(recipe_dir):\n raise IOError('Feedstock has no recipe/meta.yaml.')\n\n env = jinja2.Environment(undefined=NullUndefined)\n\n with open(recipe_meta, 'r') as fh:\n content = env.from_string(''.join(fh)).render()\n meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)\n results = lintify(meta, recipe_dir)\n return results\n", "path": "conda_smithy/lint_recipe.py"}], "after_files": [{"content": "import os\nimport re\n\nimport jinja2\nimport ruamel.yaml\n\n\nEXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',\n 'test', 'app', 'about', 'extra']\n\nREQUIREMENTS_ORDER = ['build', 'run']\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return unicode(self._undefined_name)\n\n\ndef get_section(parent, name, lints):\n section = parent.get(name, {})\n if not isinstance(section, dict):\n lints.append('The \"{}\" section was expected to be a dictionary, but '\n 'got a {}.'.format(name, type(section).__name__))\n section = {}\n return section\n\n\ndef lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n\n # If the recipe_dir exists (no guarantee within this function) , we can\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n\n source_section = get_section(meta, 'source', lints)\n build_section = get_section(meta, 'build', lints)\n requirements_section = get_section(meta, 'requirements', lints)\n about_section = get_section(meta, 'about', lints)\n extra_section = get_section(meta, 'extra', lints)\n\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n if major_sections != section_order_sorted:\n lints.append('The top level meta keys are in an unexpected order. 
'\n 'Expecting {}.'.format(section_order_sorted))\n\n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n\n # 3: The recipe should have some maintainers.\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n\n # 4: The recipe should have some tests.\n if 'test' not in major_sections:\n test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',\n 'run_test.pl']\n a_test_file_exists = (recipe_dir is not None and\n any(os.path.exists(os.path.join(recipe_dir,\n test_file))\n for test_file in test_files))\n if not a_test_file_exists:\n lints.append('The recipe must have some tests.')\n\n # 5: License cannot be 'unknown.'\n license = about_section.get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n\n # 6: Selectors should be in a tidy form.\n if recipe_dir is not None and os.path.exists(meta_fname):\n bad_selectors = []\n # Good selectors look like \".*\\s\\s#\\s[...]\"\n good_selectors_pat = re.compile(r'(.+?)\\s{2,}#\\s\\[(.+)\\](?(2).*)$')\n with open(meta_fname, 'r') as fh:\n for selector_line in selector_lines(fh):\n if not good_selectors_pat.match(selector_line):\n bad_selectors.append(selector_line)\n if bad_selectors:\n lints.append('Selectors are suggested to take a '\n '\" # [<selector>]\" form.')\n\n # 7: The build section should have a build number.\n if build_section.get('number', None) is None:\n lints.append('The recipe must have a `build/number` section.')\n\n # 8: The build section should be before the run section in requirements.\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n lints.append('The `requirements/build` section should be defined '\n 'before the `requirements/run` section.')\n\n # 9: Files downloaded should have a hash.\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n 'or md5 checksum (sha256 preferably).')\n\n return lints\n\n\ndef selector_lines(lines):\n # Using the same pattern defined in conda-build (metadata.py),\n # we identify selectors.\n sel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\n\n for line in lines:\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n m.group(3)\n yield line\n\n\ndef main(recipe_dir):\n recipe_dir = os.path.abspath(recipe_dir)\n recipe_meta = os.path.join(recipe_dir, 'meta.yaml')\n if not os.path.exists(recipe_dir):\n raise IOError('Feedstock has no recipe/meta.yaml.')\n\n env = jinja2.Environment(undefined=NullUndefined)\n\n with open(recipe_meta, 'r') as fh:\n content = env.from_string(''.join(fh)).render()\n meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)\n results = lintify(meta, recipe_dir)\n return results\n", "path": "conda_smithy/lint_recipe.py"}]} | 1,882 | 851 |
gh_patches_debug_19671 | rasdani/github-patches | git_diff | kartoza__prj.app-508 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Course name
Currently the course name is quite long, e.g. XProject_QGIS Introduction_2017-07-05-2017-07-19.
Would it be better if we had a shorter course name, e.g. QGIS Introduction 101?
What do you think, @timlinux?
--- END ISSUE ---
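A possible reading of the request above is to keep the long identifier for uniqueness but derive a short, human-readable label for display. The sketch below is purely illustrative — `build_short_name` and its arguments are hypothetical names, not taken from the project — and only shows the string-handling idea, not the actual model change:

```python
# Illustrative only: build a compact display label such as "QGIS Introduction 101"
# instead of surfacing "<project>_<course type>_<start>-<end>" everywhere.
def build_short_name(course_type: str, sequence: int | None = None) -> str:
    """Return a short label; dates and project can stay in the slug/identifier."""
    label = course_type.strip()
    if sequence is not None:
        label = f"{label} {sequence}"
    return label


if __name__ == "__main__":
    long_name = "XProject_QGIS Introduction_2017-07-05-2017-07-19"
    print(f"{long_name} -> {build_short_name('QGIS Introduction', 101)}")
```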
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/certification/models/certifying_organisation.py`
Content:
```
1 # coding=utf-8
2 """Certifying organisation model definitions for certification apps.
3
4 """
5
6 import os
7 from django.conf.global_settings import MEDIA_ROOT
8 from django.core.urlresolvers import reverse
9 from django.core.exceptions import ValidationError
10 from django.core.validators import validate_email
11 from django.db import models
12 from django.utils.text import slugify
13 from django.utils.translation import ugettext_lazy as _
14 from core.settings.contrib import STOP_WORDS
15 from unidecode import unidecode
16 from django.contrib.auth.models import User
17 from django_countries.fields import CountryField
18 import logging
19
20 logger = logging.getLogger(__name__)
21
22
23 class SlugifyingMixin(object):
24
25 class Meta:
26 abstract = True
27
28 def save(self, *args, **kwargs):
29 if not self.pk:
30 words = self.name.split()
31 filtered_words = [word for word in words if
32 word.lower() not in STOP_WORDS]
33 # unidecode() represents special characters (unicode data) in ASCII
34 new_list = unidecode(' '.join(filtered_words))
35 self.slug = slugify(new_list)[:50]
36 super(SlugifyingMixin, self).save(*args, **kwargs)
37
38
39 class ApprovedCertifyingOrganisationManager(models.Manager):
40 """Custom training centre manager.
41
42 Shows only approved certifying organisation.
43 """
44
45 def get_queryset(self):
46 """Query set generator. """
47
48 return super(
49 ApprovedCertifyingOrganisationManager, self).get_queryset().filter(
50 approved=True)
51
52
53 class UnapprovedCertifyingOrganisationManager(models.Manager):
54 """Custom training centre manager.
55
56 Shows only unapproved certifying organisation.
57 """
58
59 def get_queryset(self):
60 """Query set generator. """
61
62 return super(
63 UnapprovedCertifyingOrganisationManager, self).get_queryset(
64 ).filter(approved=False)
65
66
67 def validate_email_address(value):
68 try:
69 validate_email(value)
70 return True
71 except ValidationError(
72 _('%(value)s is not a valid email address'),
73 params={'value': value},):
74 return False
75
76
77 class CertifyingOrganisation(SlugifyingMixin, models.Model):
78 """Certifying organisation model."""
79
80 name = models.CharField(
81 help_text=_('name of organisation or institution'),
82 max_length=200,
83 null=False,
84 blank=False
85 )
86
87 organisation_email = models.CharField(
88 help_text=_('Email address organisation or institution.'),
89 max_length=200,
90 null=False,
91 blank=False,
92 validators=[validate_email_address],
93 )
94
95 address = models.TextField(
96 help_text=_('Address of Organisation or Institution.'),
97 max_length=1000,
98 null=False,
99 blank=False
100 )
101
102 logo = models.ImageField(
103 help_text=_('Logo for this organisation. '
104 'Most browsers support dragging the image directly on to '
105 'the "Choose File" button above.'),
106 upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'),
107 blank=True
108 )
109
110 country = CountryField(
111 help_text=_('Select the country for this Institution'),
112 null=True,
113 blank=True)
114
115 organisation_phone = models.CharField(
116 help_text=_('Phone number: (country code)(number) e.g. +6221551553'),
117 max_length=200,
118 null=False,
119 blank=False
120 )
121
122 approved = models.BooleanField(
123 help_text=_('Approval from project admin'),
124 default=False
125 )
126
127 enabled = models.BooleanField(
128 help_text=_('Project enabled'),
129 default=True
130 )
131
132 slug = models.SlugField()
133 organisation_owners = models.ManyToManyField(User)
134 project = models.ForeignKey('base.Project')
135 objects = models.Manager()
136 approved_objects = ApprovedCertifyingOrganisationManager()
137 unapproved_objects = UnapprovedCertifyingOrganisationManager()
138
139 # noinspection PyClassicStyleClass.
140 class Meta:
141 """Meta class for Course attendee."""
142
143 app_label = 'certification'
144 ordering = ['name']
145 unique_together = ['name', 'project']
146
147 def save(self, *args, **kwargs):
148 super(CertifyingOrganisation, self).save(*args, **kwargs)
149
150 def __unicode__(self):
151 return '%s - %s' % (self.project.name, self.name)
152
153 def get_absolute_url(self):
154 """Return URL to certifying organisation detail page.
155
156 :return: URL
157 :rtype: str
158 """
159 return reverse('certifying-organisation-detail', kwargs={
160 'slug': self.slug,
161 'project_slug': self.project.slug
162 })
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_project/certification/models/certifying_organisation.py b/django_project/certification/models/certifying_organisation.py
--- a/django_project/certification/models/certifying_organisation.py
+++ b/django_project/certification/models/certifying_organisation.py
@@ -74,7 +74,7 @@
return False
-class CertifyingOrganisation(SlugifyingMixin, models.Model):
+class CertifyingOrganisation(models.Model):
"""Certifying organisation model."""
name = models.CharField(
@@ -145,6 +145,15 @@
unique_together = ['name', 'project']
def save(self, *args, **kwargs):
+ if not self.pk:
+ words = self.name.split()
+ filtered_words = [word for word in words if
+ word.lower() not in STOP_WORDS]
+ # unidecode() represents special characters (unicode data) in ASCII
+ new_list = \
+ self.project.slug + ' ' + \
+ unidecode(' '.join(filtered_words))
+ self.slug = slugify(new_list)[:50]
super(CertifyingOrganisation, self).save(*args, **kwargs)
def __unicode__(self):
| {"golden_diff": "diff --git a/django_project/certification/models/certifying_organisation.py b/django_project/certification/models/certifying_organisation.py\n--- a/django_project/certification/models/certifying_organisation.py\n+++ b/django_project/certification/models/certifying_organisation.py\n@@ -74,7 +74,7 @@\n return False\n \n \n-class CertifyingOrganisation(SlugifyingMixin, models.Model):\n+class CertifyingOrganisation(models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n \n name = models.CharField(\n@@ -145,6 +145,15 @@\n unique_together = ['name', 'project']\n \n def save(self, *args, **kwargs):\n+ if not self.pk:\n+ words = self.name.split()\n+ filtered_words = [word for word in words if\n+ word.lower() not in STOP_WORDS]\n+ # unidecode() represents special characters (unicode data) in ASCII\n+ new_list = \\\n+ self.project.slug + ' ' + \\\n+ unidecode(' '.join(filtered_words))\n+ self.slug = slugify(new_list)[:50]\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n \n def __unicode__(self):\n", "issue": "Course name\nCurrently the course name is quite long, i.e. XProject_QGIS Introduction_2017-07-05-2017-07-19 \n\nWould it be better if we can have a shorter course name, i.e. QGIS Introduction 101?\nWhat do you think @timlinux ?\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Certifying organisation model definitions for certification apps.\n\n\"\"\"\n\nimport os\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom core.settings.contrib import STOP_WORDS\nfrom unidecode import unidecode\nfrom django.contrib.auth.models import User\nfrom django_countries.fields import CountryField\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlugifyingMixin(object):\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [word for word in words if\n word.lower() not in STOP_WORDS]\n # unidecode() represents special characters (unicode data) in ASCII\n new_list = unidecode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(SlugifyingMixin, self).save(*args, **kwargs)\n\n\nclass ApprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only approved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. \"\"\"\n\n return super(\n ApprovedCertifyingOrganisationManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only unapproved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. 
\"\"\"\n\n return super(\n UnapprovedCertifyingOrganisationManager, self).get_queryset(\n ).filter(approved=False)\n\n\ndef validate_email_address(value):\n try:\n validate_email(value)\n return True\n except ValidationError(\n _('%(value)s is not a valid email address'),\n params={'value': value},):\n return False\n\n\nclass CertifyingOrganisation(SlugifyingMixin, models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n\n name = models.CharField(\n help_text=_('name of organisation or institution'),\n max_length=200,\n null=False,\n blank=False\n )\n\n organisation_email = models.CharField(\n help_text=_('Email address organisation or institution.'),\n max_length=200,\n null=False,\n blank=False,\n validators=[validate_email_address],\n )\n\n address = models.TextField(\n help_text=_('Address of Organisation or Institution.'),\n max_length=1000,\n null=False,\n blank=False\n )\n\n logo = models.ImageField(\n help_text=_('Logo for this organisation. '\n 'Most browsers support dragging the image directly on to '\n 'the \"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'),\n blank=True\n )\n\n country = CountryField(\n help_text=_('Select the country for this Institution'),\n null=True,\n blank=True)\n\n organisation_phone = models.CharField(\n help_text=_('Phone number: (country code)(number) e.g. +6221551553'),\n max_length=200,\n null=False,\n blank=False\n )\n\n approved = models.BooleanField(\n help_text=_('Approval from project admin'),\n default=False\n )\n\n enabled = models.BooleanField(\n help_text=_('Project enabled'),\n default=True\n )\n\n slug = models.SlugField()\n organisation_owners = models.ManyToManyField(User)\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedCertifyingOrganisationManager()\n unapproved_objects = UnapprovedCertifyingOrganisationManager()\n\n # noinspection PyClassicStyleClass.\n class Meta:\n \"\"\"Meta class for Course attendee.\"\"\"\n\n app_label = 'certification'\n ordering = ['name']\n unique_together = ['name', 'project']\n\n def save(self, *args, **kwargs):\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return '%s - %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n \"\"\"Return URL to certifying organisation detail page.\n\n :return: URL\n :rtype: str\n \"\"\"\n return reverse('certifying-organisation-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n", "path": "django_project/certification/models/certifying_organisation.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"Certifying organisation model definitions for certification apps.\n\n\"\"\"\n\nimport os\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom core.settings.contrib import STOP_WORDS\nfrom unidecode import unidecode\nfrom django.contrib.auth.models import User\nfrom django_countries.fields import CountryField\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlugifyingMixin(object):\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [word for word in words if\n word.lower() not in STOP_WORDS]\n # unidecode() 
represents special characters (unicode data) in ASCII\n new_list = unidecode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(SlugifyingMixin, self).save(*args, **kwargs)\n\n\nclass ApprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only approved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. \"\"\"\n\n return super(\n ApprovedCertifyingOrganisationManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only unapproved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. \"\"\"\n\n return super(\n UnapprovedCertifyingOrganisationManager, self).get_queryset(\n ).filter(approved=False)\n\n\ndef validate_email_address(value):\n try:\n validate_email(value)\n return True\n except ValidationError(\n _('%(value)s is not a valid email address'),\n params={'value': value},):\n return False\n\n\nclass CertifyingOrganisation(models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n\n name = models.CharField(\n help_text=_('name of organisation or institution'),\n max_length=200,\n null=False,\n blank=False\n )\n\n organisation_email = models.CharField(\n help_text=_('Email address organisation or institution.'),\n max_length=200,\n null=False,\n blank=False,\n validators=[validate_email_address],\n )\n\n address = models.TextField(\n help_text=_('Address of Organisation or Institution.'),\n max_length=1000,\n null=False,\n blank=False\n )\n\n logo = models.ImageField(\n help_text=_('Logo for this organisation. '\n 'Most browsers support dragging the image directly on to '\n 'the \"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'),\n blank=True\n )\n\n country = CountryField(\n help_text=_('Select the country for this Institution'),\n null=True,\n blank=True)\n\n organisation_phone = models.CharField(\n help_text=_('Phone number: (country code)(number) e.g. 
+6221551553'),\n max_length=200,\n null=False,\n blank=False\n )\n\n approved = models.BooleanField(\n help_text=_('Approval from project admin'),\n default=False\n )\n\n enabled = models.BooleanField(\n help_text=_('Project enabled'),\n default=True\n )\n\n slug = models.SlugField()\n organisation_owners = models.ManyToManyField(User)\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedCertifyingOrganisationManager()\n unapproved_objects = UnapprovedCertifyingOrganisationManager()\n\n # noinspection PyClassicStyleClass.\n class Meta:\n \"\"\"Meta class for Course attendee.\"\"\"\n\n app_label = 'certification'\n ordering = ['name']\n unique_together = ['name', 'project']\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [word for word in words if\n word.lower() not in STOP_WORDS]\n # unidecode() represents special characters (unicode data) in ASCII\n new_list = \\\n self.project.slug + ' ' + \\\n unidecode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return '%s - %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n \"\"\"Return URL to certifying organisation detail page.\n\n :return: URL\n :rtype: str\n \"\"\"\n return reverse('certifying-organisation-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n", "path": "django_project/certification/models/certifying_organisation.py"}]} | 1,697 | 275 |
gh_patches_debug_14062 | rasdani/github-patches | git_diff | OCA__manufacture-130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[8.0][mrp_production_real_cost] Error when producing a product
Hi,
there's a new error from mrp_production_real_cost after pulling the latest commit:
```
ERROR demo1 openerp.sql_db: Programming error: can't adapt type 'mrp.production', in query SELECT "mrp_production"."id" FROM "mrp_production"
WHERE "mrp_production".id IN %s ORDER BY "mrp_production"."priority" DESC,"mrp_production"."date_planned" ASC
File "/opt/odoo/server/addons/mrp_production_real_cost/models/mrp_production.py", line 34, in action_production_end
self.mapped('move_created_ids2').filtered(
File "/usr/lib/python2.7/dist-packages/psycopg2/extensions.py", line 129, in getquoted
pobjs = [adapt(o) for o in self._seq]
ValueError: "can't adapt type 'mrp.production'" while evaluating
u'action_production_end()'
```
regards
--- END ISSUE ---
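For orientation, the traceback above shows `action_production_end` handing a whole `mrp.production` recordset to a query parameter. Below is a rough sketch of the per-record workaround — it mirrors the patch shown further down this record, so treat it as an illustration rather than the exact shipped code:

```python
# Sketch of the workaround implied by the traceback: iterate the recordset and
# refresh its cache before chaining mapped()/filtered(), so the ORM is never
# handed a recordset where psycopg2 expects plain ids.
# Field and method names are taken from the snippet quoted in the issue.
from openerp import api, models


class MrpProduction(models.Model):
    _inherit = 'mrp.production'

    @api.multi
    def action_production_end(self):
        res = super(MrpProduction, self).action_production_end()
        for production in self:
            # refresh avoids the stale-cache state that triggers the error
            production.sudo().refresh()
            production.mapped('move_created_ids2').filtered(
                lambda move: move.state == 'done'
            ).product_price_update_production_done()
        return res
```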
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mrp_production_real_cost/models/mrp_production.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # © 2014-2015 Avanzosc
3 # © 2014-2015 Pedro M. Baeza
4 # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
5
6 from openerp import api, fields, models
7
8
9 class MrpProduction(models.Model):
10 _inherit = 'mrp.production'
11
12 @api.multi
13 @api.depends('analytic_line_ids', 'analytic_line_ids.amount',
14 'product_qty')
15 def _compute_real_cost(self):
16 for production in self:
17 cost_lines = production.analytic_line_ids.filtered(
18 lambda l: l.amount < 0)
19 production.real_cost = -sum(cost_lines.mapped('amount'))
20 production.unit_real_cost = (
21 production.real_cost / production.product_qty)
22
23 analytic_line_ids = fields.One2many(
24 comodel_name="account.analytic.line", inverse_name="mrp_production_id",
25 string="Cost Lines")
26 real_cost = fields.Float(
27 "Total Real Cost", compute="_compute_real_cost", store=True)
28 unit_real_cost = fields.Float(
29 "Unit Real Cost", compute="_compute_real_cost", store=True)
30
31 @api.multi
32 def action_production_end(self):
33 res = super(MrpProduction, self).action_production_end()
34 self.mapped('move_created_ids2').filtered(
35 lambda l: l.state == 'done').product_price_update_production_done()
36 return res
37
38 @api.model
39 def _prepare_real_cost_analytic_line(
40 self, journal, name, production, product, general_account=None,
41 workorder=None, qty=1, amount=0):
42 """
43 Prepare the vals for creating an analytic entry for real cost
44 :param journal: Journal of the entry
45 :param name: Name of the entry
46 :param production: Origin product
47 :param product: Product for the entry
48 :param general_account: General account for the entry
49 :param workorder: Origin workorder
50 :param qty: Quantity for the entry. This quantity will multiply both
51 standard and average costs for the entry costs.
52 :param amount: Cost for calculating real cost.
53 :return: Dictionary with the analytic entry vals.
54 """
55 analytic_line_obj = self.env['account.analytic.line']
56 property_obj = self.env['ir.property']
57 general_account = (
58 general_account or product.property_account_expense or
59 product.categ_id.property_account_expense_categ or
60 property_obj.get('property_account_expense_categ',
61 'product.category'))
62 return {
63 'name': name,
64 'mrp_production_id': production.id,
65 'workorder': workorder and workorder.id or False,
66 'account_id': self.analytic_account_id.id,
67 'journal_id': journal.id,
68 'user_id': self.env.uid,
69 'date': analytic_line_obj._get_default_date(),
70 'product_id': product and product.id or False,
71 'unit_amount': qty,
72 'amount': amount,
73 'product_uom_id': product.uom_id.id,
74 'general_account_id': general_account.id,
75 }
76
77 @api.multi
78 def _costs_generate(self):
79 """
80 As we are generating the account_analytic_lines for MO in the
81 current module, we override this method in order to avoid
82 duplicates created in the parent class. Any other module
83 inheriting this method should take this into account!
84 """
85 return
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mrp_production_real_cost/models/mrp_production.py b/mrp_production_real_cost/models/mrp_production.py
--- a/mrp_production_real_cost/models/mrp_production.py
+++ b/mrp_production_real_cost/models/mrp_production.py
@@ -31,8 +31,15 @@
@api.multi
def action_production_end(self):
res = super(MrpProduction, self).action_production_end()
- self.mapped('move_created_ids2').filtered(
- lambda l: l.state == 'done').product_price_update_production_done()
+ for production in self:
+ # This is needed because commit
+ # https://github.com/odoo/odoo/commit/
+ # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9
+ # introduces a weird behavior on the next call, provoking an error.
+ production.sudo().refresh()
+ production.mapped('move_created_ids2').filtered(
+ lambda l: l.state == 'done'
+ ).product_price_update_production_done()
return res
@api.model
| {"golden_diff": "diff --git a/mrp_production_real_cost/models/mrp_production.py b/mrp_production_real_cost/models/mrp_production.py\n--- a/mrp_production_real_cost/models/mrp_production.py\n+++ b/mrp_production_real_cost/models/mrp_production.py\n@@ -31,8 +31,15 @@\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n- self.mapped('move_created_ids2').filtered(\n- lambda l: l.state == 'done').product_price_update_production_done()\n+ for production in self:\n+ # This is needed because commit\n+ # https://github.com/odoo/odoo/commit/\n+ # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9\n+ # introduces a weird behavior on the next call, provoking an error.\n+ production.sudo().refresh()\n+ production.mapped('move_created_ids2').filtered(\n+ lambda l: l.state == 'done'\n+ ).product_price_update_production_done()\n return res\n \n @api.model\n", "issue": "is:issue is:open [8.0][mrp_production_real_cost] Error when produce product\nHi,\n\nthere's new error from mrp_production_real_cost, after I do git pull from the last commit \n\n```\n\nERROR demo1 openerp.sql_db: Programming error: can't adapt type 'mrp.production', in query SELECT \"mrp_production\".\"id\" FROM \"mrp_production\"\n WHERE \"mrp_production\".id IN %s ORDER BY \"mrp_production\".\"priority\" DESC,\"mrp_production\".\"date_planned\" ASC \n\n File \"/opt/odoo/server/addons/mrp_production_real_cost/models/mrp_production.py\", line 34, in action_production_end\n self.mapped('move_created_ids2').filtered(\n\n\n File \"/usr/lib/python2.7/dist-packages/psycopg2/extensions.py\", line 129, in getquoted\n pobjs = [adapt(o) for o in self._seq]\nValueError: \"can't adapt type 'mrp.production'\" while evaluating\nu'action_production_end()'\n```\n\nregards\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2014-2015 Avanzosc\n# \u00a9 2014-2015 Pedro M. Baeza\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\n\nfrom openerp import api, fields, models\n\n\nclass MrpProduction(models.Model):\n _inherit = 'mrp.production'\n\n @api.multi\n @api.depends('analytic_line_ids', 'analytic_line_ids.amount',\n 'product_qty')\n def _compute_real_cost(self):\n for production in self:\n cost_lines = production.analytic_line_ids.filtered(\n lambda l: l.amount < 0)\n production.real_cost = -sum(cost_lines.mapped('amount'))\n production.unit_real_cost = (\n production.real_cost / production.product_qty)\n\n analytic_line_ids = fields.One2many(\n comodel_name=\"account.analytic.line\", inverse_name=\"mrp_production_id\",\n string=\"Cost Lines\")\n real_cost = fields.Float(\n \"Total Real Cost\", compute=\"_compute_real_cost\", store=True)\n unit_real_cost = fields.Float(\n \"Unit Real Cost\", compute=\"_compute_real_cost\", store=True)\n\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n self.mapped('move_created_ids2').filtered(\n lambda l: l.state == 'done').product_price_update_production_done()\n return res\n\n @api.model\n def _prepare_real_cost_analytic_line(\n self, journal, name, production, product, general_account=None,\n workorder=None, qty=1, amount=0):\n \"\"\"\n Prepare the vals for creating an analytic entry for real cost\n :param journal: Journal of the entry\n :param name: Name of the entry\n :param production: Origin product\n :param product: Product for the entry\n :param general_account: General account for the entry\n :param workorder: Origin workorder\n :param qty: Quantity for the entry. 
This quantity will multiply both\n standard and average costs for the entry costs.\n :param amount: Cost for calculating real cost.\n :return: Dictionary with the analytic entry vals.\n \"\"\"\n analytic_line_obj = self.env['account.analytic.line']\n property_obj = self.env['ir.property']\n general_account = (\n general_account or product.property_account_expense or\n product.categ_id.property_account_expense_categ or\n property_obj.get('property_account_expense_categ',\n 'product.category'))\n return {\n 'name': name,\n 'mrp_production_id': production.id,\n 'workorder': workorder and workorder.id or False,\n 'account_id': self.analytic_account_id.id,\n 'journal_id': journal.id,\n 'user_id': self.env.uid,\n 'date': analytic_line_obj._get_default_date(),\n 'product_id': product and product.id or False,\n 'unit_amount': qty,\n 'amount': amount,\n 'product_uom_id': product.uom_id.id,\n 'general_account_id': general_account.id,\n }\n\n @api.multi\n def _costs_generate(self):\n \"\"\"\n As we are generating the account_analytic_lines for MO in the\n current module, we override this method in order to avoid\n duplicates created in the parent class. Any other module\n inheriting this method should take this into account!\n \"\"\"\n return\n", "path": "mrp_production_real_cost/models/mrp_production.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2014-2015 Avanzosc\n# \u00a9 2014-2015 Pedro M. Baeza\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\n\nfrom openerp import api, fields, models\n\n\nclass MrpProduction(models.Model):\n _inherit = 'mrp.production'\n\n @api.multi\n @api.depends('analytic_line_ids', 'analytic_line_ids.amount',\n 'product_qty')\n def _compute_real_cost(self):\n for production in self:\n cost_lines = production.analytic_line_ids.filtered(\n lambda l: l.amount < 0)\n production.real_cost = -sum(cost_lines.mapped('amount'))\n production.unit_real_cost = (\n production.real_cost / production.product_qty)\n\n analytic_line_ids = fields.One2many(\n comodel_name=\"account.analytic.line\", inverse_name=\"mrp_production_id\",\n string=\"Cost Lines\")\n real_cost = fields.Float(\n \"Total Real Cost\", compute=\"_compute_real_cost\", store=True)\n unit_real_cost = fields.Float(\n \"Unit Real Cost\", compute=\"_compute_real_cost\", store=True)\n\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n for production in self:\n # This is needed because commit\n # https://github.com/odoo/odoo/commit/\n # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9\n # introduces a weird behavior on the next call, provoking an error.\n production.sudo().refresh()\n production.mapped('move_created_ids2').filtered(\n lambda l: l.state == 'done'\n ).product_price_update_production_done()\n return res\n\n @api.model\n def _prepare_real_cost_analytic_line(\n self, journal, name, production, product, general_account=None,\n workorder=None, qty=1, amount=0):\n \"\"\"\n Prepare the vals for creating an analytic entry for real cost\n :param journal: Journal of the entry\n :param name: Name of the entry\n :param production: Origin product\n :param product: Product for the entry\n :param general_account: General account for the entry\n :param workorder: Origin workorder\n :param qty: Quantity for the entry. 
This quantity will multiply both\n standard and average costs for the entry costs.\n :param amount: Cost for calculating real cost.\n :return: Dictionary with the analytic entry vals.\n \"\"\"\n analytic_line_obj = self.env['account.analytic.line']\n property_obj = self.env['ir.property']\n general_account = (\n general_account or product.property_account_expense or\n product.categ_id.property_account_expense_categ or\n property_obj.get('property_account_expense_categ',\n 'product.category'))\n return {\n 'name': name,\n 'mrp_production_id': production.id,\n 'workorder': workorder and workorder.id or False,\n 'account_id': self.analytic_account_id.id,\n 'journal_id': journal.id,\n 'user_id': self.env.uid,\n 'date': analytic_line_obj._get_default_date(),\n 'product_id': product and product.id or False,\n 'unit_amount': qty,\n 'amount': amount,\n 'product_uom_id': product.uom_id.id,\n 'general_account_id': general_account.id,\n }\n\n @api.multi\n def _costs_generate(self):\n \"\"\"\n As we are generating the account_analytic_lines for MO in the\n current module, we override this method in order to avoid\n duplicates created in the parent class. Any other module\n inheriting this method should take this into account!\n \"\"\"\n return\n", "path": "mrp_production_real_cost/models/mrp_production.py"}]} | 1,426 | 263 |
gh_patches_debug_22165 | rasdani/github-patches | git_diff | pydantic__pydantic-6431 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError in class attribute access for validate_call
### Initial Checks
- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent
### Description
https://github.com/pydantic/pydantic/pull/6406 introduced an AttributeError when accessing a method on a class (not instance):
```
draft @ pip-run 'git+https://github.com/pydantic/pydantic' -- pyd2.py
Traceback (most recent call last):
File "/Users/jaraco/draft/pyd2.py", line 14, in <module>
assert Thing.c == Thing.c
^^^^^^^
File "/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-o94_k6_k/pydantic/_internal/_validate_call.py", line 101, in __get__
setattr(obj, self._name, result)
AttributeError: 'NoneType' object has no attribute 'c'
```
### Example Code
```Python
from pydantic import validate_call
class Thing:
def a(self):
pass
c = validate_call(a)
thing = Thing()
assert thing.a == thing.a
assert thing.c == thing.c, f'{thing.c} != {thing.c}'
assert Thing.c == Thing.c
```
### Python, Pydantic & OS Version
```Text
draft @ pip-run 'git+https://github.com/pydantic/pydantic' -- -c 'import pydantic.version; print(pydantic.version.version_info())'
pydantic version: 2.0
pydantic-core version: 2.0.2 release build profile
install path: /private/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-hek15lsq/pydantic
python version: 3.11.4 (main, Jun 15 2023, 07:55:38) [Clang 14.0.3 (clang-1403.0.22.14.1)]
platform: macOS-13.4.1-arm64-arm-64bit
optional deps. installed: ['typing-extensions']
```
Selected Assignee: @Kludex
--- END ISSUE ---
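The failure mode generalizes to any caching descriptor: when an attribute is looked up on the class rather than an instance, `__get__` receives `obj=None`, so an unconditional `setattr(obj, ...)` fails. A minimal, dependency-free illustration (not pydantic's actual wrapper) of the guard that the patch below adds:

```python
# Minimal reproduction of the descriptor pitfall behind the traceback above:
# caching on the instance inside __get__ breaks for class-level access,
# because obj is None there. Guarding on obj keeps Thing.c usable.
class CachingDescriptor:
    def __init__(self, func):
        self._func = func

    def __set_name__(self, owner, name):
        self._name = name

    def __get__(self, obj, objtype=None):
        bound = self._func.__get__(obj, objtype)
        if obj is not None:   # instance access: safe to cache on the instance
            setattr(obj, self._name, bound)
        return bound          # class access: just return, never setattr(None, ...)


class Thing:
    def a(self):
        return "ok"

    c = CachingDescriptor(a)


assert Thing().c() == "ok"
assert callable(Thing.c)  # no AttributeError on class-level access
```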
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/_internal/_validate_call.py`
Content:
```
1 from __future__ import annotations as _annotations
2
3 import inspect
4 from dataclasses import dataclass
5 from functools import partial
6 from typing import Any, Callable
7
8 import pydantic_core
9
10 from ..config import ConfigDict
11 from . import _discriminated_union, _generate_schema, _typing_extra
12 from ._config import ConfigWrapper
13 from ._core_utils import flatten_schema_defs, inline_schema_defs
14
15
16 @dataclass
17 class CallMarker:
18 function: Callable[..., Any]
19 validate_return: bool
20
21
22 class ValidateCallWrapper:
23 """This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.
24
25 It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so
26 these functions can be applied to instance methods, class methods, static methods, as well as normal functions.
27 """
28
29 __slots__ = (
30 'raw_function',
31 '_config',
32 '_validate_return',
33 '__pydantic_core_schema__',
34 '__pydantic_validator__',
35 '__signature__',
36 '__name__',
37 '__qualname__',
38 '__annotations__',
39 '__dict__', # required for __module__
40 )
41
42 def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):
43 self.raw_function = function
44 self._config = config
45 self._validate_return = validate_return
46 self.__signature__ = inspect.signature(function)
47 if isinstance(function, partial):
48 func = function.func
49 self.__name__ = f'partial({func.__name__})'
50 self.__qualname__ = f'partial({func.__qualname__})'
51 self.__annotations__ = func.__annotations__
52 self.__module__ = func.__module__
53 self.__doc__ = func.__doc__
54 else:
55 self.__name__ = function.__name__
56 self.__qualname__ = function.__qualname__
57 self.__annotations__ = function.__annotations__
58 self.__module__ = function.__module__
59 self.__doc__ = function.__doc__
60
61 namespace = _typing_extra.add_module_globals(function, None)
62 config_wrapper = ConfigWrapper(config)
63 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)
64 self.__pydantic_core_schema__ = schema = gen_schema.collect_definitions(gen_schema.generate_schema(function))
65 core_config = config_wrapper.core_config(self)
66 schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))
67 simplified_schema = inline_schema_defs(schema)
68 self.__pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)
69
70 if self._validate_return:
71 return_type = (
72 self.__signature__.return_annotation
73 if self.__signature__.return_annotation is not self.__signature__.empty
74 else Any
75 )
76 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)
77 self.__return_pydantic_core_schema__ = schema = gen_schema.collect_definitions(
78 gen_schema.generate_schema(return_type)
79 )
80 core_config = config_wrapper.core_config(self)
81 schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))
82 simplified_schema = inline_schema_defs(schema)
83 self.__return_pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)
84 else:
85 self.__return_pydantic_core_schema__ = None
86 self.__return_pydantic_validator__ = None
87
88 self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods
89
90 def __call__(self, *args: Any, **kwargs: Any) -> Any:
91 res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))
92 if self.__return_pydantic_validator__:
93 return self.__return_pydantic_validator__.validate_python(res)
94 return res
95
96 def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:
97 """Bind the raw function and return another ValidateCallWrapper wrapping that."""
98 bound_function = self.raw_function.__get__(obj, objtype)
99 result = self.__class__(bound_function, self._config, self._validate_return)
100 if self._name is not None:
101 setattr(obj, self._name, result)
102 return result
103
104 def __set_name__(self, owner: Any, name: str) -> None:
105 self._name = name
106
107 def __repr__(self) -> str:
108 return f'ValidateCallWrapper({self.raw_function})'
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py
--- a/pydantic/_internal/_validate_call.py
+++ b/pydantic/_internal/_validate_call.py
@@ -95,10 +95,21 @@
def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:
"""Bind the raw function and return another ValidateCallWrapper wrapping that."""
+ if obj is None:
+ try:
+ # Handle the case where a method is accessed as a class attribute
+ return objtype.__getattribute__(objtype, self._name) # type: ignore
+ except AttributeError:
+ # This will happen the first time the attribute is accessed
+ pass
+
bound_function = self.raw_function.__get__(obj, objtype)
result = self.__class__(bound_function, self._config, self._validate_return)
if self._name is not None:
- setattr(obj, self._name, result)
+ if obj is not None:
+ setattr(obj, self._name, result)
+ else:
+ setattr(objtype, self._name, result)
return result
def __set_name__(self, owner: Any, name: str) -> None:
| {"golden_diff": "diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py\n--- a/pydantic/_internal/_validate_call.py\n+++ b/pydantic/_internal/_validate_call.py\n@@ -95,10 +95,21 @@\n \n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n+ if obj is None:\n+ try:\n+ # Handle the case where a method is accessed as a class attribute\n+ return objtype.__getattribute__(objtype, self._name) # type: ignore\n+ except AttributeError:\n+ # This will happen the first time the attribute is accessed\n+ pass\n+\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n if self._name is not None:\n- setattr(obj, self._name, result)\n+ if obj is not None:\n+ setattr(obj, self._name, result)\n+ else:\n+ setattr(objtype, self._name, result)\n return result\n \n def __set_name__(self, owner: Any, name: str) -> None:\n", "issue": "AttributeError in class attribute access for validate_call\n### Initial Checks\n\n- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent\n\n### Description\n\nhttps://github.com/pydantic/pydantic/pull/6406 introduced an AttributeError when accessing a method on a class (not instance):\r\n\r\n```\r\n draft @ pip-run 'git+https://github.com/pydantic/pydantic' -- pyd2.py\r\nTraceback (most recent call last):\r\n File \"/Users/jaraco/draft/pyd2.py\", line 14, in <module>\r\n assert Thing.c == Thing.c\r\n ^^^^^^^\r\n File \"/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-o94_k6_k/pydantic/_internal/_validate_call.py\", line 101, in __get__\r\n setattr(obj, self._name, result)\r\nAttributeError: 'NoneType' object has no attribute 'c'\r\n```\n\n### Example Code\n\n```Python\nfrom pydantic import validate_call\r\n\r\n\r\nclass Thing:\r\n def a(self):\r\n pass\r\n\r\n c = validate_call(a)\r\n\r\n\r\nthing = Thing()\r\nassert thing.a == thing.a\r\nassert thing.c == thing.c, f'{thing.c} != {thing.c}'\r\nassert Thing.c == Thing.c\n```\n\n\n### Python, Pydantic & OS Version\n\n```Text\ndraft @ pip-run 'git+https://github.com/pydantic/pydantic' -- -c 'import pydantic.version; print(pydantic.version.version_info())'\r\n pydantic version: 2.0\r\n pydantic-core version: 2.0.2 release build profile\r\n install path: /private/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-hek15lsq/pydantic\r\n python version: 3.11.4 (main, Jun 15 2023, 07:55:38) [Clang 14.0.3 (clang-1403.0.22.14.1)]\r\n platform: macOS-13.4.1-arm64-arm-64bit\r\n optional deps. installed: ['typing-extensions']\n```\n\n\nSelected Assignee: @Kludex\n", "before_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom . 
import _discriminated_union, _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\nfrom ._core_utils import flatten_schema_defs, inline_schema_defs\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n self.__pydantic_core_schema__ = schema = gen_schema.collect_definitions(gen_schema.generate_schema(function))\n core_config = config_wrapper.core_config(self)\n schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))\n simplified_schema = inline_schema_defs(schema)\n self.__pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n self.__return_pydantic_core_schema__ = schema = gen_schema.collect_definitions(\n gen_schema.generate_schema(return_type)\n )\n core_config = config_wrapper.core_config(self)\n schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))\n simplified_schema = inline_schema_defs(schema)\n self.__return_pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)\n else:\n self.__return_pydantic_core_schema__ = None\n self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__.validate_python(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n bound_function = self.raw_function.__get__(obj, 
objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n if self._name is not None:\n setattr(obj, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n", "path": "pydantic/_internal/_validate_call.py"}], "after_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom . import _discriminated_union, _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\nfrom ._core_utils import flatten_schema_defs, inline_schema_defs\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n self.__pydantic_core_schema__ = schema = gen_schema.collect_definitions(gen_schema.generate_schema(function))\n core_config = config_wrapper.core_config(self)\n schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))\n simplified_schema = inline_schema_defs(schema)\n self.__pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n self.__return_pydantic_core_schema__ = schema = gen_schema.collect_definitions(\n gen_schema.generate_schema(return_type)\n )\n core_config = config_wrapper.core_config(self)\n schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))\n simplified_schema = inline_schema_defs(schema)\n self.__return_pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)\n else:\n self.__return_pydantic_core_schema__ = None\n 
self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__.validate_python(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n if obj is None:\n try:\n # Handle the case where a method is accessed as a class attribute\n return objtype.__getattribute__(objtype, self._name) # type: ignore\n except AttributeError:\n # This will happen the first time the attribute is accessed\n pass\n\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n if self._name is not None:\n if obj is not None:\n setattr(obj, self._name, result)\n else:\n setattr(objtype, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n", "path": "pydantic/_internal/_validate_call.py"}]} | 2,009 | 290 |
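
A minimal sketch of the behaviour this record patches, adapted from the record's own issue text: accessing a `validate_call`-wrapped method through the class (rather than an instance) used to raise `AttributeError`, because `__get__` unconditionally ran `setattr(obj, ...)` with `obj is None`; the patched wrapper caches on the class instead. This assumes a pydantic v2 install that already contains the patched `_validate_call.py`.

```python
from pydantic import validate_call


class Thing:
    def a(self):
        pass

    c = validate_call(a)  # the method is wrapped at class-definition time


thing = Thing()
assert thing.a == thing.a  # plain method access is unaffected
assert thing.c == thing.c  # instance access: the wrapper is cached on the instance
assert Thing.c == Thing.c  # class access: raised AttributeError before the patch,
                           # because __get__ attempted setattr(None, 'c', ...)
```
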
gh_patches_debug_27704 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-4761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
junos_package.py: package_version undefined
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
junos_package
##### ANSIBLE VERSION
devel
##### CONFIGURATION
##### OS / ENVIRONMENT
##### SUMMARY
https://github.com/ansible/ansible-modules-core/blame/devel/network/junos/junos_package.py#L141
`wants_ver = module.params['version'] or package_version(module)`
I can't find anywhere in the `ansible/ansible` code base where `package_version` is defined
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `network/junos/junos_package.py`
Content:
```
1 #!/usr/bin/python
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 #
18
19 DOCUMENTATION = """
20 ---
21 module: junos_package
22 version_added: "2.1"
23 author: "Peter Sprygada (@privateip)"
24 short_description: Installs packages on remote devices running Junos
25 description:
26 - This module can install new and updated packages on remote
27 devices running Junos. The module will compare the specified
28 package with the one running on the remote device and install
29 the specified version if there is a mismatch
30 extends_documentation_fragment: junos
31 options:
32 src:
33 description:
34 - The I(src) argument specifies the path to the source package to be
35 installed on the remote device in the advent of a version mismatch.
36 The I(src) argument can be either a localized path or a full
37 path to the package file to install.
38 required: true
39 default: null
40 aliases: ['package']
41 version:
42 description:
43 - The I(version) argument can be used to explicitly specify the
44 version of the package that should be installed on the remote
45 device. If the I(version) argument is not specified, then
46 the version is extracts from the I(src) filename.
47 required: false
48 default: null
49 reboot:
50 description:
51 - In order for a package to take effect, the remote device must be
52 restarted. When enabled, this argument will instruct the module
53 to reboot the device once the updated package has been installed.
54 If disabled or the remote package does not need to be changed,
55 the device will not be started.
56 required: true
57 default: true
58 choices: ['true', 'false']
59 no_copy:
60 description:
61 - The I(no_copy) argument is responsible for instructing the remote
62 device on where to install the package from. When enabled, the
63 package is transferred to the remote device prior to installing.
64 required: false
65 default: false
66 choices: ['true', 'false']
67 force:
68 description:
69 - The I(force) argument instructs the module to bypass the package
70 version check and install the packaged identified in I(src) on
71 the remote device.
72 required: true
73 default: false
74 choices: ['true', 'false']
75 requirements:
76 - junos-eznc
77 notes:
78 - This module requires the netconf system service be enabled on
79 the remote device being managed
80 """
81
82 EXAMPLES = """
83 # the required set of connection arguments have been purposely left off
84 # the examples for brevity
85
86 - name: install local package on remote device
87 junos_package:
88 src: junos-vsrx-12.1X46-D10.2-domestic.tgz
89
90 - name: install local package on remote device without rebooting
91 junos_package:
92 src: junos-vsrx-12.1X46-D10.2-domestic.tgz
93 reboot: no
94 """
95
96 try:
97 from jnpr.junos.utils.sw import SW
98 HAS_SW = True
99 except ImportError:
100 HAS_SW = False
101
102 def install_package(module):
103 junos = SW(module.connection.device)
104 package = module.params['src']
105 no_copy = module.params['no_copy']
106
107 progress_log = lambda x, y: module.log(y)
108
109 module.log('installing package')
110 result = junos.install(package, progress=progress_log, no_copy=no_copy)
111
112 if not result:
113 module.fail_json(msg='Unable to install package on device')
114
115 if module.params['reboot']:
116 module.log('rebooting system')
117 junos.reboot()
118
119
120 def main():
121 spec = dict(
122 src=dict(type='path', required=True, aliases=['package']),
123 version=dict(),
124 reboot=dict(type='bool', default=True),
125 no_copy=dict(default=False, type='bool'),
126 force=dict(type='bool', default=False),
127 transport=dict(default='netconf', choices=['netconf'])
128 )
129
130 module = get_module(argument_spec=spec,
131 supports_check_mode=True)
132
133 if not HAS_SW:
134 module.fail_json(msg='Missing jnpr.junos.utils.sw module')
135
136 result = dict(changed=False)
137
138 do_upgrade = module.params['force'] or False
139 if not module.params['force']:
140 has_ver = module.get_facts().get('version')
141 wants_ver = module.params['version'] or package_version(module)
142 do_upgrade = has_ver != wants_ver
143
144 if do_upgrade:
145 if not module.check_mode:
146 install_package(module)
147 result['changed'] = True
148
149 module.exit_json(**result)
150
151 from ansible.module_utils.basic import *
152 from ansible.module_utils.junos import *
153
154 if __name__ == '__main__':
155 main()
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/network/junos/junos_package.py b/network/junos/junos_package.py
--- a/network/junos/junos_package.py
+++ b/network/junos/junos_package.py
@@ -92,6 +92,7 @@
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
reboot: no
"""
+from ansible.module_utils.junos import NetworkModule
try:
from jnpr.junos.utils.sw import SW
@@ -127,8 +128,8 @@
transport=dict(default='netconf', choices=['netconf'])
)
- module = get_module(argument_spec=spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=spec,
+ supports_check_mode=True)
if not HAS_SW:
module.fail_json(msg='Missing jnpr.junos.utils.sw module')
@@ -137,8 +138,8 @@
do_upgrade = module.params['force'] or False
if not module.params['force']:
- has_ver = module.get_facts().get('version')
- wants_ver = module.params['version'] or package_version(module)
+ has_ver = module.connection.get_facts().get('version')
+ wants_ver = module.params['version']
do_upgrade = has_ver != wants_ver
if do_upgrade:
@@ -148,8 +149,6 @@
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
| {"golden_diff": "diff --git a/network/junos/junos_package.py b/network/junos/junos_package.py\n--- a/network/junos/junos_package.py\n+++ b/network/junos/junos_package.py\n@@ -92,6 +92,7 @@\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n reboot: no\n \"\"\"\n+from ansible.module_utils.junos import NetworkModule\n \n try:\n from jnpr.junos.utils.sw import SW\n@@ -127,8 +128,8 @@\n transport=dict(default='netconf', choices=['netconf'])\n )\n \n- module = get_module(argument_spec=spec,\n- supports_check_mode=True)\n+ module = NetworkModule(argument_spec=spec,\n+ supports_check_mode=True)\n \n if not HAS_SW:\n module.fail_json(msg='Missing jnpr.junos.utils.sw module')\n@@ -137,8 +138,8 @@\n \n do_upgrade = module.params['force'] or False\n if not module.params['force']:\n- has_ver = module.get_facts().get('version')\n- wants_ver = module.params['version'] or package_version(module)\n+ has_ver = module.connection.get_facts().get('version')\n+ wants_ver = module.params['version']\n do_upgrade = has_ver != wants_ver\n \n if do_upgrade:\n@@ -148,8 +149,6 @@\n \n module.exit_json(**result)\n \n-from ansible.module_utils.basic import *\n-from ansible.module_utils.junos import *\n \n if __name__ == '__main__':\n main()\n", "issue": "junos_package.py: package_version undefined\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\njunos_package\n##### ANSIBLE VERSION\n\ndevel\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n##### SUMMARY\n\nhttps://github.com/ansible/ansible-modules-core/blame/devel/network/junos/junos_package.py#L141\n\n`wants_ver = module.params['version'] or package_version(module)`\n\nI can't find anywhere in the `ansible/ansible` code base where `package_version` is defined\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: junos_package\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Installs packages on remote devices running Junos\ndescription:\n - This module can install new and updated packages on remote\n devices running Junos. The module will compare the specified\n package with the one running on the remote device and install\n the specified version if there is a mismatch\nextends_documentation_fragment: junos\noptions:\n src:\n description:\n - The I(src) argument specifies the path to the source package to be\n installed on the remote device in the advent of a version mismatch.\n The I(src) argument can be either a localized path or a full\n path to the package file to install.\n required: true\n default: null\n aliases: ['package']\n version:\n description:\n - The I(version) argument can be used to explicitly specify the\n version of the package that should be installed on the remote\n device. 
If the I(version) argument is not specified, then\n the version is extracts from the I(src) filename.\n required: false\n default: null\n reboot:\n description:\n - In order for a package to take effect, the remote device must be\n restarted. When enabled, this argument will instruct the module\n to reboot the device once the updated package has been installed.\n If disabled or the remote package does not need to be changed,\n the device will not be started.\n required: true\n default: true\n choices: ['true', 'false']\n no_copy:\n description:\n - The I(no_copy) argument is responsible for instructing the remote\n device on where to install the package from. When enabled, the\n package is transferred to the remote device prior to installing.\n required: false\n default: false\n choices: ['true', 'false']\n force:\n description:\n - The I(force) argument instructs the module to bypass the package\n version check and install the packaged identified in I(src) on\n the remote device.\n required: true\n default: false\n choices: ['true', 'false']\nrequirements:\n - junos-eznc\nnotes:\n - This module requires the netconf system service be enabled on\n the remote device being managed\n\"\"\"\n\nEXAMPLES = \"\"\"\n# the required set of connection arguments have been purposely left off\n# the examples for brevity\n\n- name: install local package on remote device\n junos_package:\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n\n- name: install local package on remote device without rebooting\n junos_package:\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n reboot: no\n\"\"\"\n\ntry:\n from jnpr.junos.utils.sw import SW\n HAS_SW = True\nexcept ImportError:\n HAS_SW = False\n\ndef install_package(module):\n junos = SW(module.connection.device)\n package = module.params['src']\n no_copy = module.params['no_copy']\n\n progress_log = lambda x, y: module.log(y)\n\n module.log('installing package')\n result = junos.install(package, progress=progress_log, no_copy=no_copy)\n\n if not result:\n module.fail_json(msg='Unable to install package on device')\n\n if module.params['reboot']:\n module.log('rebooting system')\n junos.reboot()\n\n\ndef main():\n spec = dict(\n src=dict(type='path', required=True, aliases=['package']),\n version=dict(),\n reboot=dict(type='bool', default=True),\n no_copy=dict(default=False, type='bool'),\n force=dict(type='bool', default=False),\n transport=dict(default='netconf', choices=['netconf'])\n )\n\n module = get_module(argument_spec=spec,\n supports_check_mode=True)\n\n if not HAS_SW:\n module.fail_json(msg='Missing jnpr.junos.utils.sw module')\n\n result = dict(changed=False)\n\n do_upgrade = module.params['force'] or False\n if not module.params['force']:\n has_ver = module.get_facts().get('version')\n wants_ver = module.params['version'] or package_version(module)\n do_upgrade = has_ver != wants_ver\n\n if do_upgrade:\n if not module.check_mode:\n install_package(module)\n result['changed'] = True\n\n module.exit_json(**result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.junos import *\n\nif __name__ == '__main__':\n main()\n", "path": "network/junos/junos_package.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will 
be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: junos_package\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Installs packages on remote devices running Junos\ndescription:\n - This module can install new and updated packages on remote\n devices running Junos. The module will compare the specified\n package with the one running on the remote device and install\n the specified version if there is a mismatch\nextends_documentation_fragment: junos\noptions:\n src:\n description:\n - The I(src) argument specifies the path to the source package to be\n installed on the remote device in the advent of a version mismatch.\n The I(src) argument can be either a localized path or a full\n path to the package file to install.\n required: true\n default: null\n aliases: ['package']\n version:\n description:\n - The I(version) argument can be used to explicitly specify the\n version of the package that should be installed on the remote\n device. If the I(version) argument is not specified, then\n the version is extracts from the I(src) filename.\n required: false\n default: null\n reboot:\n description:\n - In order for a package to take effect, the remote device must be\n restarted. When enabled, this argument will instruct the module\n to reboot the device once the updated package has been installed.\n If disabled or the remote package does not need to be changed,\n the device will not be started.\n required: true\n default: true\n choices: ['true', 'false']\n no_copy:\n description:\n - The I(no_copy) argument is responsible for instructing the remote\n device on where to install the package from. 
When enabled, the\n package is transferred to the remote device prior to installing.\n required: false\n default: false\n choices: ['true', 'false']\n force:\n description:\n - The I(force) argument instructs the module to bypass the package\n version check and install the packaged identified in I(src) on\n the remote device.\n required: true\n default: false\n choices: ['true', 'false']\nrequirements:\n - junos-eznc\nnotes:\n - This module requires the netconf system service be enabled on\n the remote device being managed\n\"\"\"\n\nEXAMPLES = \"\"\"\n# the required set of connection arguments have been purposely left off\n# the examples for brevity\n\n- name: install local package on remote device\n junos_package:\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n\n- name: install local package on remote device without rebooting\n junos_package:\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n reboot: no\n\"\"\"\nfrom ansible.module_utils.junos import NetworkModule\n\ntry:\n from jnpr.junos.utils.sw import SW\n HAS_SW = True\nexcept ImportError:\n HAS_SW = False\n\ndef install_package(module):\n junos = SW(module.connection.device)\n package = module.params['src']\n no_copy = module.params['no_copy']\n\n progress_log = lambda x, y: module.log(y)\n\n module.log('installing package')\n result = junos.install(package, progress=progress_log, no_copy=no_copy)\n\n if not result:\n module.fail_json(msg='Unable to install package on device')\n\n if module.params['reboot']:\n module.log('rebooting system')\n junos.reboot()\n\n\ndef main():\n spec = dict(\n src=dict(type='path', required=True, aliases=['package']),\n version=dict(),\n reboot=dict(type='bool', default=True),\n no_copy=dict(default=False, type='bool'),\n force=dict(type='bool', default=False),\n transport=dict(default='netconf', choices=['netconf'])\n )\n\n module = NetworkModule(argument_spec=spec,\n supports_check_mode=True)\n\n if not HAS_SW:\n module.fail_json(msg='Missing jnpr.junos.utils.sw module')\n\n result = dict(changed=False)\n\n do_upgrade = module.params['force'] or False\n if not module.params['force']:\n has_ver = module.connection.get_facts().get('version')\n wants_ver = module.params['version']\n do_upgrade = has_ver != wants_ver\n\n if do_upgrade:\n if not module.check_mode:\n install_package(module)\n result['changed'] = True\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "network/junos/junos_package.py"}]} | 1,909 | 351 |
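
The patch above drops the call to the undefined `package_version()` and reads device facts through `module.connection`. Below is a self-contained sketch of just the corrected upgrade decision; the `params` and `facts` dictionaries are stand-ins for `module.params` and `module.connection.get_facts()` (not real junos-eznc objects), and the version strings are illustrative.

```python
def needs_upgrade(params, facts):
    """Mirrors the patched logic: wants_ver comes from params['version'] only."""
    if params.get('force'):
        return True
    has_ver = facts.get('version')      # module.connection.get_facts().get('version')
    wants_ver = params.get('version')   # module.params['version'], no package_version() fallback
    return has_ver != wants_ver


assert needs_upgrade({'force': True}, {'version': '12.1X46-D10.2'})
assert needs_upgrade({'version': '15.1X49-D45'}, {'version': '12.1X46-D10.2'})
assert not needs_upgrade({'version': '12.1X46-D10.2'}, {'version': '12.1X46-D10.2'})
```
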
gh_patches_debug_4786 | rasdani/github-patches | git_diff | jazzband__pip-tools-314 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip-compile loses `via` with pip 8
```
$ echo jinja2 > req
$ pip-compile --version
pip-compile, version 1.5
$ pip --version
pip 7.1.2 from <snip>lib/python2.7/site-packages (python 2.7)
pip-compile req
#
# This file is autogenerated by pip-compile
# Make changes in req, then run this to update:
#
# pip-compile req
#
jinja2==2.8
markupsafe==0.23 # via jinja2
$ pip install -U pip
<snip>
$ pip --version
pip 8.0.2 from <snip>lib/python2.7/site-packages (python 2.7)
pip-compile req
#
# This file is autogenerated by pip-compile
# Make changes in req, then run this to update:
#
# pip-compile req
#
jinja2==2.8
MarkupSafe==0.23
```
note the missing `via jinja2` for pip 8
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/writer.py`
Content:
```
1 import os
2 from os.path import basename
3
4 from ._compat import ExitStack
5 from .click import unstyle
6 from .io import AtomicSaver
7 from .logging import log
8 from .utils import comment, format_requirement
9
10
11 class OutputWriter(object):
12 def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate,
13 default_index_url, index_urls):
14 self.src_file = src_file
15 self.dst_file = dst_file
16 self.dry_run = dry_run
17 self.emit_header = emit_header
18 self.emit_index = emit_index
19 self.annotate = annotate
20 self.default_index_url = default_index_url
21 self.index_urls = index_urls
22
23 def _sort_key(self, ireq):
24 return (not ireq.editable, str(ireq.req).lower())
25
26 def write_header(self):
27 if self.emit_header:
28 yield comment('#')
29 yield comment('# This file is autogenerated by pip-compile')
30 yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file)))
31 yield comment('#')
32 args = ''
33 if not self.emit_index:
34 args += '--no-index '
35 if not self.annotate:
36 args += '--no-annotate '
37 yield comment('# pip-compile {args}{filename}'.format(
38 args=args,
39 filename=basename(self.src_file)))
40 yield comment('#')
41
42 def write_index_options(self):
43 if self.emit_index:
44 emitted = False
45 for index, index_url in enumerate(self.index_urls):
46 if index_url.rstrip('/') == self.default_index_url:
47 continue
48 flag = '--index-url' if index == 0 else '--extra-index-url'
49 yield '{} {}'.format(flag, index_url)
50 emitted = True
51 if emitted:
52 yield '' # extra line of whitespace
53
54 def _iter_lines(self, results, reverse_dependencies, primary_packages):
55 for line in self.write_header():
56 yield line
57 for line in self.write_index_options():
58 yield line
59
60 UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'}
61 unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES}
62 packages = {r for r in results if r.name not in UNSAFE_PACKAGES}
63
64 packages = sorted(packages, key=self._sort_key)
65 unsafe_packages = sorted(unsafe_packages, key=self._sort_key)
66
67 for ireq in packages:
68 line = self._format_requirement(ireq, reverse_dependencies, primary_packages)
69 yield line
70
71 if unsafe_packages:
72 yield ''
73 yield comment('# The following packages are commented out because they are')
74 yield comment('# considered to be unsafe in a requirements file:')
75
76 for ireq in unsafe_packages:
77 line = self._format_requirement(ireq, reverse_dependencies, primary_packages, include_specifier=False)
78 yield comment('# ' + line)
79
80 def write(self, results, reverse_dependencies, primary_packages):
81 with ExitStack() as stack:
82 f = None
83 if not self.dry_run:
84 f = stack.enter_context(AtomicSaver(self.dst_file))
85
86 for line in self._iter_lines(results, reverse_dependencies, primary_packages):
87 log.info(line)
88 if f:
89 f.write(unstyle(line).encode('utf-8'))
90 f.write(os.linesep.encode('utf-8'))
91
92 def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True):
93 line = format_requirement(ireq, include_specifier=include_specifier)
94 if not self.annotate or ireq.name in primary_packages:
95 return line
96
97 # Annotate what packages this package is required by
98 required_by = reverse_dependencies.get(ireq.name, [])
99 if required_by:
100 line = line.ljust(24)
101 annotation = ', '.join(sorted(required_by))
102 line += comment(' # via ' + annotation)
103 return line
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/piptools/writer.py b/piptools/writer.py
--- a/piptools/writer.py
+++ b/piptools/writer.py
@@ -95,7 +95,7 @@
return line
# Annotate what packages this package is required by
- required_by = reverse_dependencies.get(ireq.name, [])
+ required_by = reverse_dependencies.get(ireq.name.lower(), [])
if required_by:
line = line.ljust(24)
annotation = ', '.join(sorted(required_by))
| {"golden_diff": "diff --git a/piptools/writer.py b/piptools/writer.py\n--- a/piptools/writer.py\n+++ b/piptools/writer.py\n@@ -95,7 +95,7 @@\n return line\n \n # Annotate what packages this package is required by\n- required_by = reverse_dependencies.get(ireq.name, [])\n+ required_by = reverse_dependencies.get(ireq.name.lower(), [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n", "issue": "pip-compile looses `via` with pip 8\n```\n$ echo jinja2 > req\n$ pip-compile --version\npip-compile, version 1.5\n$ pip --version\npip 7.1.2 from <snip>lib/python2.7/site-packages (python 2.7)\n\npip-compile req\n#\n# This file is autogenerated by pip-compile\n# Make changes in req, then run this to update:\n#\n# pip-compile req\n#\n\njinja2==2.8\nmarkupsafe==0.23 # via jinja2\n\n$ pip install -U pip\n<snip>\n\n$ pip --version\npip 8.0.2 from <snip>lib/python2.7/site-packages (python 2.7)\n\npip-compile req\n#\n# This file is autogenerated by pip-compile\n# Make changes in req, then run this to update:\n#\n# pip-compile req\n#\n\njinja2==2.8\nMarkupSafe==0.23\n```\n\nnote the missing `via jinja2` for pip 8\n\n", "before_files": [{"content": "import os\nfrom os.path import basename\n\nfrom ._compat import ExitStack\nfrom .click import unstyle\nfrom .io import AtomicSaver\nfrom .logging import log\nfrom .utils import comment, format_requirement\n\n\nclass OutputWriter(object):\n def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate,\n default_index_url, index_urls):\n self.src_file = src_file\n self.dst_file = dst_file\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index = emit_index\n self.annotate = annotate\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n\n def _sort_key(self, ireq):\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self):\n if self.emit_header:\n yield comment('#')\n yield comment('# This file is autogenerated by pip-compile')\n yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file)))\n yield comment('#')\n args = ''\n if not self.emit_index:\n args += '--no-index '\n if not self.annotate:\n args += '--no-annotate '\n yield comment('# pip-compile {args}{filename}'.format(\n args=args,\n filename=basename(self.src_file)))\n yield comment('#')\n\n def write_index_options(self):\n if self.emit_index:\n emitted = False\n for index, index_url in enumerate(self.index_urls):\n if index_url.rstrip('/') == self.default_index_url:\n continue\n flag = '--index-url' if index == 0 else '--extra-index-url'\n yield '{} {}'.format(flag, index_url)\n emitted = True\n if emitted:\n yield '' # extra line of whitespace\n\n def _iter_lines(self, results, reverse_dependencies, primary_packages):\n for line in self.write_header():\n yield line\n for line in self.write_index_options():\n yield line\n\n UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'}\n unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES}\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n packages = sorted(packages, key=self._sort_key)\n unsafe_packages = sorted(unsafe_packages, key=self._sort_key)\n\n for ireq in packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages)\n yield line\n\n if unsafe_packages:\n yield ''\n yield comment('# The following packages are commented out because they are')\n yield comment('# considered to be unsafe in a requirements file:')\n\n for ireq in 
unsafe_packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages, include_specifier=False)\n yield comment('# ' + line)\n\n def write(self, results, reverse_dependencies, primary_packages):\n with ExitStack() as stack:\n f = None\n if not self.dry_run:\n f = stack.enter_context(AtomicSaver(self.dst_file))\n\n for line in self._iter_lines(results, reverse_dependencies, primary_packages):\n log.info(line)\n if f:\n f.write(unstyle(line).encode('utf-8'))\n f.write(os.linesep.encode('utf-8'))\n\n def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True):\n line = format_requirement(ireq, include_specifier=include_specifier)\n if not self.annotate or ireq.name in primary_packages:\n return line\n\n # Annotate what packages this package is required by\n required_by = reverse_dependencies.get(ireq.name, [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n line += comment(' # via ' + annotation)\n return line\n", "path": "piptools/writer.py"}], "after_files": [{"content": "import os\nfrom os.path import basename\n\nfrom ._compat import ExitStack\nfrom .click import unstyle\nfrom .io import AtomicSaver\nfrom .logging import log\nfrom .utils import comment, format_requirement\n\n\nclass OutputWriter(object):\n def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate,\n default_index_url, index_urls):\n self.src_file = src_file\n self.dst_file = dst_file\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index = emit_index\n self.annotate = annotate\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n\n def _sort_key(self, ireq):\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self):\n if self.emit_header:\n yield comment('#')\n yield comment('# This file is autogenerated by pip-compile')\n yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file)))\n yield comment('#')\n args = ''\n if not self.emit_index:\n args += '--no-index '\n if not self.annotate:\n args += '--no-annotate '\n yield comment('# pip-compile {args}{filename}'.format(\n args=args,\n filename=basename(self.src_file)))\n yield comment('#')\n\n def write_index_options(self):\n if self.emit_index:\n emitted = False\n for index, index_url in enumerate(self.index_urls):\n if index_url.rstrip('/') == self.default_index_url:\n continue\n flag = '--index-url' if index == 0 else '--extra-index-url'\n yield '{} {}'.format(flag, index_url)\n emitted = True\n if emitted:\n yield '' # extra line of whitespace\n\n def _iter_lines(self, results, reverse_dependencies, primary_packages):\n for line in self.write_header():\n yield line\n for line in self.write_index_options():\n yield line\n\n UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'}\n unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES}\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n packages = sorted(packages, key=self._sort_key)\n unsafe_packages = sorted(unsafe_packages, key=self._sort_key)\n\n for ireq in packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages)\n yield line\n\n if unsafe_packages:\n yield ''\n yield comment('# The following packages are commented out because they are')\n yield comment('# considered to be unsafe in a requirements file:')\n\n for ireq in unsafe_packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages, 
include_specifier=False)\n yield comment('# ' + line)\n\n def write(self, results, reverse_dependencies, primary_packages):\n with ExitStack() as stack:\n f = None\n if not self.dry_run:\n f = stack.enter_context(AtomicSaver(self.dst_file))\n\n for line in self._iter_lines(results, reverse_dependencies, primary_packages):\n log.info(line)\n if f:\n f.write(unstyle(line).encode('utf-8'))\n f.write(os.linesep.encode('utf-8'))\n\n def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True):\n line = format_requirement(ireq, include_specifier=include_specifier)\n if not self.annotate or ireq.name in primary_packages:\n return line\n\n # Annotate what packages this package is required by\n required_by = reverse_dependencies.get(ireq.name.lower(), [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n line += comment(' # via ' + annotation)\n return line\n", "path": "piptools/writer.py"}]} | 1,559 | 119 |
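
A small sketch of why the `# via` annotation disappeared under pip 8 and why the one-line patch above restores it: pip 8 reports the canonical project name (`MarkupSafe`) while the reverse-dependency map is keyed by the lowercased name, which is what the `.lower()` in the patch assumes. The dictionary below is illustrative data, not actual pip-tools output.

```python
reverse_dependencies = {'markupsafe': {'jinja2'}}   # keys assumed lowercased

pip7_name = 'markupsafe'   # what pip 7 reported for the resolved requirement
pip8_name = 'MarkupSafe'   # what pip 8 reports

assert reverse_dependencies.get(pip7_name, [])          # hit  -> "# via jinja2" is written
assert not reverse_dependencies.get(pip8_name, [])      # miss -> annotation silently dropped
assert reverse_dependencies.get(pip8_name.lower(), [])  # the patched lookup hits again
```
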
gh_patches_debug_23719 | rasdani/github-patches | git_diff | vaexio__vaex-405 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failing to open arrow stream with categoricals
Vaex fails to open arrow streams that contain categorical columns. It would be great if this worked, as categoricals have given great performance in many of my applications.
```python
import pandas as pd
import pyarrow as pa
import numpy as np
import vaex
df = pd.DataFrame(
    {
        'col1': pd.Categorical.from_codes(np.full(1, 1), categories=['ABC', 'DEF'])
    }
)
table = pa.Table.from_pandas(df)

with pa.OSFile('test2.arrow', 'wb') as sink:
    with pa.RecordBatchStreamWriter(sink, table.schema) as writer:
        writer.write_table(table)

with pa.OSFile('test2.arrow', 'rb') as source:
    df = pa.ipc.open_stream(source).read_pandas()

df = vaex.open('test2.arrow')
```
Output:
```
ERROR:MainThread:vaex:error opening 'test2.arrow'
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
in
17 df = pa.ipc.open_stream(source).read_pandas()
18
---> 19 df = vaex.open('test2.arrow')
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex\__init__.py in open(path, convert, shuffle, copy_index, *args, **kwargs)
189 ds = from_csv(path, copy_index=copy_index, **kwargs)
190 else:
--> 191 ds = vaex.file.open(path, *args, **kwargs)
192 if convert and ds:
193 ds.export_hdf5(filename_hdf5, shuffle=shuffle)
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex\file\__init__.py in open(path, *args, **kwargs)
28 for opener in opener_classes:
29 if opener.can_open(path, *args, **kwargs):
---> 30 return opener.open(path, *args, **kwargs)
31 if hdf5:
32 openers.extend(hdf5.dataset.dataset_type_map.items())
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\opener.py in open(path, *args, **kwargs)
9 def open(path, *args, **kwargs):
10 from .dataset import DatasetArrow
---> 11 return DatasetArrow(path, *args, **kwargs)
12
13 class ParquetOpener:
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\dataset.py in __init__(self, filename, table, write)
18 self._write = write
19 if table is None:
---> 20 self._load()
21 else:
22 self._load_table(table)
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\dataset.py in _load(self)
26 reader = pa.ipc.open_stream(source)
27 table = pa.Table.from_batches([b for b in reader])
---> 28 self._load_table(table)
29
30 def _load_table(self, table):
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\dataset.py in _load_table(self, table)
35 # TODO: keep the arrow columns, and support and test chunks
36 arrow_array = col.data.chunks[0]
---> 37 column = column_from_arrow_array(arrow_array)
38
39 self.columns[name] = column
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\convert.py in column_from_arrow_array(arrow_array)
28 buffers = arrow_array.buffers()
29 if len(buffers) == 2:
---> 30 return numpy_array_from_arrow_array(arrow_array)
31 elif len(buffers) == 3 and isinstance(arrow_array.type, type(pyarrow.string())):
32 bitmap_buffer, offsets, string_bytes = arrow_array.buffers()
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\convert.py in numpy_array_from_arrow_array(arrow_array)
62 array = np.frombuffer(buffers[-1], dtype, len(arrow_array))# TODO: deal with offset ? [arrow_array.offset:arrow_array.offset + len(arrow_array)]
63 else:
---> 64 dtype = arrow_array.type.to_pandas_dtype()
65 if np.bool_ == dtype:
66 # TODO: this will also be a copy, we probably want to support bitmasks as well
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\pyarrow\types.pxi in pyarrow.lib.DataType.to_pandas_dtype()
NotImplementedError: dictionary
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-arrow/vaex_arrow/dataset.py`
Content:
```
1 __author__ = 'maartenbreddels'
2 import logging
3
4 import pyarrow as pa
5 import pyarrow.parquet as pq
6
7 import vaex.dataset
8 import vaex.file.other
9 from .convert import column_from_arrow_array
10 logger = logging.getLogger("vaex_arrow")
11
12
13 class DatasetArrow(vaex.dataset.DatasetLocal):
14 """Implements storage using arrow"""
15
16 def __init__(self, filename=None, table=None, write=False):
17 super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])
18 self._write = write
19 if table is None:
20 self._load()
21 else:
22 self._load_table(table)
23
24 def _load(self):
25 source = pa.memory_map(self.path)
26 reader = pa.ipc.open_stream(source)
27 table = pa.Table.from_batches([b for b in reader])
28 self._load_table(table)
29
30 def _load_table(self, table):
31 self._length_unfiltered = self._length_original = table.num_rows
32 self._index_end = self._length_original = table.num_rows
33 for col in table.columns:
34 name = col.name
35 # TODO: keep the arrow columns, and support and test chunks
36 arrow_array = col.data.chunks[0]
37 column = column_from_arrow_array(arrow_array)
38
39 self.columns[name] = column
40 self.column_names.append(name)
41 self._save_assign_expression(name, vaex.expression.Expression(self, name))
42
43
44 @classmethod
45 def can_open(cls, path, *args, **kwargs):
46 return path.rpartition('.')[2] == 'arrow'
47
48 @classmethod
49 def get_options(cls, path):
50 return []
51
52 @classmethod
53 def option_to_args(cls, option):
54 return []
55
56 class DatasetParquet(DatasetArrow):
57 def _load(self):
58 # might not be optimal, but it works, we can always see if we can
59 # do mmapping later on
60 table = pq.read_table(self.path)
61 self._load_table(table)
62
63 vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
64 vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
65
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-arrow/vaex_arrow/dataset.py b/packages/vaex-arrow/vaex_arrow/dataset.py
--- a/packages/vaex-arrow/vaex_arrow/dataset.py
+++ b/packages/vaex-arrow/vaex_arrow/dataset.py
@@ -28,14 +28,18 @@
self._load_table(table)
def _load_table(self, table):
- self._length_unfiltered = self._length_original = table.num_rows
- self._index_end = self._length_original = table.num_rows
+ self._length_unfiltered = self._length_original = table.num_rows
+ self._index_end = self._length_original = table.num_rows
for col in table.columns:
name = col.name
# TODO: keep the arrow columns, and support and test chunks
arrow_array = col.data.chunks[0]
- column = column_from_arrow_array(arrow_array)
-
+ if isinstance(arrow_array.type, pa.DictionaryType):
+ column = column_from_arrow_array(arrow_array.indices)
+ labels = column_from_arrow_array(arrow_array.dictionary).tolist()
+ self._categories[name] = dict(labels=labels, N=len(labels))
+ else:
+ column = column_from_arrow_array(arrow_array)
self.columns[name] = column
self.column_names.append(name)
self._save_assign_expression(name, vaex.expression.Expression(self, name))
| {"golden_diff": "diff --git a/packages/vaex-arrow/vaex_arrow/dataset.py b/packages/vaex-arrow/vaex_arrow/dataset.py\n--- a/packages/vaex-arrow/vaex_arrow/dataset.py\n+++ b/packages/vaex-arrow/vaex_arrow/dataset.py\n@@ -28,14 +28,18 @@\n self._load_table(table)\n \n def _load_table(self, table):\n- self._length_unfiltered = self._length_original = table.num_rows\n- self._index_end = self._length_original = table.num_rows\n+ self._length_unfiltered = self._length_original = table.num_rows\n+ self._index_end = self._length_original = table.num_rows\n for col in table.columns:\n name = col.name\n # TODO: keep the arrow columns, and support and test chunks\n arrow_array = col.data.chunks[0]\n- column = column_from_arrow_array(arrow_array)\n-\n+ if isinstance(arrow_array.type, pa.DictionaryType):\n+ column = column_from_arrow_array(arrow_array.indices)\n+ labels = column_from_arrow_array(arrow_array.dictionary).tolist()\n+ self._categories[name] = dict(labels=labels, N=len(labels))\n+ else:\n+ column = column_from_arrow_array(arrow_array)\n self.columns[name] = column\n self.column_names.append(name)\n self._save_assign_expression(name, vaex.expression.Expression(self, name))\n", "issue": "Failing to open arrow stream with categoricals\nVaex fails to open arrow streams that contain categorical columns. It would be great if this was working as categoricals have given a great performance in many of my applications.\r\n\r\n```import pandas as pd\r\nimport pyarrow as pa\r\nimport numpy as np\r\nimport vaex\r\ndf = pd.DataFrame(\r\n {\r\n 'col1': pd.Categorical.from_codes(np.full(1, 1), categories=['ABC', 'DEF'])\r\n }\r\n)\r\ntable = pa.Table.from_pandas(df)\r\n\r\nwith pa.OSFile('test2.arrow', 'wb') as sink:\r\n with pa.RecordBatchStreamWriter(sink, table.schema) as writer:\r\n writer.write_table(table)\r\n\r\nwith pa.OSFile('test2.arrow', 'rb') as source:\r\n df = pa.ipc.open_stream(source).read_pandas()\r\n\r\ndf = vaex.open('test2.arrow')\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:MainThread:vaex:error opening 'test2.arrow'\r\n---------------------------------------------------------------------------\r\nNotImplementedError Traceback (most recent call last)\r\n in \r\n 17 df = pa.ipc.open_stream(source).read_pandas()\r\n 18 \r\n---> 19 df = vaex.open('test2.arrow')\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex\\__init__.py in open(path, convert, shuffle, copy_index, *args, **kwargs)\r\n 189 ds = from_csv(path, copy_index=copy_index, **kwargs)\r\n 190 else:\r\n--> 191 ds = vaex.file.open(path, *args, **kwargs)\r\n 192 if convert and ds:\r\n 193 ds.export_hdf5(filename_hdf5, shuffle=shuffle)\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex\\file\\__init__.py in open(path, *args, **kwargs)\r\n 28 for opener in opener_classes:\r\n 29 if opener.can_open(path, *args, **kwargs):\r\n---> 30 return opener.open(path, *args, **kwargs)\r\n 31 if hdf5:\r\n 32 openers.extend(hdf5.dataset.dataset_type_map.items())\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\opener.py in open(path, *args, **kwargs)\r\n 9 def open(path, *args, **kwargs):\r\n 10 from .dataset import DatasetArrow\r\n---> 11 return DatasetArrow(path, *args, **kwargs)\r\n 12 \r\n 13 class ParquetOpener:\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\dataset.py in __init__(self, filename, table, write)\r\n 18 self._write = write\r\n 19 if table is None:\r\n---> 20 self._load()\r\n 21 else:\r\n 22 
self._load_table(table)\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\dataset.py in _load(self)\r\n 26 reader = pa.ipc.open_stream(source)\r\n 27 table = pa.Table.from_batches([b for b in reader])\r\n---> 28 self._load_table(table)\r\n 29 \r\n 30 def _load_table(self, table):\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\dataset.py in _load_table(self, table)\r\n 35 # TODO: keep the arrow columns, and support and test chunks\r\n 36 arrow_array = col.data.chunks[0]\r\n---> 37 column = column_from_arrow_array(arrow_array)\r\n 38 \r\n 39 self.columns[name] = column\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\convert.py in column_from_arrow_array(arrow_array)\r\n 28 buffers = arrow_array.buffers()\r\n 29 if len(buffers) == 2:\r\n---> 30 return numpy_array_from_arrow_array(arrow_array)\r\n 31 elif len(buffers) == 3 and isinstance(arrow_array.type, type(pyarrow.string())):\r\n 32 bitmap_buffer, offsets, string_bytes = arrow_array.buffers()\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\convert.py in numpy_array_from_arrow_array(arrow_array)\r\n 62 array = np.frombuffer(buffers[-1], dtype, len(arrow_array))# TODO: deal with offset ? [arrow_array.offset:arrow_array.offset + len(arrow_array)]\r\n 63 else:\r\n---> 64 dtype = arrow_array.type.to_pandas_dtype()\r\n 65 if np.bool_ == dtype:\r\n 66 # TODO: this will also be a copy, we probably want to support bitmasks as well\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\pyarrow\\types.pxi in pyarrow.lib.DataType.to_pandas_dtype()\r\n\r\nNotImplementedError: dictionary\r\n```\n", "before_files": [{"content": "__author__ = 'maartenbreddels'\nimport logging\n\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nimport vaex.dataset\nimport vaex.file.other\nfrom .convert import column_from_arrow_array\nlogger = logging.getLogger(\"vaex_arrow\")\n\n\nclass DatasetArrow(vaex.dataset.DatasetLocal):\n \"\"\"Implements storage using arrow\"\"\"\n\n def __init__(self, filename=None, table=None, write=False):\n super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])\n self._write = write\n if table is None:\n self._load()\n else:\n self._load_table(table)\n\n def _load(self):\n source = pa.memory_map(self.path)\n reader = pa.ipc.open_stream(source)\n table = pa.Table.from_batches([b for b in reader])\n self._load_table(table)\n \n def _load_table(self, table):\n self._length_unfiltered = self._length_original = table.num_rows\n self._index_end = self._length_original = table.num_rows\n for col in table.columns:\n name = col.name\n # TODO: keep the arrow columns, and support and test chunks\n arrow_array = col.data.chunks[0]\n column = column_from_arrow_array(arrow_array)\n\n self.columns[name] = column\n self.column_names.append(name)\n self._save_assign_expression(name, vaex.expression.Expression(self, name))\n\n\n @classmethod\n def can_open(cls, path, *args, **kwargs):\n return path.rpartition('.')[2] == 'arrow'\n\n @classmethod\n def get_options(cls, path):\n return []\n\n @classmethod\n def option_to_args(cls, option):\n return []\n\nclass DatasetParquet(DatasetArrow):\n def _load(self):\n # might not be optimal, but it works, we can always see if we can\n # do mmapping later on\n table = pq.read_table(self.path)\n self._load_table(table)\n\nvaex.file.other.dataset_type_map[\"arrow\"] = DatasetArrow\nvaex.file.other.dataset_type_map[\"parquet\"] = 
DatasetParquet\n\n", "path": "packages/vaex-arrow/vaex_arrow/dataset.py"}], "after_files": [{"content": "__author__ = 'maartenbreddels'\nimport logging\n\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nimport vaex.dataset\nimport vaex.file.other\nfrom .convert import column_from_arrow_array\nlogger = logging.getLogger(\"vaex_arrow\")\n\n\nclass DatasetArrow(vaex.dataset.DatasetLocal):\n \"\"\"Implements storage using arrow\"\"\"\n\n def __init__(self, filename=None, table=None, write=False):\n super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])\n self._write = write\n if table is None:\n self._load()\n else:\n self._load_table(table)\n\n def _load(self):\n source = pa.memory_map(self.path)\n reader = pa.ipc.open_stream(source)\n table = pa.Table.from_batches([b for b in reader])\n self._load_table(table)\n \n def _load_table(self, table):\n self._length_unfiltered = self._length_original = table.num_rows\n self._index_end = self._length_original = table.num_rows\n for col in table.columns:\n name = col.name\n # TODO: keep the arrow columns, and support and test chunks\n arrow_array = col.data.chunks[0]\n if isinstance(arrow_array.type, pa.DictionaryType):\n column = column_from_arrow_array(arrow_array.indices)\n labels = column_from_arrow_array(arrow_array.dictionary).tolist()\n self._categories[name] = dict(labels=labels, N=len(labels))\n else:\n column = column_from_arrow_array(arrow_array)\n self.columns[name] = column\n self.column_names.append(name)\n self._save_assign_expression(name, vaex.expression.Expression(self, name))\n\n\n @classmethod\n def can_open(cls, path, *args, **kwargs):\n return path.rpartition('.')[2] == 'arrow'\n\n @classmethod\n def get_options(cls, path):\n return []\n\n @classmethod\n def option_to_args(cls, option):\n return []\n\nclass DatasetParquet(DatasetArrow):\n def _load(self):\n # might not be optimal, but it works, we can always see if we can\n # do mmapping later on\n table = pq.read_table(self.path)\n self._load_table(table)\n\nvaex.file.other.dataset_type_map[\"arrow\"] = DatasetArrow\nvaex.file.other.dataset_type_map[\"parquet\"] = DatasetParquet\n\n", "path": "packages/vaex-arrow/vaex_arrow/dataset.py"}]} | 2,043 | 316 |
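
A short pyarrow-only sketch of the dictionary-array branch the patch adds to `_load_table`: the integer codes become the column data and the dictionary becomes the category labels. The plain `categories` dict mimics the `self._categories[name]` bookkeeping, and `to_numpy()` / `to_pylist()` stand in for `column_from_arrow_array`.

```python
import numpy as np
import pyarrow as pa

arr = pa.DictionaryArray.from_arrays(
    indices=pa.array(np.full(1, 1, dtype='int8')),
    dictionary=pa.array(['ABC', 'DEF']),
)

if isinstance(arr.type, pa.DictionaryType):
    column = arr.indices.to_numpy()        # integer codes become the column values
    labels = arr.dictionary.to_pylist()    # label strings go into the categories map
    categories = {'col1': dict(labels=labels, N=len(labels))}

assert column.tolist() == [1]
assert categories['col1'] == {'labels': ['ABC', 'DEF'], 'N': 2}
```
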
gh_patches_debug_19901 | rasdani/github-patches | git_diff | python-discord__bot-216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
!watch alias is not working.
The `!watch` alias broke when we changed the watch command to take a note instead of a channel - this is due to converters in the alias. I'll fix it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/alias.py`
Content:
```
1 import inspect
2 import logging
3
4 from discord import Colour, Embed, TextChannel, User
5 from discord.ext.commands import (
6 Command, Context, clean_content, command, group
7 )
8
9 from bot.converters import TagNameConverter
10 from bot.pagination import LinePaginator
11
12 log = logging.getLogger(__name__)
13
14
15 class Alias:
16 """
17 Aliases for more used commands
18 """
19
20 def __init__(self, bot):
21 self.bot = bot
22
23 async def invoke(self, ctx, cmd_name, *args, **kwargs):
24 """
25 Invokes a command with args and kwargs.
26 Fail early through `command.can_run`, and logs warnings.
27
28 :param ctx: Context instance for command call
29 :param cmd_name: Name of command/subcommand to be invoked
30 :param args: args to be passed to the command
31 :param kwargs: kwargs to be passed to the command
32 :return: None
33 """
34
35 log.debug(f"{cmd_name} was invoked through an alias")
36 cmd = self.bot.get_command(cmd_name)
37 if not cmd:
38 return log.warning(f'Did not find command "{cmd_name}" to invoke.')
39 elif not await cmd.can_run(ctx):
40 return log.warning(
41 f'{str(ctx.author)} tried to run the command "{cmd_name}"'
42 )
43
44 await ctx.invoke(cmd, *args, **kwargs)
45
46 @command(name='aliases')
47 async def aliases_command(self, ctx):
48 """Show configured aliases on the bot."""
49
50 embed = Embed(
51 title='Configured aliases',
52 colour=Colour.blue()
53 )
54 await LinePaginator.paginate(
55 (
56 f"• `{ctx.prefix}{value.name}` "
57 f"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`"
58 for name, value in inspect.getmembers(self)
59 if isinstance(value, Command) and name.endswith('_alias')
60 ),
61 ctx, embed, empty=False, max_lines=20
62 )
63
64 @command(name="resources", aliases=("resource",), hidden=True)
65 async def site_resources_alias(self, ctx):
66 """
67 Alias for invoking <prefix>site resources.
68 """
69
70 await self.invoke(ctx, "site resources")
71
72 @command(name="watch", hidden=True)
73 async def bigbrother_watch_alias(
74 self, ctx, user: User, channel: TextChannel = None
75 ):
76 """
77 Alias for invoking <prefix>bigbrother watch user [text_channel].
78 """
79
80 await self.invoke(ctx, "bigbrother watch", user, channel)
81
82 @command(name="unwatch", hidden=True)
83 async def bigbrother_unwatch_alias(self, ctx, user: User):
84 """
85 Alias for invoking <prefix>bigbrother unwatch user.
86
87 user: discord.User - A user instance to unwatch
88 """
89
90 await self.invoke(ctx, "bigbrother unwatch", user)
91
92 @command(name="home", hidden=True)
93 async def site_home_alias(self, ctx):
94 """
95 Alias for invoking <prefix>site home.
96 """
97
98 await self.invoke(ctx, "site home")
99
100 @command(name="faq", hidden=True)
101 async def site_faq_alias(self, ctx):
102 """
103 Alias for invoking <prefix>site faq.
104 """
105
106 await self.invoke(ctx, "site faq")
107
108 @command(name="rules", hidden=True)
109 async def site_rules_alias(self, ctx):
110 """
111 Alias for invoking <prefix>site rules.
112 """
113
114 await self.invoke(ctx, "site rules")
115
116 @command(name="reload", hidden=True)
117 async def cogs_reload_alias(self, ctx, *, cog_name: str):
118 """
119 Alias for invoking <prefix>cogs reload cog_name.
120
121 cog_name: str - name of the cog to be reloaded.
122 """
123
124 await self.invoke(ctx, "cogs reload", cog_name)
125
126 @command(name="defon", hidden=True)
127 async def defcon_enable_alias(self, ctx):
128 """
129 Alias for invoking <prefix>defcon enable.
130 """
131
132 await self.invoke(ctx, "defcon enable")
133
134 @command(name="defoff", hidden=True)
135 async def defcon_disable_alias(self, ctx):
136 """
137 Alias for invoking <prefix>defcon disable.
138 """
139
140 await self.invoke(ctx, "defcon disable")
141
142 @group(name="get",
143 aliases=("show", "g"),
144 hidden=True,
145 invoke_without_command=True)
146 async def get_group_alias(self, ctx):
147 """
148 Group for reverse aliases for commands like `tags get`,
149 allowing for `get tags` or `get docs`.
150 """
151
152 pass
153
154 @get_group_alias.command(name="tags", aliases=("tag", "t"), hidden=True)
155 async def tags_get_alias(
156 self, ctx: Context, *, tag_name: TagNameConverter = None
157 ):
158 """
159 Alias for invoking <prefix>tags get [tag_name].
160
161 tag_name: str - tag to be viewed.
162 """
163
164 await self.invoke(ctx, "tags get", tag_name)
165
166 @get_group_alias.command(name="docs", aliases=("doc", "d"), hidden=True)
167 async def docs_get_alias(
168 self, ctx: Context, symbol: clean_content = None
169 ):
170 """
171 Alias for invoking <prefix>docs get [symbol].
172
173 symbol: str - name of doc to be viewed.
174 """
175
176 await self.invoke(ctx, "docs get", symbol)
177
178
179 def setup(bot):
180 bot.add_cog(Alias(bot))
181 log.info("Cog loaded: Alias")
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py
--- a/bot/cogs/alias.py
+++ b/bot/cogs/alias.py
@@ -1,7 +1,7 @@
import inspect
import logging
-from discord import Colour, Embed, TextChannel, User
+from discord import Colour, Embed, User
from discord.ext.commands import (
Command, Context, clean_content, command, group
)
@@ -71,13 +71,13 @@
@command(name="watch", hidden=True)
async def bigbrother_watch_alias(
- self, ctx, user: User, channel: TextChannel = None
+ self, ctx, user: User, *, reason: str = None
):
"""
Alias for invoking <prefix>bigbrother watch user [text_channel].
"""
- await self.invoke(ctx, "bigbrother watch", user, channel)
+ await self.invoke(ctx, "bigbrother watch", user, reason=reason)
@command(name="unwatch", hidden=True)
async def bigbrother_unwatch_alias(self, ctx, user: User):
| {"golden_diff": "diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py\n--- a/bot/cogs/alias.py\n+++ b/bot/cogs/alias.py\n@@ -1,7 +1,7 @@\n import inspect\n import logging\n \n-from discord import Colour, Embed, TextChannel, User\n+from discord import Colour, Embed, User\n from discord.ext.commands import (\n Command, Context, clean_content, command, group\n )\n@@ -71,13 +71,13 @@\n \n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(\n- self, ctx, user: User, channel: TextChannel = None\n+ self, ctx, user: User, *, reason: str = None\n ):\n \"\"\"\n Alias for invoking <prefix>bigbrother watch user [text_channel].\n \"\"\"\n \n- await self.invoke(ctx, \"bigbrother watch\", user, channel)\n+ await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n \n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx, user: User):\n", "issue": "!watch alias is not working.\nThe `!watch` alias broke when we changed the watch command to take a note instead of a channel - this is due to converters in the alias. I'll fix it.\n", "before_files": [{"content": "import inspect\nimport logging\n\nfrom discord import Colour, Embed, TextChannel, User\nfrom discord.ext.commands import (\n Command, Context, clean_content, command, group\n)\n\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias:\n \"\"\"\n Aliases for more used commands\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n async def invoke(self, ctx, cmd_name, *args, **kwargs):\n \"\"\"\n Invokes a command with args and kwargs.\n Fail early through `command.can_run`, and logs warnings.\n\n :param ctx: Context instance for command call\n :param cmd_name: Name of command/subcommand to be invoked\n :param args: args to be passed to the command\n :param kwargs: kwargs to be passed to the command\n :return: None\n \"\"\"\n\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx):\n \"\"\"Show configured aliases on the bot.\"\"\"\n\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site resources.\n \"\"\"\n\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(\n self, ctx, user: User, channel: TextChannel = None\n ):\n \"\"\"\n Alias for invoking <prefix>bigbrother watch user [text_channel].\n \"\"\"\n\n await self.invoke(ctx, \"bigbrother watch\", user, channel)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx, user: User):\n \"\"\"\n Alias for invoking <prefix>bigbrother unwatch user.\n\n user: discord.User - A user instance to unwatch\n \"\"\"\n\n await 
self.invoke(ctx, \"bigbrother unwatch\", user)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site home.\n \"\"\"\n\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site faq.\n \"\"\"\n\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", hidden=True)\n async def site_rules_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site rules.\n \"\"\"\n\n await self.invoke(ctx, \"site rules\")\n\n @command(name=\"reload\", hidden=True)\n async def cogs_reload_alias(self, ctx, *, cog_name: str):\n \"\"\"\n Alias for invoking <prefix>cogs reload cog_name.\n\n cog_name: str - name of the cog to be reloaded.\n \"\"\"\n\n await self.invoke(ctx, \"cogs reload\", cog_name)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>defcon enable.\n \"\"\"\n\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>defcon disable.\n \"\"\"\n\n await self.invoke(ctx, \"defcon disable\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx):\n \"\"\"\n Group for reverse aliases for commands like `tags get`,\n allowing for `get tags` or `get docs`.\n \"\"\"\n\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ):\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n\n await self.invoke(ctx, \"tags get\", tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ):\n \"\"\"\n Alias for invoking <prefix>docs get [symbol].\n\n symbol: str - name of doc to be viewed.\n \"\"\"\n\n await self.invoke(ctx, \"docs get\", symbol)\n\n\ndef setup(bot):\n bot.add_cog(Alias(bot))\n log.info(\"Cog loaded: Alias\")\n", "path": "bot/cogs/alias.py"}], "after_files": [{"content": "import inspect\nimport logging\n\nfrom discord import Colour, Embed, User\nfrom discord.ext.commands import (\n Command, Context, clean_content, command, group\n)\n\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias:\n \"\"\"\n Aliases for more used commands\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n async def invoke(self, ctx, cmd_name, *args, **kwargs):\n \"\"\"\n Invokes a command with args and kwargs.\n Fail early through `command.can_run`, and logs warnings.\n\n :param ctx: Context instance for command call\n :param cmd_name: Name of command/subcommand to be invoked\n :param args: args to be passed to the command\n :param kwargs: kwargs to be passed to the command\n :return: None\n \"\"\"\n\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, 
ctx):\n \"\"\"Show configured aliases on the bot.\"\"\"\n\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site resources.\n \"\"\"\n\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(\n self, ctx, user: User, *, reason: str = None\n ):\n \"\"\"\n Alias for invoking <prefix>bigbrother watch user [text_channel].\n \"\"\"\n\n await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx, user: User):\n \"\"\"\n Alias for invoking <prefix>bigbrother unwatch user.\n\n user: discord.User - A user instance to unwatch\n \"\"\"\n\n await self.invoke(ctx, \"bigbrother unwatch\", user)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site home.\n \"\"\"\n\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site faq.\n \"\"\"\n\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", hidden=True)\n async def site_rules_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site rules.\n \"\"\"\n\n await self.invoke(ctx, \"site rules\")\n\n @command(name=\"reload\", hidden=True)\n async def cogs_reload_alias(self, ctx, *, cog_name: str):\n \"\"\"\n Alias for invoking <prefix>cogs reload cog_name.\n\n cog_name: str - name of the cog to be reloaded.\n \"\"\"\n\n await self.invoke(ctx, \"cogs reload\", cog_name)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>defcon enable.\n \"\"\"\n\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>defcon disable.\n \"\"\"\n\n await self.invoke(ctx, \"defcon disable\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx):\n \"\"\"\n Group for reverse aliases for commands like `tags get`,\n allowing for `get tags` or `get docs`.\n \"\"\"\n\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ):\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n\n await self.invoke(ctx, \"tags get\", tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ):\n \"\"\"\n Alias for invoking <prefix>docs get [symbol].\n\n symbol: str - name of doc to be viewed.\n \"\"\"\n\n await self.invoke(ctx, \"docs get\", symbol)\n\n\ndef setup(bot):\n bot.add_cog(Alias(bot))\n log.info(\"Cog loaded: Alias\")\n", "path": "bot/cogs/alias.py"}]} | 1,951 | 253 |
gh_patches_debug_18879 | rasdani/github-patches | git_diff | netbox-community__netbox-8292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Circuits list view to display formatted commit rate
### NetBox version
3.1.2
### Feature type
Change to existing functionality
### Proposed functionality
The current circuit list view (/circuits/circuits/) has a column called "Commit Rate (kbps)" and shows the rate in kbps, i.e. 1000000
However when looking at the circuit details, the commit rate is translated into something more human readable, i.e. 1 Gbps
Proposing either changing the existing Commit Rate (kbps) column to also translate the commit rate or the addition of an extra column simply called Commit Rate that has the human readable version.
### Use case
Easier for non-technical users to quickly see the commit rate of a circuit in the table view. Brings more parity to the circuit details view.
### Database changes
_No response_
### External dependencies
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/circuits/tables.py`
Content:
```
1 import django_tables2 as tables
2 from django_tables2.utils import Accessor
3
4 from tenancy.tables import TenantColumn
5 from utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn
6 from .models import *
7
8
9 __all__ = (
10 'CircuitTable',
11 'CircuitTypeTable',
12 'ProviderTable',
13 'ProviderNetworkTable',
14 )
15
16
17 CIRCUITTERMINATION_LINK = """
18 {% if value.site %}
19 <a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a>
20 {% elif value.provider_network %}
21 <a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a>
22 {% endif %}
23 """
24
25
26 #
27 # Providers
28 #
29
30 class ProviderTable(BaseTable):
31 pk = ToggleColumn()
32 name = tables.Column(
33 linkify=True
34 )
35 circuit_count = tables.Column(
36 accessor=Accessor('count_circuits'),
37 verbose_name='Circuits'
38 )
39 comments = MarkdownColumn()
40 tags = TagColumn(
41 url_name='circuits:provider_list'
42 )
43
44 class Meta(BaseTable.Meta):
45 model = Provider
46 fields = (
47 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count',
48 'comments', 'tags',
49 )
50 default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count')
51
52
53 #
54 # Provider networks
55 #
56
57 class ProviderNetworkTable(BaseTable):
58 pk = ToggleColumn()
59 name = tables.Column(
60 linkify=True
61 )
62 provider = tables.Column(
63 linkify=True
64 )
65 comments = MarkdownColumn()
66 tags = TagColumn(
67 url_name='circuits:providernetwork_list'
68 )
69
70 class Meta(BaseTable.Meta):
71 model = ProviderNetwork
72 fields = ('pk', 'id', 'name', 'provider', 'description', 'comments', 'tags')
73 default_columns = ('pk', 'name', 'provider', 'description')
74
75
76 #
77 # Circuit types
78 #
79
80 class CircuitTypeTable(BaseTable):
81 pk = ToggleColumn()
82 name = tables.Column(
83 linkify=True
84 )
85 tags = TagColumn(
86 url_name='circuits:circuittype_list'
87 )
88 circuit_count = tables.Column(
89 verbose_name='Circuits'
90 )
91 actions = ButtonsColumn(CircuitType)
92
93 class Meta(BaseTable.Meta):
94 model = CircuitType
95 fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions')
96 default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions')
97
98
99 #
100 # Circuits
101 #
102
103 class CircuitTable(BaseTable):
104 pk = ToggleColumn()
105 cid = tables.Column(
106 linkify=True,
107 verbose_name='Circuit ID'
108 )
109 provider = tables.Column(
110 linkify=True
111 )
112 status = ChoiceFieldColumn()
113 tenant = TenantColumn()
114 termination_a = tables.TemplateColumn(
115 template_code=CIRCUITTERMINATION_LINK,
116 verbose_name='Side A'
117 )
118 termination_z = tables.TemplateColumn(
119 template_code=CIRCUITTERMINATION_LINK,
120 verbose_name='Side Z'
121 )
122 comments = MarkdownColumn()
123 tags = TagColumn(
124 url_name='circuits:circuit_list'
125 )
126
127 class Meta(BaseTable.Meta):
128 model = Circuit
129 fields = (
130 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date',
131 'commit_rate', 'description', 'comments', 'tags',
132 )
133 default_columns = (
134 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description',
135 )
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/circuits/tables.py b/netbox/circuits/tables.py
--- a/netbox/circuits/tables.py
+++ b/netbox/circuits/tables.py
@@ -22,11 +22,32 @@
{% endif %}
"""
+#
+# Table columns
+#
+
+
+class CommitRateColumn(tables.TemplateColumn):
+ """
+ Humanize the commit rate in the column view
+ """
+
+ template_code = """
+ {% load helpers %}
+ {{ record.commit_rate|humanize_speed }}
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(template_code=self.template_code, *args, **kwargs)
+
+ def value(self, value):
+ return str(value) if value else None
#
# Providers
#
+
class ProviderTable(BaseTable):
pk = ToggleColumn()
name = tables.Column(
@@ -119,6 +140,7 @@
template_code=CIRCUITTERMINATION_LINK,
verbose_name='Side Z'
)
+ commit_rate = CommitRateColumn()
comments = MarkdownColumn()
tags = TagColumn(
url_name='circuits:circuit_list'
| {"golden_diff": "diff --git a/netbox/circuits/tables.py b/netbox/circuits/tables.py\n--- a/netbox/circuits/tables.py\n+++ b/netbox/circuits/tables.py\n@@ -22,11 +22,32 @@\n {% endif %}\n \"\"\"\n \n+#\n+# Table columns\n+#\n+\n+\n+class CommitRateColumn(tables.TemplateColumn):\n+ \"\"\"\n+ Humanize the commit rate in the column view\n+ \"\"\"\n+\n+ template_code = \"\"\"\n+ {% load helpers %}\n+ {{ record.commit_rate|humanize_speed }}\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(template_code=self.template_code, *args, **kwargs)\n+\n+ def value(self, value):\n+ return str(value) if value else None\n \n #\n # Providers\n #\n \n+\n class ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n@@ -119,6 +140,7 @@\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n+ commit_rate = CommitRateColumn()\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n", "issue": "Circuits list view to display formatted commit rate\n### NetBox version\n\n3.1.2\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nThe current circuit list view (/circuits/circuits/) has a column called \"Commit Rate (kbps) and shows the rate in kbps i.e. 1000000\r\n\r\nHowever when looking at the circuit details, the commit rate is translated into something more human readable i.e 1 Gbps\r\n\r\nProposing either changing the existing Commit Rate (kbps) column to also translate the commit rate or the addition of an extra column simply called Commit Rate that has the human readable version.\n\n### Use case\n\nEasier for non-technical users to quickly see the commit rate of a circuit in the table view. Brings more parity to the circuit details view.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom tenancy.tables import TenantColumn\nfrom utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn\nfrom .models import *\n\n\n__all__ = (\n 'CircuitTable',\n 'CircuitTypeTable',\n 'ProviderTable',\n 'ProviderNetworkTable',\n)\n\n\nCIRCUITTERMINATION_LINK = \"\"\"\n{% if value.site %}\n <a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n{% elif value.provider_network %}\n <a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n{% endif %}\n\"\"\"\n\n\n#\n# Providers\n#\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n circuit_count = tables.Column(\n accessor=Accessor('count_circuits'),\n verbose_name='Circuits'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:provider_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count',\n 'comments', 'tags',\n )\n default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count')\n\n\n#\n# Provider networks\n#\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n provider = tables.Column(\n linkify=True\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:providernetwork_list'\n )\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = ('pk', 'id', 'name', 'provider', 'description', 'comments', 'tags')\n default_columns = ('pk', 
'name', 'provider', 'description')\n\n\n#\n# Circuit types\n#\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n tags = TagColumn(\n url_name='circuits:circuittype_list'\n )\n circuit_count = tables.Column(\n verbose_name='Circuits'\n )\n actions = ButtonsColumn(CircuitType)\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions')\n default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions')\n\n\n#\n# Circuits\n#\n\nclass CircuitTable(BaseTable):\n pk = ToggleColumn()\n cid = tables.Column(\n linkify=True,\n verbose_name='Circuit ID'\n )\n provider = tables.Column(\n linkify=True\n )\n status = ChoiceFieldColumn()\n tenant = TenantColumn()\n termination_a = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side A'\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date',\n 'commit_rate', 'description', 'comments', 'tags',\n )\n default_columns = (\n 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description',\n )\n", "path": "netbox/circuits/tables.py"}], "after_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom tenancy.tables import TenantColumn\nfrom utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn\nfrom .models import *\n\n\n__all__ = (\n 'CircuitTable',\n 'CircuitTypeTable',\n 'ProviderTable',\n 'ProviderNetworkTable',\n)\n\n\nCIRCUITTERMINATION_LINK = \"\"\"\n{% if value.site %}\n <a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n{% elif value.provider_network %}\n <a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n{% endif %}\n\"\"\"\n\n#\n# Table columns\n#\n\n\nclass CommitRateColumn(tables.TemplateColumn):\n \"\"\"\n Humanize the commit rate in the column view\n \"\"\"\n\n template_code = \"\"\"\n {% load helpers %}\n {{ record.commit_rate|humanize_speed }}\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(template_code=self.template_code, *args, **kwargs)\n\n def value(self, value):\n return str(value) if value else None\n\n#\n# Providers\n#\n\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n circuit_count = tables.Column(\n accessor=Accessor('count_circuits'),\n verbose_name='Circuits'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:provider_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count',\n 'comments', 'tags',\n )\n default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count')\n\n\n#\n# Provider networks\n#\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n provider = tables.Column(\n linkify=True\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:providernetwork_list'\n )\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = ('pk', 'id', 
'name', 'provider', 'description', 'comments', 'tags')\n default_columns = ('pk', 'name', 'provider', 'description')\n\n\n#\n# Circuit types\n#\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n tags = TagColumn(\n url_name='circuits:circuittype_list'\n )\n circuit_count = tables.Column(\n verbose_name='Circuits'\n )\n actions = ButtonsColumn(CircuitType)\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions')\n default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions')\n\n\n#\n# Circuits\n#\n\nclass CircuitTable(BaseTable):\n pk = ToggleColumn()\n cid = tables.Column(\n linkify=True,\n verbose_name='Circuit ID'\n )\n provider = tables.Column(\n linkify=True\n )\n status = ChoiceFieldColumn()\n tenant = TenantColumn()\n termination_a = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side A'\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n commit_rate = CommitRateColumn()\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date',\n 'commit_rate', 'description', 'comments', 'tags',\n )\n default_columns = (\n 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description',\n )\n", "path": "netbox/circuits/tables.py"}]} | 1,587 | 271 |
gh_patches_debug_22839 | rasdani/github-patches | git_diff | beetbox__beets-4086 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unimported: Add an option to ignore some folders
I use a hard drive as my Beets library 'folder'.
Because of its size I also store some other non-imported music folders on that drive.
I ran into the situation that running 'beets unimported' showed me all the files in those unimported folders.
It's logical that the plugin scans those too but a more specific scan would be great.
I could circumvent this by placing all Beets folders in another folder instead of the root of the drive but that would make for a deeper hierarchy which I wouldn't like.
### Proposed solution
Add extra options for the command line
`beets unimported /specific_folder`
or in config.yaml
```
unimported:
ignore_folders: folder-with-non-imported-files
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/unimported.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2019, Joris Jensen
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """
16 List all files in the library folder which are not listed in the
17 beets library database, including art files
18 """
19
20 import os
21
22 from beets import util
23 from beets.plugins import BeetsPlugin
24 from beets.ui import Subcommand, print_
25
26 __author__ = 'https://github.com/MrNuggelz'
27
28
29 class Unimported(BeetsPlugin):
30
31 def __init__(self):
32 super().__init__()
33 self.config.add(
34 {
35 'ignore_extensions': []
36 }
37 )
38
39 def commands(self):
40 def print_unimported(lib, opts, args):
41 ignore_exts = [('.' + x).encode() for x
42 in self.config['ignore_extensions'].as_str_seq()]
43 in_folder = {
44 os.path.join(r, file) for r, d, f in os.walk(lib.directory)
45 for file in f if not any(
46 [file.endswith(extension) for extension in
47 ignore_exts])}
48 in_library = {x.path for x in lib.items()}
49 art_files = {x.artpath for x in lib.albums()}
50 for f in in_folder - in_library - art_files:
51 print_(util.displayable_path(f))
52
53 unimported = Subcommand(
54 'unimported',
55 help='list all files in the library folder which are not listed'
56 ' in the beets library database')
57 unimported.func = print_unimported
58 return [unimported]
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py
--- a/beetsplug/unimported.py
+++ b/beetsplug/unimported.py
@@ -38,13 +38,23 @@
def commands(self):
def print_unimported(lib, opts, args):
- ignore_exts = [('.' + x).encode() for x
- in self.config['ignore_extensions'].as_str_seq()]
+ ignore_exts = [
+ ('.' + x).encode()
+ for x in self.config["ignore_extensions"].as_str_seq()
+ ]
+ ignore_dirs = [
+ os.path.join(lib.directory, x.encode())
+ for x in self.config["ignore_subdirectories"].as_str_seq()
+ ]
in_folder = {
- os.path.join(r, file) for r, d, f in os.walk(lib.directory)
- for file in f if not any(
- [file.endswith(extension) for extension in
- ignore_exts])}
+ os.path.join(r, file)
+ for r, d, f in os.walk(lib.directory)
+ for file in f
+ if not any(
+ [file.endswith(ext) for ext in ignore_exts]
+ + [r in ignore_dirs]
+ )
+ }
in_library = {x.path for x in lib.items()}
art_files = {x.artpath for x in lib.albums()}
for f in in_folder - in_library - art_files:
| {"golden_diff": "diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py\n--- a/beetsplug/unimported.py\n+++ b/beetsplug/unimported.py\n@@ -38,13 +38,23 @@\n \n def commands(self):\n def print_unimported(lib, opts, args):\n- ignore_exts = [('.' + x).encode() for x\n- in self.config['ignore_extensions'].as_str_seq()]\n+ ignore_exts = [\n+ ('.' + x).encode()\n+ for x in self.config[\"ignore_extensions\"].as_str_seq()\n+ ]\n+ ignore_dirs = [\n+ os.path.join(lib.directory, x.encode())\n+ for x in self.config[\"ignore_subdirectories\"].as_str_seq()\n+ ]\n in_folder = {\n- os.path.join(r, file) for r, d, f in os.walk(lib.directory)\n- for file in f if not any(\n- [file.endswith(extension) for extension in\n- ignore_exts])}\n+ os.path.join(r, file)\n+ for r, d, f in os.walk(lib.directory)\n+ for file in f\n+ if not any(\n+ [file.endswith(ext) for ext in ignore_exts]\n+ + [r in ignore_dirs]\n+ )\n+ }\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n", "issue": "unimported: Add an option to ignore some folders\nI use a hard drive as my Beets library 'folder'. \r\nBecause of its size I also store some other non-imported music folders on that drive.\r\nI ran into the situation that running 'beets unimported' showed me all the files in those unimported folders. \r\nIt's logical that the plugin scans those too but a more specific scan would be great.\r\nI could circumvent this by placing all Beets folders in another folder instead of the root of the drive but that would make for a deeper hierarchy which I wouldn't like.\r\n\r\n### Proposed solution\r\n\r\nAdd extra options for the command line\r\n`beets unimported /specific_folder`\r\nor in config.yaml\r\n```\r\nunimported:\r\n ignore_folders: folder-with-non-imported-files\r\n```\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2019, Joris Jensen\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"\nList all files in the library folder which are not listed in the\n beets library database, including art files\n\"\"\"\n\nimport os\n\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import Subcommand, print_\n\n__author__ = 'https://github.com/MrNuggelz'\n\n\nclass Unimported(BeetsPlugin):\n\n def __init__(self):\n super().__init__()\n self.config.add(\n {\n 'ignore_extensions': []\n }\n )\n\n def commands(self):\n def print_unimported(lib, opts, args):\n ignore_exts = [('.' 
+ x).encode() for x\n in self.config['ignore_extensions'].as_str_seq()]\n in_folder = {\n os.path.join(r, file) for r, d, f in os.walk(lib.directory)\n for file in f if not any(\n [file.endswith(extension) for extension in\n ignore_exts])}\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n print_(util.displayable_path(f))\n\n unimported = Subcommand(\n 'unimported',\n help='list all files in the library folder which are not listed'\n ' in the beets library database')\n unimported.func = print_unimported\n return [unimported]\n", "path": "beetsplug/unimported.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2019, Joris Jensen\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"\nList all files in the library folder which are not listed in the\n beets library database, including art files\n\"\"\"\n\nimport os\n\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import Subcommand, print_\n\n__author__ = 'https://github.com/MrNuggelz'\n\n\nclass Unimported(BeetsPlugin):\n\n def __init__(self):\n super().__init__()\n self.config.add(\n {\n 'ignore_extensions': []\n }\n )\n\n def commands(self):\n def print_unimported(lib, opts, args):\n ignore_exts = [\n ('.' + x).encode()\n for x in self.config[\"ignore_extensions\"].as_str_seq()\n ]\n ignore_dirs = [\n os.path.join(lib.directory, x.encode())\n for x in self.config[\"ignore_subdirectories\"].as_str_seq()\n ]\n in_folder = {\n os.path.join(r, file)\n for r, d, f in os.walk(lib.directory)\n for file in f\n if not any(\n [file.endswith(ext) for ext in ignore_exts]\n + [r in ignore_dirs]\n )\n }\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n print_(util.displayable_path(f))\n\n unimported = Subcommand(\n 'unimported',\n help='list all files in the library folder which are not listed'\n ' in the beets library database')\n unimported.func = print_unimported\n return [unimported]\n", "path": "beetsplug/unimported.py"}]} | 998 | 331 |
gh_patches_debug_2859 | rasdani/github-patches | git_diff | spack__spack-26095 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CentOS 6 image doesn't build with clingo on Dockerhub
### Steps to reproduce
Has to do with failure on centos:6
```
Step 17/19 : RUN spack spec hdf5+mpi
---> Running in 8335d48ff53f
==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.
==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.
==> Warning: the original concretizer is currently being used.
Upgrade to "clingo" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0
==> Error: cannot bootstrap the "clingo" Python module from spec "clingo-bootstrap@spack+python %gcc target=x86_64"
Input spec
--------------------------------
hdf5+mpi
Concretized
--------------------------------
==> Bootstrapping clingo from pre-built binaries
The command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3
```
---
So it bootstraps *during* concretization?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/schema/container.py`
Content:
```
1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5 """Schema for the 'container' subsection of Spack environments."""
6
7 _stages_from_dockerhub = {
8 'type': 'object',
9 'additionalProperties': False,
10 'properties': {
11 'os': {
12 'type': 'string',
13 'enum': ['ubuntu:18.04',
14 'ubuntu:16.04',
15 'centos:7',
16 'centos:6']
17 },
18 'spack': {
19 'type': 'string',
20 },
21 },
22 'required': ['os', 'spack']
23 }
24
25 _custom_stages = {
26 'type': 'object',
27 'additionalProperties': False,
28 'properties': {
29 'build': {'type': 'string'},
30 'final': {'type': 'string'}
31 },
32 'required': ['build', 'final']
33 }
34
35 #: List of packages for the schema below
36 _list_of_packages = {
37 'type': 'array',
38 'items': {
39 'type': 'string'
40 }
41 }
42
43 #: Schema for the container attribute included in Spack environments
44 container_schema = {
45 'type': 'object',
46 'additionalProperties': False,
47 'properties': {
48 # The recipe formats that are currently supported by the command
49 'format': {
50 'type': 'string',
51 'enum': ['docker', 'singularity']
52 },
53 # Describes the base image to start from and the version
54 # of Spack to be used
55 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},
56 # Whether or not to strip installed binaries
57 'strip': {
58 'type': 'boolean',
59 'default': True
60 },
61 # Additional system packages that are needed at runtime
62 'os_packages': {
63 'type': 'object',
64 'properties': {
65 'command': {'type': 'string', 'enum': ['apt', 'yum']},
66 'update': {'type': 'boolean'},
67 'build': _list_of_packages,
68 'final': _list_of_packages
69 },
70 'additionalProperties': False
71 },
72 # Add labels to the image
73 'labels': {
74 'type': 'object',
75 },
76 # Add a custom extra section at the bottom of a stage
77 'extra_instructions': {
78 'type': 'object',
79 'additionalProperties': False,
80 'properties': {
81 'build': {'type': 'string'},
82 'final': {'type': 'string'}
83 }
84 },
85 # Reserved for properties that are specific to each format
86 'singularity': {
87 'type': 'object',
88 'additionalProperties': False,
89 'default': {},
90 'properties': {
91 'runscript': {'type': 'string'},
92 'startscript': {'type': 'string'},
93 'test': {'type': 'string'},
94 'help': {'type': 'string'}
95 }
96 },
97 'docker': {
98 'type': 'object',
99 'additionalProperties': False,
100 'default': {},
101 }
102 }
103 }
104
105 properties = {'container': container_schema}
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py
--- a/lib/spack/spack/schema/container.py
+++ b/lib/spack/spack/schema/container.py
@@ -12,8 +12,7 @@
'type': 'string',
'enum': ['ubuntu:18.04',
'ubuntu:16.04',
- 'centos:7',
- 'centos:6']
+ 'centos:7']
},
'spack': {
'type': 'string',
| {"golden_diff": "diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py\n--- a/lib/spack/spack/schema/container.py\n+++ b/lib/spack/spack/schema/container.py\n@@ -12,8 +12,7 @@\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n- 'centos:7',\n- 'centos:6']\n+ 'centos:7']\n },\n 'spack': {\n 'type': 'string',\n", "issue": "CentOS 6 image doesn't build with clingo on Dockerhub\n### Steps to reproduce\r\n\r\nHas to do with failure on centos:6\r\n\r\n```\r\nStep 17/19 : RUN spack spec hdf5+mpi\r\n ---> Running in 8335d48ff53f\r\n==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.\r\n==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.\r\n==> Warning: the original concretizer is currently being used.\r\n Upgrade to \"clingo\" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0\r\n==> Error: cannot bootstrap the \"clingo\" Python module from spec \"clingo-bootstrap@spack+python %gcc target=x86_64\"\r\nInput spec\r\n--------------------------------\r\nhdf5+mpi\r\n\r\nConcretized\r\n--------------------------------\r\n==> Bootstrapping clingo from pre-built binaries\r\nThe command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3\r\n```\r\n\r\n---\r\n\r\nSo it bootstraps *during* concretization?\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"Schema for the 'container' subsection of Spack environments.\"\"\"\n\n_stages_from_dockerhub = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'os': {\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n 'centos:7',\n 'centos:6']\n },\n 'spack': {\n 'type': 'string',\n },\n },\n 'required': ['os', 'spack']\n}\n\n_custom_stages = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n },\n 'required': ['build', 'final']\n}\n\n#: List of packages for the schema below\n_list_of_packages = {\n 'type': 'array',\n 'items': {\n 'type': 'string'\n }\n}\n\n#: Schema for the container attribute included in Spack environments\ncontainer_schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n # The recipe formats that are currently supported by the command\n 'format': {\n 'type': 'string',\n 'enum': ['docker', 'singularity']\n },\n # Describes the base image to start from and the version\n # of Spack to be used\n 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},\n # Whether or not to strip installed binaries\n 'strip': {\n 'type': 'boolean',\n 'default': True\n },\n # Additional system packages that are needed at runtime\n 'os_packages': {\n 'type': 'object',\n 'properties': {\n 'command': {'type': 'string', 'enum': ['apt', 'yum']},\n 'update': {'type': 'boolean'},\n 'build': _list_of_packages,\n 'final': _list_of_packages\n },\n 'additionalProperties': False\n },\n # Add labels to the image\n 'labels': {\n 'type': 'object',\n },\n # Add a custom extra section at the bottom of a stage\n 'extra_instructions': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n }\n },\n # Reserved for properties that 
are specific to each format\n 'singularity': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n 'properties': {\n 'runscript': {'type': 'string'},\n 'startscript': {'type': 'string'},\n 'test': {'type': 'string'},\n 'help': {'type': 'string'}\n }\n },\n 'docker': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n }\n }\n}\n\nproperties = {'container': container_schema}\n", "path": "lib/spack/spack/schema/container.py"}], "after_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"Schema for the 'container' subsection of Spack environments.\"\"\"\n\n_stages_from_dockerhub = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'os': {\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n 'centos:7']\n },\n 'spack': {\n 'type': 'string',\n },\n },\n 'required': ['os', 'spack']\n}\n\n_custom_stages = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n },\n 'required': ['build', 'final']\n}\n\n#: List of packages for the schema below\n_list_of_packages = {\n 'type': 'array',\n 'items': {\n 'type': 'string'\n }\n}\n\n#: Schema for the container attribute included in Spack environments\ncontainer_schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n # The recipe formats that are currently supported by the command\n 'format': {\n 'type': 'string',\n 'enum': ['docker', 'singularity']\n },\n # Describes the base image to start from and the version\n # of Spack to be used\n 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},\n # Whether or not to strip installed binaries\n 'strip': {\n 'type': 'boolean',\n 'default': True\n },\n # Additional system packages that are needed at runtime\n 'os_packages': {\n 'type': 'object',\n 'properties': {\n 'command': {'type': 'string', 'enum': ['apt', 'yum']},\n 'update': {'type': 'boolean'},\n 'build': _list_of_packages,\n 'final': _list_of_packages\n },\n 'additionalProperties': False\n },\n # Add labels to the image\n 'labels': {\n 'type': 'object',\n },\n # Add a custom extra section at the bottom of a stage\n 'extra_instructions': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n }\n },\n # Reserved for properties that are specific to each format\n 'singularity': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n 'properties': {\n 'runscript': {'type': 'string'},\n 'startscript': {'type': 'string'},\n 'test': {'type': 'string'},\n 'help': {'type': 'string'}\n }\n },\n 'docker': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n }\n }\n}\n\nproperties = {'container': container_schema}\n", "path": "lib/spack/spack/schema/container.py"}]} | 1,444 | 125 |
gh_patches_debug_10429 | rasdani/github-patches | git_diff | safe-global__safe-config-service-1107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad logo URL when creating a new Safe App
**Describe the bug**
When inserting a new Safe App, `None` is added to the logo image URL instead of the `app_id`.
Re-uploading the image for the Safe App solves the problem.
**To Reproduce**
Steps to reproduce the behavior:
- Create a new Safe App.
- Check the path for the logo image is not correct (it includes `None` as ID).
**Expected behavior**
A correct Safe App `app_id` is added to the logo path instead of `None`.
**Environment (please complete the following information):**
- Staging and production.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/safe_apps/models.py`
Content:
```
1 import os
2 from enum import Enum
3 from typing import IO, Union
4
5 from django.contrib.postgres.fields import ArrayField
6 from django.core.exceptions import ValidationError
7 from django.core.files.images import get_image_dimensions
8 from django.core.validators import RegexValidator
9 from django.db import models
10
11 _HOSTNAME_VALIDATOR = RegexValidator(
12 r"^(https?:\/\/)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\/?$",
13 message="Enter a valid hostname (Without a resource path)",
14 code="invalid_hostname",
15 )
16
17
18 def safe_app_icon_path(instance: "SafeApp", filename: str) -> str:
19 _, file_extension = os.path.splitext(filename)
20 return f"safe_apps/{instance.app_id}/icon{file_extension}"
21
22
23 def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:
24 width, height = get_image_dimensions(image)
25 if not width or not height:
26 raise ValidationError(
27 f"Could not get image dimensions. Width={width}, Height={height}"
28 )
29 if width > 512 or height > 512:
30 raise ValidationError("Image width and height need to be at most 512 pixels")
31
32
33 class Provider(models.Model):
34 url = models.URLField(primary_key=True)
35 name = models.CharField(max_length=200)
36
37 def __str__(self) -> str:
38 return f"{self.name} | {self.url}"
39
40
41 class Client(models.Model):
42 url = models.CharField(
43 unique=True,
44 help_text="The domain URL client is hosted at",
45 # The maximum length of a full host name is 253 characters per RFC 1034
46 max_length=255,
47 validators=[_HOSTNAME_VALIDATOR],
48 )
49
50 def __str__(self) -> str:
51 return f"Client: {self.url}"
52
53
54 class SafeApp(models.Model):
55 class AccessControlPolicy(str, Enum):
56 NO_RESTRICTIONS = "NO_RESTRICTIONS"
57 DOMAIN_ALLOWLIST = "DOMAIN_ALLOWLIST"
58
59 app_id = models.BigAutoField(primary_key=True)
60 visible = models.BooleanField(
61 default=True
62 ) # True if this safe-app should be visible from the view. False otherwise
63 url = models.URLField()
64 name = models.CharField(max_length=200)
65 icon_url = models.ImageField(
66 validators=[validate_safe_app_icon_size],
67 upload_to=safe_app_icon_path,
68 max_length=255,
69 null=True,
70 blank=True,
71 )
72 description = models.CharField(max_length=200)
73 chain_ids = ArrayField(models.PositiveBigIntegerField())
74 provider = models.ForeignKey(
75 Provider, null=True, blank=True, on_delete=models.SET_NULL
76 )
77 exclusive_clients = models.ManyToManyField(
78 Client,
79 blank=True,
80 help_text="Clients that are only allowed to use this SafeApp",
81 )
82 developer_website = models.URLField(null=True, blank=True)
83
84 def get_access_control_type(self) -> AccessControlPolicy:
85 if self.exclusive_clients.exists():
86 return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST
87 return SafeApp.AccessControlPolicy.NO_RESTRICTIONS
88
89 def __str__(self) -> str:
90 return f"{self.name} | {self.url} | chain_ids={self.chain_ids}"
91
92
93 class Tag(models.Model):
94 name = models.CharField(max_length=255)
95 safe_apps = models.ManyToManyField(SafeApp, blank=True)
96
97 def __str__(self) -> str:
98 return f"Tag: {self.name}"
99
100
101 class Feature(models.Model):
102 # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled
103 safe_apps = models.ManyToManyField(
104 SafeApp, blank=True, help_text="Safe Apps where this feature is enabled."
105 )
106 key = models.CharField(
107 unique=True,
108 max_length=255,
109 help_text="The unique name/key that identifies this feature",
110 )
111
112 def __str__(self) -> str:
113 return f"Safe App Feature: {self.key}"
114
115
116 class SocialProfile(models.Model):
117 class Platform(models.TextChoices):
118 DISCORD = "DISCORD"
119 GITHUB = "GITHUB"
120 TWITTER = "TWITTER"
121
122 safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE)
123 platform = models.CharField(choices=Platform.choices, max_length=255)
124 url = models.URLField()
125
126 def __str__(self) -> str:
127 return f"Social Profile: {self.platform} | {self.url}"
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -1,4 +1,5 @@
import os
+import uuid
from enum import Enum
from typing import IO, Union
@@ -17,7 +18,7 @@
def safe_app_icon_path(instance: "SafeApp", filename: str) -> str:
_, file_extension = os.path.splitext(filename)
- return f"safe_apps/{instance.app_id}/icon{file_extension}"
+ return f"safe_apps/{uuid.uuid4()}/icon{file_extension}"
def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:
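With this change the generated path no longer depends on the instance at all, so it is stable whether or not the primary key has been assigned yet. A quick sanity check, assuming the patched module is importable; the `SimpleNamespace` stand-in is only for illustration:

```python
import re
from types import SimpleNamespace

from safe_apps.models import safe_app_icon_path

path = safe_app_icon_path(SimpleNamespace(app_id=None), "logo.png")
assert re.fullmatch(r"safe_apps/[0-9a-f-]{36}/icon\.png", path)
```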
| {"golden_diff": "diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -1,4 +1,5 @@\n import os\n+import uuid\n from enum import Enum\n from typing import IO, Union\n \n@@ -17,7 +18,7 @@\n \n def safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n- return f\"safe_apps/{instance.app_id}/icon{file_extension}\"\n+ return f\"safe_apps/{uuid.uuid4()}/icon{file_extension}\"\n \n \n def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n", "issue": "Bad logo URL when creating a new Safe App\n**Describe the bug**\r\nWhen inserting a new Safe App, `None` is added to the logo image URL instead of the `app_id`. \r\n\r\nRe-uploading the image for the Safe App solves the problem.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Create a new Safe App.\r\n- Check the path for the logo image is not correct (it includes `None` as ID).\r\n\r\n**Expected behavior**\r\nA correct Safe App `app_id` is added to the logo path instead of `None`.\r\n\r\n**Environment (please complete the following information):**\r\n - Staging and production.\r\n\n", "before_files": [{"content": "import os\nfrom enum import Enum\nfrom typing import IO, Union\n\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.images import get_image_dimensions\nfrom django.core.validators import RegexValidator\nfrom django.db import models\n\n_HOSTNAME_VALIDATOR = RegexValidator(\n r\"^(https?:\\/\\/)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\/?$\",\n message=\"Enter a valid hostname (Without a resource path)\",\n code=\"invalid_hostname\",\n)\n\n\ndef safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n return f\"safe_apps/{instance.app_id}/icon{file_extension}\"\n\n\ndef validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n width, height = get_image_dimensions(image)\n if not width or not height:\n raise ValidationError(\n f\"Could not get image dimensions. Width={width}, Height={height}\"\n )\n if width > 512 or height > 512:\n raise ValidationError(\"Image width and height need to be at most 512 pixels\")\n\n\nclass Provider(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url}\"\n\n\nclass Client(models.Model):\n url = models.CharField(\n unique=True,\n help_text=\"The domain URL client is hosted at\",\n # The maximum length of a full host name is 253 characters per RFC 1034\n max_length=255,\n validators=[_HOSTNAME_VALIDATOR],\n )\n\n def __str__(self) -> str:\n return f\"Client: {self.url}\"\n\n\nclass SafeApp(models.Model):\n class AccessControlPolicy(str, Enum):\n NO_RESTRICTIONS = \"NO_RESTRICTIONS\"\n DOMAIN_ALLOWLIST = \"DOMAIN_ALLOWLIST\"\n\n app_id = models.BigAutoField(primary_key=True)\n visible = models.BooleanField(\n default=True\n ) # True if this safe-app should be visible from the view. 
False otherwise\n url = models.URLField()\n name = models.CharField(max_length=200)\n icon_url = models.ImageField(\n validators=[validate_safe_app_icon_size],\n upload_to=safe_app_icon_path,\n max_length=255,\n null=True,\n blank=True,\n )\n description = models.CharField(max_length=200)\n chain_ids = ArrayField(models.PositiveBigIntegerField())\n provider = models.ForeignKey(\n Provider, null=True, blank=True, on_delete=models.SET_NULL\n )\n exclusive_clients = models.ManyToManyField(\n Client,\n blank=True,\n help_text=\"Clients that are only allowed to use this SafeApp\",\n )\n developer_website = models.URLField(null=True, blank=True)\n\n def get_access_control_type(self) -> AccessControlPolicy:\n if self.exclusive_clients.exists():\n return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST\n return SafeApp.AccessControlPolicy.NO_RESTRICTIONS\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url} | chain_ids={self.chain_ids}\"\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=255)\n safe_apps = models.ManyToManyField(SafeApp, blank=True)\n\n def __str__(self) -> str:\n return f\"Tag: {self.name}\"\n\n\nclass Feature(models.Model):\n # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled\n safe_apps = models.ManyToManyField(\n SafeApp, blank=True, help_text=\"Safe Apps where this feature is enabled.\"\n )\n key = models.CharField(\n unique=True,\n max_length=255,\n help_text=\"The unique name/key that identifies this feature\",\n )\n\n def __str__(self) -> str:\n return f\"Safe App Feature: {self.key}\"\n\n\nclass SocialProfile(models.Model):\n class Platform(models.TextChoices):\n DISCORD = \"DISCORD\"\n GITHUB = \"GITHUB\"\n TWITTER = \"TWITTER\"\n\n safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE)\n platform = models.CharField(choices=Platform.choices, max_length=255)\n url = models.URLField()\n\n def __str__(self) -> str:\n return f\"Social Profile: {self.platform} | {self.url}\"\n", "path": "src/safe_apps/models.py"}], "after_files": [{"content": "import os\nimport uuid\nfrom enum import Enum\nfrom typing import IO, Union\n\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.images import get_image_dimensions\nfrom django.core.validators import RegexValidator\nfrom django.db import models\n\n_HOSTNAME_VALIDATOR = RegexValidator(\n r\"^(https?:\\/\\/)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\/?$\",\n message=\"Enter a valid hostname (Without a resource path)\",\n code=\"invalid_hostname\",\n)\n\n\ndef safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n return f\"safe_apps/{uuid.uuid4()}/icon{file_extension}\"\n\n\ndef validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n width, height = get_image_dimensions(image)\n if not width or not height:\n raise ValidationError(\n f\"Could not get image dimensions. 
Width={width}, Height={height}\"\n )\n if width > 512 or height > 512:\n raise ValidationError(\"Image width and height need to be at most 512 pixels\")\n\n\nclass Provider(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url}\"\n\n\nclass Client(models.Model):\n url = models.CharField(\n unique=True,\n help_text=\"The domain URL client is hosted at\",\n # The maximum length of a full host name is 253 characters per RFC 1034\n max_length=255,\n validators=[_HOSTNAME_VALIDATOR],\n )\n\n def __str__(self) -> str:\n return f\"Client: {self.url}\"\n\n\nclass SafeApp(models.Model):\n class AccessControlPolicy(str, Enum):\n NO_RESTRICTIONS = \"NO_RESTRICTIONS\"\n DOMAIN_ALLOWLIST = \"DOMAIN_ALLOWLIST\"\n\n app_id = models.BigAutoField(primary_key=True)\n visible = models.BooleanField(\n default=True\n ) # True if this safe-app should be visible from the view. False otherwise\n url = models.URLField()\n name = models.CharField(max_length=200)\n icon_url = models.ImageField(\n validators=[validate_safe_app_icon_size],\n upload_to=safe_app_icon_path,\n max_length=255,\n null=True,\n blank=True,\n )\n description = models.CharField(max_length=200)\n chain_ids = ArrayField(models.PositiveBigIntegerField())\n provider = models.ForeignKey(\n Provider, null=True, blank=True, on_delete=models.SET_NULL\n )\n exclusive_clients = models.ManyToManyField(\n Client,\n blank=True,\n help_text=\"Clients that are only allowed to use this SafeApp\",\n )\n developer_website = models.URLField(null=True, blank=True)\n\n def get_access_control_type(self) -> AccessControlPolicy:\n if self.exclusive_clients.exists():\n return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST\n return SafeApp.AccessControlPolicy.NO_RESTRICTIONS\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url} | chain_ids={self.chain_ids}\"\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=255)\n safe_apps = models.ManyToManyField(SafeApp, blank=True)\n\n def __str__(self) -> str:\n return f\"Tag: {self.name}\"\n\n\nclass Feature(models.Model):\n # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled\n safe_apps = models.ManyToManyField(\n SafeApp, blank=True, help_text=\"Safe Apps where this feature is enabled.\"\n )\n key = models.CharField(\n unique=True,\n max_length=255,\n help_text=\"The unique name/key that identifies this feature\",\n )\n\n def __str__(self) -> str:\n return f\"Safe App Feature: {self.key}\"\n\n\nclass SocialProfile(models.Model):\n class Platform(models.TextChoices):\n DISCORD = \"DISCORD\"\n GITHUB = \"GITHUB\"\n TWITTER = \"TWITTER\"\n\n safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE)\n platform = models.CharField(choices=Platform.choices, max_length=255)\n url = models.URLField()\n\n def __str__(self) -> str:\n return f\"Social Profile: {self.platform} | {self.url}\"\n", "path": "src/safe_apps/models.py"}]} | 1,668 | 160 |
gh_patches_debug_31871 | rasdani/github-patches | git_diff | pyca__cryptography-2250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation of DSA signature references incorrect RFC
Documentation of DSA signatures (https://cryptography.io/en/latest/hazmat/primitives/asymmetric/dsa/#signing) references RFC 6979, which sounds strange. The same goes for the naming of the de/encoding functions at https://cryptography.io/en/latest/hazmat/primitives/asymmetric/utils/#cryptography.hazmat.primitives.asymmetric.utils.decode_rfc6979_signature
But that RFC doesn't actually define the {r,s} encoding. The actual ASN.1 module can be found in RFC 3279, which defines both the dsa-with-sha1 signature and Dss-Sig-Value, the {r,s} SEQUENCE.
The references to RFC 6979 are unfortunate, because that RFC defines deterministic DSA signatures, while cryptography.io exposes the randomised version via OpenSSL's `DSA_sign`.
--- END ISSUE ---
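A natural remedy, sketched below purely for illustration rather than as the project's actual patch, is to expose correctly named `decode_dss_signature`/`encode_dss_signature` functions (matching RFC 3279's `Dss-Sig-Value`) and keep the RFC 6979 names as thin deprecated aliases:

```python
# Hypothetical rename-with-deprecation shim; the real DER decoding is elided.
import warnings


def decode_dss_signature(signature):
    """Decode a DER-encoded Dss-Sig-Value (RFC 3279) into an (r, s) tuple."""
    raise NotImplementedError("placeholder for the existing ASN.1 decoding logic")


def decode_rfc6979_signature(signature):
    warnings.warn(
        "decode_rfc6979_signature is deprecated; use decode_dss_signature instead.",
        PendingDeprecationWarning,
        stacklevel=2,
    )
    return decode_dss_signature(signature)
```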
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/utils.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import struct
11 import sys
12 import warnings
13
14
15 DeprecatedIn09 = DeprecationWarning
16
17
18 def read_only_property(name):
19 return property(lambda self: getattr(self, name))
20
21
22 def register_interface(iface):
23 def register_decorator(klass):
24 verify_interface(iface, klass)
25 iface.register(klass)
26 return klass
27 return register_decorator
28
29
30 if hasattr(int, "from_bytes"):
31 int_from_bytes = int.from_bytes
32 else:
33 def int_from_bytes(data, byteorder, signed=False):
34 assert byteorder == 'big'
35 assert not signed
36
37 if len(data) % 4 != 0:
38 data = (b'\x00' * (4 - (len(data) % 4))) + data
39
40 result = 0
41
42 while len(data) > 0:
43 digit, = struct.unpack('>I', data[:4])
44 result = (result << 32) + digit
45 data = data[4:]
46
47 return result
48
49
50 def int_to_bytes(integer):
51 hex_string = '%x' % integer
52 n = len(hex_string)
53 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
54
55
56 class InterfaceNotImplemented(Exception):
57 pass
58
59
60 def verify_interface(iface, klass):
61 for method in iface.__abstractmethods__:
62 if not hasattr(klass, method):
63 raise InterfaceNotImplemented(
64 "{0} is missing a {1!r} method".format(klass, method)
65 )
66 if isinstance(getattr(iface, method), abc.abstractproperty):
67 # Can't properly verify these yet.
68 continue
69 spec = inspect.getargspec(getattr(iface, method))
70 actual = inspect.getargspec(getattr(klass, method))
71 if spec != actual:
72 raise InterfaceNotImplemented(
73 "{0}.{1}'s signature differs from the expected. Expected: "
74 "{2!r}. Received: {3!r}".format(
75 klass, method, spec, actual
76 )
77 )
78
79
80 if sys.version_info >= (2, 7):
81 def bit_length(x):
82 return x.bit_length()
83 else:
84 def bit_length(x):
85 return len(bin(x)) - (2 + (x <= 0))
86
87
88 class _DeprecatedValue(object):
89 def __init__(self, value, message, warning_class):
90 self.value = value
91 self.message = message
92 self.warning_class = warning_class
93
94
95 class _ModuleWithDeprecations(object):
96 def __init__(self, module):
97 self.__dict__["_module"] = module
98
99 def __getattr__(self, attr):
100 obj = getattr(self._module, attr)
101 if isinstance(obj, _DeprecatedValue):
102 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
103 obj = obj.value
104 return obj
105
106 def __setattr__(self, attr, value):
107 setattr(self._module, attr, value)
108
109 def __dir__(self):
110 return ["_module"] + dir(self._module)
111
112
113 def deprecated(value, module_name, message, warning_class):
114 module = sys.modules[module_name]
115 if not isinstance(module, _ModuleWithDeprecations):
116 sys.modules[module_name] = module = _ModuleWithDeprecations(module)
117 return _DeprecatedValue(value, message, warning_class)
118
```
Path: `src/cryptography/hazmat/primitives/asymmetric/utils.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from pyasn1.codec.der import decoder, encoder
8 from pyasn1.error import PyAsn1Error
9 from pyasn1.type import namedtype, univ
10
11 import six
12
13
14 class _DSSSigValue(univ.Sequence):
15 componentType = namedtype.NamedTypes(
16 namedtype.NamedType('r', univ.Integer()),
17 namedtype.NamedType('s', univ.Integer())
18 )
19
20
21 def decode_rfc6979_signature(signature):
22 try:
23 data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())
24 except PyAsn1Error:
25 raise ValueError("Invalid signature data. Unable to decode ASN.1")
26
27 if remaining:
28 raise ValueError(
29 "The signature contains bytes after the end of the ASN.1 sequence."
30 )
31
32 r = int(data.getComponentByName('r'))
33 s = int(data.getComponentByName('s'))
34 return (r, s)
35
36
37 def encode_rfc6979_signature(r, s):
38 if (
39 not isinstance(r, six.integer_types) or
40 not isinstance(s, six.integer_types)
41 ):
42 raise ValueError("Both r and s must be integers")
43
44 sig = _DSSSigValue()
45 sig.setComponentByName('r', r)
46 sig.setComponentByName('s', s)
47 return encoder.encode(sig)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/asymmetric/utils.py b/src/cryptography/hazmat/primitives/asymmetric/utils.py
--- a/src/cryptography/hazmat/primitives/asymmetric/utils.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/utils.py
@@ -4,12 +4,16 @@
from __future__ import absolute_import, division, print_function
+import warnings
+
from pyasn1.codec.der import decoder, encoder
from pyasn1.error import PyAsn1Error
from pyasn1.type import namedtype, univ
import six
+from cryptography import utils
+
class _DSSSigValue(univ.Sequence):
componentType = namedtype.NamedTypes(
@@ -19,6 +23,17 @@
def decode_rfc6979_signature(signature):
+ warnings.warn(
+ "decode_rfc6979_signature is deprecated and will "
+ "be removed in a future version, use decode_dss_signature instead "
+ "instead.",
+ utils.DeprecatedIn10,
+ stacklevel=2
+ )
+ return decode_dss_signature(signature)
+
+
+def decode_dss_signature(signature):
try:
data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())
except PyAsn1Error:
@@ -35,6 +50,17 @@
def encode_rfc6979_signature(r, s):
+ warnings.warn(
+ "encode_rfc6979_signature is deprecated and will "
+ "be removed in a future version, use encode_dss_signature instead "
+ "instead.",
+ utils.DeprecatedIn10,
+ stacklevel=2
+ )
+ return encode_dss_signature(r, s)
+
+
+def encode_dss_signature(r, s):
if (
not isinstance(r, six.integer_types) or
not isinstance(s, six.integer_types)
diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -13,6 +13,7 @@
DeprecatedIn09 = DeprecationWarning
+DeprecatedIn10 = PendingDeprecationWarning
def read_only_property(name):
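After this patch, downstream code can migrate to the new names while the old ones keep working behind a `PendingDeprecationWarning`. A short usage sketch, assuming the patched package is installed; the integer values are arbitrary:

```python
from cryptography.hazmat.primitives.asymmetric.utils import (
    decode_dss_signature,
    encode_dss_signature,
)

der = encode_dss_signature(12345, 67890)        # DER-encoded Dss-Sig-Value bytes
assert decode_dss_signature(der) == (12345, 67890)
```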
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/utils.py b/src/cryptography/hazmat/primitives/asymmetric/utils.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/utils.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/utils.py\n@@ -4,12 +4,16 @@\n \n from __future__ import absolute_import, division, print_function\n \n+import warnings\n+\n from pyasn1.codec.der import decoder, encoder\n from pyasn1.error import PyAsn1Error\n from pyasn1.type import namedtype, univ\n \n import six\n \n+from cryptography import utils\n+\n \n class _DSSSigValue(univ.Sequence):\n componentType = namedtype.NamedTypes(\n@@ -19,6 +23,17 @@\n \n \n def decode_rfc6979_signature(signature):\n+ warnings.warn(\n+ \"decode_rfc6979_signature is deprecated and will \"\n+ \"be removed in a future version, use decode_dss_signature instead \"\n+ \"instead.\",\n+ utils.DeprecatedIn10,\n+ stacklevel=2\n+ )\n+ return decode_dss_signature(signature)\n+\n+\n+def decode_dss_signature(signature):\n try:\n data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())\n except PyAsn1Error:\n@@ -35,6 +50,17 @@\n \n \n def encode_rfc6979_signature(r, s):\n+ warnings.warn(\n+ \"encode_rfc6979_signature is deprecated and will \"\n+ \"be removed in a future version, use encode_dss_signature instead \"\n+ \"instead.\",\n+ utils.DeprecatedIn10,\n+ stacklevel=2\n+ )\n+ return encode_dss_signature(r, s)\n+\n+\n+def encode_dss_signature(r, s):\n if (\n not isinstance(r, six.integer_types) or\n not isinstance(s, six.integer_types)\ndiff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -13,6 +13,7 @@\n \n \n DeprecatedIn09 = DeprecationWarning\n+DeprecatedIn10 = PendingDeprecationWarning\n \n \n def read_only_property(name):\n", "issue": "Documentation of DSA signature references incorrect RFC\nDocumentation of DSA signatures (https://cryptography.io/en/latest/hazmat/primitives/asymmetric/dsa/#signing) references RFC 6979 which sounds strange. Same for the naming of de/encoding functions at https://cryptography.io/en/latest/hazmat/primitives/asymmetric/utils/#cryptography.hazmat.primitives.asymmetric.utils.decode_rfc6979_signature\n\nBut that RFC doesn't actually define the {r,s} encoding. The actual asn1 module can be found in RFC 3279 which defines both dsa-with-sha1 signature and Dss-Sig-Value which is the {r,s} sequence.\n\nThe references to RFC 6979 are actually unfortunate, because it defines deterministic DSA signatures, while cryptography.io exposes the randomised version using openssl's `DSA_sign`.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\nDeprecatedIn09 = DeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer):\n hex_string = '%x' % integer\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n spec = inspect.getargspec(getattr(iface, method))\n actual = inspect.getargspec(getattr(klass, method))\n if spec != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. Received: {3!r}\".format(\n klass, method, spec, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom pyasn1.codec.der import decoder, encoder\nfrom pyasn1.error import PyAsn1Error\nfrom pyasn1.type import namedtype, univ\n\nimport six\n\n\nclass _DSSSigValue(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('r', univ.Integer()),\n namedtype.NamedType('s', univ.Integer())\n )\n\n\ndef decode_rfc6979_signature(signature):\n try:\n data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())\n except PyAsn1Error:\n raise ValueError(\"Invalid signature data. Unable to decode ASN.1\")\n\n if remaining:\n raise ValueError(\n \"The signature contains bytes after the end of the ASN.1 sequence.\"\n )\n\n r = int(data.getComponentByName('r'))\n s = int(data.getComponentByName('s'))\n return (r, s)\n\n\ndef encode_rfc6979_signature(r, s):\n if (\n not isinstance(r, six.integer_types) or\n not isinstance(s, six.integer_types)\n ):\n raise ValueError(\"Both r and s must be integers\")\n\n sig = _DSSSigValue()\n sig.setComponentByName('r', r)\n sig.setComponentByName('s', s)\n return encoder.encode(sig)\n", "path": "src/cryptography/hazmat/primitives/asymmetric/utils.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\nDeprecatedIn09 = DeprecationWarning\nDeprecatedIn10 = PendingDeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer):\n hex_string = '%x' % integer\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n spec = inspect.getargspec(getattr(iface, method))\n actual = inspect.getargspec(getattr(klass, method))\n if spec != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, spec, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport warnings\n\nfrom pyasn1.codec.der import decoder, encoder\nfrom pyasn1.error import PyAsn1Error\nfrom pyasn1.type import namedtype, univ\n\nimport six\n\nfrom cryptography import utils\n\n\nclass _DSSSigValue(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('r', univ.Integer()),\n namedtype.NamedType('s', univ.Integer())\n )\n\n\ndef decode_rfc6979_signature(signature):\n warnings.warn(\n \"decode_rfc6979_signature is deprecated and will \"\n \"be removed in a future version, use decode_dss_signature instead \"\n \"instead.\",\n utils.DeprecatedIn10,\n stacklevel=2\n )\n return decode_dss_signature(signature)\n\n\ndef decode_dss_signature(signature):\n try:\n data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())\n except PyAsn1Error:\n raise ValueError(\"Invalid signature data. Unable to decode ASN.1\")\n\n if remaining:\n raise ValueError(\n \"The signature contains bytes after the end of the ASN.1 sequence.\"\n )\n\n r = int(data.getComponentByName('r'))\n s = int(data.getComponentByName('s'))\n return (r, s)\n\n\ndef encode_rfc6979_signature(r, s):\n warnings.warn(\n \"encode_rfc6979_signature is deprecated and will \"\n \"be removed in a future version, use encode_dss_signature instead \"\n \"instead.\",\n utils.DeprecatedIn10,\n stacklevel=2\n )\n return encode_dss_signature(r, s)\n\n\ndef encode_dss_signature(r, s):\n if (\n not isinstance(r, six.integer_types) or\n not isinstance(s, six.integer_types)\n ):\n raise ValueError(\"Both r and s must be integers\")\n\n sig = _DSSSigValue()\n sig.setComponentByName('r', r)\n sig.setComponentByName('s', s)\n return encoder.encode(sig)\n", "path": "src/cryptography/hazmat/primitives/asymmetric/utils.py"}]} | 1,929 | 500 |
gh_patches_debug_43468 | rasdani/github-patches | git_diff | beetbox__beets-1176 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ftintitle: be less verbose during import
During an import with the ftintitle plugin enabled, the output is very verbose. This can cause the user to miss any prompts that beets shows, and it is somewhat annoying when doing a large import.
As seen here:

My suggestion would be to add a configuration option that makes ftintitle less verbose, or to make it quiet by default and add a verbosity configuration option.
--- END ISSUE ---
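One common way to achieve this, sketched below only to illustrate the idea, is to route the per-track messages through beets' logger and make the level a parameter, so the automatic import stage can log at DEBUG (visible only in verbose mode) while the explicit command still reports at INFO. The helper name and message format here are made up:

```python
# Hypothetical sketch: parameterise the verbosity instead of always printing.
import logging

log = logging.getLogger('beets')


def report_change(item_path, old, new, loglevel=logging.DEBUG):
    log.log(loglevel, u'{0}: artist: {1} -> {2}'.format(item_path, old, new))
```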
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/ftintitle.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2013, Verrus, <github.com/Verrus/beets-plugin-featInTitle>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Moves "featured" artists to the title from the artist field.
16 """
17 from beets import plugins
18 from beets import ui
19 from beets.util import displayable_path
20 from beets import config
21 import logging
22 import re
23
24 log = logging.getLogger('beets')
25
26
27 def split_on_feat(artist):
28 """Given an artist string, split the "main" artist from any artist
29 on the right-hand side of a string like "feat". Return the main
30 artist, which is always a string, and the featuring artist, which
31 may be a string or None if none is present.
32 """
33 # split on the first "feat".
34 regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)
35 parts = [s.strip() for s in regex.split(artist, 1)]
36 if len(parts) == 1:
37 return parts[0], None
38 else:
39 return tuple(parts)
40
41
42 def contains_feat(title):
43 """Determine whether the title contains a "featured" marker.
44 """
45 return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
46
47
48 def update_metadata(item, feat_part, drop_feat):
49 """Choose how to add new artists to the title and set the new
50 metadata. Also, print out messages about any changes that are made.
51 If `drop_feat` is set, then do not add the artist to the title; just
52 remove it from the artist field.
53 """
54 # In all cases, update the artist fields.
55 ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))
56 item.artist = item.albumartist
57 if item.artist_sort:
58 # Just strip the featured artist from the sort name.
59 item.artist_sort, _ = split_on_feat(item.artist_sort)
60
61 # Only update the title if it does not already contain a featured
62 # artist and if we do not drop featuring information.
63 if not drop_feat and not contains_feat(item.title):
64 new_title = u"{0} feat. {1}".format(item.title, feat_part)
65 ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))
66 item.title = new_title
67
68
69 def ft_in_title(item, drop_feat):
70 """Look for featured artists in the item's artist fields and move
71 them to the title.
72 """
73 artist = item.artist.strip()
74 albumartist = item.albumartist.strip()
75
76 # Check whether there is a featured artist on this track and the
77 # artist field does not exactly match the album artist field. In
78 # that case, we attempt to move the featured artist to the title.
79 _, featured = split_on_feat(artist)
80 if featured and albumartist != artist and albumartist:
81 ui.print_(displayable_path(item.path))
82 feat_part = None
83
84 # Look for the album artist in the artist field. If it's not
85 # present, give up.
86 albumartist_split = artist.split(albumartist)
87 if len(albumartist_split) <= 1:
88 ui.print_('album artist not present in artist')
89
90 # If the last element of the split (the right-hand side of the
91 # album artist) is nonempty, then it probably contains the
92 # featured artist.
93 elif albumartist_split[-1] != '':
94 # Extract the featured artist from the right-hand side.
95 _, feat_part = split_on_feat(albumartist_split[-1])
96
97 # Otherwise, if there's nothing on the right-hand side, look for a
98 # featuring artist on the left-hand side.
99 else:
100 lhs, rhs = split_on_feat(albumartist_split[0])
101 if rhs:
102 feat_part = lhs
103
104 # If we have a featuring artist, move it to the title.
105 if feat_part:
106 update_metadata(item, feat_part, drop_feat)
107 else:
108 ui.print_(u'no featuring artists found')
109
110 ui.print_()
111
112
113 class FtInTitlePlugin(plugins.BeetsPlugin):
114 def __init__(self):
115 super(FtInTitlePlugin, self).__init__()
116
117 self.config.add({
118 'auto': True,
119 'drop': False,
120 })
121
122 self._command = ui.Subcommand(
123 'ftintitle',
124 help='move featured artists to the title field')
125
126 self._command.parser.add_option(
127 '-d', '--drop', dest='drop',
128 action='store_true', default=False,
129 help='drop featuring from artists and ignore title update')
130
131 if self.config['auto']:
132 self.import_stages = [self.imported]
133
134 def commands(self):
135
136 def func(lib, opts, args):
137 self.config.set_args(opts)
138 drop_feat = self.config['drop'].get(bool)
139 write = config['import']['write'].get(bool)
140
141 for item in lib.items(ui.decargs(args)):
142 ft_in_title(item, drop_feat)
143 item.store()
144 if write:
145 item.try_write()
146
147 self._command.func = func
148 return [self._command]
149
150 def imported(self, session, task):
151 """Import hook for moving featuring artist automatically.
152 """
153 drop_feat = self.config['drop'].get(bool)
154
155 for item in task.imported_items():
156 ft_in_title(item, drop_feat)
157 item.store()
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/ftintitle.py b/beetsplug/ftintitle.py
--- a/beetsplug/ftintitle.py
+++ b/beetsplug/ftintitle.py
@@ -45,14 +45,15 @@
return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
-def update_metadata(item, feat_part, drop_feat):
+def update_metadata(item, feat_part, drop_feat, loglevel=logging.DEBUG):
"""Choose how to add new artists to the title and set the new
metadata. Also, print out messages about any changes that are made.
If `drop_feat` is set, then do not add the artist to the title; just
remove it from the artist field.
"""
# In all cases, update the artist fields.
- ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))
+ log.log(loglevel, u'artist: {0} -> {1}'.format(
+ item.artist, item.albumartist))
item.artist = item.albumartist
if item.artist_sort:
# Just strip the featured artist from the sort name.
@@ -62,11 +63,11 @@
# artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title):
new_title = u"{0} feat. {1}".format(item.title, feat_part)
- ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))
+ log.log(loglevel, u'title: {0} -> {1}'.format(item.title, new_title))
item.title = new_title
-def ft_in_title(item, drop_feat):
+def ft_in_title(item, drop_feat, loglevel=logging.DEBUG):
"""Look for featured artists in the item's artist fields and move
them to the title.
"""
@@ -78,14 +79,14 @@
# that case, we attempt to move the featured artist to the title.
_, featured = split_on_feat(artist)
if featured and albumartist != artist and albumartist:
- ui.print_(displayable_path(item.path))
+ log.log(loglevel, displayable_path(item.path))
feat_part = None
# Look for the album artist in the artist field. If it's not
# present, give up.
albumartist_split = artist.split(albumartist)
if len(albumartist_split) <= 1:
- ui.print_('album artist not present in artist')
+ log.log(loglevel, 'album artist not present in artist')
# If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the
@@ -103,11 +104,9 @@
# If we have a featuring artist, move it to the title.
if feat_part:
- update_metadata(item, feat_part, drop_feat)
+ update_metadata(item, feat_part, drop_feat, loglevel)
else:
- ui.print_(u'no featuring artists found')
-
- ui.print_()
+ log.log(loglevel, u'no featuring artists found')
class FtInTitlePlugin(plugins.BeetsPlugin):
@@ -139,7 +138,7 @@
write = config['import']['write'].get(bool)
for item in lib.items(ui.decargs(args)):
- ft_in_title(item, drop_feat)
+ ft_in_title(item, drop_feat, logging.INFO)
item.store()
if write:
item.try_write()
@@ -153,5 +152,5 @@
drop_feat = self.config['drop'].get(bool)
for item in task.imported_items():
- ft_in_title(item, drop_feat)
+ ft_in_title(item, drop_feat, logging.DEBUG)
item.store()
| {"golden_diff": "diff --git a/beetsplug/ftintitle.py b/beetsplug/ftintitle.py\n--- a/beetsplug/ftintitle.py\n+++ b/beetsplug/ftintitle.py\n@@ -45,14 +45,15 @@\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))\n \n \n-def update_metadata(item, feat_part, drop_feat):\n+def update_metadata(item, feat_part, drop_feat, loglevel=logging.DEBUG):\n \"\"\"Choose how to add new artists to the title and set the new\n metadata. Also, print out messages about any changes that are made.\n If `drop_feat` is set, then do not add the artist to the title; just\n remove it from the artist field.\n \"\"\"\n # In all cases, update the artist fields.\n- ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))\n+ log.log(loglevel, u'artist: {0} -> {1}'.format(\n+ item.artist, item.albumartist))\n item.artist = item.albumartist\n if item.artist_sort:\n # Just strip the featured artist from the sort name.\n@@ -62,11 +63,11 @@\n # artist and if we do not drop featuring information.\n if not drop_feat and not contains_feat(item.title):\n new_title = u\"{0} feat. {1}\".format(item.title, feat_part)\n- ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))\n+ log.log(loglevel, u'title: {0} -> {1}'.format(item.title, new_title))\n item.title = new_title\n \n \n-def ft_in_title(item, drop_feat):\n+def ft_in_title(item, drop_feat, loglevel=logging.DEBUG):\n \"\"\"Look for featured artists in the item's artist fields and move\n them to the title.\n \"\"\"\n@@ -78,14 +79,14 @@\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n- ui.print_(displayable_path(item.path))\n+ log.log(loglevel, displayable_path(item.path))\n feat_part = None\n \n # Look for the album artist in the artist field. If it's not\n # present, give up.\n albumartist_split = artist.split(albumartist)\n if len(albumartist_split) <= 1:\n- ui.print_('album artist not present in artist')\n+ log.log(loglevel, 'album artist not present in artist')\n \n # If the last element of the split (the right-hand side of the\n # album artist) is nonempty, then it probably contains the\n@@ -103,11 +104,9 @@\n \n # If we have a featuring artist, move it to the title.\n if feat_part:\n- update_metadata(item, feat_part, drop_feat)\n+ update_metadata(item, feat_part, drop_feat, loglevel)\n else:\n- ui.print_(u'no featuring artists found')\n-\n- ui.print_()\n+ log.log(loglevel, u'no featuring artists found')\n \n \n class FtInTitlePlugin(plugins.BeetsPlugin):\n@@ -139,7 +138,7 @@\n write = config['import']['write'].get(bool)\n \n for item in lib.items(ui.decargs(args)):\n- ft_in_title(item, drop_feat)\n+ ft_in_title(item, drop_feat, logging.INFO)\n item.store()\n if write:\n item.try_write()\n@@ -153,5 +152,5 @@\n drop_feat = self.config['drop'].get(bool)\n \n for item in task.imported_items():\n- ft_in_title(item, drop_feat)\n+ ft_in_title(item, drop_feat, logging.DEBUG)\n item.store()\n", "issue": "ftintitle: be less verbose during import\nDuring an import, with the ftintitle enabled it seems to have very verbose output. This causes the user to not notice any prompts that beets has, and is somewhat annoying when doing a large import.\n\nAs seen here:\n\n\nMy suggestion would be to add a configuration option that would make ftintitle be less verbose. 
Or, making it not be verbose by default and adding a verbosity configuration option.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Verrus, <github.com/Verrus/beets-plugin-featInTitle>\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Moves \"featured\" artists to the title from the artist field.\n\"\"\"\nfrom beets import plugins\nfrom beets import ui\nfrom beets.util import displayable_path\nfrom beets import config\nimport logging\nimport re\n\nlog = logging.getLogger('beets')\n\n\ndef split_on_feat(artist):\n \"\"\"Given an artist string, split the \"main\" artist from any artist\n on the right-hand side of a string like \"feat\". Return the main\n artist, which is always a string, and the featuring artist, which\n may be a string or None if none is present.\n \"\"\"\n # split on the first \"feat\".\n regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)\n parts = [s.strip() for s in regex.split(artist, 1)]\n if len(parts) == 1:\n return parts[0], None\n else:\n return tuple(parts)\n\n\ndef contains_feat(title):\n \"\"\"Determine whether the title contains a \"featured\" marker.\n \"\"\"\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))\n\n\ndef update_metadata(item, feat_part, drop_feat):\n \"\"\"Choose how to add new artists to the title and set the new\n metadata. Also, print out messages about any changes that are made.\n If `drop_feat` is set, then do not add the artist to the title; just\n remove it from the artist field.\n \"\"\"\n # In all cases, update the artist fields.\n ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))\n item.artist = item.albumartist\n if item.artist_sort:\n # Just strip the featured artist from the sort name.\n item.artist_sort, _ = split_on_feat(item.artist_sort)\n\n # Only update the title if it does not already contain a featured\n # artist and if we do not drop featuring information.\n if not drop_feat and not contains_feat(item.title):\n new_title = u\"{0} feat. {1}\".format(item.title, feat_part)\n ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))\n item.title = new_title\n\n\ndef ft_in_title(item, drop_feat):\n \"\"\"Look for featured artists in the item's artist fields and move\n them to the title.\n \"\"\"\n artist = item.artist.strip()\n albumartist = item.albumartist.strip()\n\n # Check whether there is a featured artist on this track and the\n # artist field does not exactly match the album artist field. In\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n ui.print_(displayable_path(item.path))\n feat_part = None\n\n # Look for the album artist in the artist field. 
If it's not\n # present, give up.\n albumartist_split = artist.split(albumartist)\n if len(albumartist_split) <= 1:\n ui.print_('album artist not present in artist')\n\n # If the last element of the split (the right-hand side of the\n # album artist) is nonempty, then it probably contains the\n # featured artist.\n elif albumartist_split[-1] != '':\n # Extract the featured artist from the right-hand side.\n _, feat_part = split_on_feat(albumartist_split[-1])\n\n # Otherwise, if there's nothing on the right-hand side, look for a\n # featuring artist on the left-hand side.\n else:\n lhs, rhs = split_on_feat(albumartist_split[0])\n if rhs:\n feat_part = lhs\n\n # If we have a featuring artist, move it to the title.\n if feat_part:\n update_metadata(item, feat_part, drop_feat)\n else:\n ui.print_(u'no featuring artists found')\n\n ui.print_()\n\n\nclass FtInTitlePlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(FtInTitlePlugin, self).__init__()\n\n self.config.add({\n 'auto': True,\n 'drop': False,\n })\n\n self._command = ui.Subcommand(\n 'ftintitle',\n help='move featured artists to the title field')\n\n self._command.parser.add_option(\n '-d', '--drop', dest='drop',\n action='store_true', default=False,\n help='drop featuring from artists and ignore title update')\n\n if self.config['auto']:\n self.import_stages = [self.imported]\n\n def commands(self):\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n drop_feat = self.config['drop'].get(bool)\n write = config['import']['write'].get(bool)\n\n for item in lib.items(ui.decargs(args)):\n ft_in_title(item, drop_feat)\n item.store()\n if write:\n item.try_write()\n\n self._command.func = func\n return [self._command]\n\n def imported(self, session, task):\n \"\"\"Import hook for moving featuring artist automatically.\n \"\"\"\n drop_feat = self.config['drop'].get(bool)\n\n for item in task.imported_items():\n ft_in_title(item, drop_feat)\n item.store()\n", "path": "beetsplug/ftintitle.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Verrus, <github.com/Verrus/beets-plugin-featInTitle>\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Moves \"featured\" artists to the title from the artist field.\n\"\"\"\nfrom beets import plugins\nfrom beets import ui\nfrom beets.util import displayable_path\nfrom beets import config\nimport logging\nimport re\n\nlog = logging.getLogger('beets')\n\n\ndef split_on_feat(artist):\n \"\"\"Given an artist string, split the \"main\" artist from any artist\n on the right-hand side of a string like \"feat\". 
Return the main\n artist, which is always a string, and the featuring artist, which\n may be a string or None if none is present.\n \"\"\"\n # split on the first \"feat\".\n regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)\n parts = [s.strip() for s in regex.split(artist, 1)]\n if len(parts) == 1:\n return parts[0], None\n else:\n return tuple(parts)\n\n\ndef contains_feat(title):\n \"\"\"Determine whether the title contains a \"featured\" marker.\n \"\"\"\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))\n\n\ndef update_metadata(item, feat_part, drop_feat, loglevel=logging.DEBUG):\n \"\"\"Choose how to add new artists to the title and set the new\n metadata. Also, print out messages about any changes that are made.\n If `drop_feat` is set, then do not add the artist to the title; just\n remove it from the artist field.\n \"\"\"\n # In all cases, update the artist fields.\n log.log(loglevel, u'artist: {0} -> {1}'.format(\n item.artist, item.albumartist))\n item.artist = item.albumartist\n if item.artist_sort:\n # Just strip the featured artist from the sort name.\n item.artist_sort, _ = split_on_feat(item.artist_sort)\n\n # Only update the title if it does not already contain a featured\n # artist and if we do not drop featuring information.\n if not drop_feat and not contains_feat(item.title):\n new_title = u\"{0} feat. {1}\".format(item.title, feat_part)\n log.log(loglevel, u'title: {0} -> {1}'.format(item.title, new_title))\n item.title = new_title\n\n\ndef ft_in_title(item, drop_feat, loglevel=logging.DEBUG):\n \"\"\"Look for featured artists in the item's artist fields and move\n them to the title.\n \"\"\"\n artist = item.artist.strip()\n albumartist = item.albumartist.strip()\n\n # Check whether there is a featured artist on this track and the\n # artist field does not exactly match the album artist field. In\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n log.log(loglevel, displayable_path(item.path))\n feat_part = None\n\n # Look for the album artist in the artist field. 
If it's not\n # present, give up.\n albumartist_split = artist.split(albumartist)\n if len(albumartist_split) <= 1:\n log.log(loglevel, 'album artist not present in artist')\n\n # If the last element of the split (the right-hand side of the\n # album artist) is nonempty, then it probably contains the\n # featured artist.\n elif albumartist_split[-1] != '':\n # Extract the featured artist from the right-hand side.\n _, feat_part = split_on_feat(albumartist_split[-1])\n\n # Otherwise, if there's nothing on the right-hand side, look for a\n # featuring artist on the left-hand side.\n else:\n lhs, rhs = split_on_feat(albumartist_split[0])\n if rhs:\n feat_part = lhs\n\n # If we have a featuring artist, move it to the title.\n if feat_part:\n update_metadata(item, feat_part, drop_feat, loglevel)\n else:\n log.log(loglevel, u'no featuring artists found')\n\n\nclass FtInTitlePlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(FtInTitlePlugin, self).__init__()\n\n self.config.add({\n 'auto': True,\n 'drop': False,\n })\n\n self._command = ui.Subcommand(\n 'ftintitle',\n help='move featured artists to the title field')\n\n self._command.parser.add_option(\n '-d', '--drop', dest='drop',\n action='store_true', default=False,\n help='drop featuring from artists and ignore title update')\n\n if self.config['auto']:\n self.import_stages = [self.imported]\n\n def commands(self):\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n drop_feat = self.config['drop'].get(bool)\n write = config['import']['write'].get(bool)\n\n for item in lib.items(ui.decargs(args)):\n ft_in_title(item, drop_feat, logging.INFO)\n item.store()\n if write:\n item.try_write()\n\n self._command.func = func\n return [self._command]\n\n def imported(self, session, task):\n \"\"\"Import hook for moving featuring artist automatically.\n \"\"\"\n drop_feat = self.config['drop'].get(bool)\n\n for item in task.imported_items():\n ft_in_title(item, drop_feat, logging.DEBUG)\n item.store()\n", "path": "beetsplug/ftintitle.py"}]} | 2,037 | 851 |
gh_patches_debug_12476 | rasdani/github-patches | git_diff | bokeh__bokeh-9068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Development guide missing `test` argument for conda install and pytest install failure on windows
### Missing `test` argument
The current [development guide](https://bokeh.pydata.org/en/latest/docs/dev_guide/setup.html#conda-packages) is missing the `test` argument for windows setups. 
As for OSX / Linux (bash / sh), it is:
- ```conda install `python scripts/deps.py build run test```.
As for windows, the `test` argument is missing for the `deps.py`:
- ```conda install $(python scripts/deps.py build run).split() | where {$_}```
- ```for /F "delims=" %i in ('python scripts\deps.py build run') do (conda install %i)```
Instead, it should be:
- ```conda install $(python scripts/deps.py build run test).split() | where {$_}```
- ```for /F "delims=" %i in ('python scripts\deps.py build run test') do (conda install %i)```
### `pytest<5.0.0` fails
In addition, running ```for /F "delims=" %i in ('python scripts\deps.py build run test') do (conda install %i)``` fails with error `System can't find given file.` which is due to `pytest<5.0.0`. Providing double quotes actually fixes the issue -> `conda install "pytest<5.0.0"`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/deps.py`
Content:
```
1 import sys
2 import jinja2
3 import yaml
4
5
6 def load_setup_py_data():
7 import os
8 import setuptools
9 os.environ['CONDA_BUILD_STATE'] = 'RENDER'
10 data = {}
11
12 def _setup(**kw): data.update(kw)
13 setuptools.setup = _setup
14 return data
15
16 meta_src = jinja2.Template(open("conda.recipe/meta.yaml").read())
17 meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),
18 Loader=yaml.FullLoader)
19
20 section = {
21 "build" : meta_src["requirements"]["build"],
22 "deploy" : meta_src["extra"]["deploy"],
23 "run" : meta_src["requirements"]["run"],
24 "test" : meta_src["test"]["requires"],
25 }
26
27 spec = []
28 for name in sys.argv[1:]:
29 spec += section[name]
30
31 # bare python unpins python version causing upgrade to latest
32 if 'python' in spec: spec.remove('python')
33
34 deps = ""
35 deps += " ".join(s for s in spec)
36 deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec
37 deps = deps.replace(' <', '<')
38 deps = deps.replace(' [unix]', ' ')
39
40 print(deps)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/deps.py b/scripts/deps.py
--- a/scripts/deps.py
+++ b/scripts/deps.py
@@ -1,4 +1,5 @@
import sys
+import platform
import jinja2
import yaml
@@ -31,6 +32,10 @@
# bare python unpins python version causing upgrade to latest
if 'python' in spec: spec.remove('python')
+# add double quotes to specs for windows, fixes #9065
+if "windows" in platform.platform().lower():
+ spec = ['"{}"'.format(s) for s in spec]
+
deps = ""
deps += " ".join(s for s in spec)
deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec
| {"golden_diff": "diff --git a/scripts/deps.py b/scripts/deps.py\n--- a/scripts/deps.py\n+++ b/scripts/deps.py\n@@ -1,4 +1,5 @@\n import sys\n+import platform\n import jinja2\n import yaml\n \n@@ -31,6 +32,10 @@\n # bare python unpins python version causing upgrade to latest\n if 'python' in spec: spec.remove('python')\n \n+# add double quotes to specs for windows, fixes #9065\n+if \"windows\" in platform.platform().lower():\n+ spec = ['\"{}\"'.format(s) for s in spec]\n+\n deps = \"\"\n deps += \" \".join(s for s in spec)\n deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\n", "issue": "[BUG] Development guide missing `test` argument for conda install and pytest install failure on windows\n### Missing `test` argument\r\n\r\nThe current [developement guide](https://bokeh.pydata.org/en/latest/docs/dev_guide/setup.html#conda-packages) is missing the `test` argument for windows setups. \r\n\r\nAs for OSX / Linux (bash / sh), it is: \r\n- ```conda install `python scripts/deps.py build run test```.\r\n\r\nAs for windows, the `test` argument is missing for the `deps.py`:\r\n- ```conda install $(python scripts/deps.py build run).split() | where {$_}```\r\n- ```for /F \"delims=\" %i in ('python scripts\\deps.py build run') do (conda install %i)```\r\n\r\nInstead, it should be:\r\n- ```conda install $(python scripts/deps.py build run test).split() | where {$_}```\r\n- ```for /F \"delims=\" %i in ('python scripts\\deps.py build run test') do (conda install %i)```\r\n\r\n### `pytest<5.0.0` fails\r\nIn addition, running ```for /F \"delims=\" %i in ('python scripts\\deps.py build run test') do (conda install %i)``` fails with error `System can't find given file.` which is due to `pytest<5.0.0`. Providing double quotes actually fixes the issue -> `conda install \"pytest<5.0.0\"`.\n", "before_files": [{"content": "import sys\nimport jinja2\nimport yaml\n\n\ndef load_setup_py_data():\n import os\n import setuptools\n os.environ['CONDA_BUILD_STATE'] = 'RENDER'\n data = {}\n\n def _setup(**kw): data.update(kw)\n setuptools.setup = _setup\n return data\n\nmeta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\nmeta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n Loader=yaml.FullLoader)\n\nsection = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\n \"deploy\" : meta_src[\"extra\"][\"deploy\"],\n \"run\" : meta_src[\"requirements\"][\"run\"],\n \"test\" : meta_src[\"test\"][\"requires\"],\n}\n\nspec = []\nfor name in sys.argv[1:]:\n spec += section[name]\n\n# bare python unpins python version causing upgrade to latest\nif 'python' in spec: spec.remove('python')\n\ndeps = \"\"\ndeps += \" \".join(s for s in spec)\ndeps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\ndeps = deps.replace(' <', '<')\ndeps = deps.replace(' [unix]', ' ')\n\nprint(deps)\n", "path": "scripts/deps.py"}], "after_files": [{"content": "import sys\nimport platform\nimport jinja2\nimport yaml\n\n\ndef load_setup_py_data():\n import os\n import setuptools\n os.environ['CONDA_BUILD_STATE'] = 'RENDER'\n data = {}\n\n def _setup(**kw): data.update(kw)\n setuptools.setup = _setup\n return data\n\nmeta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\nmeta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n Loader=yaml.FullLoader)\n\nsection = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\n \"deploy\" : meta_src[\"extra\"][\"deploy\"],\n \"run\" : 
meta_src[\"requirements\"][\"run\"],\n \"test\" : meta_src[\"test\"][\"requires\"],\n}\n\nspec = []\nfor name in sys.argv[1:]:\n spec += section[name]\n\n# bare python unpins python version causing upgrade to latest\nif 'python' in spec: spec.remove('python')\n\n# add double quotes to specs for windows, fixes #9065\nif \"windows\" in platform.platform().lower():\n spec = ['\"{}\"'.format(s) for s in spec]\n\ndeps = \"\"\ndeps += \" \".join(s for s in spec)\ndeps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\ndeps = deps.replace(' <', '<')\ndeps = deps.replace(' [unix]', ' ')\n\nprint(deps)\n", "path": "scripts/deps.py"}]} | 921 | 177 |
gh_patches_debug_4586 | rasdani/github-patches | git_diff | mdn__kuma-1792 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Delete templates used in the old design, replace them with redesign-specific templates (like profile_redesign.html)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/devmo/views.py`
Content:
```
1 from django.conf import settings
2 from django.core.paginator import Paginator
3 from django.shortcuts import get_object_or_404, render
4 from django.http import (HttpResponseRedirect, HttpResponseForbidden)
5
6 from devmo.urlresolvers import reverse
7
8 import constance.config
9 import basket
10 from taggit.utils import parse_tags
11 from waffle import flag_is_active
12
13 from waffle import flag_is_active
14
15 from access.decorators import login_required
16 from demos.models import Submission
17 from teamwork.models import Team
18
19 from . import INTEREST_SUGGESTIONS
20 from .models import Calendar, Event, UserProfile
21 from .forms import (UserProfileEditForm, newsletter_subscribe,
22 get_subscription_details, subscribed_to_newsletter)
23
24
25 DOCS_ACTIVITY_MAX_ITEMS = getattr(settings,
26 'DOCS_ACTIVITY_MAX_ITEMS', 15)
27
28
29 def events(request):
30 """Developer Engagement Calendar"""
31 cal = Calendar.objects.get(shortname='devengage_events')
32 events = Event.objects.filter(calendar=cal)
33 upcoming_events = events.filter(done=False)
34 past_events = events.filter(done=True)
35 google_maps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY',
36 "ABQIAAAAijZqBZcz-rowoXZC1tt9iRT5rHVQFKUGOHoyfP"
37 "_4KyrflbHKcRTt9kQJVST5oKMRj8vKTQS2b7oNjQ")
38
39 return render(request, 'devmo/calendar.html', {
40 'upcoming_events': upcoming_events,
41 'past_events': past_events,
42 'google_maps_api_key': google_maps_api_key
43 })
44
45
46 def profile_view(request, username):
47 profile = get_object_or_404(UserProfile, user__username=username)
48 user = profile.user
49
50 DEMOS_PAGE_SIZE = getattr(settings, 'DEMOS_PAGE_SIZE', 12)
51 sort_order = request.GET.get('sort', 'created')
52 try:
53 page_number = int(request.GET.get('page', 1))
54 except ValueError:
55 page_number = 1
56 show_hidden = (user == request.user) or user.is_superuser
57
58 demos = Submission.objects.all_sorted(sort_order).filter(
59 creator=profile.user)
60 if not show_hidden:
61 demos = demos.exclude(hidden=True)
62
63 demos_paginator = Paginator(demos, DEMOS_PAGE_SIZE, True)
64 demos_page = demos_paginator.page(page_number)
65
66 wiki_activity, docs_feed_items = None, None
67 wiki_activity = profile.wiki_activity()
68
69 if request.user.is_anonymous():
70 show_manage_roles_button = False
71 else:
72 # TODO: This seems wasteful, just to decide whether to show the button
73 roles_by_team = Team.objects.get_team_roles_managed_by(request.user,
74 user)
75 show_manage_roles_button = (len(roles_by_team) > 0)
76
77 template = 'devmo/profile.html'
78 if flag_is_active(request, 'redesign'):
79 template = 'devmo/profile_redesign.html'
80
81 return render(request, template, dict(
82 profile=profile, demos=demos, demos_paginator=demos_paginator,
83 demos_page=demos_page, docs_feed_items=docs_feed_items,
84 wiki_activity=wiki_activity,
85 show_manage_roles_button=show_manage_roles_button,
86 ))
87
88
89 @login_required
90 def my_profile(request):
91 user = request.user
92 return HttpResponseRedirect(reverse(
93 'devmo.views.profile_view', args=(user.username,)))
94
95
96 def profile_edit(request, username):
97 """View and edit user profile"""
98 profile = get_object_or_404(UserProfile, user__username=username)
99 context = {'profile': profile}
100 if not profile.allows_editing_by(request.user):
101 return HttpResponseForbidden()
102
103 # Map of form field names to tag namespaces
104 field_to_tag_ns = (
105 ('interests', 'profile:interest:'),
106 ('expertise', 'profile:expertise:')
107 )
108
109
110 if request.method != 'POST':
111 initial = dict(email=profile.user.email, beta=profile.beta_tester)
112
113 # Load up initial websites with either user data or required base URL
114 for name, meta in UserProfile.website_choices:
115 initial['websites_%s' % name] = profile.websites.get(name, '')
116
117 # Form fields to receive tags filtered by namespace.
118 for field, ns in field_to_tag_ns:
119 initial[field] = ', '.join(t.name.replace(ns, '')
120 for t in profile.tags.all_ns(ns))
121
122 subscription_details = get_subscription_details(profile.user.email)
123 if subscribed_to_newsletter(subscription_details):
124 initial['newsletter'] = True
125 initial['agree'] = True
126
127 # Finally, set up the forms.
128 form = UserProfileEditForm(request.locale,
129 instance=profile,
130 initial=initial)
131
132 else:
133 form = UserProfileEditForm(request.locale,
134 request.POST,
135 request.FILES,
136 instance=profile)
137 if form.is_valid():
138 profile_new = form.save(commit=False)
139
140 # Gather up all websites defined by the model, save them.
141 sites = dict()
142 for name, meta in UserProfile.website_choices:
143 field_name = 'websites_%s' % name
144 field_value = form.cleaned_data.get(field_name, '')
145 if field_value and field_value != meta['prefix']:
146 sites[name] = field_value
147 profile_new.websites = sites
148
149 # Save the profile record now, since the rest of this deals with
150 # related resources...
151 profile_new.save()
152
153 # Update tags from form fields
154 for field, tag_ns in field_to_tag_ns:
155 tags = [t.lower() for t in parse_tags(
156 form.cleaned_data.get(field, ''))]
157 profile_new.tags.set_ns(tag_ns, *tags)
158
159 newsletter_subscribe(request, profile_new.user.email,
160 form.cleaned_data)
161 return HttpResponseRedirect(reverse(
162 'devmo.views.profile_view', args=(profile.user.username,)))
163 context['form'] = form
164 context['INTEREST_SUGGESTIONS'] = INTEREST_SUGGESTIONS
165
166 return render(request, 'devmo/profile_edit.html', context)
167
168
169 @login_required
170 def my_profile_edit(request):
171 user = request.user
172 return HttpResponseRedirect(reverse(
173 'devmo.views.profile_edit', args=(user.username,)))
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/devmo/views.py b/apps/devmo/views.py
--- a/apps/devmo/views.py
+++ b/apps/devmo/views.py
@@ -75,8 +75,6 @@
show_manage_roles_button = (len(roles_by_team) > 0)
template = 'devmo/profile.html'
- if flag_is_active(request, 'redesign'):
- template = 'devmo/profile_redesign.html'
return render(request, template, dict(
profile=profile, demos=demos, demos_paginator=demos_paginator,
| {"golden_diff": "diff --git a/apps/devmo/views.py b/apps/devmo/views.py\n--- a/apps/devmo/views.py\n+++ b/apps/devmo/views.py\n@@ -75,8 +75,6 @@\n show_manage_roles_button = (len(roles_by_team) > 0)\n \n template = 'devmo/profile.html'\n- if flag_is_active(request, 'redesign'):\n- template = 'devmo/profile_redesign.html'\n \n return render(request, template, dict(\n profile=profile, demos=demos, demos_paginator=demos_paginator,\n", "issue": "Delete templates used in the old design, replace them with redesign-specific templates (like profile_redesign.html)\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import (HttpResponseRedirect, HttpResponseForbidden)\n\nfrom devmo.urlresolvers import reverse\n\nimport constance.config\nimport basket\nfrom taggit.utils import parse_tags\nfrom waffle import flag_is_active\n\nfrom waffle import flag_is_active\n\nfrom access.decorators import login_required\nfrom demos.models import Submission\nfrom teamwork.models import Team\n\nfrom . import INTEREST_SUGGESTIONS\nfrom .models import Calendar, Event, UserProfile\nfrom .forms import (UserProfileEditForm, newsletter_subscribe,\n get_subscription_details, subscribed_to_newsletter)\n\n\nDOCS_ACTIVITY_MAX_ITEMS = getattr(settings,\n 'DOCS_ACTIVITY_MAX_ITEMS', 15)\n\n\ndef events(request):\n \"\"\"Developer Engagement Calendar\"\"\"\n cal = Calendar.objects.get(shortname='devengage_events')\n events = Event.objects.filter(calendar=cal)\n upcoming_events = events.filter(done=False)\n past_events = events.filter(done=True)\n google_maps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY',\n \"ABQIAAAAijZqBZcz-rowoXZC1tt9iRT5rHVQFKUGOHoyfP\"\n \"_4KyrflbHKcRTt9kQJVST5oKMRj8vKTQS2b7oNjQ\")\n\n return render(request, 'devmo/calendar.html', {\n 'upcoming_events': upcoming_events,\n 'past_events': past_events,\n 'google_maps_api_key': google_maps_api_key\n })\n\n\ndef profile_view(request, username):\n profile = get_object_or_404(UserProfile, user__username=username)\n user = profile.user\n\n DEMOS_PAGE_SIZE = getattr(settings, 'DEMOS_PAGE_SIZE', 12)\n sort_order = request.GET.get('sort', 'created')\n try:\n page_number = int(request.GET.get('page', 1))\n except ValueError:\n page_number = 1\n show_hidden = (user == request.user) or user.is_superuser\n\n demos = Submission.objects.all_sorted(sort_order).filter(\n creator=profile.user)\n if not show_hidden:\n demos = demos.exclude(hidden=True)\n\n demos_paginator = Paginator(demos, DEMOS_PAGE_SIZE, True)\n demos_page = demos_paginator.page(page_number)\n\n wiki_activity, docs_feed_items = None, None\n wiki_activity = profile.wiki_activity()\n\n if request.user.is_anonymous():\n show_manage_roles_button = False\n else:\n # TODO: This seems wasteful, just to decide whether to show the button\n roles_by_team = Team.objects.get_team_roles_managed_by(request.user,\n user)\n show_manage_roles_button = (len(roles_by_team) > 0)\n\n template = 'devmo/profile.html'\n if flag_is_active(request, 'redesign'):\n template = 'devmo/profile_redesign.html'\n\n return render(request, template, dict(\n profile=profile, demos=demos, demos_paginator=demos_paginator,\n demos_page=demos_page, docs_feed_items=docs_feed_items,\n wiki_activity=wiki_activity,\n show_manage_roles_button=show_manage_roles_button,\n ))\n\n\n@login_required\ndef my_profile(request):\n user = request.user\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_view', 
args=(user.username,)))\n\n\ndef profile_edit(request, username):\n \"\"\"View and edit user profile\"\"\"\n profile = get_object_or_404(UserProfile, user__username=username)\n context = {'profile': profile}\n if not profile.allows_editing_by(request.user):\n return HttpResponseForbidden()\n\n # Map of form field names to tag namespaces\n field_to_tag_ns = (\n ('interests', 'profile:interest:'),\n ('expertise', 'profile:expertise:')\n )\n\n\n if request.method != 'POST':\n initial = dict(email=profile.user.email, beta=profile.beta_tester)\n\n # Load up initial websites with either user data or required base URL\n for name, meta in UserProfile.website_choices:\n initial['websites_%s' % name] = profile.websites.get(name, '')\n\n # Form fields to receive tags filtered by namespace.\n for field, ns in field_to_tag_ns:\n initial[field] = ', '.join(t.name.replace(ns, '')\n for t in profile.tags.all_ns(ns))\n\n subscription_details = get_subscription_details(profile.user.email)\n if subscribed_to_newsletter(subscription_details):\n initial['newsletter'] = True\n initial['agree'] = True\n\n # Finally, set up the forms.\n form = UserProfileEditForm(request.locale,\n instance=profile,\n initial=initial)\n\n else:\n form = UserProfileEditForm(request.locale,\n request.POST,\n request.FILES,\n instance=profile)\n if form.is_valid():\n profile_new = form.save(commit=False)\n\n # Gather up all websites defined by the model, save them.\n sites = dict()\n for name, meta in UserProfile.website_choices:\n field_name = 'websites_%s' % name\n field_value = form.cleaned_data.get(field_name, '')\n if field_value and field_value != meta['prefix']:\n sites[name] = field_value\n profile_new.websites = sites\n\n # Save the profile record now, since the rest of this deals with\n # related resources...\n profile_new.save()\n\n # Update tags from form fields\n for field, tag_ns in field_to_tag_ns:\n tags = [t.lower() for t in parse_tags(\n form.cleaned_data.get(field, ''))]\n profile_new.tags.set_ns(tag_ns, *tags)\n\n newsletter_subscribe(request, profile_new.user.email,\n form.cleaned_data)\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_view', args=(profile.user.username,)))\n context['form'] = form\n context['INTEREST_SUGGESTIONS'] = INTEREST_SUGGESTIONS\n\n return render(request, 'devmo/profile_edit.html', context)\n\n\n@login_required\ndef my_profile_edit(request):\n user = request.user\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_edit', args=(user.username,)))\n", "path": "apps/devmo/views.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import (HttpResponseRedirect, HttpResponseForbidden)\n\nfrom devmo.urlresolvers import reverse\n\nimport constance.config\nimport basket\nfrom taggit.utils import parse_tags\nfrom waffle import flag_is_active\n\nfrom waffle import flag_is_active\n\nfrom access.decorators import login_required\nfrom demos.models import Submission\nfrom teamwork.models import Team\n\nfrom . 
import INTEREST_SUGGESTIONS\nfrom .models import Calendar, Event, UserProfile\nfrom .forms import (UserProfileEditForm, newsletter_subscribe,\n get_subscription_details, subscribed_to_newsletter)\n\n\nDOCS_ACTIVITY_MAX_ITEMS = getattr(settings,\n 'DOCS_ACTIVITY_MAX_ITEMS', 15)\n\n\ndef events(request):\n \"\"\"Developer Engagement Calendar\"\"\"\n cal = Calendar.objects.get(shortname='devengage_events')\n events = Event.objects.filter(calendar=cal)\n upcoming_events = events.filter(done=False)\n past_events = events.filter(done=True)\n google_maps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY',\n \"ABQIAAAAijZqBZcz-rowoXZC1tt9iRT5rHVQFKUGOHoyfP\"\n \"_4KyrflbHKcRTt9kQJVST5oKMRj8vKTQS2b7oNjQ\")\n\n return render(request, 'devmo/calendar.html', {\n 'upcoming_events': upcoming_events,\n 'past_events': past_events,\n 'google_maps_api_key': google_maps_api_key\n })\n\n\ndef profile_view(request, username):\n profile = get_object_or_404(UserProfile, user__username=username)\n user = profile.user\n\n DEMOS_PAGE_SIZE = getattr(settings, 'DEMOS_PAGE_SIZE', 12)\n sort_order = request.GET.get('sort', 'created')\n try:\n page_number = int(request.GET.get('page', 1))\n except ValueError:\n page_number = 1\n show_hidden = (user == request.user) or user.is_superuser\n\n demos = Submission.objects.all_sorted(sort_order).filter(\n creator=profile.user)\n if not show_hidden:\n demos = demos.exclude(hidden=True)\n\n demos_paginator = Paginator(demos, DEMOS_PAGE_SIZE, True)\n demos_page = demos_paginator.page(page_number)\n\n wiki_activity, docs_feed_items = None, None\n wiki_activity = profile.wiki_activity()\n\n if request.user.is_anonymous():\n show_manage_roles_button = False\n else:\n # TODO: This seems wasteful, just to decide whether to show the button\n roles_by_team = Team.objects.get_team_roles_managed_by(request.user,\n user)\n show_manage_roles_button = (len(roles_by_team) > 0)\n\n template = 'devmo/profile.html'\n\n return render(request, template, dict(\n profile=profile, demos=demos, demos_paginator=demos_paginator,\n demos_page=demos_page, docs_feed_items=docs_feed_items,\n wiki_activity=wiki_activity,\n show_manage_roles_button=show_manage_roles_button,\n ))\n\n\n@login_required\ndef my_profile(request):\n user = request.user\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_view', args=(user.username,)))\n\n\ndef profile_edit(request, username):\n \"\"\"View and edit user profile\"\"\"\n profile = get_object_or_404(UserProfile, user__username=username)\n context = {'profile': profile}\n if not profile.allows_editing_by(request.user):\n return HttpResponseForbidden()\n\n # Map of form field names to tag namespaces\n field_to_tag_ns = (\n ('interests', 'profile:interest:'),\n ('expertise', 'profile:expertise:')\n )\n\n\n if request.method != 'POST':\n initial = dict(email=profile.user.email, beta=profile.beta_tester)\n\n # Load up initial websites with either user data or required base URL\n for name, meta in UserProfile.website_choices:\n initial['websites_%s' % name] = profile.websites.get(name, '')\n\n # Form fields to receive tags filtered by namespace.\n for field, ns in field_to_tag_ns:\n initial[field] = ', '.join(t.name.replace(ns, '')\n for t in profile.tags.all_ns(ns))\n\n subscription_details = get_subscription_details(profile.user.email)\n if subscribed_to_newsletter(subscription_details):\n initial['newsletter'] = True\n initial['agree'] = True\n\n # Finally, set up the forms.\n form = UserProfileEditForm(request.locale,\n instance=profile,\n initial=initial)\n\n 
else:\n form = UserProfileEditForm(request.locale,\n request.POST,\n request.FILES,\n instance=profile)\n if form.is_valid():\n profile_new = form.save(commit=False)\n\n # Gather up all websites defined by the model, save them.\n sites = dict()\n for name, meta in UserProfile.website_choices:\n field_name = 'websites_%s' % name\n field_value = form.cleaned_data.get(field_name, '')\n if field_value and field_value != meta['prefix']:\n sites[name] = field_value\n profile_new.websites = sites\n\n # Save the profile record now, since the rest of this deals with\n # related resources...\n profile_new.save()\n\n # Update tags from form fields\n for field, tag_ns in field_to_tag_ns:\n tags = [t.lower() for t in parse_tags(\n form.cleaned_data.get(field, ''))]\n profile_new.tags.set_ns(tag_ns, *tags)\n\n newsletter_subscribe(request, profile_new.user.email,\n form.cleaned_data)\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_view', args=(profile.user.username,)))\n context['form'] = form\n context['INTEREST_SUGGESTIONS'] = INTEREST_SUGGESTIONS\n\n return render(request, 'devmo/profile_edit.html', context)\n\n\n@login_required\ndef my_profile_edit(request):\n user = request.user\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_edit', args=(user.username,)))\n", "path": "apps/devmo/views.py"}]} | 2,046 | 122 |
gh_patches_debug_24085 | rasdani/github-patches | git_diff | conan-io__conan-2870 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CONAN_LOGIN_USERNAME and CONAN_PASSWORD are ignored in non-interactive mode
Conan 1.3.2
To reproduce:
1. Run `conan user -c`
2. Set `CONAN_LOGIN_USERNAME`, `CONAN_PASSWORD` and `CONAN_NON_INTERACTIVE` environment variables.
3. Run `conan upload -r staging` to a remote which requires authentication.
**Expected result**: Conan uses credentials stored in environment variables (as was in Conan 1.2)
**Actual result**: Upload fails with message:
```
Please log in to "staging" to perform this action. Execute "conan user" command.
ERROR: Conan interactive mode disabled. [Remote: staging]
```
My best guess is that in `userio.py`
```python
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
self._raise_if_non_interactive()
```
call to `self._raise_if_non_interactive()` is unnecessary, as this check is performed by the functions called from it (having the prompt printed is not that ugly; alternatively, move this check closer to the `conan user` command).
The option to always require an explicit `conan user` is also possible, although it feels a little inconsistent and makes the credentials environment variables pointless.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/userio.py`
Content:
```
1 import os
2 import sys
3 from conans.client.output import ConanOutput
4 from conans.model.username import Username
5 from conans.errors import InvalidNameException, ConanException
6 import getpass
7 from six.moves import input as raw_input
8
9
10 class UserIO(object):
11 """Class to interact with the user, used to show messages and ask for information"""
12
13 def __init__(self, ins=sys.stdin, out=None):
14 """
15 Params:
16 ins: input stream
17 out: ConanOutput, should have "write" method
18 """
19 self._ins = ins
20 if not out:
21 out = ConanOutput(sys.stdout)
22 self.out = out
23 self._interactive = True
24
25 def disable_input(self):
26 self._interactive = False
27
28 def _raise_if_non_interactive(self):
29 if not self._interactive:
30 raise ConanException("Conan interactive mode disabled")
31
32 def raw_input(self):
33 self._raise_if_non_interactive()
34 return raw_input()
35
36 def get_pass(self):
37 self._raise_if_non_interactive()
38 return getpass.getpass("")
39
40 def request_login(self, remote_name, username=None):
41 """Request user to input their name and password
42 :param username If username is specified it only request password"""
43 self._raise_if_non_interactive()
44 user_input = ''
45 while not username:
46 try:
47 self.out.write("Remote '%s' username: " % remote_name)
48 user_input = self.get_username(remote_name)
49 username = Username(user_input)
50 except InvalidNameException:
51 self.out.error('%s is not a valid username' % user_input)
52
53 self.out.write('Please enter a password for "%s" account: ' % username)
54 try:
55 pwd = self.get_password(remote_name)
56 except ConanException:
57 raise
58 except Exception as e:
59 raise ConanException('Cancelled pass %s' % e)
60 return username, pwd
61
62 def get_username(self, remote_name):
63 """Overridable for testing purpose"""
64 return self._get_env_username(remote_name) or self.raw_input()
65
66 def get_password(self, remote_name):
67 """Overridable for testing purpose"""
68 return self._get_env_password(remote_name) or self.get_pass()
69
70 def request_string(self, msg, default_value=None):
71 """Request user to input a msg
72 :param msg Name of the msg
73 """
74 self._raise_if_non_interactive()
75
76 if default_value:
77 self.out.input_text('%s (%s): ' % (msg, default_value))
78 else:
79 self.out.input_text('%s: ' % msg)
80 s = self._ins.readline().replace("\n", "")
81 if default_value is not None and s == '':
82 return default_value
83 return s
84
85 def request_boolean(self, msg, default_option=None):
86 """Request user to input a boolean"""
87 ret = None
88 while ret is None:
89 if default_option is True:
90 s = self.request_string("%s (YES/no)" % msg)
91 elif default_option is False:
92 s = self.request_string("%s (NO/yes)" % msg)
93 else:
94 s = self.request_string("%s (yes/no)" % msg)
95 if default_option is not None and s == '':
96 return default_option
97 if s.lower() in ['yes', 'y']:
98 ret = True
99 elif s.lower() in ['no', 'n']:
100 ret = False
101 else:
102 self.out.error("%s is not a valid answer" % s)
103 return ret
104
105 def _get_env_password(self, remote_name):
106 """
107 Try CONAN_PASSWORD_REMOTE_NAME or CONAN_PASSWORD or return None
108 """
109 remote_name = remote_name.replace("-", "_").upper()
110 var_name = "CONAN_PASSWORD_%s" % remote_name
111 ret = os.getenv(var_name, None) or os.getenv("CONAN_PASSWORD", None)
112 if ret:
113 self.out.info("Got password '******' from environment")
114 return ret
115
116 def _get_env_username(self, remote_name):
117 """
118 Try CONAN_LOGIN_USERNAME_REMOTE_NAME or CONAN_LOGIN_USERNAME or return None
119 """
120 remote_name = remote_name.replace("-", "_").upper()
121 var_name = "CONAN_LOGIN_USERNAME_%s" % remote_name
122 ret = os.getenv(var_name, None) or os.getenv("CONAN_LOGIN_USERNAME", None)
123
124 if ret:
125 self.out.info("Got username '%s' from environment" % ret)
126 return ret
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/userio.py b/conans/client/userio.py
--- a/conans/client/userio.py
+++ b/conans/client/userio.py
@@ -40,17 +40,17 @@
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
- self._raise_if_non_interactive()
user_input = ''
while not username:
try:
- self.out.write("Remote '%s' username: " % remote_name)
+ if self._interactive:
+ self.out.write("Remote '%s' username: " % remote_name)
user_input = self.get_username(remote_name)
username = Username(user_input)
except InvalidNameException:
self.out.error('%s is not a valid username' % user_input)
-
- self.out.write('Please enter a password for "%s" account: ' % username)
+ if self._interactive:
+ self.out.write('Please enter a password for "%s" account: ' % username)
try:
pwd = self.get_password(remote_name)
except ConanException:
| {"golden_diff": "diff --git a/conans/client/userio.py b/conans/client/userio.py\n--- a/conans/client/userio.py\n+++ b/conans/client/userio.py\n@@ -40,17 +40,17 @@\n def request_login(self, remote_name, username=None):\n \"\"\"Request user to input their name and password\n :param username If username is specified it only request password\"\"\"\n- self._raise_if_non_interactive()\n user_input = ''\n while not username:\n try:\n- self.out.write(\"Remote '%s' username: \" % remote_name)\n+ if self._interactive:\n+ self.out.write(\"Remote '%s' username: \" % remote_name)\n user_input = self.get_username(remote_name)\n username = Username(user_input)\n except InvalidNameException:\n self.out.error('%s is not a valid username' % user_input)\n-\n- self.out.write('Please enter a password for \"%s\" account: ' % username)\n+ if self._interactive:\n+ self.out.write('Please enter a password for \"%s\" account: ' % username)\n try:\n pwd = self.get_password(remote_name)\n except ConanException:\n", "issue": "CONAN_LOGIN_USERNAME and CONAN_PASSWORD are ignored in non-interactive mode\nConan 1.3.2\r\n\r\nTo reproduce:\r\n1. Run `conan user -c`\r\n2. Set `CONAN_LOGIN_USERNAME`, `CONAN_PASSWORD` and `CONAN_NON_INTERACTIVE` environment variables.\r\n3. Run `conan upload -r staging` to remove which requires authentication.\r\n\r\n**Expected result**: Conan uses credentials stored in environment variables (as was in Conan 1.2)\r\n**Actual result**: Upload fails with message:\r\n```\r\nPlease log in to \"staging\" to perform this action. Execute \"conan user\" command.\r\nERROR: Conan interactive mode disabled. [Remote: staging]\r\n```\r\n\r\nMy best guess is that in `userio.py`\r\n```python\r\n def request_login(self, remote_name, username=None):\r\n \"\"\"Request user to input their name and password\r\n :param username If username is specified it only request password\"\"\"\r\n self._raise_if_non_interactive()\r\n```\r\ncall to `self._raise_if_non_interactive()` is unnecessary as this check performed by functions called from it (Having prompt printed is not that ugly, or move this check closer to `conan user` command).\r\n\r\nOption to always require explicit `conan user` is also possible, although feels little bit inconsistent and makes use of credentials variables pointless.\n", "before_files": [{"content": "import os\nimport sys\nfrom conans.client.output import ConanOutput\nfrom conans.model.username import Username\nfrom conans.errors import InvalidNameException, ConanException\nimport getpass\nfrom six.moves import input as raw_input\n\n\nclass UserIO(object):\n \"\"\"Class to interact with the user, used to show messages and ask for information\"\"\"\n\n def __init__(self, ins=sys.stdin, out=None):\n \"\"\"\n Params:\n ins: input stream\n out: ConanOutput, should have \"write\" method\n \"\"\"\n self._ins = ins\n if not out:\n out = ConanOutput(sys.stdout)\n self.out = out\n self._interactive = True\n\n def disable_input(self):\n self._interactive = False\n\n def _raise_if_non_interactive(self):\n if not self._interactive:\n raise ConanException(\"Conan interactive mode disabled\")\n\n def raw_input(self):\n self._raise_if_non_interactive()\n return raw_input()\n\n def get_pass(self):\n self._raise_if_non_interactive()\n return getpass.getpass(\"\")\n\n def request_login(self, remote_name, username=None):\n \"\"\"Request user to input their name and password\n :param username If username is specified it only request password\"\"\"\n self._raise_if_non_interactive()\n user_input = ''\n while not 
username:\n try:\n self.out.write(\"Remote '%s' username: \" % remote_name)\n user_input = self.get_username(remote_name)\n username = Username(user_input)\n except InvalidNameException:\n self.out.error('%s is not a valid username' % user_input)\n\n self.out.write('Please enter a password for \"%s\" account: ' % username)\n try:\n pwd = self.get_password(remote_name)\n except ConanException:\n raise\n except Exception as e:\n raise ConanException('Cancelled pass %s' % e)\n return username, pwd\n\n def get_username(self, remote_name):\n \"\"\"Overridable for testing purpose\"\"\"\n return self._get_env_username(remote_name) or self.raw_input()\n\n def get_password(self, remote_name):\n \"\"\"Overridable for testing purpose\"\"\"\n return self._get_env_password(remote_name) or self.get_pass()\n\n def request_string(self, msg, default_value=None):\n \"\"\"Request user to input a msg\n :param msg Name of the msg\n \"\"\"\n self._raise_if_non_interactive()\n\n if default_value:\n self.out.input_text('%s (%s): ' % (msg, default_value))\n else:\n self.out.input_text('%s: ' % msg)\n s = self._ins.readline().replace(\"\\n\", \"\")\n if default_value is not None and s == '':\n return default_value\n return s\n\n def request_boolean(self, msg, default_option=None):\n \"\"\"Request user to input a boolean\"\"\"\n ret = None\n while ret is None:\n if default_option is True:\n s = self.request_string(\"%s (YES/no)\" % msg)\n elif default_option is False:\n s = self.request_string(\"%s (NO/yes)\" % msg)\n else:\n s = self.request_string(\"%s (yes/no)\" % msg)\n if default_option is not None and s == '':\n return default_option\n if s.lower() in ['yes', 'y']:\n ret = True\n elif s.lower() in ['no', 'n']:\n ret = False\n else:\n self.out.error(\"%s is not a valid answer\" % s)\n return ret\n\n def _get_env_password(self, remote_name):\n \"\"\"\n Try CONAN_PASSWORD_REMOTE_NAME or CONAN_PASSWORD or return None\n \"\"\"\n remote_name = remote_name.replace(\"-\", \"_\").upper()\n var_name = \"CONAN_PASSWORD_%s\" % remote_name\n ret = os.getenv(var_name, None) or os.getenv(\"CONAN_PASSWORD\", None)\n if ret:\n self.out.info(\"Got password '******' from environment\")\n return ret\n\n def _get_env_username(self, remote_name):\n \"\"\"\n Try CONAN_LOGIN_USERNAME_REMOTE_NAME or CONAN_LOGIN_USERNAME or return None\n \"\"\"\n remote_name = remote_name.replace(\"-\", \"_\").upper()\n var_name = \"CONAN_LOGIN_USERNAME_%s\" % remote_name\n ret = os.getenv(var_name, None) or os.getenv(\"CONAN_LOGIN_USERNAME\", None)\n\n if ret:\n self.out.info(\"Got username '%s' from environment\" % ret)\n return ret\n", "path": "conans/client/userio.py"}], "after_files": [{"content": "import os\nimport sys\nfrom conans.client.output import ConanOutput\nfrom conans.model.username import Username\nfrom conans.errors import InvalidNameException, ConanException\nimport getpass\nfrom six.moves import input as raw_input\n\n\nclass UserIO(object):\n \"\"\"Class to interact with the user, used to show messages and ask for information\"\"\"\n\n def __init__(self, ins=sys.stdin, out=None):\n \"\"\"\n Params:\n ins: input stream\n out: ConanOutput, should have \"write\" method\n \"\"\"\n self._ins = ins\n if not out:\n out = ConanOutput(sys.stdout)\n self.out = out\n self._interactive = True\n\n def disable_input(self):\n self._interactive = False\n\n def _raise_if_non_interactive(self):\n if not self._interactive:\n raise ConanException(\"Conan interactive mode disabled\")\n\n def raw_input(self):\n self._raise_if_non_interactive()\n return 
raw_input()\n\n def get_pass(self):\n self._raise_if_non_interactive()\n return getpass.getpass(\"\")\n\n def request_login(self, remote_name, username=None):\n \"\"\"Request user to input their name and password\n :param username If username is specified it only request password\"\"\"\n user_input = ''\n while not username:\n try:\n if self._interactive:\n self.out.write(\"Remote '%s' username: \" % remote_name)\n user_input = self.get_username(remote_name)\n username = Username(user_input)\n except InvalidNameException:\n self.out.error('%s is not a valid username' % user_input)\n if self._interactive:\n self.out.write('Please enter a password for \"%s\" account: ' % username)\n try:\n pwd = self.get_password(remote_name)\n except ConanException:\n raise\n except Exception as e:\n raise ConanException('Cancelled pass %s' % e)\n return username, pwd\n\n def get_username(self, remote_name):\n \"\"\"Overridable for testing purpose\"\"\"\n return self._get_env_username(remote_name) or self.raw_input()\n\n def get_password(self, remote_name):\n \"\"\"Overridable for testing purpose\"\"\"\n return self._get_env_password(remote_name) or self.get_pass()\n\n def request_string(self, msg, default_value=None):\n \"\"\"Request user to input a msg\n :param msg Name of the msg\n \"\"\"\n self._raise_if_non_interactive()\n\n if default_value:\n self.out.input_text('%s (%s): ' % (msg, default_value))\n else:\n self.out.input_text('%s: ' % msg)\n s = self._ins.readline().replace(\"\\n\", \"\")\n if default_value is not None and s == '':\n return default_value\n return s\n\n def request_boolean(self, msg, default_option=None):\n \"\"\"Request user to input a boolean\"\"\"\n ret = None\n while ret is None:\n if default_option is True:\n s = self.request_string(\"%s (YES/no)\" % msg)\n elif default_option is False:\n s = self.request_string(\"%s (NO/yes)\" % msg)\n else:\n s = self.request_string(\"%s (yes/no)\" % msg)\n if default_option is not None and s == '':\n return default_option\n if s.lower() in ['yes', 'y']:\n ret = True\n elif s.lower() in ['no', 'n']:\n ret = False\n else:\n self.out.error(\"%s is not a valid answer\" % s)\n return ret\n\n def _get_env_password(self, remote_name):\n \"\"\"\n Try CONAN_PASSWORD_REMOTE_NAME or CONAN_PASSWORD or return None\n \"\"\"\n remote_name = remote_name.replace(\"-\", \"_\").upper()\n var_name = \"CONAN_PASSWORD_%s\" % remote_name\n ret = os.getenv(var_name, None) or os.getenv(\"CONAN_PASSWORD\", None)\n if ret:\n self.out.info(\"Got password '******' from environment\")\n return ret\n\n def _get_env_username(self, remote_name):\n \"\"\"\n Try CONAN_LOGIN_USERNAME_REMOTE_NAME or CONAN_LOGIN_USERNAME or return None\n \"\"\"\n remote_name = remote_name.replace(\"-\", \"_\").upper()\n var_name = \"CONAN_LOGIN_USERNAME_%s\" % remote_name\n ret = os.getenv(var_name, None) or os.getenv(\"CONAN_LOGIN_USERNAME\", None)\n\n if ret:\n self.out.info(\"Got username '%s' from environment\" % ret)\n return ret\n", "path": "conans/client/userio.py"}]} | 1,782 | 254 |
gh_patches_debug_12473 | rasdani/github-patches | git_diff | urllib3__urllib3-2216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Is HTTPHeaderDict a public API to make requests?
`HTTPHeaderDict` was initially designed to handle response headers, and is not documented for request headers.
* Should it be documented?
* How should it be imported? The current options are `from urllib3.response import HTTPHeaderDict` and `from urllib3._collections import HTTPHeaderDict`, and they don't feel right.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/__init__.py`
Content:
```
1 """
2 Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
3 """
4
5 # Set default logging handler to avoid "No handler found" warnings.
6 import logging
7 import warnings
8 from logging import NullHandler
9
10 from . import exceptions
11 from ._version import __version__
12 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
13 from .filepost import encode_multipart_formdata
14 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
15 from .response import HTTPResponse
16 from .util.request import make_headers
17 from .util.retry import Retry
18 from .util.timeout import Timeout
19
20 __author__ = "Andrey Petrov ([email protected])"
21 __license__ = "MIT"
22 __version__ = __version__
23
24 __all__ = (
25 "HTTPConnectionPool",
26 "HTTPSConnectionPool",
27 "PoolManager",
28 "ProxyManager",
29 "HTTPResponse",
30 "Retry",
31 "Timeout",
32 "add_stderr_logger",
33 "connection_from_url",
34 "disable_warnings",
35 "encode_multipart_formdata",
36 "make_headers",
37 "proxy_from_url",
38 "request",
39 )
40
41 logging.getLogger(__name__).addHandler(NullHandler())
42
43
44 def add_stderr_logger(level=logging.DEBUG):
45 """
46 Helper for quickly adding a StreamHandler to the logger. Useful for
47 debugging.
48
49 Returns the handler after adding it.
50 """
51 # This method needs to be in this __init__.py to get the __name__ correct
52 # even if urllib3 is vendored within another package.
53 logger = logging.getLogger(__name__)
54 handler = logging.StreamHandler()
55 handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
56 logger.addHandler(handler)
57 logger.setLevel(level)
58 logger.debug("Added a stderr logging handler to logger: %s", __name__)
59 return handler
60
61
62 # ... Clean up.
63 del NullHandler
64
65
66 # All warning filters *must* be appended unless you're really certain that they
67 # shouldn't be: otherwise, it's very hard for users to use most Python
68 # mechanisms to silence them.
69 # SecurityWarning's always go off by default.
70 warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
71 # InsecurePlatformWarning's don't vary between requests, so we keep it default.
72 warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
73 # SNIMissingWarnings should go off only once.
74 warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
75
76
77 def disable_warnings(category=exceptions.HTTPWarning):
78 """
79 Helper for quickly disabling all urllib3 warnings.
80 """
81 warnings.simplefilter("ignore", category)
82
83
84 _DEFAULT_POOL = PoolManager()
85
86
87 def request(method, url, fields=None, headers=None):
88 """
89 A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
90 Therefore, its side effects could be shared across dependencies relying on it.
91 To avoid side effects create a new ``PoolManager`` instance and use it instead.
92 The method does not accept low-level ``**urlopen_kw`` keyword arguments.
93 """
94
95 return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py
--- a/src/urllib3/__init__.py
+++ b/src/urllib3/__init__.py
@@ -8,6 +8,7 @@
from logging import NullHandler
from . import exceptions
+from ._collections import HTTPHeaderDict
from ._version import __version__
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
from .filepost import encode_multipart_formdata
@@ -23,6 +24,7 @@
__all__ = (
"HTTPConnectionPool",
+ "HTTPHeaderDict",
"HTTPSConnectionPool",
"PoolManager",
"ProxyManager",
| {"golden_diff": "diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py\n--- a/src/urllib3/__init__.py\n+++ b/src/urllib3/__init__.py\n@@ -8,6 +8,7 @@\n from logging import NullHandler\n \n from . import exceptions\n+from ._collections import HTTPHeaderDict\n from ._version import __version__\n from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\n from .filepost import encode_multipart_formdata\n@@ -23,6 +24,7 @@\n \n __all__ = (\n \"HTTPConnectionPool\",\n+ \"HTTPHeaderDict\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n", "issue": "Is HTTPHeaderDict a public API to make requests?\n`HTTPHeaderDict` was initially designed to handle response headers, and is not documented for request headers.\r\n\r\n * Should it be documented?\r\n * How should it be imported? The current options are `from urllib3.response import HTTPHeaderDict` and `from urllib3._collections import HTTPHeaderDict`, and they don't feel right.\n", "before_files": [{"content": "\"\"\"\nPython HTTP library with thread-safe connection pooling, file post support, user friendly, and more\n\"\"\"\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nimport warnings\nfrom logging import NullHandler\n\nfrom . import exceptions\nfrom ._version import __version__\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\n\n__author__ = \"Andrey Petrov ([email protected])\"\n__license__ = \"MIT\"\n__version__ = __version__\n\n__all__ = (\n \"HTTPConnectionPool\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n \"HTTPResponse\",\n \"Retry\",\n \"Timeout\",\n \"add_stderr_logger\",\n \"connection_from_url\",\n \"disable_warnings\",\n \"encode_multipart_formdata\",\n \"make_headers\",\n \"proxy_from_url\",\n \"request\",\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug(\"Added a stderr logging handler to logger: %s\", __name__)\n return handler\n\n\n# ... 
Clean up.\ndel NullHandler\n\n\n# All warning filters *must* be appended unless you're really certain that they\n# shouldn't be: otherwise, it's very hard for users to use most Python\n# mechanisms to silence them.\n# SecurityWarning's always go off by default.\nwarnings.simplefilter(\"always\", exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter(\"default\", exceptions.InsecurePlatformWarning, append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter(\"default\", exceptions.SNIMissingWarning, append=True)\n\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter(\"ignore\", category)\n\n\n_DEFAULT_POOL = PoolManager()\n\n\ndef request(method, url, fields=None, headers=None):\n \"\"\"\n A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.\n Therefore, its side effects could be shared across dependencies relying on it.\n To avoid side effects create a new ``PoolManager`` instance and use it instead.\n The method does not accept low-level ``**urlopen_kw`` keyword arguments.\n \"\"\"\n\n return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers)\n", "path": "src/urllib3/__init__.py"}], "after_files": [{"content": "\"\"\"\nPython HTTP library with thread-safe connection pooling, file post support, user friendly, and more\n\"\"\"\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nimport warnings\nfrom logging import NullHandler\n\nfrom . import exceptions\nfrom ._collections import HTTPHeaderDict\nfrom ._version import __version__\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\n\n__author__ = \"Andrey Petrov ([email protected])\"\n__license__ = \"MIT\"\n__version__ = __version__\n\n__all__ = (\n \"HTTPConnectionPool\",\n \"HTTPHeaderDict\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n \"HTTPResponse\",\n \"Retry\",\n \"Timeout\",\n \"add_stderr_logger\",\n \"connection_from_url\",\n \"disable_warnings\",\n \"encode_multipart_formdata\",\n \"make_headers\",\n \"proxy_from_url\",\n \"request\",\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug(\"Added a stderr logging handler to logger: %s\", __name__)\n return handler\n\n\n# ... 
Clean up.\ndel NullHandler\n\n\n# All warning filters *must* be appended unless you're really certain that they\n# shouldn't be: otherwise, it's very hard for users to use most Python\n# mechanisms to silence them.\n# SecurityWarning's always go off by default.\nwarnings.simplefilter(\"always\", exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter(\"default\", exceptions.InsecurePlatformWarning, append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter(\"default\", exceptions.SNIMissingWarning, append=True)\n\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter(\"ignore\", category)\n\n\n_DEFAULT_POOL = PoolManager()\n\n\ndef request(method, url, fields=None, headers=None):\n \"\"\"\n A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.\n Therefore, its side effects could be shared across dependencies relying on it.\n To avoid side effects create a new ``PoolManager`` instance and use it instead.\n The method does not accept low-level ``**urlopen_kw`` keyword arguments.\n \"\"\"\n\n return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers)\n", "path": "src/urllib3/__init__.py"}]} | 1,211 | 159 |
gh_patches_debug_3103 | rasdani/github-patches | git_diff | conan-io__conan-center-index-1534 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[conan.io/center] parallel-hashmap/1.31 merged but not found in conan center
Even though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in Web UI or with `conan search`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/parallel-hashmap/all/conanfile.py`
Content:
```
1 import os
2
3 from conans import ConanFile, tools
4
5 class ParallelHashmapConan(ConanFile):
6 name = "parallel-hashmap"
7 description = "A family of header-only, very fast and memory-friendly hashmap and btree containers."
8 license = "Apache-2.0"
9 topics = ("conan", "parallel-hashmap", "parallel", "hashmap", "btree")
10 homepage = "https://github.com/greg7mdp/parallel-hashmap"
11 url = "https://github.com/conan-io/conan-center-index"
12 no_copy_source = True
13
14 @property
15 def _source_subfolder(self):
16 return "source_subfolder"
17
18 def source(self):
19 tools.get(**self.conan_data["sources"][self.version])
20 os.rename(self.name + "-" + self.version, self._source_subfolder)
21
22 def package(self):
23 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
24 self.copy("*.h",
25 dst=os.path.join("include", "parallel_hashmap"),
26 src=os.path.join(self._source_subfolder, "parallel_hashmap"))
27 self.copy("phmap.natvis", dst="res", src=self._source_subfolder)
28
29 def package_id(self):
30 self.info.header_only()
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py
--- a/recipes/parallel-hashmap/all/conanfile.py
+++ b/recipes/parallel-hashmap/all/conanfile.py
@@ -1,7 +1,7 @@
import os
-
from conans import ConanFile, tools
+
class ParallelHashmapConan(ConanFile):
name = "parallel-hashmap"
description = "A family of header-only, very fast and memory-friendly hashmap and btree containers."
| {"golden_diff": "diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py\n--- a/recipes/parallel-hashmap/all/conanfile.py\n+++ b/recipes/parallel-hashmap/all/conanfile.py\n@@ -1,7 +1,7 @@\n import os\n-\n from conans import ConanFile, tools\n \n+\n class ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n", "issue": "[conan.io/center] parallel-hashmap/1.31 merged but not found in conan center\nEven though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in Web UI or with `conan search`\r\n\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\n\nclass ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"parallel-hashmap\", \"parallel\", \"hashmap\", \"btree\")\n homepage = \"https://github.com/greg7mdp/parallel-hashmap\"\n url = \"https://github.com/conan-io/conan-center-index\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\",\n dst=os.path.join(\"include\", \"parallel_hashmap\"),\n src=os.path.join(self._source_subfolder, \"parallel_hashmap\"))\n self.copy(\"phmap.natvis\", dst=\"res\", src=self._source_subfolder)\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/parallel-hashmap/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conans import ConanFile, tools\n\n\nclass ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"parallel-hashmap\", \"parallel\", \"hashmap\", \"btree\")\n homepage = \"https://github.com/greg7mdp/parallel-hashmap\"\n url = \"https://github.com/conan-io/conan-center-index\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\",\n dst=os.path.join(\"include\", \"parallel_hashmap\"),\n src=os.path.join(self._source_subfolder, \"parallel_hashmap\"))\n self.copy(\"phmap.natvis\", dst=\"res\", src=self._source_subfolder)\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/parallel-hashmap/all/conanfile.py"}]} | 668 | 120 |
gh_patches_debug_25403 | rasdani/github-patches | git_diff | encode__uvicorn-701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnicodeDecodeError when decoding bad headers
Someone (or some bot) was spamming my sever with requests to potential vulnerabilities.
One of the attacks is for a potential vulnerability in php, which sets the the `x-forwarded-for` header to the following value:
```
}__test|O:21:"JDatabaseDriverMysqli":3:{s:2:"fc";O:17:"JSimplepieFactory":0:{}s:21:"\\0\\0\\0disconnectHandlers";a:1:{i:0;a:2:{i:0;O:9:"SimplePie":5:{s:8:"sanitize";O:20:"JDatabaseDriverMysql":0:{}s:8:"feed_url";s:56:"die(md5(DIRECTORY_SEPARATOR));JFactory::getConfig();exit";s:19:"cache_name_function";s:6:"assert";s:5:"cache";b:1;s:11:"cache_class";O:20:"JDatabaseDriverMysql":0:{}}i:1;s:4:"init";}}s:13:"\\0\\0\\0connection";b:1;}\xf0\xfd\xfd\xfd, ...
```
This leads to this exception:
```
Exception in ASGI application
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 40, in __call__
x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 427: ordinal not in range(128)
```
As it's due to malformed header from the client, I would expect this should be a 400 error instead?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/middleware/proxy_headers.py`
Content:
```
1 """
2 This middleware can be used when a known proxy is fronting the application,
3 and is trusted to be properly setting the `X-Forwarded-Proto` and
4 `X-Forwarded-For` headers with the connecting client information.
5
6 Modifies the `client` and `scheme` information so that they reference
7 the connecting client, rather that the connecting proxy.
8
9 https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies
10 """
11
12
13 class ProxyHeadersMiddleware:
14 def __init__(self, app, trusted_hosts="127.0.0.1"):
15 self.app = app
16 if isinstance(trusted_hosts, str):
17 self.trusted_hosts = [item.strip() for item in trusted_hosts.split(",")]
18 else:
19 self.trusted_hosts = trusted_hosts
20 self.always_trust = "*" in self.trusted_hosts
21
22 async def __call__(self, scope, receive, send):
23 if scope["type"] in ("http", "websocket"):
24 client_addr = scope.get("client")
25 client_host = client_addr[0] if client_addr else None
26
27 if self.always_trust or client_host in self.trusted_hosts:
28 headers = dict(scope["headers"])
29
30 if b"x-forwarded-proto" in headers:
31 # Determine if the incoming request was http or https based on
32 # the X-Forwarded-Proto header.
33 x_forwarded_proto = headers[b"x-forwarded-proto"].decode("ascii")
34 scope["scheme"] = x_forwarded_proto.strip()
35
36 if b"x-forwarded-for" in headers:
37 # Determine the client address from the last trusted IP in the
38 # X-Forwarded-For header. We've lost the connecting client's port
39 # information by now, so only include the host.
40 x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
41 host = x_forwarded_for.split(",")[-1].strip()
42 port = 0
43 scope["client"] = (host, port)
44
45 return await self.app(scope, receive, send)
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py
--- a/uvicorn/middleware/proxy_headers.py
+++ b/uvicorn/middleware/proxy_headers.py
@@ -30,14 +30,14 @@
if b"x-forwarded-proto" in headers:
# Determine if the incoming request was http or https based on
# the X-Forwarded-Proto header.
- x_forwarded_proto = headers[b"x-forwarded-proto"].decode("ascii")
+ x_forwarded_proto = headers[b"x-forwarded-proto"].decode("latin1")
scope["scheme"] = x_forwarded_proto.strip()
if b"x-forwarded-for" in headers:
# Determine the client address from the last trusted IP in the
# X-Forwarded-For header. We've lost the connecting client's port
# information by now, so only include the host.
- x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
+ x_forwarded_for = headers[b"x-forwarded-for"].decode("latin1")
host = x_forwarded_for.split(",")[-1].strip()
port = 0
scope["client"] = (host, port)
| {"golden_diff": "diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py\n--- a/uvicorn/middleware/proxy_headers.py\n+++ b/uvicorn/middleware/proxy_headers.py\n@@ -30,14 +30,14 @@\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n- x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"ascii\")\n+ x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"latin1\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n \n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n- x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\n+ x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"latin1\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n", "issue": "UnicodeDecodeError when decoding bad headers\nSomeone (or some bot) was spamming my sever with requests to potential vulnerabilities.\r\n\r\nOne of the attacks is for a potential vulnerability in php, which sets the the `x-forwarded-for` header to the following value:\r\n```\r\n}__test|O:21:\"JDatabaseDriverMysqli\":3:{s:2:\"fc\";O:17:\"JSimplepieFactory\":0:{}s:21:\"\\\\0\\\\0\\\\0disconnectHandlers\";a:1:{i:0;a:2:{i:0;O:9:\"SimplePie\":5:{s:8:\"sanitize\";O:20:\"JDatabaseDriverMysql\":0:{}s:8:\"feed_url\";s:56:\"die(md5(DIRECTORY_SEPARATOR));JFactory::getConfig();exit\";s:19:\"cache_name_function\";s:6:\"assert\";s:5:\"cache\";b:1;s:11:\"cache_class\";O:20:\"JDatabaseDriverMysql\":0:{}}i:1;s:4:\"init\";}}s:13:\"\\\\0\\\\0\\\\0connection\";b:1;}\\xf0\\xfd\\xfd\\xfd, ...\r\n```\r\n\r\nThis leads to this exception:\r\n\r\n```\r\nException in ASGI application\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 385, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py\", line 40, in __call__\r\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 427: ordinal not in range(128)\r\n```\r\n\r\nAs it's due to malformed header from the client, I would expect this should be a 400 error instead?\n", "before_files": [{"content": "\"\"\"\nThis middleware can be used when a known proxy is fronting the application,\nand is trusted to be properly setting the `X-Forwarded-Proto` and\n`X-Forwarded-For` headers with the connecting client information.\n\nModifies the `client` and `scheme` information so that they reference\nthe connecting client, rather that the connecting proxy.\n\nhttps://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies\n\"\"\"\n\n\nclass ProxyHeadersMiddleware:\n def __init__(self, app, trusted_hosts=\"127.0.0.1\"):\n self.app = app\n if isinstance(trusted_hosts, str):\n self.trusted_hosts = [item.strip() for item in trusted_hosts.split(\",\")]\n else:\n self.trusted_hosts = trusted_hosts\n self.always_trust = \"*\" in self.trusted_hosts\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] in (\"http\", \"websocket\"):\n client_addr = scope.get(\"client\")\n client_host = client_addr[0] if client_addr else None\n\n if self.always_trust or client_host in self.trusted_hosts:\n headers = 
dict(scope[\"headers\"])\n\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"ascii\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n\n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n\n return await self.app(scope, receive, send)\n", "path": "uvicorn/middleware/proxy_headers.py"}], "after_files": [{"content": "\"\"\"\nThis middleware can be used when a known proxy is fronting the application,\nand is trusted to be properly setting the `X-Forwarded-Proto` and\n`X-Forwarded-For` headers with the connecting client information.\n\nModifies the `client` and `scheme` information so that they reference\nthe connecting client, rather that the connecting proxy.\n\nhttps://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies\n\"\"\"\n\n\nclass ProxyHeadersMiddleware:\n def __init__(self, app, trusted_hosts=\"127.0.0.1\"):\n self.app = app\n if isinstance(trusted_hosts, str):\n self.trusted_hosts = [item.strip() for item in trusted_hosts.split(\",\")]\n else:\n self.trusted_hosts = trusted_hosts\n self.always_trust = \"*\" in self.trusted_hosts\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] in (\"http\", \"websocket\"):\n client_addr = scope.get(\"client\")\n client_host = client_addr[0] if client_addr else None\n\n if self.always_trust or client_host in self.trusted_hosts:\n headers = dict(scope[\"headers\"])\n\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"latin1\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n\n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"latin1\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n\n return await self.app(scope, receive, send)\n", "path": "uvicorn/middleware/proxy_headers.py"}]} | 1,230 | 276 |
gh_patches_debug_3991 | rasdani/github-patches | git_diff | sublimelsp__LSP-450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't move cursor at the end when populating the diagnostics panel
When the diagnostics are populated the cursor in the panel is moved to the end. See [here](https://github.com/tomv564/LSP/blob/2869978d8b46d717da27eb0ac7a7e234840b218d/plugin/core/panels.py#L53-L56).
Is there a reason for this?
Because I can't use the `f4` keybinding to goto the next result when it is opened.
Instead I need first to press the `shift + f4`, which is the backward direction.
Here is a simple solution.
```diff
def run(self, edit, characters):
self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
- # Move cursor to the end
+ # Clear the selection
selection = self.view.sel()
selection.clear()
- selection.add(sublime.Region(self.view.size(), self.view.size()))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/panels.py`
Content:
```
1 import sublime
2 import sublime_plugin
3
4
5 OUTPUT_PANEL_SETTINGS = {
6 "auto_indent": False,
7 "draw_indent_guides": False,
8 "draw_white_space": "None",
9 "gutter": False,
10 'is_widget': True,
11 "line_numbers": False,
12 "margin": 3,
13 "match_brackets": False,
14 "scroll_past_end": False,
15 "tab_size": 4,
16 "translate_tabs_to_spaces": False,
17 "word_wrap": False
18 }
19
20
21 def create_output_panel(window: sublime.Window, name: str) -> sublime.View:
22 panel = window.create_output_panel(name)
23 settings = panel.settings()
24 for key, value in OUTPUT_PANEL_SETTINGS.items():
25 settings.set(key, value)
26 return panel
27
28
29 def destroy_output_panels(window: sublime.Window):
30 for panel_name in ["references", "diagnostics"]:
31 window.destroy_output_panel(panel_name)
32
33
34 class LspClearPanelCommand(sublime_plugin.TextCommand):
35 """
36 A clear_panel command to clear the error panel.
37 """
38
39 def run(self, edit):
40 self.view.set_read_only(False)
41 self.view.erase(edit, sublime.Region(0, self.view.size()))
42 self.view.set_read_only(True)
43
44
45 class LspUpdatePanelCommand(sublime_plugin.TextCommand):
46 """
47 A update_panel command to update the error panel with new text.
48 """
49
50 def run(self, edit, characters):
51 self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
52
53 # Move cursor to the end
54 selection = self.view.sel()
55 selection.clear()
56 selection.add(sublime.Region(self.view.size(), self.view.size()))
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/panels.py b/plugin/core/panels.py
--- a/plugin/core/panels.py
+++ b/plugin/core/panels.py
@@ -50,7 +50,6 @@
def run(self, edit, characters):
self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
- # Move cursor to the end
+ # Clear the selection
selection = self.view.sel()
selection.clear()
- selection.add(sublime.Region(self.view.size(), self.view.size()))
| {"golden_diff": "diff --git a/plugin/core/panels.py b/plugin/core/panels.py\n--- a/plugin/core/panels.py\n+++ b/plugin/core/panels.py\n@@ -50,7 +50,6 @@\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n \n- # Move cursor to the end\n+ # Clear the selection\n selection = self.view.sel()\n selection.clear()\n- selection.add(sublime.Region(self.view.size(), self.view.size()))\n", "issue": "Don't move cursor at the end when populating the diagnostics panel\nWhen the diagnostics are populated the cursor in the panel is moved to the end. See [here](https://github.com/tomv564/LSP/blob/2869978d8b46d717da27eb0ac7a7e234840b218d/plugin/core/panels.py#L53-L56).\r\n\r\nIs there a reason for this? \r\n\r\nBecause I can't use the `f4` keybinding to goto the next result when it is opened.\r\nInstead I need first to press the `shift + f4`, which is the backward direction.\r\n\r\n\r\nHere is a simple solution.\r\n```diff\r\n def run(self, edit, characters):\r\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\r\n \r\n- # Move cursor to the end\r\n+ # Clear the selection\r\n selection = self.view.sel()\r\n selection.clear()\r\n- selection.add(sublime.Region(self.view.size(), self.view.size()))\r\n```\r\n\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\n\nOUTPUT_PANEL_SETTINGS = {\n \"auto_indent\": False,\n \"draw_indent_guides\": False,\n \"draw_white_space\": \"None\",\n \"gutter\": False,\n 'is_widget': True,\n \"line_numbers\": False,\n \"margin\": 3,\n \"match_brackets\": False,\n \"scroll_past_end\": False,\n \"tab_size\": 4,\n \"translate_tabs_to_spaces\": False,\n \"word_wrap\": False\n}\n\n\ndef create_output_panel(window: sublime.Window, name: str) -> sublime.View:\n panel = window.create_output_panel(name)\n settings = panel.settings()\n for key, value in OUTPUT_PANEL_SETTINGS.items():\n settings.set(key, value)\n return panel\n\n\ndef destroy_output_panels(window: sublime.Window):\n for panel_name in [\"references\", \"diagnostics\"]:\n window.destroy_output_panel(panel_name)\n\n\nclass LspClearPanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A clear_panel command to clear the error panel.\n \"\"\"\n\n def run(self, edit):\n self.view.set_read_only(False)\n self.view.erase(edit, sublime.Region(0, self.view.size()))\n self.view.set_read_only(True)\n\n\nclass LspUpdatePanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A update_panel command to update the error panel with new text.\n \"\"\"\n\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n\n # Move cursor to the end\n selection = self.view.sel()\n selection.clear()\n selection.add(sublime.Region(self.view.size(), self.view.size()))\n", "path": "plugin/core/panels.py"}], "after_files": [{"content": "import sublime\nimport sublime_plugin\n\n\nOUTPUT_PANEL_SETTINGS = {\n \"auto_indent\": False,\n \"draw_indent_guides\": False,\n \"draw_white_space\": \"None\",\n \"gutter\": False,\n 'is_widget': True,\n \"line_numbers\": False,\n \"margin\": 3,\n \"match_brackets\": False,\n \"scroll_past_end\": False,\n \"tab_size\": 4,\n \"translate_tabs_to_spaces\": False,\n \"word_wrap\": False\n}\n\n\ndef create_output_panel(window: sublime.Window, name: str) -> sublime.View:\n panel = window.create_output_panel(name)\n settings = panel.settings()\n for key, value in OUTPUT_PANEL_SETTINGS.items():\n settings.set(key, value)\n return panel\n\n\ndef destroy_output_panels(window: 
sublime.Window):\n for panel_name in [\"references\", \"diagnostics\"]:\n window.destroy_output_panel(panel_name)\n\n\nclass LspClearPanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A clear_panel command to clear the error panel.\n \"\"\"\n\n def run(self, edit):\n self.view.set_read_only(False)\n self.view.erase(edit, sublime.Region(0, self.view.size()))\n self.view.set_read_only(True)\n\n\nclass LspUpdatePanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A update_panel command to update the error panel with new text.\n \"\"\"\n\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n\n # Clear the selection\n selection = self.view.sel()\n selection.clear()\n", "path": "plugin/core/panels.py"}]} | 951 | 113 |