problem_id (string, len 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, len 13-58) | prompt (string, len 1.71k-9.01k) | golden_diff (string, len 151-4.94k) | verification_info (string, len 465-11.3k) | num_tokens_prompt (int64, 557-2.05k) | num_tokens_diff (int64, 48-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_27915 | rasdani/github-patches | git_diff | pulp__pulpcore-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Verify if domain name has more than 50 chars
fixes: #4976
</issue>
<code>
[start of pulpcore/app/viewsets/upload.py]
1 import re
2 from datetime import datetime
3
4 from gettext import gettext as _
5 from drf_yasg.utils import swagger_auto_schema
6 from drf_yasg.openapi import Parameter
7 from rest_framework import mixins, serializers
8 from rest_framework.decorators import detail_route
9 from rest_framework.response import Response
10
11 from pulpcore.app.models import Upload
12 from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer
13 from pulpcore.app.viewsets.base import NamedModelViewSet
14
15
16 class UploadViewSet(NamedModelViewSet,
17 mixins.CreateModelMixin,
18 mixins.RetrieveModelMixin,
19 mixins.UpdateModelMixin,
20 mixins.ListModelMixin):
21 """View for chunked uploads."""
22 endpoint_name = 'uploads'
23 queryset = Upload.objects.all()
24 serializer_class = UploadSerializer
25 http_method_names = ['get', 'post', 'head', 'put']
26
27 content_range_pattern = r'^bytes (\d+)-(\d+)/(\d+|[*])$'
28 content_range_parameter = \
29 Parameter(name='Content-Range', in_='header', required=True, type='string',
30 pattern=content_range_pattern,
31 description='The Content-Range header specifies the location of the file chunk '
32 'within the file.')
33
34 @swagger_auto_schema(operation_summary="Upload a file chunk",
35 request_body=UploadChunkSerializer,
36 manual_parameters=[content_range_parameter],
37 responses={200: UploadSerializer})
38 def update(self, request, pk=None):
39 """
40 Upload a chunk for an upload.
41 """
42 upload = self.get_object()
43
44 if upload.completed is not None:
45 raise serializers.ValidationError(_("Cannot upload chunk for a completed upload."))
46
47 try:
48 chunk = request.data['file']
49 except KeyError:
50 raise serializers.ValidationError(_("Missing 'file' parameter."))
51
52 content_range = request.META.get('HTTP_CONTENT_RANGE', '')
53 match = re.compile(self.content_range_pattern).match(content_range)
54 if not match:
55 raise serializers.ValidationError(_("Invalid or missing content range header."))
56 start = int(match[1])
57 end = int(match[2])
58
59 if (end - start + 1) != len(chunk):
60 raise serializers.ValidationError(_("Chunk size does not match content range."))
61
62 if end > upload.size - 1:
63 raise serializers.ValidationError(_("End byte is greater than upload size."))
64
65 upload.append(chunk, start)
66
67 serializer = UploadSerializer(upload, context={'request': request})
68 return Response(serializer.data)
69
70 @swagger_auto_schema(operation_summary="Finish an Upload",
71 request_body=UploadCommitSerializer,
72 responses={200: UploadSerializer})
73 @detail_route(methods=('put',))
74 def commit(self, request, pk):
75 """
76 Commit the upload and mark it as completed.
77 """
78 upload = self.get_object()
79
80 try:
81 sha256 = request.data['sha256']
82 except KeyError:
83 raise serializers.ValidationError(_("Checksum not supplied."))
84
85 if sha256 != upload.sha256:
86 raise serializers.ValidationError(_("Checksum does not match upload."))
87
88 if upload.completed is not None:
89 raise serializers.ValidationError(_("Upload is already complete."))
90
91 upload.completed = datetime.now()
92 upload.save()
93
94 serializer = UploadSerializer(upload, context={'request': request})
95 return Response(serializer.data)
96
[end of pulpcore/app/viewsets/upload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -10,19 +10,33 @@
from pulpcore.app.models import Upload
from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer
-from pulpcore.app.viewsets.base import NamedModelViewSet
+from pulpcore.app.viewsets import BaseFilterSet
+from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet
+from pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter
+
+
+class UploadFilter(BaseFilterSet):
+ completed = IsoDateTimeFilter(field_name='completed')
+
+ class Meta:
+ model = Upload
+ fields = {
+ 'completed': DATETIME_FILTER_OPTIONS + ['isnull']
+ }
class UploadViewSet(NamedModelViewSet,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
+ mixins.DestroyModelMixin,
mixins.ListModelMixin):
"""View for chunked uploads."""
endpoint_name = 'uploads'
queryset = Upload.objects.all()
serializer_class = UploadSerializer
- http_method_names = ['get', 'post', 'head', 'put']
+ filterset_class = UploadFilter
+ http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH
content_range_pattern = r'^bytes (\d+)-(\d+)/(\d+|[*])$'
content_range_parameter = \
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py\n--- a/pulpcore/app/viewsets/upload.py\n+++ b/pulpcore/app/viewsets/upload.py\n@@ -10,19 +10,33 @@\n \n from pulpcore.app.models import Upload\n from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer\n-from pulpcore.app.viewsets.base import NamedModelViewSet\n+from pulpcore.app.viewsets import BaseFilterSet\n+from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet\n+from pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter\n+\n+\n+class UploadFilter(BaseFilterSet):\n+ completed = IsoDateTimeFilter(field_name='completed')\n+\n+ class Meta:\n+ model = Upload\n+ fields = {\n+ 'completed': DATETIME_FILTER_OPTIONS + ['isnull']\n+ }\n \n \n class UploadViewSet(NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n+ mixins.DestroyModelMixin,\n mixins.ListModelMixin):\n \"\"\"View for chunked uploads.\"\"\"\n endpoint_name = 'uploads'\n queryset = Upload.objects.all()\n serializer_class = UploadSerializer\n- http_method_names = ['get', 'post', 'head', 'put']\n+ filterset_class = UploadFilter\n+ http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH\n \n content_range_pattern = r'^bytes (\\d+)-(\\d+)/(\\d+|[*])$'\n content_range_parameter = \\\n", "issue": "Verify if domain name has more than 50 chars\nfixes: #4976\n", "before_files": [{"content": "import re\nfrom datetime import datetime\n\nfrom gettext import gettext as _\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg.openapi import Parameter\nfrom rest_framework import mixins, serializers\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom pulpcore.app.models import Upload\nfrom pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer\nfrom pulpcore.app.viewsets.base import NamedModelViewSet\n\n\nclass UploadViewSet(NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.ListModelMixin):\n \"\"\"View for chunked uploads.\"\"\"\n endpoint_name = 'uploads'\n queryset = Upload.objects.all()\n serializer_class = UploadSerializer\n http_method_names = ['get', 'post', 'head', 'put']\n\n content_range_pattern = r'^bytes (\\d+)-(\\d+)/(\\d+|[*])$'\n content_range_parameter = \\\n Parameter(name='Content-Range', in_='header', required=True, type='string',\n pattern=content_range_pattern,\n description='The Content-Range header specifies the location of the file chunk '\n 'within the file.')\n\n @swagger_auto_schema(operation_summary=\"Upload a file chunk\",\n request_body=UploadChunkSerializer,\n manual_parameters=[content_range_parameter],\n responses={200: UploadSerializer})\n def update(self, request, pk=None):\n \"\"\"\n Upload a chunk for an upload.\n \"\"\"\n upload = self.get_object()\n\n if upload.completed is not None:\n raise serializers.ValidationError(_(\"Cannot upload chunk for a completed upload.\"))\n\n try:\n chunk = request.data['file']\n except KeyError:\n raise serializers.ValidationError(_(\"Missing 'file' parameter.\"))\n\n content_range = request.META.get('HTTP_CONTENT_RANGE', '')\n match = re.compile(self.content_range_pattern).match(content_range)\n if not match:\n raise serializers.ValidationError(_(\"Invalid or missing content range header.\"))\n start = int(match[1])\n end = int(match[2])\n\n if (end - start + 1) != 
len(chunk):\n raise serializers.ValidationError(_(\"Chunk size does not match content range.\"))\n\n if end > upload.size - 1:\n raise serializers.ValidationError(_(\"End byte is greater than upload size.\"))\n\n upload.append(chunk, start)\n\n serializer = UploadSerializer(upload, context={'request': request})\n return Response(serializer.data)\n\n @swagger_auto_schema(operation_summary=\"Finish an Upload\",\n request_body=UploadCommitSerializer,\n responses={200: UploadSerializer})\n @detail_route(methods=('put',))\n def commit(self, request, pk):\n \"\"\"\n Commit the upload and mark it as completed.\n \"\"\"\n upload = self.get_object()\n\n try:\n sha256 = request.data['sha256']\n except KeyError:\n raise serializers.ValidationError(_(\"Checksum not supplied.\"))\n\n if sha256 != upload.sha256:\n raise serializers.ValidationError(_(\"Checksum does not match upload.\"))\n\n if upload.completed is not None:\n raise serializers.ValidationError(_(\"Upload is already complete.\"))\n\n upload.completed = datetime.now()\n upload.save()\n\n serializer = UploadSerializer(upload, context={'request': request})\n return Response(serializer.data)\n", "path": "pulpcore/app/viewsets/upload.py"}]} | 1,448 | 350 |
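
To make the row above concrete, here is a hedged client-side sketch of the chunked-upload flow in `UploadViewSet`, plus the filtering and DELETE support the golden diff adds. The host, API prefix, credentials, and the `_href` field name are assumptions, not taken from the source.

```python
# Illustrative client only: URLs, auth, and response field names are assumed.
import hashlib
import requests

API = "http://localhost:24817"
AUTH = ("admin", "password")

data = b"x" * 1000
upload = requests.post(f"{API}/pulp/api/v3/uploads/",
                       json={"size": len(data)}, auth=AUTH).json()
href = upload["_href"]  # field name assumed

for start in (0, 500):  # two 500-byte chunks
    chunk = data[start:start + 500]
    end = start + len(chunk) - 1
    # Must match the viewset's regex: ^bytes (\d+)-(\d+)/(\d+|[*])$
    headers = {"Content-Range": f"bytes {start}-{end}/{len(data)}"}
    requests.put(f"{API}{href}", headers=headers,
                 files={"file": chunk}, auth=AUTH).raise_for_status()

# commit() compares this digest against Upload.sha256 and stamps 'completed'.
digest = hashlib.sha256(data).hexdigest()
requests.put(f"{API}{href}commit/", json={"sha256": digest},
             auth=AUTH).raise_for_status()

# After the patch, in-flight uploads can be listed and abandoned:
requests.get(f"{API}/pulp/api/v3/uploads/",
             params={"completed__isnull": True}, auth=AUTH)  # 'isnull' is in the diff
requests.delete(f"{API}{href}", auth=AUTH)  # DestroyModelMixin + 'delete'
```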
gh_patches_debug_10530 | rasdani/github-patches | git_diff | pytorch__examples-1084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The GPU load is unbalanced
https://github.com/pytorch/examples/blob/2ee8d43dbe420be152fd5ce0d80b43b419a0e352/distributed/ddp-tutorial-series/multigpu_torchrun.py#L39
When I run the code and resume from a existed .pt file. The memory usage of GPU0 is significantly higher than other GPUs.
It can be solved by adding a parameter "map_location".
`snapshot = torch.load(snapshot_path, map_location=torch.device('cuda', int(os.environ["LOCAL_RANK"])))`
## My Environment
cudatoolkit 10.2
pytorch 12.1
</issue>
<code>
[start of distributed/ddp-tutorial-series/multinode.py]
1 import torch
2 import torch.nn.functional as F
3 from torch.utils.data import Dataset, DataLoader
4 from datautils import MyTrainDataset
5
6 import torch.multiprocessing as mp
7 from torch.utils.data.distributed import DistributedSampler
8 from torch.nn.parallel import DistributedDataParallel as DDP
9 from torch.distributed import init_process_group, destroy_process_group
10 import os
11
12
13 def ddp_setup():
14 init_process_group(backend="nccl")
15
16 class Trainer:
17 def __init__(
18 self,
19 model: torch.nn.Module,
20 train_data: DataLoader,
21 optimizer: torch.optim.Optimizer,
22 save_every: int,
23 snapshot_path: str,
24 ) -> None:
25 self.local_rank = int(os.environ["LOCAL_RANK"])
26 self.global_rank = int(os.environ["RANK"])
27 self.model = model.to(self.local_rank)
28 self.train_data = train_data
29 self.optimizer = optimizer
30 self.save_every = save_every
31 self.epochs_run = 0
32 self.snapshot_path = snapshot_path
33 if os.path.exists(snapshot_path):
34 print("Loading snapshot")
35 self._load_snapshot(snapshot_path)
36
37 self.model = DDP(self.model, device_ids=[self.local_rank])
38
39 def _load_snapshot(self, snapshot_path):
40 snapshot = torch.load(snapshot_path)
41 self.model.load_state_dict(snapshot["MODEL_STATE"])
42 self.epochs_run = snapshot["EPOCHS_RUN"]
43 print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
44
45 def _run_batch(self, source, targets):
46 self.optimizer.zero_grad()
47 output = self.model(source)
48 loss = F.cross_entropy(output, targets)
49 loss.backward()
50 self.optimizer.step()
51
52 def _run_epoch(self, epoch):
53 b_sz = len(next(iter(self.train_data))[0])
54 print(f"[GPU{self.global_rank}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
55 self.train_data.sampler.set_epoch(epoch)
56 for source, targets in self.train_data:
57 source = source.to(self.local_rank)
58 targets = targets.to(self.local_rank)
59 self._run_batch(source, targets)
60
61 def _save_snapshot(self, epoch):
62 snapshot = {
63 "MODEL_STATE": self.model.module.state_dict(),
64 "EPOCHS_RUN": epoch,
65 }
66 torch.save(snapshot, self.snapshot_path)
67 print(f"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}")
68
69 def train(self, max_epochs: int):
70 for epoch in range(self.epochs_run, max_epochs):
71 self._run_epoch(epoch)
72 if self.local_rank == 0 and epoch % self.save_every == 0:
73 self._save_snapshot(epoch)
74
75
76 def load_train_objs():
77 train_set = MyTrainDataset(2048) # load your dataset
78 model = torch.nn.Linear(20, 1) # load your model
79 optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
80 return train_set, model, optimizer
81
82
83 def prepare_dataloader(dataset: Dataset, batch_size: int):
84 return DataLoader(
85 dataset,
86 batch_size=batch_size,
87 pin_memory=True,
88 shuffle=False,
89 sampler=DistributedSampler(dataset)
90 )
91
92
93 def main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = "snapshot.pt"):
94 ddp_setup()
95 dataset, model, optimizer = load_train_objs()
96 train_data = prepare_dataloader(dataset, batch_size)
97 trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)
98 trainer.train(total_epochs)
99 destroy_process_group()
100
101
102 if __name__ == "__main__":
103 import argparse
104 parser = argparse.ArgumentParser(description='simple distributed training job')
105 parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')
106 parser.add_argument('save_every', type=int, help='How often to save a snapshot')
107 parser.add_argument('--batch_size', default=32, help='Input batch size on each device (default: 32)')
108 args = parser.parse_args()
109
110 main(args.save_every, args.total_epochs, args.batch_size)
111
[end of distributed/ddp-tutorial-series/multinode.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/ddp-tutorial-series/multinode.py b/distributed/ddp-tutorial-series/multinode.py
--- a/distributed/ddp-tutorial-series/multinode.py
+++ b/distributed/ddp-tutorial-series/multinode.py
@@ -37,7 +37,8 @@
self.model = DDP(self.model, device_ids=[self.local_rank])
def _load_snapshot(self, snapshot_path):
- snapshot = torch.load(snapshot_path)
+ loc = f"cuda:{self.gpu_id}"
+ snapshot = torch.load(snapshot_path, map_location=loc)
self.model.load_state_dict(snapshot["MODEL_STATE"])
self.epochs_run = snapshot["EPOCHS_RUN"]
print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
| {"golden_diff": "diff --git a/distributed/ddp-tutorial-series/multinode.py b/distributed/ddp-tutorial-series/multinode.py\n--- a/distributed/ddp-tutorial-series/multinode.py\n+++ b/distributed/ddp-tutorial-series/multinode.py\n@@ -37,7 +37,8 @@\n self.model = DDP(self.model, device_ids=[self.local_rank])\n \n def _load_snapshot(self, snapshot_path):\n- snapshot = torch.load(snapshot_path)\n+ loc = f\"cuda:{self.gpu_id}\"\n+ snapshot = torch.load(snapshot_path, map_location=loc)\n self.model.load_state_dict(snapshot[\"MODEL_STATE\"])\n self.epochs_run = snapshot[\"EPOCHS_RUN\"]\n print(f\"Resuming training from snapshot at Epoch {self.epochs_run}\")\n", "issue": "The GPU load is unbalanced\nhttps://github.com/pytorch/examples/blob/2ee8d43dbe420be152fd5ce0d80b43b419a0e352/distributed/ddp-tutorial-series/multigpu_torchrun.py#L39\r\nWhen I run the code and resume from a existed .pt file. The memory usage of GPU0 is significantly higher than other GPUs. \r\nIt can be solved by adding a parameter \"map_location\".\r\n`snapshot = torch.load(snapshot_path, map_location=torch.device('cuda', int(os.environ[\"LOCAL_RANK\"])))`\r\n## My Environment\r\ncudatoolkit 10.2\r\npytorch 12.1\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom datautils import MyTrainDataset\n\nimport torch.multiprocessing as mp\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.distributed import init_process_group, destroy_process_group\nimport os\n\n\ndef ddp_setup():\n init_process_group(backend=\"nccl\")\n\nclass Trainer:\n def __init__(\n self,\n model: torch.nn.Module,\n train_data: DataLoader,\n optimizer: torch.optim.Optimizer,\n save_every: int,\n snapshot_path: str,\n ) -> None:\n self.local_rank = int(os.environ[\"LOCAL_RANK\"])\n self.global_rank = int(os.environ[\"RANK\"])\n self.model = model.to(self.local_rank)\n self.train_data = train_data\n self.optimizer = optimizer\n self.save_every = save_every\n self.epochs_run = 0\n self.snapshot_path = snapshot_path\n if os.path.exists(snapshot_path):\n print(\"Loading snapshot\")\n self._load_snapshot(snapshot_path)\n\n self.model = DDP(self.model, device_ids=[self.local_rank])\n\n def _load_snapshot(self, snapshot_path):\n snapshot = torch.load(snapshot_path)\n self.model.load_state_dict(snapshot[\"MODEL_STATE\"])\n self.epochs_run = snapshot[\"EPOCHS_RUN\"]\n print(f\"Resuming training from snapshot at Epoch {self.epochs_run}\")\n\n def _run_batch(self, source, targets):\n self.optimizer.zero_grad()\n output = self.model(source)\n loss = F.cross_entropy(output, targets)\n loss.backward()\n self.optimizer.step()\n\n def _run_epoch(self, epoch):\n b_sz = len(next(iter(self.train_data))[0])\n print(f\"[GPU{self.global_rank}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}\")\n self.train_data.sampler.set_epoch(epoch)\n for source, targets in self.train_data:\n source = source.to(self.local_rank)\n targets = targets.to(self.local_rank)\n self._run_batch(source, targets)\n\n def _save_snapshot(self, epoch):\n snapshot = {\n \"MODEL_STATE\": self.model.module.state_dict(),\n \"EPOCHS_RUN\": epoch,\n }\n torch.save(snapshot, self.snapshot_path)\n print(f\"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}\")\n\n def train(self, max_epochs: int):\n for epoch in range(self.epochs_run, max_epochs):\n self._run_epoch(epoch)\n if self.local_rank == 0 and epoch % 
self.save_every == 0:\n self._save_snapshot(epoch)\n\n\ndef load_train_objs():\n train_set = MyTrainDataset(2048) # load your dataset\n model = torch.nn.Linear(20, 1) # load your model\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n return train_set, model, optimizer\n\n\ndef prepare_dataloader(dataset: Dataset, batch_size: int):\n return DataLoader(\n dataset,\n batch_size=batch_size,\n pin_memory=True,\n shuffle=False,\n sampler=DistributedSampler(dataset)\n )\n\n\ndef main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = \"snapshot.pt\"):\n ddp_setup()\n dataset, model, optimizer = load_train_objs()\n train_data = prepare_dataloader(dataset, batch_size)\n trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)\n trainer.train(total_epochs)\n destroy_process_group()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='simple distributed training job')\n parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')\n parser.add_argument('save_every', type=int, help='How often to save a snapshot')\n parser.add_argument('--batch_size', default=32, help='Input batch size on each device (default: 32)')\n args = parser.parse_args()\n \n main(args.save_every, args.total_epochs, args.batch_size)\n", "path": "distributed/ddp-tutorial-series/multinode.py"}]} | 1,826 | 172 |
gh_patches_debug_22395 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1858 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update CONTRIBUTING, README and package metadata
### Description:
- [x] Update `CONTRIBUTING.md` to mention the usage of `nox` instead of `tox`
- [x] Reorganize `README.md`
- [x] Update `setup.py` to add links to the project
CI/CD: Tox -> Nox
We are a Python project, and working with Python files should be easier for any Python developer than working with the plain-text configuration that tox uses.
</issue>
<code>
[start of noxfile.py]
1 """Nox tool configuration file.
2
3 Nox is Tox tool replacement.
4 """
5 import shutil
6 from pathlib import Path
7
8 import nox
9
10 nox.options.keywords = "not docs"
11
12
13 def base_install(session):
14 """Create basic environment setup for tests and linting."""
15 session.install("-r", "test_requirements.txt")
16 session.install("-e", ".")
17 return session
18
19
20 @nox.session(python="3.10")
21 def lint(session):
22 """Run linting check locally."""
23 session.install("pre-commit")
24 session.run("pre-commit", "run", "-a")
25
26
27 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11"])
28 def tests(session):
29 """Run test suite with pytest."""
30 session = base_install(session)
31 session.run(
32 "pytest",
33 "--cov-report=html",
34 "--cov-report=xml",
35 "--cov-branch",
36 "--cov-fail-under=100",
37 )
38
39
40 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11"])
41 def safety_tests(session):
42 """Run safety tests."""
43 session = base_install(session)
44 session.run("safety", "check", "--full-report")
45
46
47 @nox.session(python="3.10")
48 def documentation_tests(session):
49 """Run documentation tests."""
50 return docs(session, batch_run=True)
51
52
53 @nox.session(python="3.10")
54 def docs(session, batch_run: bool = False):
55 """Build the documentation or serve documentation interactively."""
56 shutil.rmtree(Path("docs").joinpath("_build"), ignore_errors=True)
57 session.install("-r", "docs/requirements.txt")
58 session.install("-e", ".")
59 session.cd("docs")
60 sphinx_args = ["-b", "html", "-W", ".", "_build/html"]
61
62 if not session.interactive or batch_run:
63 sphinx_cmd = "sphinx-build"
64 else:
65 sphinx_cmd = "sphinx-autobuild"
66 sphinx_args.extend(
67 [
68 "--open-browser",
69 "--port",
70 "9812",
71 "--watch",
72 "../*.md",
73 "--watch",
74 "../*.rst",
75 "--watch",
76 "../*.py",
77 "--watch",
78 "../cookiecutter",
79 ]
80 )
81
82 session.run(sphinx_cmd, *sphinx_args)
83
[end of noxfile.py]
[start of setup.py]
1 """cookiecutter distutils configuration."""
2 from setuptools import setup
3
4 version = "2.1.2.dev0"
5
6 with open('README.md', encoding='utf-8') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'binaryornot>=0.4.4',
11 'Jinja2>=2.7,<4.0.0',
12 'click>=7.0,<9.0.0',
13 'pyyaml>=5.3.1',
14 'jinja2-time>=0.2.0',
15 'python-slugify>=4.0.0',
16 'requests>=2.23.0',
17 ]
18
19 setup(
20 name='cookiecutter',
21 version=version,
22 description=(
23 'A command-line utility that creates projects from project '
24 'templates, e.g. creating a Python package project from a '
25 'Python package project template.'
26 ),
27 long_description=readme,
28 long_description_content_type='text/markdown',
29 author='Audrey Feldroy',
30 author_email='[email protected]',
31 url='https://github.com/cookiecutter/cookiecutter',
32 packages=['cookiecutter'],
33 package_dir={'cookiecutter': 'cookiecutter'},
34 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
35 include_package_data=True,
36 python_requires='>=3.7',
37 install_requires=requirements,
38 license='BSD',
39 zip_safe=False,
40 classifiers=[
41 "Development Status :: 5 - Production/Stable",
42 "Environment :: Console",
43 "Intended Audience :: Developers",
44 "Natural Language :: English",
45 "License :: OSI Approved :: BSD License",
46 "Programming Language :: Python :: 3 :: Only",
47 "Programming Language :: Python :: 3",
48 "Programming Language :: Python :: 3.7",
49 "Programming Language :: Python :: 3.8",
50 "Programming Language :: Python :: 3.9",
51 "Programming Language :: Python :: 3.10",
52 "Programming Language :: Python :: 3.11",
53 "Programming Language :: Python :: Implementation :: CPython",
54 "Programming Language :: Python :: Implementation :: PyPy",
55 "Programming Language :: Python",
56 "Topic :: Software Development",
57 ],
58 keywords=[
59 "cookiecutter",
60 "Python",
61 "projects",
62 "project templates",
63 "Jinja2",
64 "skeleton",
65 "scaffolding",
66 "project directory",
67 "package",
68 "packaging",
69 ],
70 )
71
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -28,12 +28,14 @@
def tests(session):
"""Run test suite with pytest."""
session = base_install(session)
+ posargs = session.posargs or ""
session.run(
"pytest",
"--cov-report=html",
"--cov-report=xml",
"--cov-branch",
"--cov-fail-under=100",
+ *posargs,
)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,11 @@
author='Audrey Feldroy',
author_email='[email protected]',
url='https://github.com/cookiecutter/cookiecutter',
+ project_urls={
+ "Documentation": "https://cookiecutter.readthedocs.io",
+ "Issues": "https://github.com/cookiecutter/cookiecutter/issues",
+ "Discord": "https://discord.gg/9BrxzPKuEW",
+ },
packages=['cookiecutter'],
package_dir={'cookiecutter': 'cookiecutter'},
entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -28,12 +28,14 @@\n def tests(session):\n \"\"\"Run test suite with pytest.\"\"\"\n session = base_install(session)\n+ posargs = session.posargs or \"\"\n session.run(\n \"pytest\",\n \"--cov-report=html\",\n \"--cov-report=xml\",\n \"--cov-branch\",\n \"--cov-fail-under=100\",\n+ *posargs,\n )\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,11 @@\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n+ project_urls={\n+ \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n+ \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n+ \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n+ },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n", "issue": "Update CONTRIBUTING, README and package metadata\n### Description:\r\n\r\n- [x] Update `CONTRIBUTING.md` to mention the usage of `nox` instead of `tox`\r\n- [x] Reorganize `README.md`\r\n- [x] Update `setup.py` to add links to the project\nCI/CD: Tox -> Nox\nWe are python project, and working with python files should be easier for any python developer, over working with text configuration in tox.\n", "before_files": [{"content": "\"\"\"Nox tool configuration file.\n\nNox is Tox tool replacement.\n\"\"\"\nimport shutil\nfrom pathlib import Path\n\nimport nox\n\nnox.options.keywords = \"not docs\"\n\n\ndef base_install(session):\n \"\"\"Create basic environment setup for tests and linting.\"\"\"\n session.install(\"-r\", \"test_requirements.txt\")\n session.install(\"-e\", \".\")\n return session\n\n\[email protected](python=\"3.10\")\ndef lint(session):\n \"\"\"Run linting check locally.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"-a\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"])\ndef tests(session):\n \"\"\"Run test suite with pytest.\"\"\"\n session = base_install(session)\n session.run(\n \"pytest\",\n \"--cov-report=html\",\n \"--cov-report=xml\",\n \"--cov-branch\",\n \"--cov-fail-under=100\",\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\"])\ndef safety_tests(session):\n \"\"\"Run safety tests.\"\"\"\n session = base_install(session)\n session.run(\"safety\", \"check\", \"--full-report\")\n\n\[email protected](python=\"3.10\")\ndef documentation_tests(session):\n \"\"\"Run documentation tests.\"\"\"\n return docs(session, batch_run=True)\n\n\[email protected](python=\"3.10\")\ndef docs(session, batch_run: bool = False):\n \"\"\"Build the documentation or serve documentation interactively.\"\"\"\n shutil.rmtree(Path(\"docs\").joinpath(\"_build\"), ignore_errors=True)\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\"-e\", \".\")\n session.cd(\"docs\")\n sphinx_args = [\"-b\", \"html\", \"-W\", \".\", \"_build/html\"]\n\n if not session.interactive or batch_run:\n sphinx_cmd = \"sphinx-build\"\n else:\n sphinx_cmd = \"sphinx-autobuild\"\n sphinx_args.extend(\n [\n \"--open-browser\",\n \"--port\",\n \"9812\",\n \"--watch\",\n \"../*.md\",\n \"--watch\",\n \"../*.rst\",\n \"--watch\",\n \"../*.py\",\n \"--watch\",\n \"../cookiecutter\",\n ]\n )\n\n session.run(sphinx_cmd, *sphinx_args)\n", "path": "noxfile.py"}, {"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import 
setup\n\nversion = \"2.1.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 2,012 | 290 |
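
One behavioural detail in the diff above: `session.posargs` forwards everything after `--` on the nox command line (e.g. `nox -s tests -- -k smoke`) straight to pytest. A stripped-down sketch of the pattern:

```python
import nox


@nox.session
def tests(session):
    session.install("pytest")
    # `nox -s tests -- -k smoke -x` ends up running `pytest -k smoke -x`;
    # with nothing after `--`, posargs is empty and plain `pytest` runs.
    session.run("pytest", *(session.posargs or []))
```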
gh_patches_debug_40014 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
we should clean up CantusDB/django/cantusdb_project
```shell
root@7aa2f88fb303:/code/django/cantusdb_project# ls
align_text_mel.py create_fixtures.sh error_log.txt manage.py requirements.txt
articles differentia_data.txt latin_syllabification.py next_chants.py static
break_json.py editors_chant.csv load_fixtures.sh old_users_list.py templates
cantusdb editors_source.csv main_app oldcantususer_uid_role.csv users
```
The following files have already served their purpose:
- `differentia_data.txt` (used in #1137)
- `editors_chant.csv` (used in the data sync from OldCantus to New)
- `editors_source.csv` (used in the data sync from OldCantus to New)
- `old_users_list.py` (used in the data sync from OldCantus to New)
- `oldcantususer_uid_role.csv` (used in the data sync from OldCantus to New, created by `old_users_list.py`)
- `create_fixtures.sh`, (we no longer migrate data using fixtures)
- `error_log.txt` (This is a log generated during the syncing process from OldCantus to NewCantus. It's not clear why it was committed to the repo in the first place.)
- `break_json.py` (we no longer migrate data using fixtures)
- `load_fixtures.sh` (we no longer migrate data using fixtures)
I propose deleting these files, so that future developers don't need to spend time figuring out what they are.
</issue>
<code>
[start of django/cantusdb_project/old_users_list.py]
1 import csv
2 import lxml.html as lh
3 import requests
4
5 with open("oldcantususer_uid_role.csv", "r") as csvinput:
6 with open("oldcantususer_uid_role_detailed.csv", "w") as csvoutput:
7 with open("id_username_email.csv", "r") as csvinput_username_email:
8 writer = csv.writer(csvoutput, lineterminator="\n")
9 reader = csv.reader(csvinput)
10 reader_username_email = csv.reader(csvinput_username_email)
11
12 # header
13 writer.writerow(
14 [
15 "uid",
16 "old role",
17 "new role",
18 "name",
19 "surname",
20 "institution",
21 "town",
22 "country",
23 "username",
24 "email",
25 ]
26 )
27
28 for row, row_username_email in zip(reader, reader_username_email):
29 old_role = row[1]
30 if old_role == "administrator":
31 row.append("project manager")
32 elif old_role == "anonymous user":
33 row.append("")
34 elif old_role == "authenticated user":
35 row.append("")
36 elif old_role == "contributor":
37 row.append("contributor")
38 elif old_role == "Debra":
39 row.append("project manager")
40 elif old_role == "editor":
41 row.append("editor")
42 elif old_role == "power":
43 row.append("editor")
44 elif old_role == "proofreader":
45 row.append("editor")
46 elif old_role == "SIMSSA contributor":
47 row.append("contributor")
48
49 id = row[0]
50 url = f"https://cantus.uwaterloo.ca/user/{id}"
51 response = requests.get(url)
52 doc = lh.fromstring(response.content)
53
54 try:
55 name = (
56 doc.find_class("field-name-field-name")[0]
57 .find_class("field-item")[0]
58 .text_content()
59 )
60 except:
61 name = ""
62 try:
63 surname = (
64 doc.find_class("field-name-field-surname")[0]
65 .find_class("field-item")[0]
66 .text_content()
67 )
68 except:
69 surname = ""
70 try:
71 institution = (
72 doc.find_class("field-name-field-institution")[0]
73 .find_class("field-item")[0]
74 .text_content()
75 )
76 except:
77 institution = ""
78 try:
79 town = (
80 doc.find_class("field-name-field-town")[0]
81 .find_class("field-item")[0]
82 .text_content()
83 )
84 except:
85 town = ""
86 try:
87 country = (
88 doc.find_class("field-name-field-country")[0]
89 .find_class("field-item")[0]
90 .text_content()
91 )
92 except:
93 country = ""
94
95 username = row_username_email[1]
96 email = row_username_email[2]
97
98 row.append(name)
99 row.append(surname)
100 row.append(institution)
101 row.append(town)
102 row.append(country)
103 row.append(username)
104 row.append(email)
105
106 writer.writerow(row)
107
[end of django/cantusdb_project/old_users_list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/old_users_list.py b/django/cantusdb_project/old_users_list.py
deleted file mode 100755
--- a/django/cantusdb_project/old_users_list.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import csv
-import lxml.html as lh
-import requests
-
-with open("oldcantususer_uid_role.csv", "r") as csvinput:
- with open("oldcantususer_uid_role_detailed.csv", "w") as csvoutput:
- with open("id_username_email.csv", "r") as csvinput_username_email:
- writer = csv.writer(csvoutput, lineterminator="\n")
- reader = csv.reader(csvinput)
- reader_username_email = csv.reader(csvinput_username_email)
-
- # header
- writer.writerow(
- [
- "uid",
- "old role",
- "new role",
- "name",
- "surname",
- "institution",
- "town",
- "country",
- "username",
- "email",
- ]
- )
-
- for row, row_username_email in zip(reader, reader_username_email):
- old_role = row[1]
- if old_role == "administrator":
- row.append("project manager")
- elif old_role == "anonymous user":
- row.append("")
- elif old_role == "authenticated user":
- row.append("")
- elif old_role == "contributor":
- row.append("contributor")
- elif old_role == "Debra":
- row.append("project manager")
- elif old_role == "editor":
- row.append("editor")
- elif old_role == "power":
- row.append("editor")
- elif old_role == "proofreader":
- row.append("editor")
- elif old_role == "SIMSSA contributor":
- row.append("contributor")
-
- id = row[0]
- url = f"https://cantus.uwaterloo.ca/user/{id}"
- response = requests.get(url)
- doc = lh.fromstring(response.content)
-
- try:
- name = (
- doc.find_class("field-name-field-name")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- name = ""
- try:
- surname = (
- doc.find_class("field-name-field-surname")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- surname = ""
- try:
- institution = (
- doc.find_class("field-name-field-institution")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- institution = ""
- try:
- town = (
- doc.find_class("field-name-field-town")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- town = ""
- try:
- country = (
- doc.find_class("field-name-field-country")[0]
- .find_class("field-item")[0]
- .text_content()
- )
- except:
- country = ""
-
- username = row_username_email[1]
- email = row_username_email[2]
-
- row.append(name)
- row.append(surname)
- row.append(institution)
- row.append(town)
- row.append(country)
- row.append(username)
- row.append(email)
-
- writer.writerow(row)
| {"golden_diff": "diff --git a/django/cantusdb_project/old_users_list.py b/django/cantusdb_project/old_users_list.py\ndeleted file mode 100755\n--- a/django/cantusdb_project/old_users_list.py\n+++ /dev/null\n@@ -1,106 +0,0 @@\n-import csv\n-import lxml.html as lh\n-import requests\n-\n-with open(\"oldcantususer_uid_role.csv\", \"r\") as csvinput:\n- with open(\"oldcantususer_uid_role_detailed.csv\", \"w\") as csvoutput:\n- with open(\"id_username_email.csv\", \"r\") as csvinput_username_email:\n- writer = csv.writer(csvoutput, lineterminator=\"\\n\")\n- reader = csv.reader(csvinput)\n- reader_username_email = csv.reader(csvinput_username_email)\n-\n- # header\n- writer.writerow(\n- [\n- \"uid\",\n- \"old role\",\n- \"new role\",\n- \"name\",\n- \"surname\",\n- \"institution\",\n- \"town\",\n- \"country\",\n- \"username\",\n- \"email\",\n- ]\n- )\n-\n- for row, row_username_email in zip(reader, reader_username_email):\n- old_role = row[1]\n- if old_role == \"administrator\":\n- row.append(\"project manager\")\n- elif old_role == \"anonymous user\":\n- row.append(\"\")\n- elif old_role == \"authenticated user\":\n- row.append(\"\")\n- elif old_role == \"contributor\":\n- row.append(\"contributor\")\n- elif old_role == \"Debra\":\n- row.append(\"project manager\")\n- elif old_role == \"editor\":\n- row.append(\"editor\")\n- elif old_role == \"power\":\n- row.append(\"editor\")\n- elif old_role == \"proofreader\":\n- row.append(\"editor\")\n- elif old_role == \"SIMSSA contributor\":\n- row.append(\"contributor\")\n-\n- id = row[0]\n- url = f\"https://cantus.uwaterloo.ca/user/{id}\"\n- response = requests.get(url)\n- doc = lh.fromstring(response.content)\n-\n- try:\n- name = (\n- doc.find_class(\"field-name-field-name\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- name = \"\"\n- try:\n- surname = (\n- doc.find_class(\"field-name-field-surname\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- surname = \"\"\n- try:\n- institution = (\n- doc.find_class(\"field-name-field-institution\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- institution = \"\"\n- try:\n- town = (\n- doc.find_class(\"field-name-field-town\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- town = \"\"\n- try:\n- country = (\n- doc.find_class(\"field-name-field-country\")[0]\n- .find_class(\"field-item\")[0]\n- .text_content()\n- )\n- except:\n- country = \"\"\n-\n- username = row_username_email[1]\n- email = row_username_email[2]\n-\n- row.append(name)\n- row.append(surname)\n- row.append(institution)\n- row.append(town)\n- row.append(country)\n- row.append(username)\n- row.append(email)\n-\n- writer.writerow(row)\n", "issue": "we should clean up CantusDB/django/cantusdb_project\n```shell\r\nroot@7aa2f88fb303:/code/django/cantusdb_project# ls\r\nalign_text_mel.py create_fixtures.sh\t error_log.txt\t\t manage.py\t\t requirements.txt\r\narticles\t differentia_data.txt latin_syllabification.py next_chants.py\t static\r\nbreak_json.py\t editors_chant.csv\t load_fixtures.sh\t old_users_list.py\t templates\r\ncantusdb\t editors_source.csv\t main_app\t\t oldcantususer_uid_role.csv users\r\n```\r\nThe following files have already served their purpose:\r\n- `differentia_data.txt` (used in #1137)\r\n- `editors_chant.csv` (used in the data sync from OldCantus to New)\r\n- `editors_source.csv` (used in the data sync from OldCantus to New)\r\n- `old_users_list.py` (used in the data sync from OldCantus to New)\r\n- 
`oldcantususer_uid_role.csv` (used in the data sync from OldCantus to New, created by `old_users_list.py`)\r\n- `create_fixtures.sh`, (we no longer migrate data using fixtures)\r\n- `error_log.txt` (This is a log generated during the syncing process from OldCantus to NewCantus. It's not clear why it was committed to the repo in the first place.)\r\n- `break_json.py` (we no longer migrate data using fixtures)\r\n- `load_fixtures.sh` (we no longer migrate data using fixtures)\r\n\r\nI propose deleting these files, so that future developers don't need to spend time figuring out what they are.\n", "before_files": [{"content": "import csv\nimport lxml.html as lh\nimport requests\n\nwith open(\"oldcantususer_uid_role.csv\", \"r\") as csvinput:\n with open(\"oldcantususer_uid_role_detailed.csv\", \"w\") as csvoutput:\n with open(\"id_username_email.csv\", \"r\") as csvinput_username_email:\n writer = csv.writer(csvoutput, lineterminator=\"\\n\")\n reader = csv.reader(csvinput)\n reader_username_email = csv.reader(csvinput_username_email)\n\n # header\n writer.writerow(\n [\n \"uid\",\n \"old role\",\n \"new role\",\n \"name\",\n \"surname\",\n \"institution\",\n \"town\",\n \"country\",\n \"username\",\n \"email\",\n ]\n )\n\n for row, row_username_email in zip(reader, reader_username_email):\n old_role = row[1]\n if old_role == \"administrator\":\n row.append(\"project manager\")\n elif old_role == \"anonymous user\":\n row.append(\"\")\n elif old_role == \"authenticated user\":\n row.append(\"\")\n elif old_role == \"contributor\":\n row.append(\"contributor\")\n elif old_role == \"Debra\":\n row.append(\"project manager\")\n elif old_role == \"editor\":\n row.append(\"editor\")\n elif old_role == \"power\":\n row.append(\"editor\")\n elif old_role == \"proofreader\":\n row.append(\"editor\")\n elif old_role == \"SIMSSA contributor\":\n row.append(\"contributor\")\n\n id = row[0]\n url = f\"https://cantus.uwaterloo.ca/user/{id}\"\n response = requests.get(url)\n doc = lh.fromstring(response.content)\n\n try:\n name = (\n doc.find_class(\"field-name-field-name\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n name = \"\"\n try:\n surname = (\n doc.find_class(\"field-name-field-surname\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n surname = \"\"\n try:\n institution = (\n doc.find_class(\"field-name-field-institution\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n institution = \"\"\n try:\n town = (\n doc.find_class(\"field-name-field-town\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n town = \"\"\n try:\n country = (\n doc.find_class(\"field-name-field-country\")[0]\n .find_class(\"field-item\")[0]\n .text_content()\n )\n except:\n country = \"\"\n\n username = row_username_email[1]\n email = row_username_email[2]\n\n row.append(name)\n row.append(surname)\n row.append(institution)\n row.append(town)\n row.append(country)\n row.append(username)\n row.append(email)\n\n writer.writerow(row)\n", "path": "django/cantusdb_project/old_users_list.py"}]} | 1,765 | 809 |
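
A side observation on `old_users_list.py` shown above: the role translation is a pure lookup, so the if/elif ladder can be written as a dictionary. This is only a sketch; the empty-string default for unmatched roles is an assumption, since the original appends nothing when no branch matches.

```python
ROLE_MAP = {
    "administrator": "project manager",
    "anonymous user": "",
    "authenticated user": "",
    "contributor": "contributor",
    "Debra": "project manager",
    "editor": "editor",
    "power": "editor",
    "proofreader": "editor",
    "SIMSSA contributor": "contributor",
}


def new_role(old_role: str) -> str:
    # The original script appends nothing for unmatched roles;
    # returning "" here is a simplifying assumption.
    return ROLE_MAP.get(old_role, "")
```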
gh_patches_debug_19105 | rasdani/github-patches | git_diff | dotkom__onlineweb4-321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Registering with an existing e-mail creates the user
When registering:
Choose a username
Enter an e-mail address already in use.
The user will be created, and your chosen username will be taken!
</issue>
<code>
[start of apps/authentication/forms.py]
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import re
5
6 from django import forms
7 from django.contrib import auth
8 from django.utils.translation import ugettext as _
9
10 from apps.authentication.models import OnlineUser as User
11
12 class LoginForm(forms.Form):
13 username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
14 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Passord"))
15 user = None
16
17 def clean(self):
18 if self._errors:
19 return
20
21 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
22
23 if user:
24 if user.is_active:
25 self.user = user
26 else:
27 self._errors['username'] = self.error_class([_(u"Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")])
28 else:
29 self._errors['username'] = self.error_class([_(u"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")])
30 return self.cleaned_data
31
32 def login(self, request):
33 try:
34 User.objects.get(username=request.POST['username'])
35 except:
36 return False
37 if self.is_valid():
38 auth.login(request, self.user)
39 request.session.set_expiry(0)
40 return True
41 return False
42
43 class RegisterForm(forms.Form):
44 username = forms.CharField(label=_("brukernavn"), max_length=20)
45 first_name = forms.CharField(label=_("fornavn"), max_length=50)
46 last_name = forms.CharField(label=_("etternavn"), max_length=50)
47 email = forms.EmailField(label=_("epost"), max_length=50)
48 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("passord"))
49 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("gjenta passord"))
50 address = forms.CharField(label=_("adresse"), max_length=50)
51 zip_code = forms.CharField(label=_("postnummer"), max_length=4)
52 phone = forms.CharField(label=_("telefon"), max_length=20)
53
54 def clean(self):
55 super(RegisterForm, self).clean()
56 if self.is_valid():
57 cleaned_data = self.cleaned_data
58
59 # Check passwords
60 if cleaned_data['password'] != cleaned_data['repeat_password']:
61 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
62
63 # Check username
64 username = cleaned_data['username']
65 if User.objects.filter(username=username).count() > 0:
66 self._errors['username'] = self.error_class([_(u"Brukernavnet er allerede registrert.")])
67 if not re.match("^[a-zA-Z0-9_-]+$", username):
68 self._errors['username'] = self.error_class([_(u"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")])
69
70 # Check email
71 email = cleaned_data['email']
72 if User.objects.filter(email=email).count() > 0:
73 self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")])
74
75 # ZIP code digits only
76 zip_code = cleaned_data['zip_code']
77 if len(zip_code) != 4 or not zip_code.isdigit():
78 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
79
80 return cleaned_data
81
82 class RecoveryForm(forms.Form):
83 email = forms.EmailField(label="Email", max_length=50)
84
85 class ChangePasswordForm(forms.Form):
86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"nytt passord"))
87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"gjenta passord"))
88
89 def clean(self):
90 super(ChangePasswordForm, self).clean()
91 if self.is_valid():
92 cleaned_data = self.cleaned_data
93
94 # Check passwords
95 if cleaned_data['new_password'] != cleaned_data['repeat_password']:
96 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
97
98 return cleaned_data
99
100
101 class NewEmailForm(forms.Form):
102 new_email = forms.EmailField(_(u"ny epostadresse"))
103
[end of apps/authentication/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py
--- a/apps/authentication/forms.py
+++ b/apps/authentication/forms.py
@@ -7,7 +7,7 @@
from django.contrib import auth
from django.utils.translation import ugettext as _
-from apps.authentication.models import OnlineUser as User
+from apps.authentication.models import OnlineUser as User, Email
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
@@ -69,7 +69,7 @@
# Check email
email = cleaned_data['email']
- if User.objects.filter(email=email).count() > 0:
+ if Email.objects.filter(email=email).count() > 0:
self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")])
# ZIP code digits only
| {"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -7,7 +7,7 @@\n from django.contrib import auth\n from django.utils.translation import ugettext as _\n \n-from apps.authentication.models import OnlineUser as User\n+from apps.authentication.models import OnlineUser as User, Email\n \n class LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n@@ -69,7 +69,7 @@\n \n # Check email\n email = cleaned_data['email']\n- if User.objects.filter(email=email).count() > 0:\n+ if Email.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n \n # ZIP code digits only\n", "issue": "Registering with an existing e-mail creates the user\nWhen registering:\n\nChoose a username\nEnter an email-adresse already in use.\nThe user will be created, and your chosen username will be taken!\n\nRegistering with an existing e-mail creates the user\nWhen registering:\n\nChoose a username\nEnter an email-adresse already in use.\nThe user will be created, and your chosen username will be taken!\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n request.session.set_expiry(0)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"gjenta passord\"))\n address = forms.CharField(label=_(\"adresse\"), max_length=50)\n zip_code = forms.CharField(label=_(\"postnummer\"), max_length=4)\n phone = forms.CharField(label=_(\"telefon\"), max_length=20)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email']\n if User.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}]} | 1,780 | 200 |
gh_patches_debug_8087 | rasdani/github-patches | git_diff | ansible__ansible-11609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
set_fact skipping
The following code works under ansible 1.9 but fails on the latest devel checkout. It runs the same rules, but skips the set_fact command.
```
- name: Set zookeeper ID facts
gather_facts: True
hosts: zookeeper
user: username
sudo: True
tasks:
- set_fact: zkid={{ item.0 | int + 1 }}
when: hostvars[item.1]['ansible_hostname'] == ansible_hostname
with_indexed_items: groups['zookeeper']
```
This assigns a unique id to each zookeeper instance dynamically.
</issue>
<code>
[start of lib/ansible/executor/task_result.py]
1 # (c) 2012-2014, Michael DeHaan <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 # Make coding more python3-ish
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 from ansible.parsing import DataLoader
23
24 class TaskResult:
25 '''
26 This class is responsible for interpretting the resulting data
27 from an executed task, and provides helper methods for determining
28 the result of a given task.
29 '''
30
31 def __init__(self, host, task, return_data):
32 self._host = host
33 self._task = task
34 if isinstance(return_data, dict):
35 self._result = return_data.copy()
36 else:
37 self._result = DataLoader().load(return_data)
38
39 def is_changed(self):
40 return self._check_key('changed')
41
42 def is_skipped(self):
43 return self._check_key('skipped')
44
45 def is_failed(self):
46 if 'failed_when_result' in self._result or \
47 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
48 return self._check_key('failed_when_result')
49 else:
50 return self._check_key('failed') or self._result.get('rc', 0) != 0
51
52 def is_unreachable(self):
53 return self._check_key('unreachable')
54
55 def _check_key(self, key):
56 if 'results' in self._result:
57 flag = False
58 for res in self._result.get('results', []):
59 if isinstance(res, dict):
60 flag |= res.get(key, False)
61 return flag
62 else:
63 return self._result.get(key, False)
64
[end of lib/ansible/executor/task_result.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
--- a/lib/ansible/executor/task_result.py
+++ b/lib/ansible/executor/task_result.py
@@ -40,7 +40,14 @@
return self._check_key('changed')
def is_skipped(self):
- return self._check_key('skipped')
+ if 'results' in self._result:
+ flag = True
+ for res in self._result.get('results', []):
+ if isinstance(res, dict):
+ flag &= res.get('skipped', False)
+ return flag
+ else:
+ return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
| {"golden_diff": "diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py\n--- a/lib/ansible/executor/task_result.py\n+++ b/lib/ansible/executor/task_result.py\n@@ -40,7 +40,14 @@\n return self._check_key('changed')\n \n def is_skipped(self):\n- return self._check_key('skipped')\n+ if 'results' in self._result:\n+ flag = True\n+ for res in self._result.get('results', []):\n+ if isinstance(res, dict):\n+ flag &= res.get('skipped', False)\n+ return flag\n+ else:\n+ return self._result.get('skipped', False)\n \n def is_failed(self):\n if 'failed_when_result' in self._result or \\\n", "issue": "set_fact skipping\nThe following code works under ansible 1.9 but fails on the latest devel checkout. It runs the same rules, but skips the set_fact command.\n\n```\n- name: Set zookeeper ID facts\n gather_facts: True\n hosts: zookeeper\n user: username\n sudo: True\n tasks:\n - set_fact: zkid={{ item.0 | int + 1 }}\n when: hostvars[item.1]['ansible_hostname'] == ansible_hostname\n with_indexed_items: groups['zookeeper']\n```\n\nThis assigns a unique id to each zookeeper instance dynamically.\n\n", "before_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.parsing import DataLoader\n\nclass TaskResult:\n '''\n This class is responsible for interpretting the resulting data\n from an executed task, and provides helper methods for determining\n the result of a given task.\n '''\n\n def __init__(self, host, task, return_data):\n self._host = host\n self._task = task\n if isinstance(return_data, dict):\n self._result = return_data.copy()\n else:\n self._result = DataLoader().load(return_data)\n\n def is_changed(self):\n return self._check_key('changed')\n\n def is_skipped(self):\n return self._check_key('skipped')\n\n def is_failed(self):\n if 'failed_when_result' in self._result or \\\n 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:\n return self._check_key('failed_when_result')\n else:\n return self._check_key('failed') or self._result.get('rc', 0) != 0\n\n def is_unreachable(self):\n return self._check_key('unreachable')\n\n def _check_key(self, key):\n if 'results' in self._result:\n flag = False\n for res in self._result.get('results', []):\n if isinstance(res, dict):\n flag |= res.get(key, False)\n return flag\n else:\n return self._result.get(key, False)\n", "path": "lib/ansible/executor/task_result.py"}]} | 1,322 | 179 |
gh_patches_debug_28365 | rasdani/github-patches | git_diff | learningequality__kolibri-8691 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Learner - Cannot change username and full name
## Observed behavior
The fields for editing a learner's username and full name are disabled and I cannot change them.
## Expected behavior
It should be possible for a learner to edit their username and full name if the options are enabled in Facility settings
## Steps to reproduce the issue
1. Install the following [build.](https://github.com/learningequality/kolibri/releases/tag/v0.15.0-beta2)
2. Create a facility with enabled options for 'Allow learners to edit their username' and 'Allow learners to edit their full name' in Facility settings
3. Create a Learner user
4. Sign in with the Learner, go to Profile and select the Edit button.
5. Attempt to edit the Full name and Username fields.
## Additional information

Logs:
[logs.zip](https://github.com/learningequality/kolibri/files/7540298/logs.zip)
## Usage Details
- OS: Windows 10
- Browser: Chrome
</issue>
<code>
[start of kolibri/core/auth/middleware.py]
1 from django.apps import apps
2 from django.conf import settings
3 from django.contrib.auth import _get_user_session_key
4 from django.contrib.auth import get_user
5 from django.contrib.auth.middleware import AuthenticationMiddleware
6 from django.contrib.sessions.middleware import SessionMiddleware
7 from django.core.cache import cache
8 from django.core.exceptions import ImproperlyConfigured
9 from django.utils.functional import SimpleLazyObject
10
11
12 def get_anonymous_user_model():
13 """
14 Return the Anonymous User model that is active in this project.
15 """
16 try:
17 app_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(".")[0]
18 except AttributeError:
19 raise ImproperlyConfigured("AUTH_ANONYMOUS_USER_MODEL is not a string")
20 try:
21 model_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(".")[1]
22 app = apps.get_app_config(app_name)
23 models_module = app.models_module
24 except IndexError:
25 raise ImproperlyConfigured(
26 "AUTH_ANONYMOUS_USER_MODEL must be of the form 'app_label.model_name'"
27 )
28 except LookupError:
29 raise ImproperlyConfigured(
30 "AUTH_ANONYMOUS_USER_MODEL refers to an app '{}' that has not been installed".format(
31 app_name
32 )
33 )
34 try:
35 return getattr(models_module, model_name)
36 except AttributeError:
37 raise ImproperlyConfigured(
38 "AUTH_ANONYMOUS_USER_MODEL refers to a model '{}' that does not exist in the app '{}'".format(
39 model_name, app_name
40 )
41 )
42
43
44 def _get_user(request):
45 if not hasattr(request, "_cached_user"):
46 try:
47 user_id = _get_user_session_key(request)
48 USER_CACHE_KEY = "USER_BY_SESSION_CACHE_{}".format(user_id)
49 user = cache.get(USER_CACHE_KEY)
50 if not user:
51 user = get_user(request)
52 cache.set(USER_CACHE_KEY, user)
53 except KeyError:
54 user = get_user(request)
55 if user.is_anonymous():
56 AnonymousUser = get_anonymous_user_model()
57 user = AnonymousUser()
58 request._cached_user = user
59
60 return request._cached_user
61
62
63 class CustomAuthenticationMiddleware(AuthenticationMiddleware):
64 """
65 Adaptation of Django's ``account.middleware.AuthenticationMiddleware``
66 to replace the default AnonymousUser with a custom implementation.
67 """
68
69 def process_request(self, request):
70 if not hasattr(request, "session"):
71 raise AssertionError(
72 "The authentication middleware requires session middleware "
73 "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
74 "'django.contrib.sessions.middleware.SessionMiddleware' before "
75 "'kolibri.core.auth.middleware.CustomAuthenticationMiddleware'."
76 )
77 request.user = SimpleLazyObject(lambda: _get_user(request))
78
79
80 class XhrPreventLoginPromptMiddleware(object):
81 """
82 By default, HTTP 401 responses are sent with a ``WWW-Authenticate``
83 header. Web browsers react to this header by displaying a login prompt
84 dialog. By removing the header, the login prompt can be avoided. While
85 this isn't recommended in general, there's a convention of removing it
86 for XHR requests, so that unauthenticated XHR requests don't trigger a
87 popup.
88
89 See `here <https://stackoverflow.com/a/20221330>`_ for reference.
90 """
91
92 def __init__(self, get_response):
93 self.get_response = get_response
94
95 def __call__(self, request):
96 response = self.get_response(request)
97 if response and response.status_code == 401 and request.is_ajax():
98 del response["WWW-Authenticate"]
99 return response
100
101
102 SESSION_EXEMPT = "_session_exempt"
103
104
105 def session_exempt(view):
106 def wrapper_func(*args, **kwargs):
107 return view(*args, **kwargs)
108
109 setattr(wrapper_func, SESSION_EXEMPT, True)
110 return wrapper_func
111
112
113 class KolibriSessionMiddleware(SessionMiddleware):
114 def _is_exempt(self, obj):
115 return hasattr(obj, SESSION_EXEMPT)
116
117 def process_view(self, request, callback, callback_args, callback_kwargs):
118 if self._is_exempt(callback):
119 setattr(request, SESSION_EXEMPT, True)
120 return None
121
122 def process_response(self, request, response):
123 if self._is_exempt(request):
124 return response
125 return super(KolibriSessionMiddleware, self).process_response(request, response)
126
[end of kolibri/core/auth/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/auth/middleware.py b/kolibri/core/auth/middleware.py
--- a/kolibri/core/auth/middleware.py
+++ b/kolibri/core/auth/middleware.py
@@ -6,6 +6,7 @@
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
+from django.db.models.signals import post_save
from django.utils.functional import SimpleLazyObject
@@ -41,11 +42,14 @@
)
+USER_SESSION_CACHE_KEY = "USER_BY_SESSION_CACHE_{}"
+
+
def _get_user(request):
if not hasattr(request, "_cached_user"):
try:
user_id = _get_user_session_key(request)
- USER_CACHE_KEY = "USER_BY_SESSION_CACHE_{}".format(user_id)
+ USER_CACHE_KEY = USER_SESSION_CACHE_KEY.format(user_id)
user = cache.get(USER_CACHE_KEY)
if not user:
user = get_user(request)
@@ -60,6 +64,14 @@
return request._cached_user
+def clear_user_cache(sender, instance, created, **kwargs):
+ if not created:
+ cache.delete(USER_SESSION_CACHE_KEY.format(instance.id))
+
+
+post_save.connect(clear_user_cache, sender=settings.AUTH_USER_MODEL)
+
+
class CustomAuthenticationMiddleware(AuthenticationMiddleware):
"""
Adaptation of Django's ``account.middleware.AuthenticationMiddleware``
| {"golden_diff": "diff --git a/kolibri/core/auth/middleware.py b/kolibri/core/auth/middleware.py\n--- a/kolibri/core/auth/middleware.py\n+++ b/kolibri/core/auth/middleware.py\n@@ -6,6 +6,7 @@\n from django.contrib.sessions.middleware import SessionMiddleware\n from django.core.cache import cache\n from django.core.exceptions import ImproperlyConfigured\n+from django.db.models.signals import post_save\n from django.utils.functional import SimpleLazyObject\n \n \n@@ -41,11 +42,14 @@\n )\n \n \n+USER_SESSION_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\"\n+\n+\n def _get_user(request):\n if not hasattr(request, \"_cached_user\"):\n try:\n user_id = _get_user_session_key(request)\n- USER_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\".format(user_id)\n+ USER_CACHE_KEY = USER_SESSION_CACHE_KEY.format(user_id)\n user = cache.get(USER_CACHE_KEY)\n if not user:\n user = get_user(request)\n@@ -60,6 +64,14 @@\n return request._cached_user\n \n \n+def clear_user_cache(sender, instance, created, **kwargs):\n+ if not created:\n+ cache.delete(USER_SESSION_CACHE_KEY.format(instance.id))\n+\n+\n+post_save.connect(clear_user_cache, sender=settings.AUTH_USER_MODEL)\n+\n+\n class CustomAuthenticationMiddleware(AuthenticationMiddleware):\n \"\"\"\n Adaptation of Django's ``account.middleware.AuthenticationMiddleware``\n", "issue": "Learner - Cannot change username and full name\n## Observed behavior\r\nThe fields for editing a learner's username and full name are disabled and I cannot change them.\r\n\r\n## Expected behavior\r\nIt should be possible for a learner to edit their username and full name if the options are enabled in Facility settings\r\n\r\n## Steps to reproduce the issue\r\n1. Install the following [build.](https://github.com/learningequality/kolibri/releases/tag/v0.15.0-beta2)\r\n2. Create a facility with enabled options for 'Allow learners to edit their username' and 'Allow learners to edit their full name' in Facility settings\r\n3. Create a Learner user\r\n4. Sign in with the Learner, go to Profile and select the Edit button.\r\n5. 
Attempt to edit the Full name and Username fields.\r\n\r\n## Additional information\r\n\r\n\r\nLogs: \r\n[logs.zip](https://github.com/learningequality/kolibri/files/7540298/logs.zip)\r\n\r\n## Usage Details\r\n - OS: Windows 10\r\n - Browser: Chrome\n", "before_files": [{"content": "from django.apps import apps\nfrom django.conf import settings\nfrom django.contrib.auth import _get_user_session_key\nfrom django.contrib.auth import get_user\nfrom django.contrib.auth.middleware import AuthenticationMiddleware\nfrom django.contrib.sessions.middleware import SessionMiddleware\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.functional import SimpleLazyObject\n\n\ndef get_anonymous_user_model():\n \"\"\"\n Return the Anonymous User model that is active in this project.\n \"\"\"\n try:\n app_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(\".\")[0]\n except AttributeError:\n raise ImproperlyConfigured(\"AUTH_ANONYMOUS_USER_MODEL is not a string\")\n try:\n model_name = settings.AUTH_ANONYMOUS_USER_MODEL.split(\".\")[1]\n app = apps.get_app_config(app_name)\n models_module = app.models_module\n except IndexError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL must be of the form 'app_label.model_name'\"\n )\n except LookupError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL refers to an app '{}' that has not been installed\".format(\n app_name\n )\n )\n try:\n return getattr(models_module, model_name)\n except AttributeError:\n raise ImproperlyConfigured(\n \"AUTH_ANONYMOUS_USER_MODEL refers to a model '{}' that does not exist in the app '{}'\".format(\n model_name, app_name\n )\n )\n\n\ndef _get_user(request):\n if not hasattr(request, \"_cached_user\"):\n try:\n user_id = _get_user_session_key(request)\n USER_CACHE_KEY = \"USER_BY_SESSION_CACHE_{}\".format(user_id)\n user = cache.get(USER_CACHE_KEY)\n if not user:\n user = get_user(request)\n cache.set(USER_CACHE_KEY, user)\n except KeyError:\n user = get_user(request)\n if user.is_anonymous():\n AnonymousUser = get_anonymous_user_model()\n user = AnonymousUser()\n request._cached_user = user\n\n return request._cached_user\n\n\nclass CustomAuthenticationMiddleware(AuthenticationMiddleware):\n \"\"\"\n Adaptation of Django's ``account.middleware.AuthenticationMiddleware``\n to replace the default AnonymousUser with a custom implementation.\n \"\"\"\n\n def process_request(self, request):\n if not hasattr(request, \"session\"):\n raise AssertionError(\n \"The authentication middleware requires session middleware \"\n \"to be installed. Edit your MIDDLEWARE_CLASSES setting to insert \"\n \"'django.contrib.sessions.middleware.SessionMiddleware' before \"\n \"'kolibri.core.auth.middleware.CustomAuthenticationMiddleware'.\"\n )\n request.user = SimpleLazyObject(lambda: _get_user(request))\n\n\nclass XhrPreventLoginPromptMiddleware(object):\n \"\"\"\n By default, HTTP 401 responses are sent with a ``WWW-Authenticate``\n header. Web browsers react to this header by displaying a login prompt\n dialog. By removing the header, the login prompt can be avoided. 
While\n this isn't recommended in general, there's a convention of removing it\n for XHR requests, so that unauthenticated XHR requests don't trigger a\n popup.\n\n See `here <https://stackoverflow.com/a/20221330>`_ for reference.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n if response and response.status_code == 401 and request.is_ajax():\n del response[\"WWW-Authenticate\"]\n return response\n\n\nSESSION_EXEMPT = \"_session_exempt\"\n\n\ndef session_exempt(view):\n def wrapper_func(*args, **kwargs):\n return view(*args, **kwargs)\n\n setattr(wrapper_func, SESSION_EXEMPT, True)\n return wrapper_func\n\n\nclass KolibriSessionMiddleware(SessionMiddleware):\n def _is_exempt(self, obj):\n return hasattr(obj, SESSION_EXEMPT)\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n if self._is_exempt(callback):\n setattr(request, SESSION_EXEMPT, True)\n return None\n\n def process_response(self, request, response):\n if self._is_exempt(request):\n return response\n return super(KolibriSessionMiddleware, self).process_response(request, response)\n", "path": "kolibri/core/auth/middleware.py"}]} | 2,041 | 311 |
gh_patches_debug_24177 | rasdani/github-patches | git_diff | pre-commit__pre-commit-756 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
venv tests break virtualenv's `pip` when run from a `-mvirtualenv` virtualenv
Here's a reproduction, not exactly sure what's happening here:
```
$ tox -e py36 -r --notest
GLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py
py36 create: /home/asottile/workspace/pre-commit/.tox/py36
py36 installdeps: -rrequirements-dev.txt
py36 inst: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip
py36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,-e [email protected]:pre-commit/pre-commit@97fb49a533de9a378d20f0a41e79df118362e534#egg=pre_commit,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0
___________________________________ summary ____________________________________
py36: skipped tests
congratulations :)
$ head -1 .tox/py36/bin/pip
#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6
$ .tox/py36/bin/pytest tests -k venv
============================= test session starts ==============================
platform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0
rootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini
plugins: env-0.6.2
collected 500 items / 492 deselected
tests/repository_test.py .. [ 25%]
tests/commands/install_uninstall_test.py . [ 37%]
tests/languages/all_test.py ..... [100%]
=================== 8 passed, 492 deselected in 4.12 seconds ===================
$ head -1 .tox/py36/bin/pip
#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6
$ tox -e py36 -- tests -k venv
GLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py
py36 inst-nodeps: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip
py36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,pre-commit==1.10.0,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0
py36 runtests: PYTHONHASHSEED='93802395'
py36 runtests: commands[0] | coverage erase
py36 runtests: commands[1] | coverage run -m pytest tests -k venv
============================= test session starts ==============================
platform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0
rootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini
plugins: env-0.6.2
collected 500 items / 492 deselected
tests/repository_test.py .. [ 25%]
tests/commands/install_uninstall_test.py . [ 37%]
tests/languages/all_test.py ..... [100%]
=================== 8 passed, 492 deselected in 4.32 seconds ===================
py36 runtests: commands[2] | coverage report --fail-under 99
Name Stmts Miss Branch BrPart Cover Missing
---------------------------------------------------------------------------------------------
...
17 files skipped due to complete coverage.
ERROR: InvocationError: '/home/asottile/workspace/pre-commit/.tox/py36/bin/coverage report --fail-under 99'
___________________________________ summary ____________________________________
ERROR: py36: commands failed
$ head -1 .tox/py36/bin/pip
#!/tmp/pytest-of-asottile/pytest-3/test_python_venv0/0/.pre-commit/repo5xcuq11q/py_venv-python3.6/bin/python3.6
```
</issue>
<code>
[start of pre_commit/languages/python_venv.py]
1 from __future__ import unicode_literals
2
3 from pre_commit.languages import python
4 from pre_commit.util import cmd_output
5
6
7 ENVIRONMENT_DIR = 'py_venv'
8
9
10 def make_venv(envdir, python):
11 cmd_output(python, '-mvenv', envdir, cwd='/')
12
13
14 get_default_version = python.get_default_version
15 _interface = python.py_interface(ENVIRONMENT_DIR, make_venv)
16 in_env, healthy, run_hook, install_environment = _interface
17
[end of pre_commit/languages/python_venv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/python_venv.py b/pre_commit/languages/python_venv.py
--- a/pre_commit/languages/python_venv.py
+++ b/pre_commit/languages/python_venv.py
@@ -1,14 +1,46 @@
from __future__ import unicode_literals
+import os.path
+
from pre_commit.languages import python
+from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
ENVIRONMENT_DIR = 'py_venv'
+def orig_py_exe(exe): # pragma: no cover (platform specific)
+ """A -mvenv virtualenv made from a -mvirtualenv virtualenv installs
+ packages to the incorrect location. Attempt to find the _original_ exe
+ and invoke `-mvenv` from there.
+
+ See:
+ - https://github.com/pre-commit/pre-commit/issues/755
+ - https://github.com/pypa/virtualenv/issues/1095
+ - https://bugs.python.org/issue30811
+ """
+ try:
+ prefix_script = 'import sys; print(sys.real_prefix)'
+ _, prefix, _ = cmd_output(exe, '-c', prefix_script)
+ prefix = prefix.strip()
+ except CalledProcessError:
+ # not created from -mvirtualenv
+ return exe
+
+ if os.name == 'nt':
+ expected = os.path.join(prefix, 'python.exe')
+ else:
+ expected = os.path.join(prefix, 'bin', os.path.basename(exe))
+
+ if os.path.exists(expected):
+ return expected
+ else:
+ return exe
+
+
def make_venv(envdir, python):
- cmd_output(python, '-mvenv', envdir, cwd='/')
+ cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')
get_default_version = python.get_default_version
| {"golden_diff": "diff --git a/pre_commit/languages/python_venv.py b/pre_commit/languages/python_venv.py\n--- a/pre_commit/languages/python_venv.py\n+++ b/pre_commit/languages/python_venv.py\n@@ -1,14 +1,46 @@\n from __future__ import unicode_literals\n \n+import os.path\n+\n from pre_commit.languages import python\n+from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n \n \n ENVIRONMENT_DIR = 'py_venv'\n \n \n+def orig_py_exe(exe): # pragma: no cover (platform specific)\n+ \"\"\"A -mvenv virtualenv made from a -mvirtualenv virtualenv installs\n+ packages to the incorrect location. Attempt to find the _original_ exe\n+ and invoke `-mvenv` from there.\n+\n+ See:\n+ - https://github.com/pre-commit/pre-commit/issues/755\n+ - https://github.com/pypa/virtualenv/issues/1095\n+ - https://bugs.python.org/issue30811\n+ \"\"\"\n+ try:\n+ prefix_script = 'import sys; print(sys.real_prefix)'\n+ _, prefix, _ = cmd_output(exe, '-c', prefix_script)\n+ prefix = prefix.strip()\n+ except CalledProcessError:\n+ # not created from -mvirtualenv\n+ return exe\n+\n+ if os.name == 'nt':\n+ expected = os.path.join(prefix, 'python.exe')\n+ else:\n+ expected = os.path.join(prefix, 'bin', os.path.basename(exe))\n+\n+ if os.path.exists(expected):\n+ return expected\n+ else:\n+ return exe\n+\n+\n def make_venv(envdir, python):\n- cmd_output(python, '-mvenv', envdir, cwd='/')\n+ cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')\n \n \n get_default_version = python.get_default_version\n", "issue": "venv tests break virtualenv's `pip` when run from a `-mvirtualenv` virtualenv\nHere's a reproduction, not exactly sure what's happening here:\r\n\r\n```\r\n$ tox -e py36 -r --notest\r\nGLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py\r\npy36 create: /home/asottile/workspace/pre-commit/.tox/py36\r\npy36 installdeps: -rrequirements-dev.txt\r\npy36 inst: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip\r\npy36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,-e [email protected]:pre-commit/pre-commit@97fb49a533de9a378d20f0a41e79df118362e534#egg=pre_commit,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0\r\n___________________________________ summary ____________________________________\r\n py36: skipped tests\r\n congratulations :)\r\n\r\n$ head -1 .tox/py36/bin/pip\r\n#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6\r\n$ .tox/py36/bin/pytest tests -k venv\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0\r\nrootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini\r\nplugins: env-0.6.2\r\ncollected 500 items / 492 deselected \r\n\r\ntests/repository_test.py .. [ 25%]\r\ntests/commands/install_uninstall_test.py . [ 37%]\r\ntests/languages/all_test.py ..... 
[100%]\r\n\r\n=================== 8 passed, 492 deselected in 4.12 seconds ===================\r\n$ head -1 .tox/py36/bin/pip\r\n#!/home/asottile/workspace/pre-commit/.tox/py36/bin/python3.6\r\n$ tox -e py36 -- tests -k venv\r\nGLOB sdist-make: /home/asottile/workspace/pre-commit/setup.py\r\npy36 inst-nodeps: /home/asottile/workspace/pre-commit/.tox/dist/pre_commit-1.10.0.zip\r\npy36 installed: You are using pip version 9.0.1, however version 10.0.1 is available.,You should consider upgrading via the 'pip install --upgrade pip' command.,aspy.yaml==1.1.1,atomicwrites==1.1.5,attrs==18.1.0,cached-property==1.4.2,cfgv==1.0.0,coverage==4.5.1,flake8==3.5.0,identify==1.0.18,mccabe==0.6.1,mock==2.0.0,more-itertools==4.2.0,nodeenv==1.3.0,pbr==4.0.3,pluggy==0.6.0,pre-commit==1.10.0,py==1.5.3,pycodestyle==2.3.1,pyflakes==1.6.0,pytest==3.6.0,pytest-env==0.6.2,PyYAML==3.12,six==1.11.0,toml==0.9.4,virtualenv==16.0.0\r\npy36 runtests: PYTHONHASHSEED='93802395'\r\npy36 runtests: commands[0] | coverage erase\r\npy36 runtests: commands[1] | coverage run -m pytest tests -k venv\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.6.5, pytest-3.6.0, py-1.5.3, pluggy-0.6.0\r\nrootdir: /home/asottile/workspace/pre-commit, inifile: tox.ini\r\nplugins: env-0.6.2\r\ncollected 500 items / 492 deselected \r\n\r\ntests/repository_test.py .. [ 25%]\r\ntests/commands/install_uninstall_test.py . [ 37%]\r\ntests/languages/all_test.py ..... [100%]\r\n\r\n=================== 8 passed, 492 deselected in 4.32 seconds ===================\r\npy36 runtests: commands[2] | coverage report --fail-under 99\r\nName Stmts Miss Branch BrPart Cover Missing\r\n---------------------------------------------------------------------------------------------\r\n...\r\n17 files skipped due to complete coverage.\r\nERROR: InvocationError: '/home/asottile/workspace/pre-commit/.tox/py36/bin/coverage report --fail-under 99'\r\n___________________________________ summary ____________________________________\r\nERROR: py36: commands failed\r\n\r\n$ head -1 .tox/py36/bin/pip\r\n#!/tmp/pytest-of-asottile/pytest-3/test_python_venv0/0/.pre-commit/repo5xcuq11q/py_venv-python3.6/bin/python3.6\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom pre_commit.languages import python\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_venv'\n\n\ndef make_venv(envdir, python):\n cmd_output(python, '-mvenv', envdir, cwd='/')\n\n\nget_default_version = python.get_default_version\n_interface = python.py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python_venv.py"}]} | 2,046 | 437 |
gh_patches_debug_35079 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-962 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] [Certificate Authentication] Expose sendX5c parameter
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3741
This parameter enables application developers to achieve easy certificates roll-over in Azure AD: setting this parameter to true will send the public certificate to Azure AD along with the token request, so that Azure AD can use it to validate the subject name based on a trusted issuer policy. This saves the application admin from the need to explicitly manage the certificate rollover (either via portal or powershell/CLI operation)
# Changed projects
* Microsoft.Bot.Connector
[R9,authentication]
</issue>
<code>
[start of libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from abc import ABC
5
6 from msal import ConfidentialClientApplication
7
8 from .app_credentials import AppCredentials
9
10
11 class CertificateAppCredentials(AppCredentials, ABC):
12 """
13 AppCredentials implementation using a certificate.
14
15 See:
16 https://github.com/AzureAD/microsoft-authentication-library-for-python/wiki/Client-Credentials#client-credentials-with-certificate
17 """
18
19 def __init__(
20 self,
21 app_id: str,
22 certificate_thumbprint: str,
23 certificate_private_key: str,
24 channel_auth_tenant: str = None,
25 oauth_scope: str = None,
26 ):
27 # super will set proper scope and endpoint.
28 super().__init__(
29 app_id=app_id,
30 channel_auth_tenant=channel_auth_tenant,
31 oauth_scope=oauth_scope,
32 )
33
34 self.scopes = [self.oauth_scope]
35 self.app = None
36 self.certificate_thumbprint = certificate_thumbprint
37 self.certificate_private_key = certificate_private_key
38
39 def get_access_token(self, force_refresh: bool = False) -> str:
40 """
41 Implementation of AppCredentials.get_token.
42 :return: The access token for the given certificate.
43 """
44
45 # Firstly, looks up a token from cache
46 # Since we are looking for token for the current app, NOT for an end user,
47 # notice we give account parameter as None.
48 auth_token = self.__get_msal_app().acquire_token_silent(
49 self.scopes, account=None
50 )
51 if not auth_token:
52 # No suitable token exists in cache. Let's get a new one from AAD.
53 auth_token = self.__get_msal_app().acquire_token_for_client(
54 scopes=self.scopes
55 )
56 return auth_token["access_token"]
57
58 def __get_msal_app(self):
59 if not self.app:
60 self.app = ConfidentialClientApplication(
61 client_id=self.microsoft_app_id,
62 authority=self.oauth_endpoint,
63 client_credential={
64 "thumbprint": self.certificate_thumbprint,
65 "private_key": self.certificate_private_key,
66 },
67 )
68
69 return self.app
70
[end of libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py]
[start of libraries/botframework-connector/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 import os
4 from setuptools import setup
5
6 NAME = "botframework-connector"
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.7.1"
8 REQUIRES = [
9 "msrest==0.6.10",
10 "requests==2.22.0",
11 "cryptography==2.8.0",
12 "PyJWT==1.5.3",
13 "botbuilder-schema>=4.7.1",
14 "adal==1.2.1",
15 "msal==1.1.0",
16 ]
17
18 root = os.path.abspath(os.path.dirname(__file__))
19
20 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
21 long_description = f.read()
22
23 setup(
24 name=NAME,
25 version=VERSION,
26 description="Microsoft Bot Framework Bot Builder SDK for Python.",
27 author="Microsoft",
28 url="https://www.github.com/Microsoft/botbuilder-python",
29 keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"],
30 install_requires=REQUIRES,
31 packages=[
32 "botframework.connector",
33 "botframework.connector.auth",
34 "botframework.connector.async_mixin",
35 "botframework.connector.operations",
36 "botframework.connector.models",
37 "botframework.connector.aio",
38 "botframework.connector.aio.operations_async",
39 "botframework.connector.teams",
40 "botframework.connector.teams.operations",
41 "botframework.connector.token_api",
42 "botframework.connector.token_api.aio",
43 "botframework.connector.token_api.models",
44 "botframework.connector.token_api.operations",
45 ],
46 include_package_data=True,
47 long_description=long_description,
48 long_description_content_type="text/x-rst",
49 license="MIT",
50 classifiers=[
51 "Programming Language :: Python :: 3.7",
52 "Intended Audience :: Developers",
53 "License :: OSI Approved :: MIT License",
54 "Operating System :: OS Independent",
55 "Development Status :: 5 - Production/Stable",
56 "Topic :: Scientific/Engineering :: Artificial Intelligence",
57 ],
58 )
59
[end of libraries/botframework-connector/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py
--- a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py
+++ b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py
@@ -23,7 +23,20 @@
certificate_private_key: str,
channel_auth_tenant: str = None,
oauth_scope: str = None,
+ certificate_public: str = None,
):
+ """
+ AppCredentials implementation using a certificate.
+
+ :param app_id:
+ :param certificate_thumbprint:
+ :param certificate_private_key:
+ :param channel_auth_tenant:
+ :param oauth_scope:
+ :param certificate_public: public_certificate (optional) is public key certificate which will be sent
+ through ‘x5c’ JWT header only for subject name and issuer authentication to support cert auto rolls.
+ """
+
# super will set proper scope and endpoint.
super().__init__(
app_id=app_id,
@@ -35,6 +48,7 @@
self.app = None
self.certificate_thumbprint = certificate_thumbprint
self.certificate_private_key = certificate_private_key
+ self.certificate_public = certificate_public
def get_access_token(self, force_refresh: bool = False) -> str:
"""
@@ -63,6 +77,9 @@
client_credential={
"thumbprint": self.certificate_thumbprint,
"private_key": self.certificate_private_key,
+ "public_certificate": self.certificate_public
+ if self.certificate_public
+ else None,
},
)
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py
--- a/libraries/botframework-connector/setup.py
+++ b/libraries/botframework-connector/setup.py
@@ -12,7 +12,7 @@
"PyJWT==1.5.3",
"botbuilder-schema>=4.7.1",
"adal==1.2.1",
- "msal==1.1.0",
+ "msal==1.2.0",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py\n--- a/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py\n@@ -23,7 +23,20 @@\n certificate_private_key: str,\r\n channel_auth_tenant: str = None,\r\n oauth_scope: str = None,\r\n+ certificate_public: str = None,\r\n ):\r\n+ \"\"\"\r\n+ AppCredentials implementation using a certificate.\r\n+\r\n+ :param app_id:\r\n+ :param certificate_thumbprint:\r\n+ :param certificate_private_key:\r\n+ :param channel_auth_tenant:\r\n+ :param oauth_scope:\r\n+ :param certificate_public: public_certificate (optional) is public key certificate which will be sent\r\n+ through \u2018x5c\u2019 JWT header only for subject name and issuer authentication to support cert auto rolls.\r\n+ \"\"\"\r\n+\r\n # super will set proper scope and endpoint.\r\n super().__init__(\r\n app_id=app_id,\r\n@@ -35,6 +48,7 @@\n self.app = None\r\n self.certificate_thumbprint = certificate_thumbprint\r\n self.certificate_private_key = certificate_private_key\r\n+ self.certificate_public = certificate_public\r\n \r\n def get_access_token(self, force_refresh: bool = False) -> str:\r\n \"\"\"\r\n@@ -63,6 +77,9 @@\n client_credential={\r\n \"thumbprint\": self.certificate_thumbprint,\r\n \"private_key\": self.certificate_private_key,\r\n+ \"public_certificate\": self.certificate_public\r\n+ if self.certificate_public\r\n+ else None,\r\n },\r\n )\r\n \r\ndiff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py\n--- a/libraries/botframework-connector/setup.py\n+++ b/libraries/botframework-connector/setup.py\n@@ -12,7 +12,7 @@\n \"PyJWT==1.5.3\",\n \"botbuilder-schema>=4.7.1\",\n \"adal==1.2.1\",\n- \"msal==1.1.0\",\n+ \"msal==1.2.0\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "[PORT] [Certificate Authentication] Expose sendX5c parameter\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3741\n\nThis parameter enables application developers to achieve easy certificates roll-over in Azure AD: setting this parameter to true will send the public certificate to Azure AD along with the token request, so that Azure AD can use it to validate the subject name based on a trusted issuer policy. This saves the application admin from the need to explicitly manage the certificate rollover (either via portal or powershell/CLI operation)\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Connector\r\n\r\n[R9,authentication]\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom abc import ABC\r\n\r\nfrom msal import ConfidentialClientApplication\r\n\r\nfrom .app_credentials import AppCredentials\r\n\r\n\r\nclass CertificateAppCredentials(AppCredentials, ABC):\r\n \"\"\"\r\n AppCredentials implementation using a certificate.\r\n\r\n See:\r\n https://github.com/AzureAD/microsoft-authentication-library-for-python/wiki/Client-Credentials#client-credentials-with-certificate\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n app_id: str,\r\n certificate_thumbprint: str,\r\n certificate_private_key: str,\r\n channel_auth_tenant: str = None,\r\n oauth_scope: str = None,\r\n ):\r\n # super will set proper scope and endpoint.\r\n super().__init__(\r\n app_id=app_id,\r\n channel_auth_tenant=channel_auth_tenant,\r\n oauth_scope=oauth_scope,\r\n )\r\n\r\n self.scopes = [self.oauth_scope]\r\n self.app = None\r\n self.certificate_thumbprint = certificate_thumbprint\r\n self.certificate_private_key = certificate_private_key\r\n\r\n def get_access_token(self, force_refresh: bool = False) -> str:\r\n \"\"\"\r\n Implementation of AppCredentials.get_token.\r\n :return: The access token for the given certificate.\r\n \"\"\"\r\n\r\n # Firstly, looks up a token from cache\r\n # Since we are looking for token for the current app, NOT for an end user,\r\n # notice we give account parameter as None.\r\n auth_token = self.__get_msal_app().acquire_token_silent(\r\n self.scopes, account=None\r\n )\r\n if not auth_token:\r\n # No suitable token exists in cache. Let's get a new one from AAD.\r\n auth_token = self.__get_msal_app().acquire_token_for_client(\r\n scopes=self.scopes\r\n )\r\n return auth_token[\"access_token\"]\r\n\r\n def __get_msal_app(self):\r\n if not self.app:\r\n self.app = ConfidentialClientApplication(\r\n client_id=self.microsoft_app_id,\r\n authority=self.oauth_endpoint,\r\n client_credential={\r\n \"thumbprint\": self.certificate_thumbprint,\r\n \"private_key\": self.certificate_private_key,\r\n },\r\n )\r\n\r\n return self.app\r\n", "path": "libraries/botframework-connector/botframework/connector/auth/certificate_app_credentials.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.7.1\"\nREQUIRES = [\n \"msrest==0.6.10\",\n \"requests==2.22.0\",\n \"cryptography==2.8.0\",\n \"PyJWT==1.5.3\",\n \"botbuilder-schema>=4.7.1\",\n \"adal==1.2.1\",\n \"msal==1.1.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}]} | 1,906 | 529 |
gh_patches_debug_19257 | rasdani/github-patches | git_diff | ESMCI__cime-1240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PR #1230 appears to break batch systems
After merging #1230 I get an error from Z_FullSystemTest:
Traceback (most recent call last):
File "./scripts_regression_tests.py", line 1175, in test_full_system
self.assertTrue(test_time > 0, msg="test time was zero for %s" % test_status)
AssertionError: test time was zero for /scratch/cluster/jedwards/scripts_regression_test.20170313_145646/ERR.f45_g37_rx1.A.hobart_intel.fake_testing_only_20170313_151740/TestStatus
All of the tests actually passed.
</issue>
<code>
[start of scripts/lib/CIME/case_submit.py]
1 #!/usr/bin/env python
2
3 """
4 case.submit - Submit a cesm workflow to the queueing system or run it
5 if there is no queueing system. A cesm workflow may include multiple
6 jobs.
7 """
8 import socket
9 from CIME.XML.standard_module_setup import *
10 from CIME.utils import expect, run_and_log_case_status
11 from CIME.preview_namelists import create_namelists
12 from CIME.check_lockedfiles import check_lockedfiles
13 from CIME.check_input_data import check_all_input_data
14 from CIME.test_status import *
15
16 logger = logging.getLogger(__name__)
17
18 def _submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):
19 caseroot = case.get_value("CASEROOT")
20
21 if job is None:
22 if case.get_value("TEST"):
23 job = "case.test"
24 else:
25 job = "case.run"
26
27 if resubmit:
28 resub = case.get_value("RESUBMIT")
29 logger.info("Submitting job '%s', resubmit=%d" % (job, resub))
30 case.set_value("RESUBMIT",resub-1)
31 if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"):
32 case.set_value("CONTINUE_RUN", True)
33 else:
34 if job in ("case.test","case.run"):
35 check_case(case, caseroot)
36 check_DA_settings(case)
37 if case.get_value("MACH") == "mira":
38 with open(".original_host","w") as fd:
39 fd.write( socket.gethostname())
40
41 # if case.submit is called with the no_batch flag then we assume that this
42 # flag will stay in effect for the duration of the RESUBMITs
43 env_batch = case.get_env("batch")
44 if not resubmit:
45 case.set_value("IS_FIRST_RUN", True)
46 if no_batch:
47 batch_system = "none"
48 else:
49 batch_system = env_batch.get_batch_system_type()
50 case.set_value("BATCH_SYSTEM", batch_system)
51 else:
52 if env_batch.get_batch_system_type() == "none":
53 no_batch = True
54
55 # This is a resubmission, do not reinitialize test values
56 case.set_value("IS_FIRST_RUN", False)
57
58 #Load Modules
59 case.load_env()
60
61 case.set_value("RUN_WITH_SUBMIT",True)
62 case.flush()
63
64 logger.warn("submit_jobs %s" % job)
65 job_ids = case.submit_jobs(no_batch=no_batch, job=job, batch_args=batch_args)
66 logger.info("Submitted job ids %s" % job_ids)
67
68 def submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):
69 if case.get_value("TEST"):
70 caseroot = case.get_value("CASEROOT")
71 casebaseid = case.get_value("CASEBASEID")
72 # This should take care of the race condition where the submitted job
73 # begins immediately and tries to set RUN phase. We proactively assume
74 # a passed SUBMIT phase.
75 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
76 ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
77
78 try:
79 functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)
80 run_and_log_case_status(functor, "case.submit", caseroot=case.get_value("CASEROOT"))
81 except:
82 # If something failed in the batch system, make sure to mark
83 # the test as failed if we are running a test.
84 if case.get_value("TEST"):
85 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
86 ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)
87
88 raise
89
90 def check_case(case, caseroot):
91 check_lockedfiles(caseroot)
92 create_namelists(case) # Must be called before check_all_input_data
93 logger.info("Checking that inputdata is available as part of case submission")
94 check_all_input_data(case)
95
96 expect(case.get_value("BUILD_COMPLETE"), "Build complete is "
97 "not True please rebuild the model by calling case.build")
98 logger.info("Check case OK")
99
100 def check_DA_settings(case):
101 if case.get_value("DATA_ASSIMILATION"):
102 script = case.get_value("DATA_ASSIMILATION_SCRIPT")
103 cycles = case.get_value("DATA_ASSIMILATION_CYCLES")
104 logger.info("Data Assimilation enabled using script %s with %d cycles"%(script,cycles))
105
106
[end of scripts/lib/CIME/case_submit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py
--- a/scripts/lib/CIME/case_submit.py
+++ b/scripts/lib/CIME/case_submit.py
@@ -71,9 +71,13 @@
casebaseid = case.get_value("CASEBASEID")
# This should take care of the race condition where the submitted job
# begins immediately and tries to set RUN phase. We proactively assume
- # a passed SUBMIT phase.
+ # a passed SUBMIT phase. If this state is already PASS, don't set it again
+ # because then we'll lose RUN phase info if it's there. This info is important
+ # for system_tests_common to know if it needs to reinitialize the test or not.
with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
- ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
+ phase_status = ts.get_status(SUBMIT_PHASE)
+ if phase_status != TEST_PASS_STATUS:
+ ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
try:
functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)
| {"golden_diff": "diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py\n--- a/scripts/lib/CIME/case_submit.py\n+++ b/scripts/lib/CIME/case_submit.py\n@@ -71,9 +71,13 @@\n casebaseid = case.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n- # a passed SUBMIT phase.\n+ # a passed SUBMIT phase. If this state is already PASS, don't set it again\n+ # because then we'll lose RUN phase info if it's there. This info is important\n+ # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n- ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n+ phase_status = ts.get_status(SUBMIT_PHASE)\n+ if phase_status != TEST_PASS_STATUS:\n+ ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n \n try:\n functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)\n", "issue": "PR #1230 appears to break batch systems \nAfter merging #1230 I get an error from Z_FullSystemTest:\r\n\r\nTraceback (most recent call last):\r\n File \"./scripts_regression_tests.py\", line 1175, in test_full_system\r\n self.assertTrue(test_time > 0, msg=\"test time was zero for %s\" % test_status)\r\nAssertionError: test time was zero for /scratch/cluster/jedwards/scripts_regression_test.20170313_145646/ERR.f45_g37_rx1.A.hobart_intel.fake_testing_only_20170313_151740/TestStatus\r\n\r\n\r\nAll of the tests actually passed. \n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. A cesm workflow may include multiple\njobs.\n\"\"\"\nimport socket\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.check_input_data import check_all_input_data\nfrom CIME.test_status import *\n\nlogger = logging.getLogger(__name__)\n\ndef _submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):\n caseroot = case.get_value(\"CASEROOT\")\n\n if job is None:\n if case.get_value(\"TEST\"):\n job = \"case.test\"\n else:\n job = \"case.run\"\n\n if resubmit:\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '%s', resubmit=%d\" % (job, resub))\n case.set_value(\"RESUBMIT\",resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n else:\n if job in (\"case.test\",\"case.run\"):\n check_case(case, caseroot)\n check_DA_settings(case)\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\",\"w\") as fd:\n fd.write( socket.gethostname())\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if not resubmit:\n case.set_value(\"IS_FIRST_RUN\", True)\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n else:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n case.set_value(\"IS_FIRST_RUN\", False)\n\n #Load Modules\n case.load_env()\n\n case.set_value(\"RUN_WITH_SUBMIT\",True)\n case.flush()\n\n logger.warn(\"submit_jobs %s\" % job)\n job_ids = 
case.submit_jobs(no_batch=no_batch, job=job, batch_args=batch_args)\n logger.info(\"Submitted job ids %s\" % job_ids)\n\ndef submit(case, job=None, resubmit=False, no_batch=False, batch_args=None):\n if case.get_value(\"TEST\"):\n caseroot = case.get_value(\"CASEROOT\")\n casebaseid = case.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n try:\n functor = lambda: _submit(case, job, resubmit, no_batch, batch_args)\n run_and_log_case_status(functor, \"case.submit\", caseroot=case.get_value(\"CASEROOT\"))\n except:\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if case.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(case, caseroot):\n check_lockedfiles(caseroot)\n create_namelists(case) # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n check_all_input_data(case)\n\n expect(case.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(case):\n if case.get_value(\"DATA_ASSIMILATION\"):\n script = case.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n logger.info(\"Data Assimilation enabled using script %s with %d cycles\"%(script,cycles))\n\n", "path": "scripts/lib/CIME/case_submit.py"}]} | 1,897 | 270 |
gh_patches_debug_18728 | rasdani/github-patches | git_diff | TheAlgorithms__Python-6190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[suggestion] use relative path in DIRECTORY.md
when openning DIRECTORY.md in local machine, the links in it refers to https://github.com/TheAlgorithms/Python/blob/master/xxx.
it's not convinient for reading locally.
I suggest to make a new file "TOC.md", which removes "https://github.com/TheAlgorithms/Python/blob/master/" in every link.
</issue>
<code>
[start of scripts/build_directory_md.py]
1 #!/usr/bin/env python3
2
3 import os
4 from typing import Iterator
5
6 URL_BASE = "https://github.com/TheAlgorithms/Python/blob/master"
7
8
9 def good_file_paths(top_dir: str = ".") -> Iterator[str]:
10 for dir_path, dir_names, filenames in os.walk(top_dir):
11 dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
12 for filename in filenames:
13 if filename == "__init__.py":
14 continue
15 if os.path.splitext(filename)[1] in (".py", ".ipynb"):
16 yield os.path.join(dir_path, filename).lstrip("./")
17
18
19 def md_prefix(i):
20 return f"{i * ' '}*" if i else "\n##"
21
22
23 def print_path(old_path: str, new_path: str) -> str:
24 old_parts = old_path.split(os.sep)
25 for i, new_part in enumerate(new_path.split(os.sep)):
26 if i + 1 > len(old_parts) or old_parts[i] != new_part:
27 if new_part:
28 print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
29 return new_path
30
31
32 def print_directory_md(top_dir: str = ".") -> None:
33 old_path = ""
34 for filepath in sorted(good_file_paths(top_dir)):
35 filepath, filename = os.path.split(filepath)
36 if filepath != old_path:
37 old_path = print_path(old_path, filepath)
38 indent = (filepath.count(os.sep) + 1) if filepath else 0
39 url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20")
40 filename = os.path.splitext(filename.replace("_", " ").title())[0]
41 print(f"{md_prefix(indent)} [{filename}]({url})")
42
43
44 if __name__ == "__main__":
45 print_directory_md(".")
46
[end of scripts/build_directory_md.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py
--- a/scripts/build_directory_md.py
+++ b/scripts/build_directory_md.py
@@ -3,8 +3,6 @@
import os
from typing import Iterator
-URL_BASE = "https://github.com/TheAlgorithms/Python/blob/master"
-
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(top_dir):
@@ -36,7 +34,7 @@
if filepath != old_path:
old_path = print_path(old_path, filepath)
indent = (filepath.count(os.sep) + 1) if filepath else 0
- url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20")
+ url = "/".join((filepath, filename)).replace(" ", "%20")
filename = os.path.splitext(filename.replace("_", " ").title())[0]
print(f"{md_prefix(indent)} [{filename}]({url})")
| {"golden_diff": "diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py\n--- a/scripts/build_directory_md.py\n+++ b/scripts/build_directory_md.py\n@@ -3,8 +3,6 @@\n import os\n from typing import Iterator\n \n-URL_BASE = \"https://github.com/TheAlgorithms/Python/blob/master\"\n-\n \n def good_file_paths(top_dir: str = \".\") -> Iterator[str]:\n for dir_path, dir_names, filenames in os.walk(top_dir):\n@@ -36,7 +34,7 @@\n if filepath != old_path:\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n- url = \"/\".join((URL_BASE, filepath, filename)).replace(\" \", \"%20\")\n+ url = \"/\".join((filepath, filename)).replace(\" \", \"%20\")\n filename = os.path.splitext(filename.replace(\"_\", \" \").title())[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n", "issue": "[suggestion] use relative path in DIRECTORY.md\nwhen openning DIRECTORY.md in local machine, the links in it refers to https://github.com/TheAlgorithms/Python/blob/master/xxx.\r\n\r\nit's not convinient for reading locally.\r\n\r\nI suggest to make a new file \"TOC.md\", which removes \"https://github.com/TheAlgorithms/Python/blob/master/\" in every link.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nfrom typing import Iterator\n\nURL_BASE = \"https://github.com/TheAlgorithms/Python/blob/master\"\n\n\ndef good_file_paths(top_dir: str = \".\") -> Iterator[str]:\n for dir_path, dir_names, filenames in os.walk(top_dir):\n dir_names[:] = [d for d in dir_names if d != \"scripts\" and d[0] not in \"._\"]\n for filename in filenames:\n if filename == \"__init__.py\":\n continue\n if os.path.splitext(filename)[1] in (\".py\", \".ipynb\"):\n yield os.path.join(dir_path, filename).lstrip(\"./\")\n\n\ndef md_prefix(i):\n return f\"{i * ' '}*\" if i else \"\\n##\"\n\n\ndef print_path(old_path: str, new_path: str) -> str:\n old_parts = old_path.split(os.sep)\n for i, new_part in enumerate(new_path.split(os.sep)):\n if i + 1 > len(old_parts) or old_parts[i] != new_part:\n if new_part:\n print(f\"{md_prefix(i)} {new_part.replace('_', ' ').title()}\")\n return new_path\n\n\ndef print_directory_md(top_dir: str = \".\") -> None:\n old_path = \"\"\n for filepath in sorted(good_file_paths(top_dir)):\n filepath, filename = os.path.split(filepath)\n if filepath != old_path:\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n url = \"/\".join((URL_BASE, filepath, filename)).replace(\" \", \"%20\")\n filename = os.path.splitext(filename.replace(\"_\", \" \").title())[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n\n\nif __name__ == \"__main__\":\n print_directory_md(\".\")\n", "path": "scripts/build_directory_md.py"}]} | 1,105 | 221 |
gh_patches_debug_14360 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2811 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
opus decoding error
see https://huggingface.co/datasets/stable-speech/mls_eng_10k/discussions/1#65ef6e9d440a5fc3d94a40ad
To fix this maybe we should pin `soundfile` library to `>=1.0.31` (first version that supported opus) like [we do in `datasets` library](https://github.com/huggingface/datasets/blob/main/src/datasets/config.py#L144).
</issue>
<code>
[start of libs/libcommon/src/libcommon/viewer_utils/asset.py]
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 from io import BytesIO
5 from pathlib import Path
6 from tempfile import NamedTemporaryFile
7 from typing import Optional, TypedDict
8 from urllib import parse
9
10 from PIL import Image, ImageOps
11 from pydub import AudioSegment # type:ignore
12
13 from libcommon.constants import DATASET_SEPARATOR
14 from libcommon.storage import StrPath, remove_dir
15 from libcommon.storage_client import StorageClient
16
17 SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE = {".wav": "audio/wav", ".mp3": "audio/mpeg"}
18
19
20 def delete_asset_dir(dataset: str, directory: StrPath) -> None:
21 dir_path = Path(directory).resolve() / dataset
22 remove_dir(dir_path)
23
24
25 class ImageSource(TypedDict):
26 src: str
27 height: int
28 width: int
29
30
31 class AudioSource(TypedDict):
32 src: str
33 type: str
34
35
36 def generate_object_key(
37 dataset: str, revision: str, config: str, split: str, row_idx: int, column: str, filename: str
38 ) -> str:
39 return f"{parse.quote(dataset)}/{DATASET_SEPARATOR}/{revision}/{DATASET_SEPARATOR}/{parse.quote(config)}/{parse.quote(split)}/{str(row_idx)}/{parse.quote(column)}/{filename}"
40
41
42 def create_image_file(
43 dataset: str,
44 revision: str,
45 config: str,
46 split: str,
47 row_idx: int,
48 column: str,
49 filename: str,
50 image: Image.Image,
51 format: str,
52 storage_client: StorageClient,
53 ) -> ImageSource:
54 object_key = generate_object_key(
55 dataset=dataset,
56 revision=revision,
57 config=config,
58 split=split,
59 row_idx=row_idx,
60 column=column,
61 filename=filename,
62 )
63 if storage_client.overwrite or not storage_client.exists(object_key):
64 image = ImageOps.exif_transpose(image) # type: ignore[assignment]
65 buffer = BytesIO()
66 image.save(fp=buffer, format=format)
67 buffer.seek(0)
68 with storage_client._fs.open(storage_client.get_full_path(object_key), "wb") as f:
69 f.write(buffer.read())
70 return ImageSource(src=storage_client.get_url(object_key), height=image.height, width=image.width)
71
72
73 def create_audio_file(
74 dataset: str,
75 revision: str,
76 config: str,
77 split: str,
78 row_idx: int,
79 column: str,
80 audio_file_bytes: bytes,
81 audio_file_extension: Optional[str],
82 filename: str,
83 storage_client: StorageClient,
84 ) -> list[AudioSource]:
85 object_key = generate_object_key(
86 dataset=dataset,
87 revision=revision,
88 config=config,
89 split=split,
90 row_idx=row_idx,
91 column=column,
92 filename=filename,
93 )
94 suffix = f".{filename.split('.')[-1]}"
95 if suffix not in SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE:
96 raise ValueError(
97 f"Audio format {suffix} is not supported. Supported formats are"
98 f" {','.join(SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE)}."
99 )
100 media_type = SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE[suffix]
101
102 if storage_client.overwrite or not storage_client.exists(object_key):
103 audio_path = storage_client.get_full_path(object_key)
104 if audio_file_extension == suffix:
105 with storage_client._fs.open(audio_path, "wb") as f:
106 f.write(audio_file_bytes)
107 else: # we need to convert
108 # might spawn a process to convert the audio file using ffmpeg
109 with NamedTemporaryFile("wb", suffix=audio_file_extension) as tmpfile:
110 tmpfile.write(audio_file_bytes)
111 segment: AudioSegment = AudioSegment.from_file(
112 tmpfile.name, audio_file_extension[1:] if audio_file_extension else None
113 )
114 buffer = BytesIO()
115 segment.export(buffer, format=suffix[1:])
116 buffer.seek(0)
117 with storage_client._fs.open(audio_path, "wb") as f:
118 f.write(buffer.read())
119 return [AudioSource(src=storage_client.get_url(object_key), type=media_type)]
120
[end of libs/libcommon/src/libcommon/viewer_utils/asset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libs/libcommon/src/libcommon/viewer_utils/asset.py b/libs/libcommon/src/libcommon/viewer_utils/asset.py
--- a/libs/libcommon/src/libcommon/viewer_utils/asset.py
+++ b/libs/libcommon/src/libcommon/viewer_utils/asset.py
@@ -108,9 +108,7 @@
# might spawn a process to convert the audio file using ffmpeg
with NamedTemporaryFile("wb", suffix=audio_file_extension) as tmpfile:
tmpfile.write(audio_file_bytes)
- segment: AudioSegment = AudioSegment.from_file(
- tmpfile.name, audio_file_extension[1:] if audio_file_extension else None
- )
+ segment: AudioSegment = AudioSegment.from_file(tmpfile.name)
buffer = BytesIO()
segment.export(buffer, format=suffix[1:])
buffer.seek(0)
| {"golden_diff": "diff --git a/libs/libcommon/src/libcommon/viewer_utils/asset.py b/libs/libcommon/src/libcommon/viewer_utils/asset.py\n--- a/libs/libcommon/src/libcommon/viewer_utils/asset.py\n+++ b/libs/libcommon/src/libcommon/viewer_utils/asset.py\n@@ -108,9 +108,7 @@\n # might spawn a process to convert the audio file using ffmpeg\n with NamedTemporaryFile(\"wb\", suffix=audio_file_extension) as tmpfile:\n tmpfile.write(audio_file_bytes)\n- segment: AudioSegment = AudioSegment.from_file(\n- tmpfile.name, audio_file_extension[1:] if audio_file_extension else None\n- )\n+ segment: AudioSegment = AudioSegment.from_file(tmpfile.name)\n buffer = BytesIO()\n segment.export(buffer, format=suffix[1:])\n buffer.seek(0)\n", "issue": "opus decoding error\nsee https://huggingface.co/datasets/stable-speech/mls_eng_10k/discussions/1#65ef6e9d440a5fc3d94a40ad\r\n\r\nTo fix this maybe we should pin `soundfile` library to `>=1.0.31` (first version that supported opus) like [we do in `datasets` library](https://github.com/huggingface/datasets/blob/main/src/datasets/config.py#L144). \r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nfrom io import BytesIO\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom typing import Optional, TypedDict\nfrom urllib import parse\n\nfrom PIL import Image, ImageOps\nfrom pydub import AudioSegment # type:ignore\n\nfrom libcommon.constants import DATASET_SEPARATOR\nfrom libcommon.storage import StrPath, remove_dir\nfrom libcommon.storage_client import StorageClient\n\nSUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE = {\".wav\": \"audio/wav\", \".mp3\": \"audio/mpeg\"}\n\n\ndef delete_asset_dir(dataset: str, directory: StrPath) -> None:\n dir_path = Path(directory).resolve() / dataset\n remove_dir(dir_path)\n\n\nclass ImageSource(TypedDict):\n src: str\n height: int\n width: int\n\n\nclass AudioSource(TypedDict):\n src: str\n type: str\n\n\ndef generate_object_key(\n dataset: str, revision: str, config: str, split: str, row_idx: int, column: str, filename: str\n) -> str:\n return f\"{parse.quote(dataset)}/{DATASET_SEPARATOR}/{revision}/{DATASET_SEPARATOR}/{parse.quote(config)}/{parse.quote(split)}/{str(row_idx)}/{parse.quote(column)}/{filename}\"\n\n\ndef create_image_file(\n dataset: str,\n revision: str,\n config: str,\n split: str,\n row_idx: int,\n column: str,\n filename: str,\n image: Image.Image,\n format: str,\n storage_client: StorageClient,\n) -> ImageSource:\n object_key = generate_object_key(\n dataset=dataset,\n revision=revision,\n config=config,\n split=split,\n row_idx=row_idx,\n column=column,\n filename=filename,\n )\n if storage_client.overwrite or not storage_client.exists(object_key):\n image = ImageOps.exif_transpose(image) # type: ignore[assignment]\n buffer = BytesIO()\n image.save(fp=buffer, format=format)\n buffer.seek(0)\n with storage_client._fs.open(storage_client.get_full_path(object_key), \"wb\") as f:\n f.write(buffer.read())\n return ImageSource(src=storage_client.get_url(object_key), height=image.height, width=image.width)\n\n\ndef create_audio_file(\n dataset: str,\n revision: str,\n config: str,\n split: str,\n row_idx: int,\n column: str,\n audio_file_bytes: bytes,\n audio_file_extension: Optional[str],\n filename: str,\n storage_client: StorageClient,\n) -> list[AudioSource]:\n object_key = generate_object_key(\n dataset=dataset,\n revision=revision,\n config=config,\n split=split,\n row_idx=row_idx,\n column=column,\n filename=filename,\n )\n suffix = 
f\".{filename.split('.')[-1]}\"\n if suffix not in SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE:\n raise ValueError(\n f\"Audio format {suffix} is not supported. Supported formats are\"\n f\" {','.join(SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE)}.\"\n )\n media_type = SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE[suffix]\n\n if storage_client.overwrite or not storage_client.exists(object_key):\n audio_path = storage_client.get_full_path(object_key)\n if audio_file_extension == suffix:\n with storage_client._fs.open(audio_path, \"wb\") as f:\n f.write(audio_file_bytes)\n else: # we need to convert\n # might spawn a process to convert the audio file using ffmpeg\n with NamedTemporaryFile(\"wb\", suffix=audio_file_extension) as tmpfile:\n tmpfile.write(audio_file_bytes)\n segment: AudioSegment = AudioSegment.from_file(\n tmpfile.name, audio_file_extension[1:] if audio_file_extension else None\n )\n buffer = BytesIO()\n segment.export(buffer, format=suffix[1:])\n buffer.seek(0)\n with storage_client._fs.open(audio_path, \"wb\") as f:\n f.write(buffer.read())\n return [AudioSource(src=storage_client.get_url(object_key), type=media_type)]\n", "path": "libs/libcommon/src/libcommon/viewer_utils/asset.py"}]} | 1,823 | 187 |
gh_patches_debug_538 | rasdani/github-patches | git_diff | bokeh__bokeh-5378 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Correct comment in Dimension example plot
The comment in [this example](http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#dimensions) says `# create a new plot with a title`. I expect this example was copied from the one below is demonstrating how to add a title. This comment should be changed to `# create a new plot with specific dimensions`.
</issue>
<code>
[start of sphinx/source/docs/user_guide/source_examples/styling_dimensions.py]
1 from bokeh.plotting import figure, output_file, show
2
3 output_file("dimensions.html")
4
5 # create a new plot with a title
6 p = figure(plot_width=700)
7 p.plot_height = 300
8
9 p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)
10
11 show(p)
12
[end of sphinx/source/docs/user_guide/source_examples/styling_dimensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py
--- a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py
+++ b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py
@@ -2,7 +2,7 @@
output_file("dimensions.html")
-# create a new plot with a title
+# create a new plot with specific dimensions
p = figure(plot_width=700)
p.plot_height = 300
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py\n--- a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py\n+++ b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py\n@@ -2,7 +2,7 @@\n \n output_file(\"dimensions.html\")\n \n-# create a new plot with a title\n+# create a new plot with specific dimensions\n p = figure(plot_width=700)\n p.plot_height = 300\n", "issue": "Correct comment in Dimension example plot\nThe comment in [this example](http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#dimensions) says `# create a new plot with a title`. I expect this example was copied from the one below is demonstrating how to add a title. This comment should be changed to `# create a new plot with specific dimensions`.\n\n", "before_files": [{"content": "from bokeh.plotting import figure, output_file, show\n\noutput_file(\"dimensions.html\")\n\n# create a new plot with a title\np = figure(plot_width=700)\np.plot_height = 300\n\np.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/source_examples/styling_dimensions.py"}]} | 736 | 117 |
gh_patches_debug_3795 | rasdani/github-patches | git_diff | magenta__magenta-541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
documentation: missing dependencies
I installed magenta from scratch on a clean Mac using the README and some tests from`bazel test //magenta/...` were failing because of the missing dependencies:
```
pip install IPython
pip install intervaltree
```
The other failures are related to https://github.com/tensorflow/magenta/issues/529 and apparently should be fixed with the upcoming version of Bazel
This issue might be related to the fact that I wasn't using conda, which could totally be a user error.
</issue>
<code>
[start of magenta/tools/pip/setup.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """A setuptools based setup module for magenta."""
15
16 from setuptools import find_packages
17 from setuptools import setup
18
19 # Bit of a hack to parse the version string stored in version.py without
20 # executing __init__.py, which will end up requiring a bunch of dependencies to
21 # execute (e.g., tensorflow, pretty_midi, etc.).
22 # Makes the __version__ variable available.
23 execfile('magenta/version.py')
24
25
26 REQUIRED_PACKAGES = [
27 'intervaltree >= 2.1.0',
28 'mido >= 1.1.17',
29 'Pillow >= 3.4.2',
30 'pretty_midi >= 0.2.6',
31 'scipy >= 0.18.1',
32 'tensorflow >= 1.0.0',
33 'matplotlib >= 1.5.3',
34 'wheel',
35 ]
36
37 CONSOLE_SCRIPTS = [
38 'magenta.interfaces.midi.magenta_midi',
39 'magenta.models.drums_rnn.drums_rnn_create_dataset',
40 'magenta.models.drums_rnn.drums_rnn_generate',
41 'magenta.models.drums_rnn.drums_rnn_train',
42 'magenta.models.image_stylization.image_stylization_create_dataset',
43 'magenta.models.image_stylization.image_stylization_evaluate',
44 'magenta.models.image_stylization.image_stylization_finetune',
45 'magenta.models.image_stylization.image_stylization_train',
46 'magenta.models.image_stylization.image_stylization_transform',
47 'magenta.models.improv_rnn.improv_rnn_create_dataset',
48 'magenta.models.improv_rnn.improv_rnn_generate',
49 'magenta.models.improv_rnn.improv_rnn_train',
50 'magenta.models.melody_rnn.melody_rnn_create_dataset',
51 'magenta.models.melody_rnn.melody_rnn_generate',
52 'magenta.models.melody_rnn.melody_rnn_train',
53 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',
54 'magenta.models.polyphony_rnn.polyphony_rnn_generate',
55 'magenta.models.polyphony_rnn.polyphony_rnn_train',
56 'magenta.models.rl_tuner.rl_tuner_train',
57 'magenta.scripts.convert_dir_to_note_sequences',
58 ]
59
60 setup(
61 name='magenta',
62 version=__version__, # pylint: disable=undefined-variable
63 description='Use machine learning to create art and music',
64 long_description='',
65 url='https://magenta.tensorflow.org/',
66 author='Google Inc.',
67 author_email='[email protected]',
68 license='Apache 2',
69 # PyPI package information.
70 classifiers=[
71 'Development Status :: 4 - Beta',
72 'Intended Audience :: Developers',
73 'Intended Audience :: Education',
74 'Intended Audience :: Science/Research',
75 'License :: OSI Approved :: Apache Software License',
76 'Programming Language :: Python :: 2.7',
77 'Topic :: Scientific/Engineering :: Mathematics',
78 'Topic :: Software Development :: Libraries :: Python Modules',
79 'Topic :: Software Development :: Libraries',
80 ],
81 keywords='tensorflow machine learning magenta music art',
82
83 packages=find_packages(),
84 install_requires=REQUIRED_PACKAGES,
85 entry_points={
86 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in
87 ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],
88 },
89
90 include_package_data=True,
91 package_data={
92 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],
93 },
94 )
95
96
[end of magenta/tools/pip/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py
--- a/magenta/tools/pip/setup.py
+++ b/magenta/tools/pip/setup.py
@@ -24,13 +24,14 @@
REQUIRED_PACKAGES = [
+ 'IPython',
+ 'Pillow >= 3.4.2',
'intervaltree >= 2.1.0',
+ 'matplotlib >= 1.5.3',
'mido >= 1.1.17',
- 'Pillow >= 3.4.2',
'pretty_midi >= 0.2.6',
'scipy >= 0.18.1',
'tensorflow >= 1.0.0',
- 'matplotlib >= 1.5.3',
'wheel',
]
| {"golden_diff": "diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py\n--- a/magenta/tools/pip/setup.py\n+++ b/magenta/tools/pip/setup.py\n@@ -24,13 +24,14 @@\n \n \n REQUIRED_PACKAGES = [\n+ 'IPython',\n+ 'Pillow >= 3.4.2',\n 'intervaltree >= 2.1.0',\n+ 'matplotlib >= 1.5.3',\n 'mido >= 1.1.17',\n- 'Pillow >= 3.4.2',\n 'pretty_midi >= 0.2.6',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.0.0',\n- 'matplotlib >= 1.5.3',\n 'wheel',\n ]\n", "issue": "documentation: missing dependencies\nI installed magenta from scratch on a clean Mac using the README and some tests from`bazel test //magenta/...` were failing because of the missing dependencies:\r\n\r\n```\r\npip install IPython\r\npip install intervaltree\r\n```\r\n\r\nThe other failures are related to https://github.com/tensorflow/magenta/issues/529 and apparently should be fixed with the upcoming version of Bazel\r\n\r\nThis issue might be related to the fact that I wasn't using conda, which could totally be a user error.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'intervaltree >= 2.1.0',\n 'mido >= 1.1.17',\n 'Pillow >= 3.4.2',\n 'pretty_midi >= 0.2.6',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.0.0',\n 'matplotlib >= 1.5.3',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n 
long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n\n", "path": "magenta/tools/pip/setup.py"}]} | 1,729 | 186 |
gh_patches_debug_19493 | rasdani/github-patches | git_diff | xorbitsai__inference-566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: configurable XINFERENCE_HOME
### Is your feature request related to a problem? Please describe
XINFERENCE_HOME should be configurable.
### Describe the solution you'd like
We could starts with supporting the XINFERENCE_HOME env variable. This could be done by adding an util `get_xinference_home` and make sure this is the only way to get xinference home.
</issue>
<code>
[start of xinference/constants.py]
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from pathlib import Path
17
18 XINFERENCE_HOME = str(Path.home() / ".xinference")
19 XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache")
20 XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model")
21 XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs")
22 XINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, "image")
23
24 XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1"
25 XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0"
26 XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997
27
28 XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
29 XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
30
[end of xinference/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xinference/constants.py b/xinference/constants.py
--- a/xinference/constants.py
+++ b/xinference/constants.py
@@ -15,7 +15,16 @@
import os
from pathlib import Path
-XINFERENCE_HOME = str(Path.home() / ".xinference")
+XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
+XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
+XINFERENCE_ENV_HOME_PATH = "XINFERENCE_HOME"
+
+
+def get_xinference_home():
+ return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference"))
+
+
+XINFERENCE_HOME = get_xinference_home()
XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache")
XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model")
XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs")
@@ -24,6 +33,3 @@
XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1"
XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0"
XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997
-
-XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
-XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
| {"golden_diff": "diff --git a/xinference/constants.py b/xinference/constants.py\n--- a/xinference/constants.py\n+++ b/xinference/constants.py\n@@ -15,7 +15,16 @@\n import os\n from pathlib import Path\n \n-XINFERENCE_HOME = str(Path.home() / \".xinference\")\n+XINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\n+XINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\n+XINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\n+\n+\n+def get_xinference_home():\n+ return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n+\n+\n+XINFERENCE_HOME = get_xinference_home()\n XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\n XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\n XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\n@@ -24,6 +33,3 @@\n XINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\n XINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\n XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\n-\n-XINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\n-XINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\n", "issue": "ENH: configurable XINFERENCE_HOME\n### Is your feature request related to a problem? Please describe\r\nXINFERENCE_HOME should be configurable.\r\n\r\n### Describe the solution you'd like\r\nWe could starts with supporting the XINFERENCE_HOME env variable. This could be done by adding an util `get_xinference_home` and make sure this is the only way to get xinference home.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_HOME = str(Path.home() / \".xinference\")\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\n", "path": "xinference/constants.py"}]} | 975 | 295 |
gh_patches_debug_60837 | rasdani/github-patches | git_diff | openedx__ecommerce-348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running migrations for Travis builds
We run migrations to ensure no migrations are missing, and they work on fresh installs.
</issue>
<code>
[start of ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 def create_shipping_event(apps, schema_editor):
8 """
9
10 Create a single new shipping event type that can be applied to an order. This will allow us to initiate order
11 shipment.
12
13 """
14 # Create all our Product Types.
15 ShippingEventType = apps.get_model("order", "ShippingEventType")
16 shipped_event = ShippingEventType(code="shipped", name="Shipped")
17 shipped_event.save()
18
19
20 class Migration(migrations.Migration):
21
22 dependencies = [
23 ('order', '0002_auto_20141007_2032'),
24 ]
25
26 operations = [
27 migrations.RunPython(create_shipping_event),
28 ]
29
[end of ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
--- a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
+++ b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
@@ -13,8 +13,7 @@
"""
# Create all our Product Types.
ShippingEventType = apps.get_model("order", "ShippingEventType")
- shipped_event = ShippingEventType(code="shipped", name="Shipped")
- shipped_event.save()
+ ShippingEventType.objects.create(code="shipped", name="Shipped")
class Migration(migrations.Migration):
| {"golden_diff": "diff --git a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n--- a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n+++ b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n@@ -13,8 +13,7 @@\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n- shipped_event = ShippingEventType(code=\"shipped\", name=\"Shipped\")\n- shipped_event.save()\n+ ShippingEventType.objects.create(code=\"shipped\", name=\"Shipped\")\n \n \n class Migration(migrations.Migration):\n", "issue": "Running migrations for Travis builds\nWe run migrations to ensure no migrations are missing, and they work on fresh installs.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_shipping_event(apps, schema_editor):\n \"\"\"\n\n Create a single new shipping event type that can be applied to an order. This will allow us to initiate order\n shipment.\n\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n shipped_event = ShippingEventType(code=\"shipped\", name=\"Shipped\")\n shipped_event.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('order', '0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.RunPython(create_shipping_event),\n ]\n", "path": "ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py"}]} | 819 | 208 |
gh_patches_debug_6915 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3596 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_DOCKER_1 rule not failed when using EXPOSE 22/TCP
**Issue**
If it is related to an existing check, CKV_DOCKER_1.
Dockerfile EXPOSE can accept port using tcp proto by defaut if not specified or you can also specify the protocol with /tcp or /udp.
When using "EXPOSE 22/tcp" the rule does not detect the SSH port. something like '22/tcp'.split('/')[0] will return port without protocol or we can test both '22' or '22/tcp' in the rule.
**Examples**
Issue can be replicated with a Dockerfile which uses "EXPOSE 22/tcp".
**Version**
- Checkov Version 2.1.87
**Specification**
EXPOSE specification https://docs.docker.com/engine/reference/builder/#expose
</issue>
<code>
[start of checkov/dockerfile/checks/ExposePort22.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
7
8 if TYPE_CHECKING:
9 from dockerfile_parse.parser import _Instruction
10
11
12 class ExposePort22(BaseDockerfileCheck):
13 def __init__(self) -> None:
14 name = "Ensure port 22 is not exposed"
15 id = "CKV_DOCKER_1"
16 supported_instructions = ("EXPOSE",)
17 categories = (CheckCategories.NETWORKING,)
18 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
19
20 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
21 for expose in conf:
22 if "22" in expose["value"].split(" "):
23 return CheckResult.FAILED, [expose]
24
25 return CheckResult.PASSED, None
26
27
28 check = ExposePort22()
29
[end of checkov/dockerfile/checks/ExposePort22.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/dockerfile/checks/ExposePort22.py b/checkov/dockerfile/checks/ExposePort22.py
--- a/checkov/dockerfile/checks/ExposePort22.py
+++ b/checkov/dockerfile/checks/ExposePort22.py
@@ -19,7 +19,7 @@
def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
for expose in conf:
- if "22" in expose["value"].split(" "):
+ if any(port in expose["value"].split(" ") for port in ("22", "22/tcp")):
return CheckResult.FAILED, [expose]
return CheckResult.PASSED, None
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/ExposePort22.py b/checkov/dockerfile/checks/ExposePort22.py\n--- a/checkov/dockerfile/checks/ExposePort22.py\n+++ b/checkov/dockerfile/checks/ExposePort22.py\n@@ -19,7 +19,7 @@\n \n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n for expose in conf:\n- if \"22\" in expose[\"value\"].split(\" \"):\n+ if any(port in expose[\"value\"].split(\" \") for port in (\"22\", \"22/tcp\")):\n return CheckResult.FAILED, [expose]\n \n return CheckResult.PASSED, None\n", "issue": "CKV_DOCKER_1 rule not failed when using EXPOSE 22/TCP\n**Issue**\r\nIf it is related to an existing check, CKV_DOCKER_1.\r\nDockerfile EXPOSE can accept port using tcp proto by defaut if not specified or you can also specify the protocol with /tcp or /udp.\r\n\r\nWhen using \"EXPOSE 22/tcp\" the rule does not detect the SSH port. something like '22/tcp'.split('/')[0] will return port without protocol or we can test both '22' or '22/tcp' in the rule.\r\n\r\n**Examples**\r\nIssue can be replicated with a Dockerfile which uses \"EXPOSE 22/tcp\".\r\n\r\n**Version**\r\n - Checkov Version 2.1.87\r\n\r\n**Specification**\r\nEXPOSE specification https://docs.docker.com/engine/reference/builder/#expose\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\n\nclass ExposePort22(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure port 22 is not exposed\"\n id = \"CKV_DOCKER_1\"\n supported_instructions = (\"EXPOSE\",)\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n for expose in conf:\n if \"22\" in expose[\"value\"].split(\" \"):\n return CheckResult.FAILED, [expose]\n\n return CheckResult.PASSED, None\n\n\ncheck = ExposePort22()\n", "path": "checkov/dockerfile/checks/ExposePort22.py"}]} | 1,013 | 171 |
gh_patches_debug_32547 | rasdani/github-patches | git_diff | buildbot__buildbot-6996 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
There's a small window where events will be missed by reporters during reconfig
The code in `ReporterBase.reconfigService()` stops consuming events for all event keys and then starts consuming events for all event keys again. This is not necessary for any wanted event keys which are wanted in both old and new configuration. The code should stop consuming events for no longer wanted event keys, start consuming events for newly wanted event keys and leave the rest untouched.
</issue>
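A minimal sketch of the reconfiguration the issue asks for — not the project's actual patch (that appears in this row's golden diff below), and it assumes `_event_consumers` is switched from a list to a dict keyed by event key:

```python
@defer.inlineCallbacks
def reconfigService(self, generators):
    self.generators = generators
    wanted = set()
    for g in generators:
        wanted.update(g.wanted_event_keys)
    # stop only the consumers whose key is no longer wanted
    for key in set(self._event_consumers) - wanted:
        consumer = self._event_consumers.pop(key)
        yield consumer.stopConsuming()
    # start consumers only for newly wanted keys; existing ones stay untouched
    for key in sorted(wanted - set(self._event_consumers)):
        self._event_consumers[key] = yield self.master.mq.startConsuming(
            self._got_event, key)
```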
<code>
[start of master/buildbot/reporters/base.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import abc
17
18 from twisted.internet import defer
19 from twisted.python import log
20
21 from buildbot import config
22 from buildbot.reporters import utils
23 from buildbot.util import service
24 from buildbot.util import tuplematch
25
26 ENCODING = 'utf-8'
27
28
29 class ReporterBase(service.BuildbotService):
30 name = None
31 __meta__ = abc.ABCMeta
32
33 compare_attrs = ['generators']
34
35 def __init__(self, *args, **kwargs):
36 super().__init__(*args, **kwargs)
37 self.generators = None
38 self._event_consumers = []
39 self._pending_got_event_calls = {}
40
41 def checkConfig(self, generators):
42 if not isinstance(generators, list):
43 config.error('{}: generators argument must be a list')
44
45 for g in generators:
46 g.check()
47
48 if self.name is None:
49 self.name = self.__class__.__name__
50 for g in generators:
51 self.name += "_" + g.generate_name()
52
53 @defer.inlineCallbacks
54 def reconfigService(self, generators):
55
56 for consumer in self._event_consumers:
57 yield consumer.stopConsuming()
58 self._event_consumers = []
59
60 self.generators = generators
61
62 wanted_event_keys = set()
63 for g in self.generators:
64 wanted_event_keys.update(g.wanted_event_keys)
65
66 for key in sorted(list(wanted_event_keys)):
67 consumer = yield self.master.mq.startConsuming(self._got_event, key)
68 self._event_consumers.append(consumer)
69
70 @defer.inlineCallbacks
71 def stopService(self):
72 for consumer in self._event_consumers:
73 yield consumer.stopConsuming()
74 self._event_consumers = []
75
76 for pending_call in list(self._pending_got_event_calls.values()):
77 yield pending_call
78 self._pending_got_event_calls = {}
79
80 yield super().stopService()
81
82 def _does_generator_want_key(self, generator, key):
83 for filter in generator.wanted_event_keys:
84 if tuplematch.matchTuple(key, filter):
85 return True
86 return False
87
88 def _get_chain_key_for_event(self, key, msg):
89 if key[0] in ["builds", "buildrequests"]:
90 return ("buildrequestid", msg["buildrequestid"])
91 return None
92
93 @defer.inlineCallbacks
94 def _got_event(self, key, msg):
95 chain_key = self._get_chain_key_for_event(key, msg)
96 if chain_key is not None:
97 d = defer.Deferred()
98 pending_call = self._pending_got_event_calls.get(chain_key)
99 self._pending_got_event_calls[chain_key] = d
100 # Wait for previously pending call, if any, to ensure
101 # reports are sent out in the order events were queued.
102 if pending_call is not None:
103 yield pending_call
104
105 try:
106 reports = []
107 for g in self.generators:
108 if self._does_generator_want_key(g, key):
109 report = yield g.generate(self.master, self, key, msg)
110 if report is not None:
111 reports.append(report)
112
113 if reports:
114 yield self.sendMessage(reports)
115 except Exception as e:
116 log.err(e, 'Got exception when handling reporter events')
117
118 if chain_key is not None:
119 if self._pending_got_event_calls.get(chain_key) == d:
120 del self._pending_got_event_calls[chain_key]
121 d.callback(None) # This event is now fully handled
122
123 def getResponsibleUsersForBuild(self, master, buildid):
124 # Use library method but subclassers may want to override that
125 return utils.getResponsibleUsersForBuild(master, buildid)
126
127 @abc.abstractmethod
128 def sendMessage(self, reports):
129 pass
130
[end of master/buildbot/reporters/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py
--- a/master/buildbot/reporters/base.py
+++ b/master/buildbot/reporters/base.py
@@ -35,7 +35,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generators = None
- self._event_consumers = []
+ self._event_consumers = {}
self._pending_got_event_calls = {}
def checkConfig(self, generators):
@@ -52,26 +52,29 @@
@defer.inlineCallbacks
def reconfigService(self, generators):
-
- for consumer in self._event_consumers:
- yield consumer.stopConsuming()
- self._event_consumers = []
-
self.generators = generators
wanted_event_keys = set()
for g in self.generators:
wanted_event_keys.update(g.wanted_event_keys)
+ # Remove consumers for keys that are no longer wanted
+ for key in list(self._event_consumers.keys()):
+ if key not in wanted_event_keys:
+ yield self._event_consumers[key].stopConsuming()
+ del self._event_consumers[key]
+
+ # Add consumers for new keys
for key in sorted(list(wanted_event_keys)):
- consumer = yield self.master.mq.startConsuming(self._got_event, key)
- self._event_consumers.append(consumer)
+ if key not in self._event_consumers:
+ self._event_consumers[key] = \
+ yield self.master.mq.startConsuming(self._got_event, key)
@defer.inlineCallbacks
def stopService(self):
- for consumer in self._event_consumers:
+ for consumer in self._event_consumers.values():
yield consumer.stopConsuming()
- self._event_consumers = []
+ self._event_consumers = {}
for pending_call in list(self._pending_got_event_calls.values()):
yield pending_call
| {"golden_diff": "diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py\n--- a/master/buildbot/reporters/base.py\n+++ b/master/buildbot/reporters/base.py\n@@ -35,7 +35,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n- self._event_consumers = []\n+ self._event_consumers = {}\n self._pending_got_event_calls = {}\n \n def checkConfig(self, generators):\n@@ -52,26 +52,29 @@\n \n @defer.inlineCallbacks\n def reconfigService(self, generators):\n-\n- for consumer in self._event_consumers:\n- yield consumer.stopConsuming()\n- self._event_consumers = []\n-\n self.generators = generators\n \n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n \n+ # Remove consumers for keys that are no longer wanted\n+ for key in list(self._event_consumers.keys()):\n+ if key not in wanted_event_keys:\n+ yield self._event_consumers[key].stopConsuming()\n+ del self._event_consumers[key]\n+\n+ # Add consumers for new keys\n for key in sorted(list(wanted_event_keys)):\n- consumer = yield self.master.mq.startConsuming(self._got_event, key)\n- self._event_consumers.append(consumer)\n+ if key not in self._event_consumers:\n+ self._event_consumers[key] = \\\n+ yield self.master.mq.startConsuming(self._got_event, key)\n \n @defer.inlineCallbacks\n def stopService(self):\n- for consumer in self._event_consumers:\n+ for consumer in self._event_consumers.values():\n yield consumer.stopConsuming()\n- self._event_consumers = []\n+ self._event_consumers = {}\n \n for pending_call in list(self._pending_got_event_calls.values()):\n yield pending_call\n", "issue": "There's a small window where events will be missed by reporters during reconfig\nThe code in `ReporterBase.reconfigService()` stops consuming events for all event keys and then starts consuming events for all event keys again. This is not necessary for any wanted event keys which are wanted in both old and new configuration. The code should stop consuming events for no longer wanted event keys, start consuming events for newly wanted event keys and leave the rest untouched.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport abc\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot.reporters import utils\nfrom buildbot.util import service\nfrom buildbot.util import tuplematch\n\nENCODING = 'utf-8'\n\n\nclass ReporterBase(service.BuildbotService):\n name = None\n __meta__ = abc.ABCMeta\n\n compare_attrs = ['generators']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n self._event_consumers = []\n self._pending_got_event_calls = {}\n\n def checkConfig(self, generators):\n if not isinstance(generators, list):\n config.error('{}: generators argument must be a list')\n\n for g in generators:\n g.check()\n\n if self.name is None:\n self.name = self.__class__.__name__\n for g in generators:\n self.name += \"_\" + g.generate_name()\n\n @defer.inlineCallbacks\n def reconfigService(self, generators):\n\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n self.generators = generators\n\n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n\n for key in sorted(list(wanted_event_keys)):\n consumer = yield self.master.mq.startConsuming(self._got_event, key)\n self._event_consumers.append(consumer)\n\n @defer.inlineCallbacks\n def stopService(self):\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n for pending_call in list(self._pending_got_event_calls.values()):\n yield pending_call\n self._pending_got_event_calls = {}\n\n yield super().stopService()\n\n def _does_generator_want_key(self, generator, key):\n for filter in generator.wanted_event_keys:\n if tuplematch.matchTuple(key, filter):\n return True\n return False\n\n def _get_chain_key_for_event(self, key, msg):\n if key[0] in [\"builds\", \"buildrequests\"]:\n return (\"buildrequestid\", msg[\"buildrequestid\"])\n return None\n\n @defer.inlineCallbacks\n def _got_event(self, key, msg):\n chain_key = self._get_chain_key_for_event(key, msg)\n if chain_key is not None:\n d = defer.Deferred()\n pending_call = self._pending_got_event_calls.get(chain_key)\n self._pending_got_event_calls[chain_key] = d\n # Wait for previously pending call, if any, to ensure\n # reports are sent out in the order events were queued.\n if pending_call is not None:\n yield pending_call\n\n try:\n reports = []\n for g in self.generators:\n if self._does_generator_want_key(g, key):\n report = yield g.generate(self.master, self, key, msg)\n if report is not None:\n reports.append(report)\n\n if reports:\n yield self.sendMessage(reports)\n except Exception as e:\n log.err(e, 'Got exception when handling reporter events')\n\n if chain_key is not None:\n if self._pending_got_event_calls.get(chain_key) == d:\n del self._pending_got_event_calls[chain_key]\n d.callback(None) # This event is now fully handled\n\n def getResponsibleUsersForBuild(self, master, buildid):\n # Use library method but subclassers may want to override that\n return utils.getResponsibleUsersForBuild(master, buildid)\n\n @abc.abstractmethod\n def sendMessage(self, reports):\n pass\n", "path": "master/buildbot/reporters/base.py"}]} | 1,900 | 447 
|
gh_patches_debug_13512 | rasdani/github-patches | git_diff | larq__larq-596 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__version__
### Feature motivation
Is there a way to dynamically poll the version of larq (or lce or larq-zoo for that matter)?
If not, could it be done using `__version__` as usual for standard library modules?
### Feature description
```
import larq
print(larq.__version__)
```
</issue>
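For context, the standard way to satisfy this request without hard-coding the version twice is to read it from the installed package metadata at import time — a sketch, assuming the distribution name is `larq` (this is also the direction the golden diff below takes):

```python
# larq/__init__.py (sketch)
try:
    from importlib import metadata  # stdlib on Python >= 3.8
except ImportError:
    import importlib_metadata as metadata  # backport package for 3.6/3.7

# resolved from the installed distribution, so it always matches setup.py
__version__ = metadata.version("larq")
```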
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="larq",
11 version="0.10.1",
12 python_requires=">=3.6",
13 author="Plumerai",
14 author_email="[email protected]",
15 description="An Open Source Machine Learning Library for Training Binarized Neural Networks",
16 long_description=readme(),
17 long_description_content_type="text/markdown",
18 url="https://larq.dev/",
19 packages=find_packages(exclude=["larq.snapshots"]),
20 license="Apache 2.0",
21 install_requires=[
22 "numpy >= 1.15.4, < 2.0",
23 "terminaltables>=3.1.0",
24 "dataclasses ; python_version<'3.7'",
25 ],
26 extras_require={
27 "tensorflow": ["tensorflow>=1.14.0"],
28 "tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
29 "test": [
30 "black==20.8b1",
31 "flake8>=3.7.9,<3.9.0",
32 "isort==5.6.4",
33 "packaging>=19.2,<21.0",
34 "pytest>=5.2.4,<6.2.0",
35 "pytest-cov>=2.8.1,<2.11.0",
36 "pytest-xdist>=1.30,<2.2",
37 "pytest-mock>=2.0,<3.4",
38 "pytype==2020.10.8",
39 "snapshottest>=0.5.1,<0.7.0",
40 ],
41 },
42 classifiers=[
43 "Development Status :: 4 - Beta",
44 "Intended Audience :: Developers",
45 "Intended Audience :: Education",
46 "Intended Audience :: Science/Research",
47 "License :: OSI Approved :: Apache Software License",
48 "Programming Language :: Python :: 3",
49 "Programming Language :: Python :: 3 :: Only",
50 "Programming Language :: Python :: 3.6",
51 "Programming Language :: Python :: 3.7",
52 "Programming Language :: Python :: 3.8",
53 "Topic :: Scientific/Engineering",
54 "Topic :: Scientific/Engineering :: Mathematics",
55 "Topic :: Scientific/Engineering :: Artificial Intelligence",
56 "Topic :: Software Development",
57 "Topic :: Software Development :: Libraries",
58 "Topic :: Software Development :: Libraries :: Python Modules",
59 ],
60 )
61
[end of setup.py]
[start of larq/__init__.py]
1 from larq import (
2 activations,
3 callbacks,
4 constraints,
5 context,
6 layers,
7 math,
8 metrics,
9 models,
10 optimizers,
11 quantizers,
12 utils,
13 )
14
15 __all__ = [
16 "layers",
17 "activations",
18 "callbacks",
19 "constraints",
20 "context",
21 "math",
22 "metrics",
23 "models",
24 "quantizers",
25 "optimizers",
26 "utils",
27 ]
28
[end of larq/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/larq/__init__.py b/larq/__init__.py
--- a/larq/__init__.py
+++ b/larq/__init__.py
@@ -12,6 +12,14 @@
utils,
)
+try:
+ from importlib import metadata # type: ignore
+except ImportError:
+ # Running on pre-3.8 Python; use importlib-metadata package
+ import importlib_metadata as metadata # type: ignore
+
+__version__ = metadata.version("larq")
+
__all__ = [
"layers",
"activations",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@
"numpy >= 1.15.4, < 2.0",
"terminaltables>=3.1.0",
"dataclasses ; python_version<'3.7'",
+ "importlib-metadata ~= 2.0 ; python_version<'3.8'",
],
extras_require={
"tensorflow": ["tensorflow>=1.14.0"],
| {"golden_diff": "diff --git a/larq/__init__.py b/larq/__init__.py\n--- a/larq/__init__.py\n+++ b/larq/__init__.py\n@@ -12,6 +12,14 @@\n utils,\n )\n \n+try:\n+ from importlib import metadata # type: ignore\n+except ImportError:\n+ # Running on pre-3.8 Python; use importlib-metadata package\n+ import importlib_metadata as metadata # type: ignore\n+\n+__version__ = metadata.version(\"larq\")\n+\n __all__ = [\n \"layers\",\n \"activations\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,6 +22,7 @@\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n+ \"importlib-metadata ~= 2.0 ; python_version<'3.8'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n", "issue": "__version__\n### Feature motivation\r\nIs there a way to dynamically poll the version of larq (or lce or larq-zoo for that matter)?\r\nIf not, could it be done using `__version__` as usual for standard library modules?\r\n\r\n### Feature description\r\n```\r\nimport larq\r\nprint(larq.__version__)\r\n```\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.10.1\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"black==20.8b1\",\n \"flake8>=3.7.9,<3.9.0\",\n \"isort==5.6.4\",\n \"packaging>=19.2,<21.0\",\n \"pytest>=5.2.4,<6.2.0\",\n \"pytest-cov>=2.8.1,<2.11.0\",\n \"pytest-xdist>=1.30,<2.2\",\n \"pytest-mock>=2.0,<3.4\",\n \"pytype==2020.10.8\",\n \"snapshottest>=0.5.1,<0.7.0\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "from larq import (\n activations,\n callbacks,\n constraints,\n context,\n layers,\n math,\n metrics,\n models,\n optimizers,\n quantizers,\n utils,\n)\n\n__all__ = [\n \"layers\",\n \"activations\",\n \"callbacks\",\n \"constraints\",\n \"context\",\n \"math\",\n \"metrics\",\n \"models\",\n \"quantizers\",\n \"optimizers\",\n \"utils\",\n]\n", "path": "larq/__init__.py"}]} | 1,430 | 250 |
gh_patches_debug_36493 | rasdani/github-patches | git_diff | pwndbg__pwndbg-2087 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Port checksec command to native command and add colors
This is annoying IMHO. I want colors and no pwntools update checks here!

</issue>
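One way to drop the subprocess (and with it the pwntools CLI's update-check banner) is to call pwntools as a library instead — a sketch of that idea, mirroring the approach the golden diff below ends up using:

```python
from pwnlib.elf import ELF

def get_raw_out(local_path: str) -> str:
    # library call instead of shelling out to `pwn checksec`, so there is
    # no CLI update check; colorization stays in pwndbg's color_lines()
    elf = ELF(local_path)
    return elf.checksec()
```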
<code>
[start of pwndbg/wrappers/checksec.py]
1 from __future__ import annotations
2
3 from subprocess import CalledProcessError
4
5 import pwndbg.commands
6 import pwndbg.lib.cache
7 import pwndbg.wrappers
8
9 cmd_name = "checksec"
10 cmd_pwntools = ["pwn", "checksec"]
11
12
13 @pwndbg.wrappers.OnlyWithCommand(cmd_name, cmd_pwntools)
14 @pwndbg.lib.cache.cache_until("objfile")
15 def get_raw_out(local_path: str) -> str:
16 try:
17 return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file=" + local_path])
18 except CalledProcessError:
19 pass
20 try:
21 return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file", local_path])
22 except CalledProcessError:
23 pass
24 return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])
25
26
27 @pwndbg.wrappers.OnlyWithCommand(cmd_name, cmd_pwntools)
28 def relro_status(local_path: str) -> str:
29 relro = "No RELRO"
30 out = get_raw_out(local_path)
31
32 if "Full RELRO" in out:
33 relro = "Full RELRO"
34 elif "Partial RELRO" in out:
35 relro = "Partial RELRO"
36
37 return relro
38
39
40 @pwndbg.wrappers.OnlyWithCommand(cmd_name, cmd_pwntools)
41 def pie_status(local_path: str) -> str:
42 pie = "No PIE"
43 out = get_raw_out(local_path)
44
45 if "PIE enabled" in out:
46 pie = "PIE enabled"
47
48 return pie
49
[end of pwndbg/wrappers/checksec.py]
[start of pwndbg/commands/checksec.py]
1 from __future__ import annotations
2
3 import pwndbg.color
4 import pwndbg.commands
5 import pwndbg.gdblib.file
6 import pwndbg.wrappers.checksec
7
8
9 def color_line(line: str) -> str:
10 return pwndbg.color.normal(
11 line.replace("*", pwndbg.color.green("*"))
12 .replace(":", f":{pwndbg.color.GREEN}")
13 .replace("No", f"{pwndbg.color.RED}No")
14 )
15
16
17 def color_lines(output: str) -> str:
18 return "\n".join(map(color_line, output.split("\n")))
19
20
21 @pwndbg.commands.ArgparsedCommand("Prints out the binary security settings using `checksec`.")
22 @pwndbg.commands.OnlyWithFile
23 def checksec() -> None:
24 output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())
25 print(color_lines(output))
26
[end of pwndbg/commands/checksec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/commands/checksec.py b/pwndbg/commands/checksec.py
--- a/pwndbg/commands/checksec.py
+++ b/pwndbg/commands/checksec.py
@@ -1,10 +1,18 @@
from __future__ import annotations
+import argparse
+
import pwndbg.color
import pwndbg.commands
import pwndbg.gdblib.file
import pwndbg.wrappers.checksec
+parser = argparse.ArgumentParser(
+ description="Prints out the binary security settings using `checksec`."
+)
+
+parser.add_argument("-f", "--file", type=str, help="Specify the file to run `checksec` on.")
+
def color_line(line: str) -> str:
return pwndbg.color.normal(
@@ -18,8 +26,9 @@
return "\n".join(map(color_line, output.split("\n")))
[email protected]("Prints out the binary security settings using `checksec`.")
[email protected](parser, command_name="checksec")
@pwndbg.commands.OnlyWithFile
-def checksec() -> None:
- output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())
+def checksec(file: str) -> None:
+ local_path = file or pwndbg.gdblib.file.get_proc_exe_file()
+ output = pwndbg.wrappers.checksec.get_raw_out(local_path)
print(color_lines(output))
diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py
--- a/pwndbg/wrappers/checksec.py
+++ b/pwndbg/wrappers/checksec.py
@@ -1,30 +1,16 @@
from __future__ import annotations
-from subprocess import CalledProcessError
+from pwnlib.elf import ELF
-import pwndbg.commands
-import pwndbg.lib.cache
-import pwndbg.wrappers
-cmd_name = "checksec"
-cmd_pwntools = ["pwn", "checksec"]
+def get_raw_out(local_path: str) -> str:
+ elf = ELF(local_path)
+ output = f"File: {elf.path}\n"
+ output += f"Arch: {elf.arch}\n"
+ output += elf.checksec()
+ return output
[email protected](cmd_name, cmd_pwntools)
[email protected]_until("objfile")
-def get_raw_out(local_path: str) -> str:
- try:
- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file=" + local_path])
- except CalledProcessError:
- pass
- try:
- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file", local_path])
- except CalledProcessError:
- pass
- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])
-
-
[email protected](cmd_name, cmd_pwntools)
def relro_status(local_path: str) -> str:
relro = "No RELRO"
out = get_raw_out(local_path)
@@ -37,7 +23,6 @@
return relro
[email protected](cmd_name, cmd_pwntools)
def pie_status(local_path: str) -> str:
pie = "No PIE"
out = get_raw_out(local_path)
| {"golden_diff": "diff --git a/pwndbg/commands/checksec.py b/pwndbg/commands/checksec.py\n--- a/pwndbg/commands/checksec.py\n+++ b/pwndbg/commands/checksec.py\n@@ -1,10 +1,18 @@\n from __future__ import annotations\n \n+import argparse\n+\n import pwndbg.color\n import pwndbg.commands\n import pwndbg.gdblib.file\n import pwndbg.wrappers.checksec\n \n+parser = argparse.ArgumentParser(\n+ description=\"Prints out the binary security settings using `checksec`.\"\n+)\n+\n+parser.add_argument(\"-f\", \"--file\", type=str, help=\"Specify the file to run `checksec` on.\")\n+\n \n def color_line(line: str) -> str:\n return pwndbg.color.normal(\n@@ -18,8 +26,9 @@\n return \"\\n\".join(map(color_line, output.split(\"\\n\")))\n \n \[email protected](\"Prints out the binary security settings using `checksec`.\")\[email protected](parser, command_name=\"checksec\")\n @pwndbg.commands.OnlyWithFile\n-def checksec() -> None:\n- output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())\n+def checksec(file: str) -> None:\n+ local_path = file or pwndbg.gdblib.file.get_proc_exe_file()\n+ output = pwndbg.wrappers.checksec.get_raw_out(local_path)\n print(color_lines(output))\ndiff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py\n--- a/pwndbg/wrappers/checksec.py\n+++ b/pwndbg/wrappers/checksec.py\n@@ -1,30 +1,16 @@\n from __future__ import annotations\n \n-from subprocess import CalledProcessError\n+from pwnlib.elf import ELF\n \n-import pwndbg.commands\n-import pwndbg.lib.cache\n-import pwndbg.wrappers\n \n-cmd_name = \"checksec\"\n-cmd_pwntools = [\"pwn\", \"checksec\"]\n+def get_raw_out(local_path: str) -> str:\n+ elf = ELF(local_path)\n+ output = f\"File: {elf.path}\\n\"\n+ output += f\"Arch: {elf.arch}\\n\"\n+ output += elf.checksec()\n+ return output\n \n \[email protected](cmd_name, cmd_pwntools)\[email protected]_until(\"objfile\")\n-def get_raw_out(local_path: str) -> str:\n- try:\n- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file=\" + local_path])\n- except CalledProcessError:\n- pass\n- try:\n- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file\", local_path])\n- except CalledProcessError:\n- pass\n- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])\n-\n-\[email protected](cmd_name, cmd_pwntools)\n def relro_status(local_path: str) -> str:\n relro = \"No RELRO\"\n out = get_raw_out(local_path)\n@@ -37,7 +23,6 @@\n return relro\n \n \[email protected](cmd_name, cmd_pwntools)\n def pie_status(local_path: str) -> str:\n pie = \"No PIE\"\n out = get_raw_out(local_path)\n", "issue": "Port checksec command to native command and add colors\nThis is annoying IMHO. 
I want colors and no pwntools update checks here!\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom subprocess import CalledProcessError\n\nimport pwndbg.commands\nimport pwndbg.lib.cache\nimport pwndbg.wrappers\n\ncmd_name = \"checksec\"\ncmd_pwntools = [\"pwn\", \"checksec\"]\n\n\[email protected](cmd_name, cmd_pwntools)\[email protected]_until(\"objfile\")\ndef get_raw_out(local_path: str) -> str:\n try:\n return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file=\" + local_path])\n except CalledProcessError:\n pass\n try:\n return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file\", local_path])\n except CalledProcessError:\n pass\n return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])\n\n\[email protected](cmd_name, cmd_pwntools)\ndef relro_status(local_path: str) -> str:\n relro = \"No RELRO\"\n out = get_raw_out(local_path)\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\n\[email protected](cmd_name, cmd_pwntools)\ndef pie_status(local_path: str) -> str:\n pie = \"No PIE\"\n out = get_raw_out(local_path)\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}, {"content": "from __future__ import annotations\n\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.gdblib.file\nimport pwndbg.wrappers.checksec\n\n\ndef color_line(line: str) -> str:\n return pwndbg.color.normal(\n line.replace(\"*\", pwndbg.color.green(\"*\"))\n .replace(\":\", f\":{pwndbg.color.GREEN}\")\n .replace(\"No\", f\"{pwndbg.color.RED}No\")\n )\n\n\ndef color_lines(output: str) -> str:\n return \"\\n\".join(map(color_line, output.split(\"\\n\")))\n\n\[email protected](\"Prints out the binary security settings using `checksec`.\")\[email protected]\ndef checksec() -> None:\n output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())\n print(color_lines(output))\n", "path": "pwndbg/commands/checksec.py"}]} | 1,339 | 777 |
gh_patches_debug_38547 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-584 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linear kernel's variance prior is broken
Repro:
[linear_kernel_prior_issue.ipynb.txt](https://github.com/cornellius-gp/gpytorch/files/2979717/linear_kernel_prior_issue.ipynb.txt)
</issue>
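The likely culprit is `register_prior("variance_prior", variance_prior, "variance")` binding the prior to an attribute name string. A sketch of the closure-based registration, which is the direction the golden diff below takes:

```python
if variance_prior is not None:
    # bind the prior to the transformed parameter via explicit
    # getter/setter closures instead of the raw attribute name
    self.register_prior(
        "variance_prior",
        variance_prior,
        lambda: self.variance,
        lambda v: self._set_variance(v),
    )
```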
<code>
[start of gpytorch/kernels/linear_kernel.py]
1 #!/usr/bin/env python3
2
3 import torch
4 from .kernel import Kernel
5 from ..lazy import MatmulLazyTensor, RootLazyTensor
6
7
8 class LinearKernel(Kernel):
9 r"""
10 Computes a covariance matrix based on the Linear kernel
11 between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
12
13 .. math::
14 \begin{equation*}
15 k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = (\mathbf{x_1} - \mathbf{o})^\top
16 (\mathbf{x_2} - \mathbf{o}) + v.
17 \end{equation*}
18
19 where
20
21 * :math:`\mathbf o` is an :attr:`offset` parameter.
22 * :math:`v` is a :attr:`variance` parameter.
23
24
25 .. note::
26
27 To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyTensor` during training and a
28 :class:`gpytorch.lazy.MatmulLazyTensor` during test. These lazy tensors represent matrices of the form
29 :math:`K = XX^{\top}` and :math:`K = XZ^{\top}`. This makes inference
30 efficient because a matrix-vector product :math:`Kv` can be computed as
31 :math:`Kv=X(X^{\top}v)`, where the base multiply :math:`Xv` takes only
32 :math:`O(nd)` time and space.
33
34 Args:
35 :attr:`num_dimensions` (int):
36 Number of data dimensions to expect. This
37 is necessary to create the offset parameter.
38 :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):
39 Prior over the variance parameter (default `None`).
40 :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):
41 Prior over the offset parameter (default `None`).
42 :attr:`active_dims` (list):
43 List of data dimensions to operate on.
44 `len(active_dims)` should equal `num_dimensions`.
45 """
46
47 def __init__(self, num_dimensions, variance_prior=None, active_dims=None):
48 super(LinearKernel, self).__init__(active_dims=active_dims)
49 self.register_parameter(name="raw_variance", parameter=torch.nn.Parameter(torch.zeros(1)))
50 self.register_parameter(name="offset", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))
51 if variance_prior is not None:
52 self.register_prior("variance_prior", variance_prior, "variance")
53
54 @property
55 def variance(self):
56 return self._param_transform(self.raw_variance)
57
58 @variance.setter
59 def variance(self, value):
60 self._set_variance(value)
61
62 def _set_variance(self, value):
63 if not torch.is_tensor(value):
64 value = torch.tensor(value)
65 self.initialize(raw_variance=self._inv_param_transform(value))
66
67 def forward(self, x1, x2, diag=False, batch_dims=None, **params):
68 x1_ = x1 * self.variance.sqrt()
69 if batch_dims == (0, 2):
70 x1_ = x1_.view(x1_.size(0), x1_.size(1), -1, 1)
71 x1_ = x1_.permute(0, 2, 1, 3).contiguous()
72 x1_ = x1_.view(-1, x1_.size(-2), x1_.size(-1))
73
74 if x1.size() == x2.size() and torch.equal(x1, x2):
75 # Use RootLazyTensor when x1 == x2 for efficiency when composing
76 # with other kernels
77 prod = RootLazyTensor(x1_)
78
79 else:
80 x2_ = x2 * self.variance.sqrt()
81 if batch_dims == (0, 2):
82 x2_ = x2_.view(x2_.size(0), x2_.size(1), -1, 1)
83 x2_ = x2_.permute(0, 2, 1, 3).contiguous()
84 x2_ = x2_.view(-1, x2_.size(-2), x2_.size(-1))
85
86 prod = MatmulLazyTensor(x1_, x2_.transpose(2, 1))
87
88 return prod
89
[end of gpytorch/kernels/linear_kernel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/kernels/linear_kernel.py b/gpytorch/kernels/linear_kernel.py
--- a/gpytorch/kernels/linear_kernel.py
+++ b/gpytorch/kernels/linear_kernel.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import torch
+import warnings
from .kernel import Kernel
from ..lazy import MatmulLazyTensor, RootLazyTensor
@@ -12,13 +13,12 @@
.. math::
\begin{equation*}
- k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = (\mathbf{x_1} - \mathbf{o})^\top
- (\mathbf{x_2} - \mathbf{o}) + v.
+ k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = v\mathbf{x_1}^\top
+ \mathbf{x_2}.
\end{equation*}
where
- * :math:`\mathbf o` is an :attr:`offset` parameter.
* :math:`v` is a :attr:`variance` parameter.
@@ -32,24 +32,37 @@
:math:`O(nd)` time and space.
Args:
- :attr:`num_dimensions` (int):
- Number of data dimensions to expect. This
- is necessary to create the offset parameter.
:attr:`variance_prior` (:class:`gpytorch.priors.Prior`):
Prior over the variance parameter (default `None`).
- :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):
- Prior over the offset parameter (default `None`).
:attr:`active_dims` (list):
List of data dimensions to operate on.
`len(active_dims)` should equal `num_dimensions`.
"""
- def __init__(self, num_dimensions, variance_prior=None, active_dims=None):
+ def __init__(self, num_dimensions=None, offset_prior=None, variance_prior=None, active_dims=None):
super(LinearKernel, self).__init__(active_dims=active_dims)
+ if num_dimensions is not None:
+ warnings.warn(
+ "The `num_dimensions` argument is deprecated and no longer used.",
+ DeprecationWarning
+ )
+ self.register_parameter(
+ name="offset",
+ parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))
+ )
+ if offset_prior is not None:
+ warnings.warn(
+ "The `offset_prior` argument is deprecated and no longer used.",
+ DeprecationWarning
+ )
self.register_parameter(name="raw_variance", parameter=torch.nn.Parameter(torch.zeros(1)))
- self.register_parameter(name="offset", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))
if variance_prior is not None:
- self.register_prior("variance_prior", variance_prior, "variance")
+ self.register_prior(
+ "variance_prior",
+ variance_prior,
+ lambda: self.variance,
+ lambda v: self._set_variance(v)
+ )
@property
def variance(self):
| {"golden_diff": "diff --git a/gpytorch/kernels/linear_kernel.py b/gpytorch/kernels/linear_kernel.py\n--- a/gpytorch/kernels/linear_kernel.py\n+++ b/gpytorch/kernels/linear_kernel.py\n@@ -1,6 +1,7 @@\n #!/usr/bin/env python3\n \n import torch\n+import warnings\n from .kernel import Kernel\n from ..lazy import MatmulLazyTensor, RootLazyTensor\n \n@@ -12,13 +13,12 @@\n \n .. math::\n \\begin{equation*}\n- k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = (\\mathbf{x_1} - \\mathbf{o})^\\top\n- (\\mathbf{x_2} - \\mathbf{o}) + v.\n+ k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = v\\mathbf{x_1}^\\top\n+ \\mathbf{x_2}.\n \\end{equation*}\n \n where\n \n- * :math:`\\mathbf o` is an :attr:`offset` parameter.\n * :math:`v` is a :attr:`variance` parameter.\n \n \n@@ -32,24 +32,37 @@\n :math:`O(nd)` time and space.\n \n Args:\n- :attr:`num_dimensions` (int):\n- Number of data dimensions to expect. This\n- is necessary to create the offset parameter.\n :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the variance parameter (default `None`).\n- :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):\n- Prior over the offset parameter (default `None`).\n :attr:`active_dims` (list):\n List of data dimensions to operate on.\n `len(active_dims)` should equal `num_dimensions`.\n \"\"\"\n \n- def __init__(self, num_dimensions, variance_prior=None, active_dims=None):\n+ def __init__(self, num_dimensions=None, offset_prior=None, variance_prior=None, active_dims=None):\n super(LinearKernel, self).__init__(active_dims=active_dims)\n+ if num_dimensions is not None:\n+ warnings.warn(\n+ \"The `num_dimensions` argument is deprecated and no longer used.\",\n+ DeprecationWarning\n+ )\n+ self.register_parameter(\n+ name=\"offset\",\n+ parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))\n+ )\n+ if offset_prior is not None:\n+ warnings.warn(\n+ \"The `offset_prior` argument is deprecated and no longer used.\",\n+ DeprecationWarning\n+ )\n self.register_parameter(name=\"raw_variance\", parameter=torch.nn.Parameter(torch.zeros(1)))\n- self.register_parameter(name=\"offset\", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))\n if variance_prior is not None:\n- self.register_prior(\"variance_prior\", variance_prior, \"variance\")\n+ self.register_prior(\n+ \"variance_prior\",\n+ variance_prior,\n+ lambda: self.variance,\n+ lambda v: self._set_variance(v)\n+ )\n \n @property\n def variance(self):\n", "issue": "Linear kernel's variance prior is broken\nRepro: \r\n[linear_kernel_prior_issue.ipynb.txt](https://github.com/cornellius-gp/gpytorch/files/2979717/linear_kernel_prior_issue.ipynb.txt)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\nfrom .kernel import Kernel\nfrom ..lazy import MatmulLazyTensor, RootLazyTensor\n\n\nclass LinearKernel(Kernel):\n r\"\"\"\n Computes a covariance matrix based on the Linear kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n \\begin{equation*}\n k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = (\\mathbf{x_1} - \\mathbf{o})^\\top\n (\\mathbf{x_2} - \\mathbf{o}) + v.\n \\end{equation*}\n\n where\n\n * :math:`\\mathbf o` is an :attr:`offset` parameter.\n * :math:`v` is a :attr:`variance` parameter.\n\n\n .. note::\n\n To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyTensor` during training and a\n :class:`gpytorch.lazy.MatmulLazyTensor` during test. These lazy tensors represent matrices of the form\n :math:`K = XX^{\\top}` and :math:`K = XZ^{\\top}`. 
This makes inference\n efficient because a matrix-vector product :math:`Kv` can be computed as\n :math:`Kv=X(X^{\\top}v)`, where the base multiply :math:`Xv` takes only\n :math:`O(nd)` time and space.\n\n Args:\n :attr:`num_dimensions` (int):\n Number of data dimensions to expect. This\n is necessary to create the offset parameter.\n :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the variance parameter (default `None`).\n :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the offset parameter (default `None`).\n :attr:`active_dims` (list):\n List of data dimensions to operate on.\n `len(active_dims)` should equal `num_dimensions`.\n \"\"\"\n\n def __init__(self, num_dimensions, variance_prior=None, active_dims=None):\n super(LinearKernel, self).__init__(active_dims=active_dims)\n self.register_parameter(name=\"raw_variance\", parameter=torch.nn.Parameter(torch.zeros(1)))\n self.register_parameter(name=\"offset\", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))\n if variance_prior is not None:\n self.register_prior(\"variance_prior\", variance_prior, \"variance\")\n\n @property\n def variance(self):\n return self._param_transform(self.raw_variance)\n\n @variance.setter\n def variance(self, value):\n self._set_variance(value)\n\n def _set_variance(self, value):\n if not torch.is_tensor(value):\n value = torch.tensor(value)\n self.initialize(raw_variance=self._inv_param_transform(value))\n\n def forward(self, x1, x2, diag=False, batch_dims=None, **params):\n x1_ = x1 * self.variance.sqrt()\n if batch_dims == (0, 2):\n x1_ = x1_.view(x1_.size(0), x1_.size(1), -1, 1)\n x1_ = x1_.permute(0, 2, 1, 3).contiguous()\n x1_ = x1_.view(-1, x1_.size(-2), x1_.size(-1))\n\n if x1.size() == x2.size() and torch.equal(x1, x2):\n # Use RootLazyTensor when x1 == x2 for efficiency when composing\n # with other kernels\n prod = RootLazyTensor(x1_)\n\n else:\n x2_ = x2 * self.variance.sqrt()\n if batch_dims == (0, 2):\n x2_ = x2_.view(x2_.size(0), x2_.size(1), -1, 1)\n x2_ = x2_.permute(0, 2, 1, 3).contiguous()\n x2_ = x2_.view(-1, x2_.size(-2), x2_.size(-1))\n\n prod = MatmulLazyTensor(x1_, x2_.transpose(2, 1))\n\n return prod\n", "path": "gpytorch/kernels/linear_kernel.py"}]} | 1,727 | 718 |
gh_patches_debug_16953 | rasdani/github-patches | git_diff | saleor__saleor-1503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide 'Clear filters' when list view has page in GET
### What I'm trying to achieve
Clear filters button should appear only when some filters are applied.
### Steps to reproduce the problem
1. Go to dashboard or store front page with filters and pagination and move to next page (/dashboard/products/?page=2)
2. Clear filters button is present in filters card
### What I expected to happen
Clear filters button should be only present if some filters are applied.
### What happened instead/how it failed
Clear filter button is present.

</issue>
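The fix amounts to treating `page` the same way as `sort_by` when deciding whether any real filter is bound — a sketch consistent with the golden diff below, assuming `data` is the bound querystring dict:

```python
def set_is_bound_unsorted(self, data):
    # a filter is "really" applied only if some key besides
    # sort_by/page appears in the querystring
    return any(key not in {'sort_by', 'page'} for key in data.keys())
```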
<code>
[start of saleor/core/filters.py]
1 from django_filters import FilterSet
2
3
4 class SortedFilterSet(FilterSet):
5 '''
6 Base class for filtersets used in dashboard views. Adds flag
7 is_bound_unsorted to indicate if FilterSet has data from filters other
8 than sort_by.
9 '''
10 def __init__(self, data, *args, **kwargs):
11 data_copy = data.copy() if data else None
12 self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)
13 super().__init__(data, *args, **kwargs)
14
15 def set_is_bound_unsorted(self, data_copy):
16 if data_copy and data_copy.get('sort_by', None):
17 del data_copy['sort_by']
18 if data_copy:
19 return True
20 return False
21
[end of saleor/core/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/filters.py b/saleor/core/filters.py
--- a/saleor/core/filters.py
+++ b/saleor/core/filters.py
@@ -5,16 +5,11 @@
'''
Base class for filtersets used in dashboard views. Adds flag
is_bound_unsorted to indicate if FilterSet has data from filters other
- than sort_by.
+ than sort_by or page.
'''
def __init__(self, data, *args, **kwargs):
- data_copy = data.copy() if data else None
- self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)
- super().__init__(data, *args, **kwargs)
+ self.is_bound_unsorted = self.set_is_bound_unsorted(data)
+ super(SortedFilterSet, self).__init__(data, *args, **kwargs)
- def set_is_bound_unsorted(self, data_copy):
- if data_copy and data_copy.get('sort_by', None):
- del data_copy['sort_by']
- if data_copy:
- return True
- return False
+ def set_is_bound_unsorted(self, data):
+ return any([key not in {'sort_by', 'page'} for key in data.keys()])
| {"golden_diff": "diff --git a/saleor/core/filters.py b/saleor/core/filters.py\n--- a/saleor/core/filters.py\n+++ b/saleor/core/filters.py\n@@ -5,16 +5,11 @@\n '''\n Base class for filtersets used in dashboard views. Adds flag\n is_bound_unsorted to indicate if FilterSet has data from filters other\n- than sort_by.\n+ than sort_by or page.\n '''\n def __init__(self, data, *args, **kwargs):\n- data_copy = data.copy() if data else None\n- self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)\n- super().__init__(data, *args, **kwargs)\n+ self.is_bound_unsorted = self.set_is_bound_unsorted(data)\n+ super(SortedFilterSet, self).__init__(data, *args, **kwargs)\n \n- def set_is_bound_unsorted(self, data_copy):\n- if data_copy and data_copy.get('sort_by', None):\n- del data_copy['sort_by']\n- if data_copy:\n- return True\n- return False\n+ def set_is_bound_unsorted(self, data):\n+ return any([key not in {'sort_by', 'page'} for key in data.keys()])\n", "issue": "Hide 'Clear filters' when list view has page in GET\n### What I'm trying to achieve\r\n\r\nClear filters button should appear only when some filters are applied.\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Go to dashboard or store front page with filters and pagination and move to next page (/dashboard/products/?page=2)\r\n2. Clear filters button is present in filters card\r\n\r\n### What I expected to happen\r\n\r\nClear filters button should be only present if some filters are applied.\r\n\r\n### What happened instead/how it failed\r\n\r\nClear filter button is present.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django_filters import FilterSet\n\n\nclass SortedFilterSet(FilterSet):\n '''\n Base class for filtersets used in dashboard views. Adds flag\n is_bound_unsorted to indicate if FilterSet has data from filters other\n than sort_by.\n '''\n def __init__(self, data, *args, **kwargs):\n data_copy = data.copy() if data else None\n self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)\n super().__init__(data, *args, **kwargs)\n\n def set_is_bound_unsorted(self, data_copy):\n if data_copy and data_copy.get('sort_by', None):\n del data_copy['sort_by']\n if data_copy:\n return True\n return False\n", "path": "saleor/core/filters.py"}]} | 927 | 283 |
gh_patches_debug_54080 | rasdani/github-patches | git_diff | e-valuation__EvaP-728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warning in courses with small number of participants
In courses with 5 or less participants a warning should be shown above the course's questionnaire:
_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._
</issue>
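On the implementation side this is mostly a template flag passed from the view; a sketch of the view-side change (the golden diff below does exactly this inside `vote()`, and it assumes `Course.num_participants` is available):

```python
template_data = dict(
    errors_exist=errors_exist,
    course_form_group=course_form_group,
    contributor_form_groups=contributor_form_groups,
    course=course,
    # show the privacy notice when the audience is small
    participants_warning=course.num_participants <= 5,
    preview=False)
return render(request, "student_vote.html", template_data)
```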
<code>
[start of evap/student/views.py]
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect, render
5 from django.utils.translation import ugettext as _
6
7 from evap.evaluation.auth import participant_required
8 from evap.evaluation.models import Course, Semester
9 from evap.evaluation.tools import STUDENT_STATES_ORDERED
10
11 from evap.student.forms import QuestionsForm
12 from evap.student.tools import make_form_identifier
13
14 from collections import OrderedDict
15
16 @participant_required
17 def index(request):
18 # retrieve all courses, where the user is a participant and that are not new
19 courses = list(set(Course.objects.filter(participants=request.user).exclude(state="new")))
20 voted_courses = list(set(Course.objects.filter(voters=request.user)))
21 due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))
22
23 sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)
24 courses.sort(key=sorter)
25
26 semesters = Semester.objects.all()
27 semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]
28
29 template_data = dict(
30 semester_list=semester_list,
31 voted_courses=voted_courses,
32 due_courses=due_courses,
33 can_download_grades=request.user.can_download_grades,
34 )
35 return render(request, "student_index.html", template_data)
36
37
38 def vote_preview(request, course):
39 """
40 Renders a preview of the voting page for the given course.
41 Not used by the student app itself, but by staff and contributor.
42 """
43 form_groups = helper_create_voting_form_groups(request, course.contributions.all())
44 course_form_group = form_groups.pop(course.general_contribution)
45 contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())
46
47 template_data = dict(
48 errors_exist=False,
49 course_form_group=course_form_group,
50 contributor_form_groups=contributor_form_groups,
51 course=course,
52 preview=True)
53 return render(request, "student_vote.html", template_data)
54
55
56 @participant_required
57 def vote(request, course_id):
58 # retrieve course and make sure that the user is allowed to vote
59 course = get_object_or_404(Course, id=course_id)
60 if not course.can_user_vote(request.user):
61 raise PermissionDenied
62
63 # prevent a user from voting on themselves.
64 contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()
65 form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)
66
67 if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):
68 errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())
69
70 course_form_group = form_groups.pop(course.general_contribution)
71
72 contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())
73
74 template_data = dict(
75 errors_exist=errors_exist,
76 course_form_group=course_form_group,
77 contributor_form_groups=contributor_form_groups,
78 course=course,
79 preview=False)
80 return render(request, "student_vote.html", template_data)
81
82 # all forms are valid, begin vote operation
83 with transaction.atomic():
84 for contribution, form_group in form_groups.items():
85 for questionnaire_form in form_group:
86 questionnaire = questionnaire_form.questionnaire
87 for question in questionnaire.question_set.all():
88 identifier = make_form_identifier(contribution, questionnaire, question)
89 value = questionnaire_form.cleaned_data.get(identifier)
90
91 if question.is_text_question:
92 if value:
93 question.answer_class.objects.create(
94 contribution=contribution,
95 question=question,
96 answer=value)
97 else:
98 if value != 6:
99 answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)
100 answer_counter.add_vote()
101 answer_counter.save()
102
103 # remember that the user voted already
104 course.voters.add(request.user)
105
106 course.was_evaluated(request)
107
108 messages.success(request, _("Your vote was recorded."))
109 return redirect('student:index')
110
111
112 def helper_create_form_group(request, contribution):
113 return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())
114
115 def helper_create_voting_form_groups(request, contributions):
116 form_groups = OrderedDict()
117 for contribution in contributions:
118 form_groups[contribution] = helper_create_form_group(request, contribution)
119 return form_groups
120
121 def helper_has_errors(form_group):
122 return any(form.errors for form in form_group)
123
[end of evap/student/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/student/views.py b/evap/student/views.py
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -76,6 +76,7 @@
course_form_group=course_form_group,
contributor_form_groups=contributor_form_groups,
course=course,
+ participants_warning=course.num_participants <= 5,
preview=False)
return render(request, "student_vote.html", template_data)
| {"golden_diff": "diff --git a/evap/student/views.py b/evap/student/views.py\n--- a/evap/student/views.py\n+++ b/evap/student/views.py\n@@ -76,6 +76,7 @@\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n+ participants_warning=course.num_participants <= 5,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n", "issue": "Warning in courses with small number of participants\nIn courses with 5 or less participants a warning should be shown above the course's questionnaire:\n\n_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = 
course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote operation\n with transaction.atomic():\n for contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n", "path": "evap/student/views.py"}]} | 1,927 | 103 |
gh_patches_debug_16108 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1620 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E3503 does not match CloudFormation - requires ValidationDomain when CF does not want it
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.33.2
*Description of issue.*
I created an ACM certificate resource, and there were problems configuring the `DomainValidationOptions` block. If using DNS validation, the only properties needed are `DomainName` and `HostedZoneId`. However, cfn-lint was demanding a third property named `ValidationDomain`. When submitting the stack for deployment to CF, it triggered an immediate rollback because CF views `HostedZoneId` and `ValidationDomain` as mutually exclusive.
Adding an ignore rule to skip the E3503 error allowed me to proceed without issues. This rule should be adjusted to match what CF enforces.
**Sample:**
```yaml
Resources:
Certificate:
Type: AWS::CertificateManager::Certificate
Metadata:
cfn-lint:
config:
ignore_checks:
- E3503
Properties:
DomainName: "*.aws.domain.com"
ValidationMethod: DNS
DomainValidationOptions:
- DomainName: aws.domain.com
HostedZoneId: !ImportValue SubdomainHostedZoneId
```
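
For context on why the rule fires, a missing `ValidationDomain` falls back to the empty string in the rule's comparison (see `check_value` in the code below), so the superdomain test can never pass for a DNS-validated certificate. A minimal standalone repro of that logic, with a hypothetical hosted zone id:

```python
# Mirrors the rule's lookups: an absent ValidationDomain becomes ''.
properties = {"DomainName": "aws.domain.com", "HostedZoneId": "Z0000000000"}

domain_name = properties.get("DomainName", "")
validation_domain = properties.get("ValidationDomain", "")  # '' although the key is absent

print(domain_name == validation_domain)               # False, so no early `continue`
print(domain_name.endswith("." + validation_domain))  # False -> E3503 is reported
```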
</issue>
<code>
[start of src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8
9
10 class DomainValidationOptions(CloudFormationLintRule):
11 """Check if a certificate's domain validation options are set up correctly"""
12 id = 'E3503'
13 shortdesc = 'ValidationDomain is superdomain of DomainName'
14 description = 'In ValidationDomainOptions, the ValidationDomain must be a superdomain of the DomainName being validated'
15 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-certificatemanager-certificate-domainvalidationoption.html#cfn-certificatemanager-certificate-domainvalidationoption-validationdomain'
16 tags = ['certificate', 'certificatemanager', 'domainvalidationoptions', 'validationdomain']
17
18 def __init__(self):
19 """ Init """
20 super(DomainValidationOptions, self).__init__()
21 self.resource_property_types = ['AWS::CertificateManager::Certificate']
22
23 def check_value(self, value, path, **kwargs):
24 """ Check value inside the list of DomainValidationOptions"""
25 matches = []
26 cfn = kwargs.get('cfn')
27 if isinstance(value, dict):
28 property_sets = cfn.get_object_without_conditions(value)
29 for property_set in property_sets:
30 properties = property_set.get('Object')
31 scenario = property_set.get('Scenario')
32 domain_name = properties.get('DomainName', '')
33 validation_domain = properties.get('ValidationDomain', '')
34 if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):
35 if domain_name == validation_domain:
36 continue
37
38 if not domain_name.endswith('.' + validation_domain):
39 message = 'ValidationDomain must be a superdomain of DomainName at {}'
40 if scenario is None:
41 matches.append(
42 RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)))))
43 else:
44 scenario_text = ' and '.join(
45 ['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
46 matches.append(
47 RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))
48 return matches
49
50 def match_resource_properties(self, properties, _, path, cfn):
51 matches = []
52 matches.extend(cfn.check_value(
53 properties, 'DomainValidationOptions', path[:],
54 check_value=self.check_value,
55 cfn=cfn,
56 ))
57
58 return matches
59
[end of src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py
--- a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py
+++ b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py
@@ -29,8 +29,8 @@
for property_set in property_sets:
properties = property_set.get('Object')
scenario = property_set.get('Scenario')
- domain_name = properties.get('DomainName', '')
- validation_domain = properties.get('ValidationDomain', '')
+ domain_name = properties.get('DomainName', None)
+ validation_domain = properties.get('ValidationDomain', None)
if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):
if domain_name == validation_domain:
continue
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py\n--- a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py\n+++ b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py\n@@ -29,8 +29,8 @@\n for property_set in property_sets:\n properties = property_set.get('Object')\n scenario = property_set.get('Scenario')\n- domain_name = properties.get('DomainName', '')\n- validation_domain = properties.get('ValidationDomain', '')\n+ domain_name = properties.get('DomainName', None)\n+ validation_domain = properties.get('ValidationDomain', None)\n if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):\n if domain_name == validation_domain:\n continue\n", "issue": "E3503 does not match CloudFormation - requires ValidationDomain when CF does not want it\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\ncfn-lint 0.33.2\r\n\r\n*Description of issue.*\r\n\r\nI created an ACM certificate resource, and there were problems configuring the `DomainValidationOptions` block. If using DNS validation, the only properties needed are `DomainName` and `HostedZoneId`. However, cfn-lint was demanding a third property named `ValidationDomain`. When submitting the stack for deployment to CF, it triggered an immediate rollback because CF views `HostedZoneId` and `ValidationDomain` as mutually exclusive.\r\n\r\nAdding an ignore rule to skip the E3503 error allowed me to proceed without issues. This rule should be adjusted to match what CF enforces.\r\n\r\n**Sample:**\r\n\r\n```yaml\r\nResources:\r\n Certificate:\r\n Type: AWS::CertificateManager::Certificate\r\n Metadata:\r\n cfn-lint:\r\n config:\r\n ignore_checks:\r\n - E3503\r\n Properties:\r\n DomainName: \"*.aws.domain.com\"\r\n ValidationMethod: DNS\r\n DomainValidationOptions:\r\n - DomainName: aws.domain.com\r\n HostedZoneId: !ImportValue SubdomainHostedZoneId\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass DomainValidationOptions(CloudFormationLintRule):\n \"\"\"Check if a certificate's domain validation options are set up correctly\"\"\"\n id = 'E3503'\n shortdesc = 'ValidationDomain is superdomain of DomainName'\n description = 'In ValidationDomainOptions, the ValidationDomain must be a superdomain of the DomainName being validated'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-certificatemanager-certificate-domainvalidationoption.html#cfn-certificatemanager-certificate-domainvalidationoption-validationdomain'\n tags = ['certificate', 'certificatemanager', 'domainvalidationoptions', 'validationdomain']\n\n def __init__(self):\n \"\"\" Init \"\"\"\n super(DomainValidationOptions, self).__init__()\n self.resource_property_types = ['AWS::CertificateManager::Certificate']\n\n def check_value(self, value, path, **kwargs):\n \"\"\" Check value inside the list of DomainValidationOptions\"\"\"\n matches = []\n cfn = kwargs.get('cfn')\n if isinstance(value, dict):\n property_sets = cfn.get_object_without_conditions(value)\n for property_set in property_sets:\n properties = property_set.get('Object')\n scenario = property_set.get('Scenario')\n domain_name = properties.get('DomainName', '')\n validation_domain = properties.get('ValidationDomain', '')\n if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):\n if domain_name == validation_domain:\n continue\n\n if not domain_name.endswith('.' + validation_domain):\n message = 'ValidationDomain must be a superdomain of DomainName at {}'\n if scenario is None:\n matches.append(\n RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)))))\n else:\n scenario_text = ' and '.join(\n ['when condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n matches.append(\n RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n matches = []\n matches.extend(cfn.check_value(\n properties, 'DomainValidationOptions', path[:],\n check_value=self.check_value,\n cfn=cfn,\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py"}]} | 1,493 | 195 |
gh_patches_debug_18653 | rasdani/github-patches | git_diff | frappe__frappe-26417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(List View): In sidebar filter link field show Title of Field if set
**Is your feature request related to a problem? Please describe.**
For a Link field, the sidebar filter shows `field.name` but not `field.title`.
**Describe the solution you'd like**
If the link field's target doctype has a title field, show the title instead of the name.

</issue>
<code>
[start of frappe/desk/listview.py]
1 # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
2 # License: MIT. See LICENSE
3
4 import frappe
5 from frappe.model import is_default_field
6 from frappe.query_builder import Order
7 from frappe.query_builder.functions import Count
8 from frappe.query_builder.terms import SubQuery
9 from frappe.query_builder.utils import DocType
10
11
12 @frappe.whitelist()
13 def get_list_settings(doctype):
14 try:
15 return frappe.get_cached_doc("List View Settings", doctype)
16 except frappe.DoesNotExistError:
17 frappe.clear_messages()
18
19
20 @frappe.whitelist()
21 def set_list_settings(doctype, values):
22 try:
23 doc = frappe.get_doc("List View Settings", doctype)
24 except frappe.DoesNotExistError:
25 doc = frappe.new_doc("List View Settings")
26 doc.name = doctype
27 frappe.clear_messages()
28 doc.update(frappe.parse_json(values))
29 doc.save()
30
31
32 @frappe.whitelist()
33 def get_group_by_count(doctype: str, current_filters: str, field: str) -> list[dict]:
34 current_filters = frappe.parse_json(current_filters)
35
36 if field == "assigned_to":
37 ToDo = DocType("ToDo")
38 User = DocType("User")
39 count = Count("*").as_("count")
40 filtered_records = frappe.qb.get_query(
41 doctype,
42 filters=current_filters,
43 fields=["name"],
44 validate_filters=True,
45 )
46
47 return (
48 frappe.qb.from_(ToDo)
49 .from_(User)
50 .select(ToDo.allocated_to.as_("name"), count)
51 .where(
52 (ToDo.status != "Cancelled")
53 & (ToDo.allocated_to == User.name)
54 & (User.user_type == "System User")
55 & (ToDo.reference_name.isin(SubQuery(filtered_records)))
56 )
57 .groupby(ToDo.allocated_to)
58 .orderby(count, order=Order.desc)
59 .limit(50)
60 .run(as_dict=True)
61 )
62
63 if not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):
64 raise ValueError("Field does not belong to doctype")
65
66 return frappe.get_list(
67 doctype,
68 filters=current_filters,
69 group_by=f"`tab{doctype}`.{field}",
70 fields=["count(*) as count", f"`{field}` as name"],
71 order_by="count desc",
72 limit=50,
73 )
74
[end of frappe/desk/listview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/desk/listview.py b/frappe/desk/listview.py
--- a/frappe/desk/listview.py
+++ b/frappe/desk/listview.py
@@ -60,10 +60,12 @@
.run(as_dict=True)
)
- if not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):
+ meta = frappe.get_meta(doctype)
+
+ if not meta.has_field(field) and not is_default_field(field):
raise ValueError("Field does not belong to doctype")
- return frappe.get_list(
+ data = frappe.get_list(
doctype,
filters=current_filters,
group_by=f"`tab{doctype}`.{field}",
@@ -71,3 +73,13 @@
order_by="count desc",
limit=50,
)
+
+ # Add in title if it's a link field and `show_title_field_in_link` is set
+ if (field_meta := meta.get_field(field)) and field_meta.fieldtype == "Link":
+ link_meta = frappe.get_meta(field_meta.options)
+ if link_meta.show_title_field_in_link:
+ title_field = link_meta.get_title_field()
+ for item in data:
+ item.title = frappe.get_value(field_meta.options, item.name, title_field)
+
+ return data
| {"golden_diff": "diff --git a/frappe/desk/listview.py b/frappe/desk/listview.py\n--- a/frappe/desk/listview.py\n+++ b/frappe/desk/listview.py\n@@ -60,10 +60,12 @@\n \t\t\t.run(as_dict=True)\n \t\t)\n \n-\tif not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):\n+\tmeta = frappe.get_meta(doctype)\n+\n+\tif not meta.has_field(field) and not is_default_field(field):\n \t\traise ValueError(\"Field does not belong to doctype\")\n \n-\treturn frappe.get_list(\n+\tdata = frappe.get_list(\n \t\tdoctype,\n \t\tfilters=current_filters,\n \t\tgroup_by=f\"`tab{doctype}`.{field}\",\n@@ -71,3 +73,13 @@\n \t\torder_by=\"count desc\",\n \t\tlimit=50,\n \t)\n+\n+\t# Add in title if it's a link field and `show_title_field_in_link` is set\n+\tif (field_meta := meta.get_field(field)) and field_meta.fieldtype == \"Link\":\n+\t\tlink_meta = frappe.get_meta(field_meta.options)\n+\t\tif link_meta.show_title_field_in_link:\n+\t\t\ttitle_field = link_meta.get_title_field()\n+\t\t\tfor item in data:\n+\t\t\t\titem.title = frappe.get_value(field_meta.options, item.name, title_field)\n+\n+\treturn data\n", "issue": "(List View): In sidebar filter link field show Title of Field if set\n**Is your feature request related to a problem? Please describe.**\r\nIn sidebar filter link field show field.name but not field.title\r\n\r\n**Describe the solution you'd like**\r\nif link field has title field - show title.\r\n\n", "before_files": [{"content": "# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.model import is_default_field\nfrom frappe.query_builder import Order\nfrom frappe.query_builder.functions import Count\nfrom frappe.query_builder.terms import SubQuery\nfrom frappe.query_builder.utils import DocType\n\n\[email protected]()\ndef get_list_settings(doctype):\n\ttry:\n\t\treturn frappe.get_cached_doc(\"List View Settings\", doctype)\n\texcept frappe.DoesNotExistError:\n\t\tfrappe.clear_messages()\n\n\[email protected]()\ndef set_list_settings(doctype, values):\n\ttry:\n\t\tdoc = frappe.get_doc(\"List View Settings\", doctype)\n\texcept frappe.DoesNotExistError:\n\t\tdoc = frappe.new_doc(\"List View Settings\")\n\t\tdoc.name = doctype\n\t\tfrappe.clear_messages()\n\tdoc.update(frappe.parse_json(values))\n\tdoc.save()\n\n\[email protected]()\ndef get_group_by_count(doctype: str, current_filters: str, field: str) -> list[dict]:\n\tcurrent_filters = frappe.parse_json(current_filters)\n\n\tif field == \"assigned_to\":\n\t\tToDo = DocType(\"ToDo\")\n\t\tUser = DocType(\"User\")\n\t\tcount = Count(\"*\").as_(\"count\")\n\t\tfiltered_records = frappe.qb.get_query(\n\t\t\tdoctype,\n\t\t\tfilters=current_filters,\n\t\t\tfields=[\"name\"],\n\t\t\tvalidate_filters=True,\n\t\t)\n\n\t\treturn (\n\t\t\tfrappe.qb.from_(ToDo)\n\t\t\t.from_(User)\n\t\t\t.select(ToDo.allocated_to.as_(\"name\"), count)\n\t\t\t.where(\n\t\t\t\t(ToDo.status != \"Cancelled\")\n\t\t\t\t& (ToDo.allocated_to == User.name)\n\t\t\t\t& (User.user_type == \"System User\")\n\t\t\t\t& (ToDo.reference_name.isin(SubQuery(filtered_records)))\n\t\t\t)\n\t\t\t.groupby(ToDo.allocated_to)\n\t\t\t.orderby(count, order=Order.desc)\n\t\t\t.limit(50)\n\t\t\t.run(as_dict=True)\n\t\t)\n\n\tif not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):\n\t\traise ValueError(\"Field does not belong to doctype\")\n\n\treturn frappe.get_list(\n\t\tdoctype,\n\t\tfilters=current_filters,\n\t\tgroup_by=f\"`tab{doctype}`.{field}\",\n\t\tfields=[\"count(*) as 
count\", f\"`{field}` as name\"],\n\t\torder_by=\"count desc\",\n\t\tlimit=50,\n\t)\n", "path": "frappe/desk/listview.py"}]} | 1,339 | 303 |
gh_patches_debug_9424 | rasdani/github-patches | git_diff | napari__napari-6057 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ipython] File > Save Screenshot... warns about replacing if extension is provided
## 🐛 Bug
If a napari viewer is launched from ipython and you use File > Save Screenshot... then everything works fine if you just enter a file name, **but if you specify the extension**—out of habit or to change the file format—then napari will warn that the file already exists, even if it does not.
Regardless of choice, the file is created and everything works.
Looking carefully, when one clicks Save the screen flashes, the file appears in the list in the dialog, and the warning pops up while the dialog remains open.
It looks as if the file is created once and then the dialog tries a second time when it goes to close.
Edit:
On the other hand, *if you pass a filename that exists,* **but without extension**, it will correctly warn that the file exists, but if you say `no` to overwriting, the dialog closes with: `WARNING: QDialog::exec: Recursive call detected`
If you *do pass a full name with extension*, it will correctly warn that the file exists, and saying `no` will return you to the dialog to modify the name—as expected.
## To Reproduce
Steps to reproduce the behavior:
1. launch ipython and use
```
import napari
viewer = napari.Viewer()
```
2. open any sample image or add any layer
3. File > Save Screenshot...
4. enter a file name **with an extension**, but make sure it's a unique name.
## Expected behavior
No warning should be raised if the file doesn't exist; the dialog should simply close and the file should be created.
If the warning is raised and the user clicks `no` to not overwrite, the dialog should remain open so the user can change the name.
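
For illustration, a sketch of an `accept()` control flow that matches this expectation (based on the `ScreenshotDialog` listing below; `_confirm_overwrite` is an assumed helper wrapping the existing `QMessageBox.warning` prompt):

```python
import os

from qtpy.QtWidgets import QFileDialog

def accept(self):  # hypothetical replacement body for ScreenshotDialog.accept
    save_path = self.selectedFiles()[0]
    if os.path.splitext(save_path)[1] == "":
        save_path += ".png"
    if os.path.exists(save_path) and not self._confirm_overwrite(save_path):
        return  # no re-entrant exec_(); the dialog simply stays open
    QFileDialog.accept(self)
    if self.result():  # only save once the dialog was actually accepted
        self.save_function(save_path)
```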
## Environment
```
napari: 0.4.18
Platform: macOS-13.4.1-arm64-arm-64bit
System: MacOS 13.4.1
Python: 3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:41:52) [Clang 15.0.7 ]
Qt: 5.15.6
PyQt5: 5.15.7
NumPy: 1.25.1
SciPy: 1.11.1
Dask: 2023.7.0
VisPy: 0.12.2
magicgui: 0.7.2
superqt: unknown
in-n-out: 0.1.8
app-model: 0.2.0
npe2: 0.7.0
OpenGL:
- GL version: 2.1 Metal - 83.1
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1800x1169, scale 2.0
Settings path:
- /Users/sobolp/Library/Application Support/napari/napari-418_d279d6cf5d4193876ed97858e757fe322037331b/settings.yaml
```
## Additional context
Other dialogs, like Save Selected Layer work fine.
Using the native dialog (when running napari from the command line) does not have this issue.
</issue>
<code>
[start of napari/_qt/dialogs/screenshot_dialog.py]
1 import os
2 from pathlib import Path
3 from typing import Any, Callable
4
5 from qtpy.QtWidgets import QFileDialog, QMessageBox
6
7 from napari.utils.misc import in_ipython
8 from napari.utils.translations import trans
9
10 HOME_DIRECTORY = str(Path.home())
11
12
13 class ScreenshotDialog(QFileDialog):
14 """
15 Dialog to chose save location of screenshot.
16
17 Parameters
18 ----------
19 save_function : Callable[[str], Any],
20 Function to be called on success of selecting save location
21 parent : QWidget, optional
22 Optional parent widget for this widget..
23 directory : str, optional
24 Starting directory to be set to File Dialog
25
26 """
27
28 def __init__(
29 self,
30 save_function: Callable[[str], Any],
31 parent=None,
32 directory=HOME_DIRECTORY,
33 history=None,
34 ) -> None:
35 super().__init__(parent, trans._("Save screenshot"))
36 self.setAcceptMode(QFileDialog.AcceptSave)
37 self.setFileMode(QFileDialog.AnyFile)
38 self.setNameFilter(
39 trans._("Image files (*.png *.bmp *.gif *.tif *.tiff)")
40 )
41 self.setDirectory(directory)
42 self.setHistory(history)
43
44 if in_ipython():
45 self.setOptions(QFileDialog.DontUseNativeDialog)
46
47 self.save_function = save_function
48
49 def accept(self):
50 save_path = self.selectedFiles()[0]
51 if os.path.splitext(save_path)[1] == "":
52 save_path = save_path + ".png"
53 if os.path.exists(save_path):
54 res = QMessageBox().warning(
55 self,
56 trans._("Confirm overwrite"),
57 trans._(
58 "{save_path} already exists. Do you want to replace it?",
59 save_path=save_path,
60 ),
61 QMessageBox.Yes | QMessageBox.No,
62 QMessageBox.No,
63 )
64 if res != QMessageBox.Yes:
65 # standard accept return 1, reject 0. This inform that dialog should be reopened
66 super().accept()
67 self.exec_()
68 self.save_function(save_path)
69 return super().accept()
70
[end of napari/_qt/dialogs/screenshot_dialog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/_qt/dialogs/screenshot_dialog.py b/napari/_qt/dialogs/screenshot_dialog.py
--- a/napari/_qt/dialogs/screenshot_dialog.py
+++ b/napari/_qt/dialogs/screenshot_dialog.py
@@ -62,8 +62,9 @@
QMessageBox.No,
)
if res != QMessageBox.Yes:
- # standard accept return 1, reject 0. This inform that dialog should be reopened
- super().accept()
- self.exec_()
- self.save_function(save_path)
- return super().accept()
+ # return in this case since a valid name for the
+ # file is needed so the dialog needs to be visible
+ return
+ super().accept()
+ if self.result():
+ self.save_function(save_path)
| {"golden_diff": "diff --git a/napari/_qt/dialogs/screenshot_dialog.py b/napari/_qt/dialogs/screenshot_dialog.py\n--- a/napari/_qt/dialogs/screenshot_dialog.py\n+++ b/napari/_qt/dialogs/screenshot_dialog.py\n@@ -62,8 +62,9 @@\n QMessageBox.No,\n )\n if res != QMessageBox.Yes:\n- # standard accept return 1, reject 0. This inform that dialog should be reopened\n- super().accept()\n- self.exec_()\n- self.save_function(save_path)\n- return super().accept()\n+ # return in this case since a valid name for the\n+ # file is needed so the dialog needs to be visible\n+ return\n+ super().accept()\n+ if self.result():\n+ self.save_function(save_path)\n", "issue": "[ipython] File > Save Screenshot... warns about replacing if extension is provided\n## \ud83d\udc1b Bug\r\n\r\nIf a napari viewer is launched from ipython and you use File > Save Screenshot... then everything works fine if you just enter a file name, **but if you specify the extension**\u2014out of habit or to change the file format\u2014then napari will warn that the file already exists, even if it does not.\r\nRegardless of choice, the file is created and everything works.\r\n\r\nLooking carefully, when one clicks Save the screen flashes, the file appears in the list in the dialog, and the warning pops up--the dialog remains up.\r\nIt's like the file is created and then it tries a second time when it goes to close the dialog.\r\n\r\nEdit:\r\nOn the other hand, *if you pass a filename that exists,* **but without extension**, it will correctly warn that the file exists, but if say `no` to overwriting, the dialog closes with: `WARNING: QDialog::exec: Recursive call detected`\r\nIf you *do pass a full name with extension*, it will correctly warn that the file exists, and saying `no` will return you to the dialog to modify the name\u2014as expected.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. launch ipython and use \r\n```\r\nimport napari\r\nviewer = napari.Viewer()\r\n```\r\n3. open any sample image or add any layer\r\n4. File > Save Screenshot...\r\n5. 
enter a file name **with an extension** but make sure it's a unique name.\r\n\r\n## Expected behavior\r\n\r\nNo warning should be raised if the file doesn't exist, the dialog should just disappear and the file should be created.\r\nIf the warning is raised and the user clicks `no` to not overwrite, the dialog should remain open so the user can change the name.\r\n\r\n## Environment\r\n\r\n```\r\nnapari: 0.4.18\r\nPlatform: macOS-13.4.1-arm64-arm-64bit\r\nSystem: MacOS 13.4.1\r\nPython: 3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:41:52) [Clang 15.0.7 ]\r\nQt: 5.15.6\r\nPyQt5: 5.15.7\r\nNumPy: 1.25.1\r\nSciPy: 1.11.1\r\nDask: 2023.7.0\r\nVisPy: 0.12.2\r\nmagicgui: 0.7.2\r\nsuperqt: unknown\r\nin-n-out: 0.1.8\r\napp-model: 0.2.0\r\nnpe2: 0.7.0\r\n\r\nOpenGL:\r\n- GL version: 2.1 Metal - 83.1\r\n- MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n- screen 1: resolution 1800x1169, scale 2.0\r\n\r\nSettings path:\r\n- /Users/sobolp/Library/Application Support/napari/napari-418_d279d6cf5d4193876ed97858e757fe322037331b/settings.yaml\r\n\r\n```\r\n\r\n## Additional context\r\n\r\nOther dialogs, like Save Selected Layer work fine.\r\nUsing the native dialog (when running napari from the command line) does not have this issue.\n", "before_files": [{"content": "import os\nfrom pathlib import Path\nfrom typing import Any, Callable\n\nfrom qtpy.QtWidgets import QFileDialog, QMessageBox\n\nfrom napari.utils.misc import in_ipython\nfrom napari.utils.translations import trans\n\nHOME_DIRECTORY = str(Path.home())\n\n\nclass ScreenshotDialog(QFileDialog):\n \"\"\"\n Dialog to chose save location of screenshot.\n\n Parameters\n ----------\n save_function : Callable[[str], Any],\n Function to be called on success of selecting save location\n parent : QWidget, optional\n Optional parent widget for this widget..\n directory : str, optional\n Starting directory to be set to File Dialog\n\n \"\"\"\n\n def __init__(\n self,\n save_function: Callable[[str], Any],\n parent=None,\n directory=HOME_DIRECTORY,\n history=None,\n ) -> None:\n super().__init__(parent, trans._(\"Save screenshot\"))\n self.setAcceptMode(QFileDialog.AcceptSave)\n self.setFileMode(QFileDialog.AnyFile)\n self.setNameFilter(\n trans._(\"Image files (*.png *.bmp *.gif *.tif *.tiff)\")\n )\n self.setDirectory(directory)\n self.setHistory(history)\n\n if in_ipython():\n self.setOptions(QFileDialog.DontUseNativeDialog)\n\n self.save_function = save_function\n\n def accept(self):\n save_path = self.selectedFiles()[0]\n if os.path.splitext(save_path)[1] == \"\":\n save_path = save_path + \".png\"\n if os.path.exists(save_path):\n res = QMessageBox().warning(\n self,\n trans._(\"Confirm overwrite\"),\n trans._(\n \"{save_path} already exists. Do you want to replace it?\",\n save_path=save_path,\n ),\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No,\n )\n if res != QMessageBox.Yes:\n # standard accept return 1, reject 0. This inform that dialog should be reopened\n super().accept()\n self.exec_()\n self.save_function(save_path)\n return super().accept()\n", "path": "napari/_qt/dialogs/screenshot_dialog.py"}]} | 1,853 | 181 |
gh_patches_debug_14823 | rasdani/github-patches | git_diff | kornia__kornia-2620 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
expose `average_endpoint_error` on `kornia.metrics.__init__.py`
Can you expose it in `kornia/metrics/__init__.py`, so it can also be used as `kornia.metrics.average_endpoint_error`?
_Originally posted by @johnnv1 in https://github.com/kornia/kornia/pull/2615#discussion_r1351007042_
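
For reference, the intended call once the symbol is re-exported; tensor shapes here are assumed (`(B, 2, H, W)` flow maps):

```python
import torch
import kornia

pred = torch.rand(1, 2, 4, 4)    # assumed predicted flow
target = torch.rand(1, 2, 4, 4)  # assumed ground-truth flow
err = kornia.metrics.average_endpoint_error(pred, target)  # works only after the re-export
```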
</issue>
<code>
[start of kornia/metrics/__init__.py]
1 from .accuracy import accuracy
2 from .average_meter import AverageMeter
3 from .confusion_matrix import confusion_matrix
4 from .endpoint_error import AEPE, aepe
5 from .mean_average_precision import mean_average_precision
6 from .mean_iou import mean_iou, mean_iou_bbox
7 from .psnr import psnr
8 from .ssim import SSIM, ssim
9 from .ssim3d import SSIM3D, ssim3d
10
11 __all__ = [
12 "accuracy",
13 "AverageMeter",
14 "confusion_matrix",
15 "aepe",
16 "AEPE",
17 "mean_iou",
18 "mean_iou_bbox",
19 "mean_average_precision",
20 "psnr",
21 "ssim",
22 "ssim3d",
23 "SSIM",
24 "SSIM3D",
25 ]
26
[end of kornia/metrics/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/metrics/__init__.py b/kornia/metrics/__init__.py
--- a/kornia/metrics/__init__.py
+++ b/kornia/metrics/__init__.py
@@ -1,7 +1,7 @@
from .accuracy import accuracy
from .average_meter import AverageMeter
from .confusion_matrix import confusion_matrix
-from .endpoint_error import AEPE, aepe
+from .endpoint_error import AEPE, aepe, average_endpoint_error
from .mean_average_precision import mean_average_precision
from .mean_iou import mean_iou, mean_iou_bbox
from .psnr import psnr
@@ -14,6 +14,7 @@
"confusion_matrix",
"aepe",
"AEPE",
+ "average_endpoint_error",
"mean_iou",
"mean_iou_bbox",
"mean_average_precision",
| {"golden_diff": "diff --git a/kornia/metrics/__init__.py b/kornia/metrics/__init__.py\n--- a/kornia/metrics/__init__.py\n+++ b/kornia/metrics/__init__.py\n@@ -1,7 +1,7 @@\n from .accuracy import accuracy\n from .average_meter import AverageMeter\n from .confusion_matrix import confusion_matrix\n-from .endpoint_error import AEPE, aepe\n+from .endpoint_error import AEPE, aepe, average_endpoint_error\n from .mean_average_precision import mean_average_precision\n from .mean_iou import mean_iou, mean_iou_bbox\n from .psnr import psnr\n@@ -14,6 +14,7 @@\n \"confusion_matrix\",\n \"aepe\",\n \"AEPE\",\n+ \"average_endpoint_error\",\n \"mean_iou\",\n \"mean_iou_bbox\",\n \"mean_average_precision\",\n", "issue": "expose `average_endpoint_error` on `kornia.metrics.__init__.py`\n can you expose it on kornia.metrics.__init__.py ? - to use as `kornia.metrics.average_endpoint_error` too\r\n\r\n_Originally posted by @johnnv1 in https://github.com/kornia/kornia/pull/2615#discussion_r1351007042_\r\n \n", "before_files": [{"content": "from .accuracy import accuracy\nfrom .average_meter import AverageMeter\nfrom .confusion_matrix import confusion_matrix\nfrom .endpoint_error import AEPE, aepe\nfrom .mean_average_precision import mean_average_precision\nfrom .mean_iou import mean_iou, mean_iou_bbox\nfrom .psnr import psnr\nfrom .ssim import SSIM, ssim\nfrom .ssim3d import SSIM3D, ssim3d\n\n__all__ = [\n \"accuracy\",\n \"AverageMeter\",\n \"confusion_matrix\",\n \"aepe\",\n \"AEPE\",\n \"mean_iou\",\n \"mean_iou_bbox\",\n \"mean_average_precision\",\n \"psnr\",\n \"ssim\",\n \"ssim3d\",\n \"SSIM\",\n \"SSIM3D\",\n]\n", "path": "kornia/metrics/__init__.py"}]} | 851 | 196 |
gh_patches_debug_26242 | rasdani/github-patches | git_diff | bokeh__bokeh-4929 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkbox example is not working as expected
The new checkbox example in master (examples/plotting/file/line_on_off.py) is not working as expected: which lines are plotted depends on how many checkboxes are ticked, not on which ones. The reason is that the JS code checks for the existence of the index rather than for the values: in JavaScript, `0 in checkbox.active` tests whether *index* 0 exists on the array, so it is true whenever at least one box is ticked.

I have a fix for this with a PR coming, and am opening this only as the associated issue.
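
For illustration, one value-based alternative is `Array.prototype.indexOf` inside the same `CustomJS` pattern (a sketch reusing the `l0`/`l1`/`l2`/`checkbox` variables from `line_on_off.py`; this is not the shipped fix):

```python
from bokeh.models import CustomJS

# indexOf tests for the *value*, unlike `in`, which tests whether the
# numeric index exists on the array.
callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox), code="""
    l0.visible = checkbox.active.indexOf(0) >= 0;
    l1.visible = checkbox.active.indexOf(1) >= 0;
    l2.visible = checkbox.active.indexOf(2) >= 0;
""")
```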
</issue>
<code>
[start of examples/plotting/file/line_on_off.py]
1 """ Example demonstrating turning lines on and off - with JS only
2
3 """
4
5 import numpy as np
6
7 from bokeh.io import output_file, show
8 from bokeh.layouts import row
9 from bokeh.palettes import Viridis3
10 from bokeh.plotting import figure
11 from bokeh.models import CheckboxGroup, CustomJS
12
13 output_file("line_on_off.html", title="line_on_off.py example")
14
15 code = """
16 if (0 in checkbox.active) {
17 l0.visible = true
18 } else {
19 l0.visible = false
20 }
21 if (1 in checkbox.active) {
22 l1.visible = true
23 } else {
24 l1.visible = false
25 }
26 if (2 in checkbox.active) {
27 l2.visible = true
28 } else {
29 l2.visible = false
30 }
31 """
32
33 p = figure()
34 props = dict(line_width=4, line_alpha=0.7)
35 x = np.linspace(0, 4 * np.pi, 100)
36 l0 = p.line(x, np.sin(x), color=Viridis3[0], legend="Line 0", **props)
37 l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props)
38 l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props)
39
40 callback = CustomJS(code=code, args={})
41 checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"], active=[0, 1, 2], callback=callback, width=100)
42 callback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)
43
44 layout = row(checkbox, p)
45 show(layout)
46
[end of examples/plotting/file/line_on_off.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting/file/line_on_off.py b/examples/plotting/file/line_on_off.py
--- a/examples/plotting/file/line_on_off.py
+++ b/examples/plotting/file/line_on_off.py
@@ -12,24 +12,6 @@
output_file("line_on_off.html", title="line_on_off.py example")
-code = """
- if (0 in checkbox.active) {
- l0.visible = true
- } else {
- l0.visible = false
- }
- if (1 in checkbox.active) {
- l1.visible = true
- } else {
- l1.visible = false
- }
- if (2 in checkbox.active) {
- l2.visible = true
- } else {
- l2.visible = false
- }
-"""
-
p = figure()
props = dict(line_width=4, line_alpha=0.7)
x = np.linspace(0, 4 * np.pi, 100)
@@ -37,9 +19,14 @@
l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props)
l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props)
-callback = CustomJS(code=code, args={})
-checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"], active=[0, 1, 2], callback=callback, width=100)
-callback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)
+checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"],
+ active=[0, 1, 2], width=100)
+checkbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),
+ lang="coffeescript", code="""
+l0.visible = 0 in checkbox.active;
+l1.visible = 1 in checkbox.active;
+l2.visible = 2 in checkbox.active;
+""")
layout = row(checkbox, p)
show(layout)
| {"golden_diff": "diff --git a/examples/plotting/file/line_on_off.py b/examples/plotting/file/line_on_off.py\n--- a/examples/plotting/file/line_on_off.py\n+++ b/examples/plotting/file/line_on_off.py\n@@ -12,24 +12,6 @@\n \n output_file(\"line_on_off.html\", title=\"line_on_off.py example\")\n \n-code = \"\"\"\n- if (0 in checkbox.active) {\n- l0.visible = true\n- } else {\n- l0.visible = false\n- }\n- if (1 in checkbox.active) {\n- l1.visible = true\n- } else {\n- l1.visible = false\n- }\n- if (2 in checkbox.active) {\n- l2.visible = true\n- } else {\n- l2.visible = false\n- }\n-\"\"\"\n-\n p = figure()\n props = dict(line_width=4, line_alpha=0.7)\n x = np.linspace(0, 4 * np.pi, 100)\n@@ -37,9 +19,14 @@\n l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend=\"Line 1\", **props)\n l2 = p.line(x, np.tan(x), color=Viridis3[2], legend=\"Line 2\", **props)\n \n-callback = CustomJS(code=code, args={})\n-checkbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"], active=[0, 1, 2], callback=callback, width=100)\n-callback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)\n+checkbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"],\n+ active=[0, 1, 2], width=100)\n+checkbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),\n+ lang=\"coffeescript\", code=\"\"\"\n+l0.visible = 0 in checkbox.active;\n+l1.visible = 1 in checkbox.active;\n+l2.visible = 2 in checkbox.active;\n+\"\"\")\n \n layout = row(checkbox, p)\n show(layout)\n", "issue": "Checkbox example is not working as expected\nThe new checkbox example in master (examples/plotting/file/line_on_off.py) is not working as expected, the plotted lines are depend on how many checkbox are ticked and not on which one. The reason is that the js code is not checking for the values but for the existence of the index.\n\nI have a fix for this with a PR coming, and opening this only as an associated issue.\n\n", "before_files": [{"content": "\"\"\" Example demonstrating turning lines on and off - with JS only\n\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import row\nfrom bokeh.palettes import Viridis3\nfrom bokeh.plotting import figure\nfrom bokeh.models import CheckboxGroup, CustomJS\n\noutput_file(\"line_on_off.html\", title=\"line_on_off.py example\")\n\ncode = \"\"\"\n if (0 in checkbox.active) {\n l0.visible = true\n } else {\n l0.visible = false\n }\n if (1 in checkbox.active) {\n l1.visible = true\n } else {\n l1.visible = false\n }\n if (2 in checkbox.active) {\n l2.visible = true\n } else {\n l2.visible = false\n }\n\"\"\"\n\np = figure()\nprops = dict(line_width=4, line_alpha=0.7)\nx = np.linspace(0, 4 * np.pi, 100)\nl0 = p.line(x, np.sin(x), color=Viridis3[0], legend=\"Line 0\", **props)\nl1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend=\"Line 1\", **props)\nl2 = p.line(x, np.tan(x), color=Viridis3[2], legend=\"Line 2\", **props)\n\ncallback = CustomJS(code=code, args={})\ncheckbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"], active=[0, 1, 2], callback=callback, width=100)\ncallback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)\n\nlayout = row(checkbox, p)\nshow(layout)\n", "path": "examples/plotting/file/line_on_off.py"}]} | 1,110 | 501 |
gh_patches_debug_33573 | rasdani/github-patches | git_diff | pre-commit__pre-commit-966 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve platform command line length limits
I have a question related to #510
When I run `pre-commit run` all hooks work as expected (processing just the changed files), but `pre-commit run -a` divides the list of files into 5 batches and runs the same hook in parallel executions. This causes redundant checks of files that belong to the same directory.

Is it possible to specify that certain hooks should not be executed in parallel, but should instead receive all filenames in a single invocation?

Alternatively, is there an environment variable or something similar that indicates pre-commit is running a hook on all files?
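
One contributing factor is visible in `_get_platform_max_length` in `xargs.py` below: it hard-codes the 4 KiB POSIX minimum, so `partition()` splits long file lists far more aggressively than most platforms require. A sketch of a platform-aware limit (the constants are illustrative headroom, not measured values):

```python
import os

def platform_max_length():
    # Sketch only: on POSIX, argv and envp share the ARG_MAX budget, so
    # subtract the environment's size plus some headroom before capping.
    if os.name == 'posix':
        env = getattr(os, 'environb', os.environ)
        env_size = 8 * len(env) + sum(len(k) + len(v) + 2 for k, v in env.items())
        return min(os.sysconf('SC_ARG_MAX') - 2048 - env_size, 2 ** 17)
    return 2 ** 12  # portable minimum fallback
```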
</issue>
<code>
[start of pre_commit/xargs.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import unicode_literals
4
5 import concurrent.futures
6 import contextlib
7 import math
8 import sys
9
10 import six
11
12 from pre_commit import parse_shebang
13 from pre_commit.util import cmd_output
14
15
16 # TODO: properly compute max_length value
17 def _get_platform_max_length():
18 # posix minimum
19 return 4 * 1024
20
21
22 def _command_length(*cmd):
23 full_cmd = ' '.join(cmd)
24
25 # win32 uses the amount of characters, more details at:
26 # https://github.com/pre-commit/pre-commit/pull/839
27 if sys.platform == 'win32':
28 # the python2.x apis require bytes, we encode as UTF-8
29 if six.PY2:
30 return len(full_cmd.encode('utf-8'))
31 else:
32 return len(full_cmd.encode('utf-16le')) // 2
33 else:
34 return len(full_cmd.encode(sys.getfilesystemencoding()))
35
36
37 class ArgumentTooLongError(RuntimeError):
38 pass
39
40
41 def partition(cmd, varargs, target_concurrency, _max_length=None):
42 _max_length = _max_length or _get_platform_max_length()
43
44 # Generally, we try to partition evenly into at least `target_concurrency`
45 # partitions, but we don't want a bunch of tiny partitions.
46 max_args = max(4, math.ceil(len(varargs) / target_concurrency))
47
48 cmd = tuple(cmd)
49 ret = []
50
51 ret_cmd = []
52 # Reversed so arguments are in order
53 varargs = list(reversed(varargs))
54
55 total_length = _command_length(*cmd)
56 while varargs:
57 arg = varargs.pop()
58
59 arg_length = _command_length(arg) + 1
60 if (
61 total_length + arg_length <= _max_length and
62 len(ret_cmd) < max_args
63 ):
64 ret_cmd.append(arg)
65 total_length += arg_length
66 elif not ret_cmd:
67 raise ArgumentTooLongError(arg)
68 else:
69 # We've exceeded the length, yield a command
70 ret.append(cmd + tuple(ret_cmd))
71 ret_cmd = []
72 total_length = _command_length(*cmd)
73 varargs.append(arg)
74
75 ret.append(cmd + tuple(ret_cmd))
76
77 return tuple(ret)
78
79
80 @contextlib.contextmanager
81 def _thread_mapper(maxsize):
82 if maxsize == 1:
83 yield map
84 else:
85 with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:
86 yield ex.map
87
88
89 def xargs(cmd, varargs, **kwargs):
90 """A simplified implementation of xargs.
91
92 negate: Make nonzero successful and zero a failure
93 target_concurrency: Target number of partitions to run concurrently
94 """
95 negate = kwargs.pop('negate', False)
96 target_concurrency = kwargs.pop('target_concurrency', 1)
97 retcode = 0
98 stdout = b''
99 stderr = b''
100
101 try:
102 parse_shebang.normexe(cmd[0])
103 except parse_shebang.ExecutableNotFoundError as e:
104 return e.to_output()
105
106 partitions = partition(cmd, varargs, target_concurrency, **kwargs)
107
108 def run_cmd_partition(run_cmd):
109 return cmd_output(*run_cmd, encoding=None, retcode=None)
110
111 threads = min(len(partitions), target_concurrency)
112 with _thread_mapper(threads) as thread_map:
113 results = thread_map(run_cmd_partition, partitions)
114
115 for proc_retcode, proc_out, proc_err in results:
116 # This is *slightly* too clever so I'll explain it.
117 # First the xor boolean table:
118 # T | F |
119 # +-------+
120 # T | F | T |
121 # --+-------+
122 # F | T | F |
123 # --+-------+
124 # When negate is True, it has the effect of flipping the return
125 # code. Otherwise, the returncode is unchanged.
126 retcode |= bool(proc_retcode) ^ negate
127 stdout += proc_out
128 stderr += proc_err
129
130 return retcode, stdout, stderr
131
[end of pre_commit/xargs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/xargs.py b/pre_commit/xargs.py
--- a/pre_commit/xargs.py
+++ b/pre_commit/xargs.py
@@ -5,6 +5,7 @@
import concurrent.futures
import contextlib
import math
+import os
import sys
import six
@@ -13,10 +14,24 @@
from pre_commit.util import cmd_output
-# TODO: properly compute max_length value
-def _get_platform_max_length():
- # posix minimum
- return 4 * 1024
+def _environ_size(_env=None):
+ environ = _env if _env is not None else getattr(os, 'environb', os.environ)
+ size = 8 * len(environ) # number of pointers in `envp`
+ for k, v in environ.items():
+ size += len(k) + len(v) + 2 # c strings in `envp`
+ return size
+
+
+def _get_platform_max_length(): # pragma: no cover (platform specific)
+ if os.name == 'posix':
+ maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()
+ maximum = min(maximum, 2 ** 17)
+ return maximum
+ elif os.name == 'nt':
+ return 2 ** 15 - 2048 # UNICODE_STRING max - headroom
+ else:
+ # posix minimum
+ return 2 ** 12
def _command_length(*cmd):
@@ -52,7 +67,7 @@
# Reversed so arguments are in order
varargs = list(reversed(varargs))
- total_length = _command_length(*cmd)
+ total_length = _command_length(*cmd) + 1
while varargs:
arg = varargs.pop()
@@ -69,7 +84,7 @@
# We've exceeded the length, yield a command
ret.append(cmd + tuple(ret_cmd))
ret_cmd = []
- total_length = _command_length(*cmd)
+ total_length = _command_length(*cmd) + 1
varargs.append(arg)
ret.append(cmd + tuple(ret_cmd))
@@ -99,7 +114,7 @@
stderr = b''
try:
- parse_shebang.normexe(cmd[0])
+ cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
| {"golden_diff": "diff --git a/pre_commit/xargs.py b/pre_commit/xargs.py\n--- a/pre_commit/xargs.py\n+++ b/pre_commit/xargs.py\n@@ -5,6 +5,7 @@\n import concurrent.futures\n import contextlib\n import math\n+import os\n import sys\n \n import six\n@@ -13,10 +14,24 @@\n from pre_commit.util import cmd_output\n \n \n-# TODO: properly compute max_length value\n-def _get_platform_max_length():\n- # posix minimum\n- return 4 * 1024\n+def _environ_size(_env=None):\n+ environ = _env if _env is not None else getattr(os, 'environb', os.environ)\n+ size = 8 * len(environ) # number of pointers in `envp`\n+ for k, v in environ.items():\n+ size += len(k) + len(v) + 2 # c strings in `envp`\n+ return size\n+\n+\n+def _get_platform_max_length(): # pragma: no cover (platform specific)\n+ if os.name == 'posix':\n+ maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()\n+ maximum = min(maximum, 2 ** 17)\n+ return maximum\n+ elif os.name == 'nt':\n+ return 2 ** 15 - 2048 # UNICODE_STRING max - headroom\n+ else:\n+ # posix minimum\n+ return 2 ** 12\n \n \n def _command_length(*cmd):\n@@ -52,7 +67,7 @@\n # Reversed so arguments are in order\n varargs = list(reversed(varargs))\n \n- total_length = _command_length(*cmd)\n+ total_length = _command_length(*cmd) + 1\n while varargs:\n arg = varargs.pop()\n \n@@ -69,7 +84,7 @@\n # We've exceeded the length, yield a command\n ret.append(cmd + tuple(ret_cmd))\n ret_cmd = []\n- total_length = _command_length(*cmd)\n+ total_length = _command_length(*cmd) + 1\n varargs.append(arg)\n \n ret.append(cmd + tuple(ret_cmd))\n@@ -99,7 +114,7 @@\n stderr = b''\n \n try:\n- parse_shebang.normexe(cmd[0])\n+ cmd = parse_shebang.normalize_cmd(cmd)\n except parse_shebang.ExecutableNotFoundError as e:\n return e.to_output()\n", "issue": "Improve platform command line length limits\nI have a question related to #510 \r\n\r\nWhen I run `pre-commit run` all hooks work as expected (processing just changed files), but `pre-commit run -a` divides a list of files into 5 bulks and run the same hook in parallel executions. 
This creates unnecessary checks of files which belong to the same directory.\r\n\r\nIs it possible to control that certain hooks should not be executed in parallel but rather pass all filenames to it?\r\n\r\nAlternatively, is there an environment variable or something like that which says that pre-commit is running a hook for all files?\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport concurrent.futures\nimport contextlib\nimport math\nimport sys\n\nimport six\n\nfrom pre_commit import parse_shebang\nfrom pre_commit.util import cmd_output\n\n\n# TODO: properly compute max_length value\ndef _get_platform_max_length():\n # posix minimum\n return 4 * 1024\n\n\ndef _command_length(*cmd):\n full_cmd = ' '.join(cmd)\n\n # win32 uses the amount of characters, more details at:\n # https://github.com/pre-commit/pre-commit/pull/839\n if sys.platform == 'win32':\n # the python2.x apis require bytes, we encode as UTF-8\n if six.PY2:\n return len(full_cmd.encode('utf-8'))\n else:\n return len(full_cmd.encode('utf-16le')) // 2\n else:\n return len(full_cmd.encode(sys.getfilesystemencoding()))\n\n\nclass ArgumentTooLongError(RuntimeError):\n pass\n\n\ndef partition(cmd, varargs, target_concurrency, _max_length=None):\n _max_length = _max_length or _get_platform_max_length()\n\n # Generally, we try to partition evenly into at least `target_concurrency`\n # partitions, but we don't want a bunch of tiny partitions.\n max_args = max(4, math.ceil(len(varargs) / target_concurrency))\n\n cmd = tuple(cmd)\n ret = []\n\n ret_cmd = []\n # Reversed so arguments are in order\n varargs = list(reversed(varargs))\n\n total_length = _command_length(*cmd)\n while varargs:\n arg = varargs.pop()\n\n arg_length = _command_length(arg) + 1\n if (\n total_length + arg_length <= _max_length and\n len(ret_cmd) < max_args\n ):\n ret_cmd.append(arg)\n total_length += arg_length\n elif not ret_cmd:\n raise ArgumentTooLongError(arg)\n else:\n # We've exceeded the length, yield a command\n ret.append(cmd + tuple(ret_cmd))\n ret_cmd = []\n total_length = _command_length(*cmd)\n varargs.append(arg)\n\n ret.append(cmd + tuple(ret_cmd))\n\n return tuple(ret)\n\n\[email protected]\ndef _thread_mapper(maxsize):\n if maxsize == 1:\n yield map\n else:\n with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:\n yield ex.map\n\n\ndef xargs(cmd, varargs, **kwargs):\n \"\"\"A simplified implementation of xargs.\n\n negate: Make nonzero successful and zero a failure\n target_concurrency: Target number of partitions to run concurrently\n \"\"\"\n negate = kwargs.pop('negate', False)\n target_concurrency = kwargs.pop('target_concurrency', 1)\n retcode = 0\n stdout = b''\n stderr = b''\n\n try:\n parse_shebang.normexe(cmd[0])\n except parse_shebang.ExecutableNotFoundError as e:\n return e.to_output()\n\n partitions = partition(cmd, varargs, target_concurrency, **kwargs)\n\n def run_cmd_partition(run_cmd):\n return cmd_output(*run_cmd, encoding=None, retcode=None)\n\n threads = min(len(partitions), target_concurrency)\n with _thread_mapper(threads) as thread_map:\n results = thread_map(run_cmd_partition, partitions)\n\n for proc_retcode, proc_out, proc_err in results:\n # This is *slightly* too clever so I'll explain it.\n # First the xor boolean table:\n # T | F |\n # +-------+\n # T | F | T |\n # --+-------+\n # F | T | F |\n # --+-------+\n # When negate is True, it has the effect of flipping the return\n # code. 
Otherwise, the returncode is unchanged.\n retcode |= bool(proc_retcode) ^ negate\n stdout += proc_out\n stderr += proc_err\n\n return retcode, stdout, stderr\n", "path": "pre_commit/xargs.py"}]} | 1,850 | 567 |
gh_patches_debug_97 | rasdani/github-patches | git_diff | uccser__cs-unplugged-434 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check desired orientation of binary to alphabet resource
Currently the resource is displayed in portrait, but half the page is unused. It may be better to switch to landscape, which would increase the size of the table cells.
</issue>
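For context on the proposed change: rotating the rendered table into landscape is a one-line operation in Pillow. The snippet below is only a sketch of that idea; the asset path is taken from the resource code further down, and `expand=True` is needed so the rotated canvas is enlarged rather than cropped.

```python
from PIL import Image

# Hedged sketch: rotate the generated portrait table into landscape.
# expand=True resizes the output canvas to hold the rotated image.
image = Image.open("static/img/resources/binary-to-alphabet/table.png")
landscape = image.rotate(90, expand=True)
```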
<code>
[start of csunplugged/resources/views/binary_to_alphabet.py]
1 """Module for generating Binary to Alphabet resource."""
2
3 from PIL import Image, ImageDraw, ImageFont
4 from utils.retrieve_query_parameter import retrieve_query_parameter
5
6
7 def resource_image(request, resource):
8 """Create a image for Binary to Alphabet resource.
9
10 Args:
11 request: HTTP request object
12 resource: Object of resource data.
13
14 Returns:
15 A Pillow image object.
16 """
17 # Retrieve relevant image
18 parameter_options = valid_options()
19 worksheet_version = retrieve_query_parameter(request, "worksheet_version", parameter_options["worksheet_version"])
20 if worksheet_version == "student":
21 image_path = "static/img/resources/binary-to-alphabet/table.png"
22 else:
23 image_path = "static/img/resources/binary-to-alphabet/table-teacher.png"
24 image = Image.open(image_path)
25 draw = ImageDraw.Draw(image)
26
27 font_size = 30
28 font_path = "static/fonts/PatrickHand-Regular.ttf"
29 font = ImageFont.truetype(font_path, font_size)
30
31 # Draw headings
32 column_headings = ["Base 10", "Binary", "Letter"]
33 heading_coord_x = 18
34 heading_coord_y = 6
35
36 i = 0
37 while i < 9: # 9 = number of columns
38
39 if i % 3 == 0:
40 text = str(column_headings[0])
41 elif i % 3 == 1:
42 text = str(column_headings[1])
43 else:
44 text = str(column_headings[2])
45
46 draw.text(
47 (heading_coord_x, heading_coord_y),
48 text,
49 font=font,
50 fill="#000"
51 )
52
53 heading_coord_x += 113
54
55 i += 1
56
57 # Draw numbers
58 # Column data: (min number, max number), x coord
59 columns_data = [((0, 9), 58), ((9, 18), 397), ((18, 27), 736)]
60
61 for column_set in columns_data:
62 start, end = column_set[0]
63 base_coord_x = column_set[1]
64 base_coord_y = 75
65
66 for number in range(start, end):
67 text = str(number)
68 text_width, text_height = draw.textsize(text, font=font)
69 coord_x = base_coord_x - (text_width / 2)
70 coord_y = base_coord_y - (text_height / 2)
71
72 draw.text(
73 (coord_x, coord_y),
74 text,
75 font=font,
76 fill="#000"
77 )
78
79 base_coord_y += 54
80
81 return image
82
83
84 def subtitle(request, resource):
85 """Return the subtitle string of the resource.
86
87 Used after the resource name in the filename, and
88 also on the resource image.
89
90 Args:
91 request: HTTP request object
92 resource: Object of resource data.
93
94 Returns:
95 text for subtitle (string)
96 """
97 text = "{} - {}".format(
98 retrieve_query_parameter(request, "worksheet_version"),
99 retrieve_query_parameter(request, "paper_size")
100 )
101 return text
102
103
104 def valid_options():
105 """Provide dictionary of all valid parameters.
106
107 This excludes the header text parameter.
108
109 Returns:
110 All valid options (dict).
111 """
112 return {
113 "worksheet_version": ["student", "teacher"],
114 "paper_size": ["a4", "letter"]
115 }
116
[end of csunplugged/resources/views/binary_to_alphabet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/resources/views/binary_to_alphabet.py b/csunplugged/resources/views/binary_to_alphabet.py
--- a/csunplugged/resources/views/binary_to_alphabet.py
+++ b/csunplugged/resources/views/binary_to_alphabet.py
@@ -78,6 +78,7 @@
base_coord_y += 54
+ image = image.rotate(90, expand=True)
return image
| {"golden_diff": "diff --git a/csunplugged/resources/views/binary_to_alphabet.py b/csunplugged/resources/views/binary_to_alphabet.py\n--- a/csunplugged/resources/views/binary_to_alphabet.py\n+++ b/csunplugged/resources/views/binary_to_alphabet.py\n@@ -78,6 +78,7 @@\n \n base_coord_y += 54\n \n+ image = image.rotate(90, expand=True)\n return image\n", "issue": "Check desired orientation of binary to alphabet resource\nCurrently is displayed in portrait but half the page is unused. May be better to switch to landscape which will increase the size of table cells.\n", "before_files": [{"content": "\"\"\"Module for generating Binary to Alphabet resource.\"\"\"\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils.retrieve_query_parameter import retrieve_query_parameter\n\n\ndef resource_image(request, resource):\n \"\"\"Create a image for Binary to Alphabet resource.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n A Pillow image object.\n \"\"\"\n # Retrieve relevant image\n parameter_options = valid_options()\n worksheet_version = retrieve_query_parameter(request, \"worksheet_version\", parameter_options[\"worksheet_version\"])\n if worksheet_version == \"student\":\n image_path = \"static/img/resources/binary-to-alphabet/table.png\"\n else:\n image_path = \"static/img/resources/binary-to-alphabet/table-teacher.png\"\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n font_size = 30\n font_path = \"static/fonts/PatrickHand-Regular.ttf\"\n font = ImageFont.truetype(font_path, font_size)\n\n # Draw headings\n column_headings = [\"Base 10\", \"Binary\", \"Letter\"]\n heading_coord_x = 18\n heading_coord_y = 6\n\n i = 0\n while i < 9: # 9 = number of columns\n\n if i % 3 == 0:\n text = str(column_headings[0])\n elif i % 3 == 1:\n text = str(column_headings[1])\n else:\n text = str(column_headings[2])\n\n draw.text(\n (heading_coord_x, heading_coord_y),\n text,\n font=font,\n fill=\"#000\"\n )\n\n heading_coord_x += 113\n\n i += 1\n\n # Draw numbers\n # Column data: (min number, max number), x coord\n columns_data = [((0, 9), 58), ((9, 18), 397), ((18, 27), 736)]\n\n for column_set in columns_data:\n start, end = column_set[0]\n base_coord_x = column_set[1]\n base_coord_y = 75\n\n for number in range(start, end):\n text = str(number)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = base_coord_x - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill=\"#000\"\n )\n\n base_coord_y += 54\n\n return image\n\n\ndef subtitle(request, resource):\n \"\"\"Return the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n text for subtitle (string)\n \"\"\"\n text = \"{} - {}\".format(\n retrieve_query_parameter(request, \"worksheet_version\"),\n retrieve_query_parameter(request, \"paper_size\")\n )\n return text\n\n\ndef valid_options():\n \"\"\"Provide dictionary of all valid parameters.\n\n This excludes the header text parameter.\n\n Returns:\n All valid options (dict).\n \"\"\"\n return {\n \"worksheet_version\": [\"student\", \"teacher\"],\n \"paper_size\": [\"a4\", \"letter\"]\n }\n", "path": "csunplugged/resources/views/binary_to_alphabet.py"}]} | 1,580 | 100 |
gh_patches_debug_12983 | rasdani/github-patches | git_diff | scikit-image__scikit-image-7211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consistently use lazy loading for all `skimage.*` submodules
### Description:
With `lazy_loader` successfully being used for `skimage`, `skimage.data` and `skimage.filters`, why not use it for every one of our public submodules? I see no significant disadvantage here (when using the approach with PYI files), and it is what is proposed in [SPEC 1](https://scientific-python.org/specs/spec-0001/).
Feel free to remove the good first issue label if there are concerns. Otherwise I would suggest tackling this with separate PRs for each module that copy the examples mentioned above.
</issue>
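For reference, the `lazy_loader` pattern from SPEC 1 replaces the eager imports with a single `attach_stub` call, with the public names declared in a `.pyi` stub shipped next to the module. A minimal sketch of the pattern (the module here mirrors `skimage.metrics`; the stub file itself is not shown):

```python
# skimage/metrics/__init__.py, lazily resolved via the adjacent __init__.pyi
import lazy_loader as lazy

# Attribute access triggers the real import; __all__ comes from the stub.
__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
```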
<code>
[start of skimage/metrics/__init__.py]
1 from ._adapted_rand_error import adapted_rand_error
2 from ._contingency_table import contingency_table
3 from ._structural_similarity import structural_similarity
4 from ._variation_of_information import variation_of_information
5 from .set_metrics import hausdorff_distance, hausdorff_pair
6 from .simple_metrics import (
7 mean_squared_error,
8 normalized_mutual_information,
9 normalized_root_mse,
10 peak_signal_noise_ratio,
11 )
12
13 __all__ = [
14 "adapted_rand_error",
15 "variation_of_information",
16 "contingency_table",
17 "mean_squared_error",
18 "normalized_mutual_information",
19 "normalized_root_mse",
20 "peak_signal_noise_ratio",
21 "structural_similarity",
22 "hausdorff_distance",
23 "hausdorff_pair",
24 ]
25
[end of skimage/metrics/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/metrics/__init__.py b/skimage/metrics/__init__.py
--- a/skimage/metrics/__init__.py
+++ b/skimage/metrics/__init__.py
@@ -1,24 +1,3 @@
-from ._adapted_rand_error import adapted_rand_error
-from ._contingency_table import contingency_table
-from ._structural_similarity import structural_similarity
-from ._variation_of_information import variation_of_information
-from .set_metrics import hausdorff_distance, hausdorff_pair
-from .simple_metrics import (
- mean_squared_error,
- normalized_mutual_information,
- normalized_root_mse,
- peak_signal_noise_ratio,
-)
+import lazy_loader as lazy
-__all__ = [
- "adapted_rand_error",
- "variation_of_information",
- "contingency_table",
- "mean_squared_error",
- "normalized_mutual_information",
- "normalized_root_mse",
- "peak_signal_noise_ratio",
- "structural_similarity",
- "hausdorff_distance",
- "hausdorff_pair",
-]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
| {"golden_diff": "diff --git a/skimage/metrics/__init__.py b/skimage/metrics/__init__.py\n--- a/skimage/metrics/__init__.py\n+++ b/skimage/metrics/__init__.py\n@@ -1,24 +1,3 @@\n-from ._adapted_rand_error import adapted_rand_error\n-from ._contingency_table import contingency_table\n-from ._structural_similarity import structural_similarity\n-from ._variation_of_information import variation_of_information\n-from .set_metrics import hausdorff_distance, hausdorff_pair\n-from .simple_metrics import (\n- mean_squared_error,\n- normalized_mutual_information,\n- normalized_root_mse,\n- peak_signal_noise_ratio,\n-)\n+import lazy_loader as lazy\n \n-__all__ = [\n- \"adapted_rand_error\",\n- \"variation_of_information\",\n- \"contingency_table\",\n- \"mean_squared_error\",\n- \"normalized_mutual_information\",\n- \"normalized_root_mse\",\n- \"peak_signal_noise_ratio\",\n- \"structural_similarity\",\n- \"hausdorff_distance\",\n- \"hausdorff_pair\",\n-]\n+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)\n", "issue": "Consistently use lazy loading for all `skimage.*` submodules\n### Description:\r\n\r\nWith `lazy_loader` successfully being used for `skimage`, `skimage.data` and `skimage.filters` why not use it for every of our public submodules? I see no significant disadvantage here (when using the approach with PYI files) and it is what is proposed in [SPEC 1](https://scientific-python.org/specs/spec-0001/).\r\n\r\nFeel free to remove the good first issue label if there are concerns. Otherwise I would suggest to tackle this with separate PRs for each module that copy the examples mentioned above.\n", "before_files": [{"content": "from ._adapted_rand_error import adapted_rand_error\nfrom ._contingency_table import contingency_table\nfrom ._structural_similarity import structural_similarity\nfrom ._variation_of_information import variation_of_information\nfrom .set_metrics import hausdorff_distance, hausdorff_pair\nfrom .simple_metrics import (\n mean_squared_error,\n normalized_mutual_information,\n normalized_root_mse,\n peak_signal_noise_ratio,\n)\n\n__all__ = [\n \"adapted_rand_error\",\n \"variation_of_information\",\n \"contingency_table\",\n \"mean_squared_error\",\n \"normalized_mutual_information\",\n \"normalized_root_mse\",\n \"peak_signal_noise_ratio\",\n \"structural_similarity\",\n \"hausdorff_distance\",\n \"hausdorff_pair\",\n]\n", "path": "skimage/metrics/__init__.py"}]} | 876 | 264 |
gh_patches_debug_4682 | rasdani/github-patches | git_diff | ethereum__web3.py-709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use EthereumTesterProvider backed by eth-tester in default import
`from web3 import EthereumTesterProvider` should use the eth-tester one, not the testrpc one.
</issue>
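Concretely, the package-level re-export should point at the eth-tester backed provider module rather than the testrpc one. A sketch of the intended import, assuming the provider lives at `web3.providers.eth_tester` as the issue implies:

```python
# Re-export the eth-tester backed provider so that
# `from web3 import EthereumTesterProvider` resolves to it.
from web3.providers.eth_tester import EthereumTesterProvider  # noqa: E402
```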
<code>
[start of web3/__init__.py]
1 import pkg_resources
2 import sys
3
4 if sys.version_info < (3, 5):
5 raise EnvironmentError("Python 3.5 or above is required")
6
7 from eth_account import Account # noqa: E402
8 from web3.main import Web3 # noqa: E402
9 from web3.providers.rpc import ( # noqa: E402
10 HTTPProvider,
11 )
12 from web3.providers.tester import ( # noqa: E402
13 TestRPCProvider,
14 EthereumTesterProvider,
15 )
16 from web3.providers.ipc import ( # noqa: E402
17 IPCProvider,
18 )
19
20 __version__ = pkg_resources.get_distribution("web3").version
21
22 __all__ = [
23 "__version__",
24 "Web3",
25 "HTTPProvider",
26 "IPCProvider",
27 "TestRPCProvider",
28 "EthereumTesterProvider",
29 "Account",
30 ]
31
[end of web3/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/__init__.py b/web3/__init__.py
--- a/web3/__init__.py
+++ b/web3/__init__.py
@@ -9,9 +9,11 @@
from web3.providers.rpc import ( # noqa: E402
HTTPProvider,
)
+from web3.providers.eth_tester import ( # noqa: E402
+ EthereumTesterProvider,
+)
from web3.providers.tester import ( # noqa: E402
TestRPCProvider,
- EthereumTesterProvider,
)
from web3.providers.ipc import ( # noqa: E402
IPCProvider,
| {"golden_diff": "diff --git a/web3/__init__.py b/web3/__init__.py\n--- a/web3/__init__.py\n+++ b/web3/__init__.py\n@@ -9,9 +9,11 @@\n from web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n )\n+from web3.providers.eth_tester import ( # noqa: E402\n+ EthereumTesterProvider,\n+)\n from web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n- EthereumTesterProvider,\n )\n from web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n", "issue": "Use EthereumTesterProvider backed by eth-tester in default import\n`from web3 import EthereumTesterProvider` should use the eth-tester one, not the testrpc one.\n", "before_files": [{"content": "import pkg_resources\nimport sys\n\nif sys.version_info < (3, 5):\n raise EnvironmentError(\"Python 3.5 or above is required\")\n\nfrom eth_account import Account # noqa: E402\nfrom web3.main import Web3 # noqa: E402\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"TestRPCProvider\",\n \"EthereumTesterProvider\",\n \"Account\",\n]\n", "path": "web3/__init__.py"}]} | 823 | 146 |
gh_patches_debug_19778 | rasdani/github-patches | git_diff | Mailu__Mailu-1198 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make access logging of admin server dependent on log level
Currently, the admin service writes all access logs to the console, regardless of which log level is set.
This is caused by starting gunicorn with `--access-logfile -`. The flag should instead depend on the configured log level, e.g. access logging enabled when the level is INFO or more verbose (INFO, DEBUG) and disabled for WARNING and less verbose levels.
</issue>
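A minimal sketch of the intended behavior, assuming the level is configured through a `LOG_LEVEL` environment variable: build the gunicorn command dynamically and add `--access-logfile -` only when the effective level is INFO or more verbose.

```python
import logging
import os

# Resolve the configured level name to its numeric value; unknown names
# come back as strings, which we treat as "keep access logs off".
level = logging.getLevelName(os.environ.get("LOG_LEVEL", "WARNING"))

cmd = ["gunicorn", "-w", "4", "-b", ":80", "--error-logfile", "-", "--preload"]
if isinstance(level, int) and level <= logging.INFO:
    cmd += ["--access-logfile", "-"]  # emit access logs only at INFO/DEBUG
cmd.append("mailu:create_app()")
```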
<code>
[start of core/admin/start.py]
1 #!/usr/bin/python3
2
3 import os
4
5 os.system("flask mailu advertise")
6 os.system("flask db upgrade")
7
8 account = os.environ.get("INITIAL_ADMIN_ACCOUNT")
9 domain = os.environ.get("INITIAL_ADMIN_DOMAIN")
10 password = os.environ.get("INITIAL_ADMIN_PW")
11
12 if account is not None and domain is not None and password is not None:
13 mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing")
14 os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
15
16 os.system("gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'")
17
[end of core/admin/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -1,6 +1,10 @@
#!/usr/bin/python3
import os
+import logging as log
+import sys
+
+log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "INFO"))
os.system("flask mailu advertise")
os.system("flask db upgrade")
@@ -11,6 +15,14 @@
if account is not None and domain is not None and password is not None:
mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing")
+ log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode)
os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
-os.system("gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'")
+start_command="".join([
+ "gunicorn -w 4 -b :80 ",
+ "--access-logfile - " if (log.root.level<=log.INFO) else "",
+ "--error-logfile - ",
+ "--preload ",
+ "'mailu:create_app()'"])
+
+os.system(start_command)
| {"golden_diff": "diff --git a/core/admin/start.py b/core/admin/start.py\n--- a/core/admin/start.py\n+++ b/core/admin/start.py\n@@ -1,6 +1,10 @@\n #!/usr/bin/python3\n \n import os\n+import logging as log\n+import sys\n+\n+log.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n \n os.system(\"flask mailu advertise\")\n os.system(\"flask db upgrade\")\n@@ -11,6 +15,14 @@\n \n if account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n+ log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n \n-os.system(\"gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'\")\n+start_command=\"\".join([\n+ \"gunicorn -w 4 -b :80 \",\n+ \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n+ \"--error-logfile - \",\n+ \"--preload \",\n+ \"'mailu:create_app()'\"])\n+\n+os.system(start_command)\n", "issue": "Make access logging of admin server dependent on log level\nCurrently, admin logs all access logs to the console, regardless of which log level is set.\r\nThis is caused by starting gunicorn with `--access-logfile -`. This should be switched dependent on the log level set, e.g. on for levels >= INFO and off for <=WARNING\r\n \n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\nos.system(\"gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'\")\n", "path": "core/admin/start.py"}]} | 792 | 305 |
gh_patches_debug_4918 | rasdani/github-patches | git_diff | rlworkgroup__garage-1639 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs page "Ensure your experiments are reproducible"
See #1426
</issue>
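Whatever form the docs page takes, its core recipe is seeding every random number generator before the experiment starts. A framework-agnostic sketch of that idea (a garage-specific seeding helper, if one exists, would replace this):

```python
import random

import numpy as np


def set_seed(seed):
    """Seed the global RNGs so repeated runs produce identical trajectories."""
    random.seed(seed)
    np.random.seed(seed)


set_seed(42)
```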
<code>
[start of src/garage/envs/point_env.py]
1 """Simple 2D environment containing a point and a goal location."""
2 import gym
3 import numpy as np
4
5 from garage.envs.step import Step
6
7
8 class PointEnv(gym.Env):
9 """A simple 2D point environment.
10
11 Attributes:
12 observation_space (gym.spaces.Box): The observation space
13 action_space (gym.spaces.Box): The action space
14
15 Args:
16 goal (np.ndarray): A 2D array representing the goal position
17 arena_size (float): The size of arena where the point is constrained
18 within (-arena_size, arena_size) in each dimension
19 done_bonus (float): A numerical bonus added to the reward
20 once the point as reached the goal
21 never_done (bool): Never send a `done` signal, even if the
22 agent achieves the goal
23
24 """
25
26 def __init__(
27 self,
28 goal=np.array((1., 1.), dtype=np.float32),
29 arena_size=5.,
30 done_bonus=0.,
31 never_done=False,
32 ):
33 goal = np.array(goal, dtype=np.float32)
34 self._goal = goal
35 self._done_bonus = done_bonus
36 self._never_done = never_done
37 self._arena_size = arena_size
38
39 assert ((goal >= -arena_size) & (goal <= arena_size)).all()
40
41 self._point = np.zeros_like(self._goal)
42 self._task = {'goal': self._goal}
43 self._observation_space = gym.spaces.Box(low=-np.inf,
44 high=np.inf,
45 shape=(3, ),
46 dtype=np.float32)
47 self._action_space = gym.spaces.Box(low=-0.1,
48 high=0.1,
49 shape=(2, ),
50 dtype=np.float32)
51
52 @property
53 def observation_space(self):
54 """gym.spaces.Box: The observation space."""
55 return self._observation_space
56
57 @property
58 def action_space(self):
59 """gym.spaces.Box: The action space."""
60 return self._action_space
61
62 def reset(self):
63 """Reset the environment.
64
65 Returns:
66 np.ndarray: Observation of the environment.
67
68 """
69 self._point = np.zeros_like(self._goal)
70 dist = np.linalg.norm(self._point - self._goal)
71 return np.concatenate([self._point, (dist, )])
72
73 def step(self, action):
74 """Step the environment state.
75
76 Args:
77 action (np.ndarray): The action to take in the environment.
78
79 Returns:
80 np.ndarray: Observation. The observation of the environment.
81 float: Reward. The reward acquired at this time step.
82 boolean: Done. Whether the environment was completed at this
83 time step. Always False for this environment.
84
85 """
86 # enforce action space
87 a = action.copy() # NOTE: we MUST copy the action before modifying it
88 a = np.clip(a, self.action_space.low, self.action_space.high)
89
90 self._point = np.clip(self._point + a, -self._arena_size,
91 self._arena_size)
92 dist = np.linalg.norm(self._point - self._goal)
93 succ = dist < np.linalg.norm(self.action_space.low)
94
95 # dense reward
96 reward = -dist
97 # done bonus
98 if succ:
99 reward += self._done_bonus
100
101 # sometimes we don't want to terminate
102 done = succ and not self._never_done
103
104 obs = np.concatenate([self._point, (dist, )])
105
106 return Step(obs, reward, done, task=self._task, success=succ)
107
108 def render(self, mode='human'):
109 """Draw the environment.
110
111 Not implemented.
112
113 Args:
114 mode (str): Ignored.
115
116 """
117 # pylint: disable=no-self-use
118
119 def sample_tasks(self, num_tasks):
120 """Sample a list of `num_tasks` tasks.
121
122 Args:
123 num_tasks (int): Number of tasks to sample.
124
125 Returns:
126 list[dict[str, np.ndarray]]: A list of "tasks", where each task is
127 a dictionary containing a single key, "goal", mapping to a
128 point in 2D space.
129
130 """
131 goals = np.random.uniform(-2, 2, size=(num_tasks, 2))
132 tasks = [{'goal': goal} for goal in goals]
133 return tasks
134
135 def set_task(self, task):
136 """Reset with a task.
137
138 Args:
139 task (dict[str, np.ndarray]): A task (a dictionary containing a
140 single key, "goal", which should be a point in 2D space).
141
142 """
143 self._task = task
144 self._goal = task['goal']
145
[end of src/garage/envs/point_env.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/garage/envs/point_env.py b/src/garage/envs/point_env.py
--- a/src/garage/envs/point_env.py
+++ b/src/garage/envs/point_env.py
@@ -24,11 +24,11 @@
"""
def __init__(
- self,
- goal=np.array((1., 1.), dtype=np.float32),
- arena_size=5.,
- done_bonus=0.,
- never_done=False,
+ self,
+ goal=np.array((1., 1.), dtype=np.float32),
+ arena_size=5.,
+ done_bonus=0.,
+ never_done=False,
):
goal = np.array(goal, dtype=np.float32)
self._goal = goal
| {"golden_diff": "diff --git a/src/garage/envs/point_env.py b/src/garage/envs/point_env.py\n--- a/src/garage/envs/point_env.py\n+++ b/src/garage/envs/point_env.py\n@@ -24,11 +24,11 @@\n \"\"\"\n \n def __init__(\n- self,\n- goal=np.array((1., 1.), dtype=np.float32),\n- arena_size=5.,\n- done_bonus=0.,\n- never_done=False,\n+ self,\n+ goal=np.array((1., 1.), dtype=np.float32),\n+ arena_size=5.,\n+ done_bonus=0.,\n+ never_done=False,\n ):\n goal = np.array(goal, dtype=np.float32)\n self._goal = goal\n", "issue": "Docs page \"Ensure your experiments are reproducible\"\nSee #1426 \n", "before_files": [{"content": "\"\"\"Simple 2D environment containing a point and a goal location.\"\"\"\nimport gym\nimport numpy as np\n\nfrom garage.envs.step import Step\n\n\nclass PointEnv(gym.Env):\n \"\"\"A simple 2D point environment.\n\n Attributes:\n observation_space (gym.spaces.Box): The observation space\n action_space (gym.spaces.Box): The action space\n\n Args:\n goal (np.ndarray): A 2D array representing the goal position\n arena_size (float): The size of arena where the point is constrained\n within (-arena_size, arena_size) in each dimension\n done_bonus (float): A numerical bonus added to the reward\n once the point as reached the goal\n never_done (bool): Never send a `done` signal, even if the\n agent achieves the goal\n\n \"\"\"\n\n def __init__(\n self,\n goal=np.array((1., 1.), dtype=np.float32),\n arena_size=5.,\n done_bonus=0.,\n never_done=False,\n ):\n goal = np.array(goal, dtype=np.float32)\n self._goal = goal\n self._done_bonus = done_bonus\n self._never_done = never_done\n self._arena_size = arena_size\n\n assert ((goal >= -arena_size) & (goal <= arena_size)).all()\n\n self._point = np.zeros_like(self._goal)\n self._task = {'goal': self._goal}\n self._observation_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(3, ),\n dtype=np.float32)\n self._action_space = gym.spaces.Box(low=-0.1,\n high=0.1,\n shape=(2, ),\n dtype=np.float32)\n\n @property\n def observation_space(self):\n \"\"\"gym.spaces.Box: The observation space.\"\"\"\n return self._observation_space\n\n @property\n def action_space(self):\n \"\"\"gym.spaces.Box: The action space.\"\"\"\n return self._action_space\n\n def reset(self):\n \"\"\"Reset the environment.\n\n Returns:\n np.ndarray: Observation of the environment.\n\n \"\"\"\n self._point = np.zeros_like(self._goal)\n dist = np.linalg.norm(self._point - self._goal)\n return np.concatenate([self._point, (dist, )])\n\n def step(self, action):\n \"\"\"Step the environment state.\n\n Args:\n action (np.ndarray): The action to take in the environment.\n\n Returns:\n np.ndarray: Observation. The observation of the environment.\n float: Reward. The reward acquired at this time step.\n boolean: Done. Whether the environment was completed at this\n time step. 
Always False for this environment.\n\n \"\"\"\n # enforce action space\n a = action.copy() # NOTE: we MUST copy the action before modifying it\n a = np.clip(a, self.action_space.low, self.action_space.high)\n\n self._point = np.clip(self._point + a, -self._arena_size,\n self._arena_size)\n dist = np.linalg.norm(self._point - self._goal)\n succ = dist < np.linalg.norm(self.action_space.low)\n\n # dense reward\n reward = -dist\n # done bonus\n if succ:\n reward += self._done_bonus\n\n # sometimes we don't want to terminate\n done = succ and not self._never_done\n\n obs = np.concatenate([self._point, (dist, )])\n\n return Step(obs, reward, done, task=self._task, success=succ)\n\n def render(self, mode='human'):\n \"\"\"Draw the environment.\n\n Not implemented.\n\n Args:\n mode (str): Ignored.\n\n \"\"\"\n # pylint: disable=no-self-use\n\n def sample_tasks(self, num_tasks):\n \"\"\"Sample a list of `num_tasks` tasks.\n\n Args:\n num_tasks (int): Number of tasks to sample.\n\n Returns:\n list[dict[str, np.ndarray]]: A list of \"tasks\", where each task is\n a dictionary containing a single key, \"goal\", mapping to a\n point in 2D space.\n\n \"\"\"\n goals = np.random.uniform(-2, 2, size=(num_tasks, 2))\n tasks = [{'goal': goal} for goal in goals]\n return tasks\n\n def set_task(self, task):\n \"\"\"Reset with a task.\n\n Args:\n task (dict[str, np.ndarray]): A task (a dictionary containing a\n single key, \"goal\", which should be a point in 2D space).\n\n \"\"\"\n self._task = task\n self._goal = task['goal']\n", "path": "src/garage/envs/point_env.py"}]} | 1,917 | 179 |
gh_patches_debug_25231 | rasdani/github-patches | git_diff | litestar-org__litestar-1286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: `SessionCookieConfig` import DeprecationWarning - suggested import path does not exist
**Describe the bug**
In Starlite >= 1.47, the statement `from starlite.middleware.session.cookie_backend import CookieBackendConfig` raises a DeprecationWarning:
```
DeprecationWarning: Import of deprecated import 'SessionCookieConfig from starlite.middleware.session'.
Deprecated in starlite 1.47.0. This import will be removed in the next major version.
Use "'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'" instead
```
The suggested import path does not exist and contains a typo.
</issue>
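For clarity, the import path that actually resolves (and the one the warning message should recommend) is the singular `session` module under the correctly spelled package name:

```python
# Works today; note "starlite" (not "startlite") and "session" (not "sessions").
from starlite.middleware.session.cookie_backend import CookieBackendConfig
```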
<code>
[start of starlite/middleware/session/__init__.py]
1 from typing import Any
2
3 from starlite.utils import warn_deprecation
4
5 from .base import SessionMiddleware
6
7
8 def __getattr__(name: str) -> Any:
9 """Provide lazy importing as per https://peps.python.org/pep-0562/"""
10
11 if name != "SessionCookieConfig":
12 raise AttributeError(f"Module {__package__} has no attribute {name}")
13
14 from .cookie_backend import CookieBackendConfig
15
16 warn_deprecation(
17 deprecated_name=f"{name} from {__package__}",
18 kind="import",
19 alternative="'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'",
20 version="1.47.0",
21 )
22
23 globals()[name] = CookieBackendConfig
24 return CookieBackendConfig
25
26
27 __all__ = ["SessionMiddleware"]
28
[end of starlite/middleware/session/__init__.py]
[start of starlite/template/__init__.py]
1 from typing import Any
2
3 from .base import TemplateEngineProtocol, TemplateProtocol
4
5 __all__ = ("TemplateEngineProtocol", "TemplateProtocol")
6
7 from ..utils import warn_deprecation
8
9
10 def __getattr__(name: str) -> Any:
11 """Provide lazy importing as per https://peps.python.org/pep-0562/"""
12
13 if name not in {"JinjaTemplateEngine", "MakoTemplateEngine", "MakoTemplate"}:
14 raise AttributeError(f"Module {__package__} has no attribute {name}")
15
16 if name == "JinjaTemplateEngine":
17 from starlite.contrib.jinja import JinjaTemplateEngine
18
19 export: Any = JinjaTemplateEngine
20 module = "jinja"
21 elif name == "MakoTemplateEngine":
22 from starlite.contrib.mako import MakoTemplateEngine
23
24 export = MakoTemplateEngine
25 module = "mako"
26 else:
27 from starlite.contrib.mako import MakoTemplate
28
29 export = MakoTemplate
30 module = "mako"
31
32 warn_deprecation(
33 deprecated_name=f"{name} from {__package__}",
34 kind="import",
35 alternative=f"'from startlite.contrib.{module} import {name}'",
36 version="1.46.0",
37 )
38
39 globals()[name] = export
40 return export
41
[end of starlite/template/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlite/middleware/session/__init__.py b/starlite/middleware/session/__init__.py
--- a/starlite/middleware/session/__init__.py
+++ b/starlite/middleware/session/__init__.py
@@ -8,7 +8,7 @@
def __getattr__(name: str) -> Any:
"""Provide lazy importing as per https://peps.python.org/pep-0562/"""
- if name != "SessionCookieConfig":
+ if name != "CookieBackendConfig":
raise AttributeError(f"Module {__package__} has no attribute {name}")
from .cookie_backend import CookieBackendConfig
@@ -16,7 +16,7 @@
warn_deprecation(
deprecated_name=f"{name} from {__package__}",
kind="import",
- alternative="'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'",
+ alternative="'from starlite.middleware.session.cookie_backend import CookieBackendConfig'",
version="1.47.0",
)
diff --git a/starlite/template/__init__.py b/starlite/template/__init__.py
--- a/starlite/template/__init__.py
+++ b/starlite/template/__init__.py
@@ -32,7 +32,7 @@
warn_deprecation(
deprecated_name=f"{name} from {__package__}",
kind="import",
- alternative=f"'from startlite.contrib.{module} import {name}'",
+ alternative=f"'from starlite.contrib.{module} import {name}'",
version="1.46.0",
)
| {"golden_diff": "diff --git a/starlite/middleware/session/__init__.py b/starlite/middleware/session/__init__.py\n--- a/starlite/middleware/session/__init__.py\n+++ b/starlite/middleware/session/__init__.py\n@@ -8,7 +8,7 @@\n def __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n \n- if name != \"SessionCookieConfig\":\n+ if name != \"CookieBackendConfig\":\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n \n from .cookie_backend import CookieBackendConfig\n@@ -16,7 +16,7 @@\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n- alternative=\"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\",\n+ alternative=\"'from starlite.middleware.session.cookie_backend import CookieBackendConfig'\",\n version=\"1.47.0\",\n )\n \ndiff --git a/starlite/template/__init__.py b/starlite/template/__init__.py\n--- a/starlite/template/__init__.py\n+++ b/starlite/template/__init__.py\n@@ -32,7 +32,7 @@\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n- alternative=f\"'from startlite.contrib.{module} import {name}'\",\n+ alternative=f\"'from starlite.contrib.{module} import {name}'\",\n version=\"1.46.0\",\n )\n", "issue": "Bug: `SessionCookieConfig` import DeprecationWarning - suggested import path does not exist\n**Describe the bug**\r\nIn Starlite >= 1.47, the statement `from starlite.middleware.session.cookie_backend import CookieBackendConfig` raises a DeprecationWarning:\r\n```\r\nDeprecationWarning: Import of deprecated import 'SessionCookieConfig from starlite.middleware.session'.\r\nDeprecated in starlite 1.47.0. This import will be removed in the next major version.\r\nUse \"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\" instead\r\n```\r\nThe suggested import path does not exist and contains a typo.\r\n\r\n\n", "before_files": [{"content": "from typing import Any\n\nfrom starlite.utils import warn_deprecation\n\nfrom .base import SessionMiddleware\n\n\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n\n if name != \"SessionCookieConfig\":\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n from .cookie_backend import CookieBackendConfig\n\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n alternative=\"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\",\n version=\"1.47.0\",\n )\n\n globals()[name] = CookieBackendConfig\n return CookieBackendConfig\n\n\n__all__ = [\"SessionMiddleware\"]\n", "path": "starlite/middleware/session/__init__.py"}, {"content": "from typing import Any\n\nfrom .base import TemplateEngineProtocol, TemplateProtocol\n\n__all__ = (\"TemplateEngineProtocol\", \"TemplateProtocol\")\n\nfrom ..utils import warn_deprecation\n\n\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n\n if name not in {\"JinjaTemplateEngine\", \"MakoTemplateEngine\", \"MakoTemplate\"}:\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n if name == \"JinjaTemplateEngine\":\n from starlite.contrib.jinja import JinjaTemplateEngine\n\n export: Any = JinjaTemplateEngine\n module = \"jinja\"\n elif name == \"MakoTemplateEngine\":\n from starlite.contrib.mako import MakoTemplateEngine\n\n export = MakoTemplateEngine\n module = \"mako\"\n 
else:\n from starlite.contrib.mako import MakoTemplate\n\n export = MakoTemplate\n module = \"mako\"\n\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n alternative=f\"'from startlite.contrib.{module} import {name}'\",\n version=\"1.46.0\",\n )\n\n globals()[name] = export\n return export\n", "path": "starlite/template/__init__.py"}]} | 1,287 | 343 |
gh_patches_debug_7119 | rasdani/github-patches | git_diff | Netflix__lemur-148 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error messages not displaying long enough
Currently, error messages are displayed for only a short period of time. They should remain visible until the user acknowledges the error.
</issue>
<code>
[start of lemur/common/utils.py]
1 """
2 .. module: lemur.common.utils
3 :platform: Unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6
7 .. moduleauthor:: Kevin Glisson <[email protected]>
8 """
9 import string
10 import random
11 from functools import wraps
12
13 from flask import current_app
14
15 from flask.ext.restful import marshal
16 from flask.ext.restful.reqparse import RequestParser
17 from flask.ext.sqlalchemy import Pagination
18
19
20 def get_psuedo_random_string():
21 """
22 Create a random and strongish challenge.
23 """
24 challenge = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) # noqa
25 challenge += ''.join(random.choice("~!@#$%^&*()_+") for x in range(6)) # noqa
26 challenge += ''.join(random.choice(string.ascii_lowercase) for x in range(6))
27 challenge += ''.join(random.choice(string.digits) for x in range(6)) # noqa
28 return challenge
29
30
31 class marshal_items(object):
32 def __init__(self, fields, envelope=None):
33 self.fields = fields
34 self.envelop = envelope
35
36 def __call__(self, f):
37 def _filter_items(items):
38 filtered_items = []
39 for item in items:
40 filtered_items.append(marshal(item, self.fields))
41 return filtered_items
42
43 @wraps(f)
44 def wrapper(*args, **kwargs):
45 try:
46 resp = f(*args, **kwargs)
47
48 # this is a bit weird way to handle non standard error codes returned from the marshaled function
49 if isinstance(resp, tuple):
50 return resp[0], resp[1]
51
52 if isinstance(resp, Pagination):
53 return {'items': _filter_items(resp.items), 'total': resp.total}
54
55 if isinstance(resp, list):
56 return {'items': _filter_items(resp), 'total': len(resp)}
57
58 return marshal(resp, self.fields)
59 except Exception as e:
60 current_app.logger.exception(e)
61 # this is a little weird hack to respect flask restful parsing errors on marshaled functions
62 if hasattr(e, 'code'):
63 if hasattr(e, 'data'):
64 return {'message': e.data['message']}, 400
65 else:
66 return {'message': 'unknown'}, 400
67 else:
68 return {'message': str(e)}, 400
69 return wrapper
70
71
72 paginated_parser = RequestParser()
73
74 paginated_parser.add_argument('count', type=int, default=10, location='args')
75 paginated_parser.add_argument('page', type=int, default=1, location='args')
76 paginated_parser.add_argument('sortDir', type=str, dest='sort_dir', location='args')
77 paginated_parser.add_argument('sortBy', type=str, dest='sort_by', location='args')
78 paginated_parser.add_argument('filter', type=str, location='args')
79
[end of lemur/common/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lemur/common/utils.py b/lemur/common/utils.py
--- a/lemur/common/utils.py
+++ b/lemur/common/utils.py
@@ -63,9 +63,9 @@
if hasattr(e, 'data'):
return {'message': e.data['message']}, 400
else:
- return {'message': 'unknown'}, 400
+ return {'message': {'exception': 'unknown'}}, 400
else:
- return {'message': str(e)}, 400
+ return {'message': {'exception': str(e)}}, 400
return wrapper
| {"golden_diff": "diff --git a/lemur/common/utils.py b/lemur/common/utils.py\n--- a/lemur/common/utils.py\n+++ b/lemur/common/utils.py\n@@ -63,9 +63,9 @@\n if hasattr(e, 'data'):\n return {'message': e.data['message']}, 400\n else:\n- return {'message': 'unknown'}, 400\n+ return {'message': {'exception': 'unknown'}}, 400\n else:\n- return {'message': str(e)}, 400\n+ return {'message': {'exception': str(e)}}, 400\n return wrapper\n", "issue": "Error messages not displaying long enough\nCurrently error messages are displayed for only a period of time. They should be displayed until the user acknowledges the error. \n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.common.utils\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport string\nimport random\nfrom functools import wraps\n\nfrom flask import current_app\n\nfrom flask.ext.restful import marshal\nfrom flask.ext.restful.reqparse import RequestParser\nfrom flask.ext.sqlalchemy import Pagination\n\n\ndef get_psuedo_random_string():\n \"\"\"\n Create a random and strongish challenge.\n \"\"\"\n challenge = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) # noqa\n challenge += ''.join(random.choice(\"~!@#$%^&*()_+\") for x in range(6)) # noqa\n challenge += ''.join(random.choice(string.ascii_lowercase) for x in range(6))\n challenge += ''.join(random.choice(string.digits) for x in range(6)) # noqa\n return challenge\n\n\nclass marshal_items(object):\n def __init__(self, fields, envelope=None):\n self.fields = fields\n self.envelop = envelope\n\n def __call__(self, f):\n def _filter_items(items):\n filtered_items = []\n for item in items:\n filtered_items.append(marshal(item, self.fields))\n return filtered_items\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n resp = f(*args, **kwargs)\n\n # this is a bit weird way to handle non standard error codes returned from the marshaled function\n if isinstance(resp, tuple):\n return resp[0], resp[1]\n\n if isinstance(resp, Pagination):\n return {'items': _filter_items(resp.items), 'total': resp.total}\n\n if isinstance(resp, list):\n return {'items': _filter_items(resp), 'total': len(resp)}\n\n return marshal(resp, self.fields)\n except Exception as e:\n current_app.logger.exception(e)\n # this is a little weird hack to respect flask restful parsing errors on marshaled functions\n if hasattr(e, 'code'):\n if hasattr(e, 'data'):\n return {'message': e.data['message']}, 400\n else:\n return {'message': 'unknown'}, 400\n else:\n return {'message': str(e)}, 400\n return wrapper\n\n\npaginated_parser = RequestParser()\n\npaginated_parser.add_argument('count', type=int, default=10, location='args')\npaginated_parser.add_argument('page', type=int, default=1, location='args')\npaginated_parser.add_argument('sortDir', type=str, dest='sort_dir', location='args')\npaginated_parser.add_argument('sortBy', type=str, dest='sort_by', location='args')\npaginated_parser.add_argument('filter', type=str, location='args')\n", "path": "lemur/common/utils.py"}]} | 1,354 | 147 |
gh_patches_debug_12176 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to remove image from event in dashboard
If an image has been selected for an event, there is currently no way to remove the image after saving.
A shitty workaround is to inspect the hidden input field and set the value to an empty string.
</issue>
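One plausible fix is to extend the widget markup with an explicit clear control that empties the hidden input, formalizing the workaround above. A hedged sketch against the `WIDGET_STRING` template shown below (the element id and wiring are illustrative):

```python
# Fragment to append to WIDGET_STRING: a button whose click handler would
# reset the hidden input's value to "" and clear the thumbnail preview.
REMOVE_BUTTON = (
    '<a href="#" class="btn btn-danger" id="remove-responsive-image">'
    '<i class="fa fa-times fa-lg"></i> Fjern bilde</a>'
)
```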
<code>
[start of apps/gallery/widgets.py]
1 # -*- coding: utf8 -*-
2 #
3 # Created by 'myth' on 10/14/15
4
5 from django.conf import settings
6 from django.core.urlresolvers import reverse_lazy
7 from django.forms import HiddenInput, TextInput
8 from django.forms.utils import flatatt, force_text, format_html
9
10 from apps.gallery.models import ResponsiveImage
11
12
13 WIDGET_STRING = """<br /><input{} />\r\n
14 <div id="single-image-field-thumbnail">{}</div>
15 <a href="#" class="btn btn-primary" id="add-responsive-image">\r\n
16 <i class="fa fa-plus fa-lg"></i> Velg</a>\r\n
17 <a href="{}" class="btn btn-primary" target="_blank">\r\n
18 <i class="fa fa-image fa-lg"></i> Last opp</a><br>\r\n
19 <div id="image-selection-wrapper">\r\n
20 <h2 id="image-selection-title">Velg bilde</h2>\r\n
21 <div class="row">\r\n
22 <div class="col-md-12">\r\n
23 <div class="input-group">\r\n
24 <input type="text" id="image-gallery-search" class="form-control" placeholder="Skriv inn søkeord...">\r\n
25 <span class="input-group-btn">\r\n
26 <a class="btn btn-primary" id="image-gallery-search-button" type="button">Søk!</a>\r\n
27 </span>\r\n
28 </div>\r\n
29 </div>\r\n
30 </div>\r\n
31 <hr />\r\n
32 <div class="row" id="image-gallery-search-results"></div>\r\n
33 </div>\r\n"""
34
35
36 class SingleImageInput(HiddenInput):
37 """
38 SingleImageField adds wrapper HTML around the hidden input field containing the ResponsiveImage ID
39 """
40
41 def __init__(self, attrs=None):
42 super(SingleImageInput, self).__init__(attrs)
43 self.input_type = 'hidden'
44
45 def render(self, name, value, attrs=None):
46 """
47 Renders this field widget as HTML
48 :param name: Field input name
49 :param value: Field input value
50 :param attrs: Field input attributes
51 :return: An HTML string representing this widget
52 """
53
54 if value is None:
55 value = ''
56
57 img_thumb = 'Det er ikke valgt noe bilde.'
58 final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
59 if value != '':
60 # Only add the value attribute if the value is non-empty
61 final_attrs['value'] = force_text(self._format_value(value))
62 img = ResponsiveImage.objects.get(pk=value)
63 img_thumb = format_html(
64 '<img src="{}" alt title="{}"/>',
65 settings.MEDIA_URL + str(img.thumbnail),
66 str(img.name),
67 encoding='utf-8'
68 )
69
70 upload_url = reverse_lazy('gallery_dashboard:upload')
71
72 return format_html(WIDGET_STRING, flatatt(final_attrs), img_thumb, upload_url)
73
74
75 class TagInputField(TextInput):
76 """
77 Adds some extras to a TextInputField to support space or comma separated tagging
78 """
79
80 def __init__(self, attrs=None):
81 super(TagInputField, self).__init__(attrs=attrs)
82
83 def render(self, name, value, attrs=None):
84 """
85 Renders this field widget as HTML
86 :param name: Field input name
87 :param value: Field input value
88 :param attrs: Field input attributes
89 :return: An HTML string representing this widget
90 """
91
92 return super(TagInputField, self).render(name, value, attrs=attrs)
93
[end of apps/gallery/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/gallery/widgets.py b/apps/gallery/widgets.py
--- a/apps/gallery/widgets.py
+++ b/apps/gallery/widgets.py
@@ -15,7 +15,9 @@
<a href="#" class="btn btn-primary" id="add-responsive-image">\r\n
<i class="fa fa-plus fa-lg"></i> Velg</a>\r\n
<a href="{}" class="btn btn-primary" target="_blank">\r\n
-<i class="fa fa-image fa-lg"></i> Last opp</a><br>\r\n
+<i class="fa fa-image fa-lg"></i> Last opp</a>\r\n
+<a href="#" class="btn btn-danger" id="dashboard-gallery-remove-image">\r\n
+<i class="fa fa-times fa-lg"></i> Fjern bilde</a><br>\r\n
<div id="image-selection-wrapper">\r\n
<h2 id="image-selection-title">Velg bilde</h2>\r\n
<div class="row">\r\n
| {"golden_diff": "diff --git a/apps/gallery/widgets.py b/apps/gallery/widgets.py\n--- a/apps/gallery/widgets.py\n+++ b/apps/gallery/widgets.py\n@@ -15,7 +15,9 @@\n <a href=\"#\" class=\"btn btn-primary\" id=\"add-responsive-image\">\\r\\n\n <i class=\"fa fa-plus fa-lg\"></i> Velg</a>\\r\\n\n <a href=\"{}\" class=\"btn btn-primary\" target=\"_blank\">\\r\\n\n-<i class=\"fa fa-image fa-lg\"></i> Last opp</a><br>\\r\\n\n+<i class=\"fa fa-image fa-lg\"></i> Last opp</a>\\r\\n\n+<a href=\"#\" class=\"btn btn-danger\" id=\"dashboard-gallery-remove-image\">\\r\\n\n+<i class=\"fa fa-times fa-lg\"></i> Fjern bilde</a><br>\\r\\n\n <div id=\"image-selection-wrapper\">\\r\\n\n <h2 id=\"image-selection-title\">Velg bilde</h2>\\r\\n\n <div class=\"row\">\\r\\n\n", "issue": "Unable to remove image from event in dashboard\nIf an image has been selected for an event there is currently no way to remove the image after saving. \r\n\r\nA shitty workaround is to inspect the hidden input field and set the value to an empty string.\n", "before_files": [{"content": "# -*- coding: utf8 -*-\n#\n# Created by 'myth' on 10/14/15\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.forms import HiddenInput, TextInput\nfrom django.forms.utils import flatatt, force_text, format_html\n\nfrom apps.gallery.models import ResponsiveImage\n\n\nWIDGET_STRING = \"\"\"<br /><input{} />\\r\\n\n<div id=\"single-image-field-thumbnail\">{}</div>\n<a href=\"#\" class=\"btn btn-primary\" id=\"add-responsive-image\">\\r\\n\n<i class=\"fa fa-plus fa-lg\"></i> Velg</a>\\r\\n\n<a href=\"{}\" class=\"btn btn-primary\" target=\"_blank\">\\r\\n\n<i class=\"fa fa-image fa-lg\"></i> Last opp</a><br>\\r\\n\n<div id=\"image-selection-wrapper\">\\r\\n\n<h2 id=\"image-selection-title\">Velg bilde</h2>\\r\\n\n<div class=\"row\">\\r\\n\n<div class=\"col-md-12\">\\r\\n\n<div class=\"input-group\">\\r\\n\n<input type=\"text\" id=\"image-gallery-search\" class=\"form-control\" placeholder=\"Skriv inn s\u00f8keord...\">\\r\\n\n<span class=\"input-group-btn\">\\r\\n\n<a class=\"btn btn-primary\" id=\"image-gallery-search-button\" type=\"button\">S\u00f8k!</a>\\r\\n\n</span>\\r\\n\n</div>\\r\\n\n</div>\\r\\n\n</div>\\r\\n\n<hr />\\r\\n\n<div class=\"row\" id=\"image-gallery-search-results\"></div>\\r\\n\n</div>\\r\\n\"\"\"\n\n\nclass SingleImageInput(HiddenInput):\n \"\"\"\n SingleImageField adds wrapper HTML around the hidden input field containing the ResponsiveImage ID\n \"\"\"\n\n def __init__(self, attrs=None):\n super(SingleImageInput, self).__init__(attrs)\n self.input_type = 'hidden'\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Renders this field widget as HTML\n :param name: Field input name\n :param value: Field input value\n :param attrs: Field input attributes\n :return: An HTML string representing this widget\n \"\"\"\n\n if value is None:\n value = ''\n\n img_thumb = 'Det er ikke valgt noe bilde.'\n final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)\n if value != '':\n # Only add the value attribute if the value is non-empty\n final_attrs['value'] = force_text(self._format_value(value))\n img = ResponsiveImage.objects.get(pk=value)\n img_thumb = format_html(\n '<img src=\"{}\" alt title=\"{}\"/>',\n settings.MEDIA_URL + str(img.thumbnail),\n str(img.name),\n encoding='utf-8'\n )\n\n upload_url = reverse_lazy('gallery_dashboard:upload')\n\n return format_html(WIDGET_STRING, flatatt(final_attrs), img_thumb, upload_url)\n\n\nclass TagInputField(TextInput):\n 
\"\"\"\n Adds some extras to a TextInputField to support space or comma separated tagging\n \"\"\"\n\n def __init__(self, attrs=None):\n super(TagInputField, self).__init__(attrs=attrs)\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Renders this field widget as HTML\n :param name: Field input name\n :param value: Field input value\n :param attrs: Field input attributes\n :return: An HTML string representing this widget\n \"\"\"\n\n return super(TagInputField, self).render(name, value, attrs=attrs)\n", "path": "apps/gallery/widgets.py"}]} | 1,557 | 230 |
gh_patches_debug_28339 | rasdani/github-patches | git_diff | mlflow__mlflow-5914 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace `unittest.TestCase.assertRaises` with `unittest.TestCase.assertRaisesRegex`
Some tests use `unittest.TestCase.assertRaises` to test an exception is raised for illegal operations, but they need to be replaces with `unittest.TestCase.assertRaisesRegex`.
### Why do we need this change?
Let's say we have a function that raises an exception:
```python
def throw_exception(...):
if condition_1:
raise TypeError("condition_1")
if condition_2:
raise TypeError("condition_2")
...
```
If we test this function using `assertRaises`:
```python
class MyTest(unittest.TestCase):
def test_throw_exception(self):
# Does `throw_exception` really raise the second TypeError?
# It might throw the first TypeError, then the test will pass.
with self.assertRaises(TypeError):
throw_exception(...) # should raise TypeError("condition_2")
```
If we test this function using `assertRaisesRegex`:
```python
class MyTest(unittest.TestCase):
def test_throw_exception(self):
# This test fails when `throw_exception` raises the first TypeError.
with self. assertRaisesRegex(TypeError, "condition_b"):
throw_exception(...) # should raise TypeError("condition_2")
```
### Example
https://github.com/mlflow/mlflow/blob/fe6618823a2e6038149ee0da675503d2764552ca/tests/store/tracking/test_sqlalchemy_store.py#L107
The code above needs to be fixed to the following:
```python
# "<string that matches the error message>" must be replaced
with self.assertRaisesRegex(MlflowException, "<string that matches the error message>") as e:
```
### References
- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaises
- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegex
### Instructions
https://github.com/mlflow/mlflow/blob/101ad6e8eb383c769178df0df83d1d2a1cea6b4a/pylint_plugins/assert_raises_without_msg.py#L20-L33
Ping me with the file you want to work on :)
| File | Assignee | PR | Done |
| :---------------------------------------------------- | :---------- | :---- | :--- |
| `tests/entities/test_run_status.py` | @Sumanth077 | | |
| `tests/store/model_registry/test_sqlalchemy_store.py` | @ognis1205 | #5875 | ✅ |
| `tests/store/db/test_utils.py` | @erich-db | | |
| `tests/store/tracking/__init__.py` | @Sumanth077 | | |
| `tests/store/tracking/test_file_store.py` | @andy1122 | | |
| `tests/store/tracking/test_sqlalchemy_store.py` | @ognis1205 | #5875 | ✅ |
</issue>
<code>
[start of pylint_plugins/unittest_assert_raises.py]
1 import os
2
3 import astroid
4 from pylint.interfaces import IAstroidChecker
5 from pylint.checkers import BaseChecker
6
7
8 def _is_unittest_assert_raises(node: astroid.Call):
9 return isinstance(node.func, astroid.Attribute) and node.func.as_string() == "self.assertRaises"
10
11
12 IGNORE_FILES = list(
13 map(
14 os.path.abspath,
15 [
16 # Instructions
17 # ============
18 # 1. Select a file in the list below and remove it.
19 # 2. Run pylint and confirm it fails.
20 # 3. Fix the lines printed out in the previous step.
21 # 4. Run pylint again and confirm it succeeds now.
22 # 5. Run pytest and confirm the changed lines don't fail.
23 # 6. Open a PR.
24 "tests/entities/test_run_status.py",
25 "tests/store/db/test_utils.py",
26 "tests/store/tracking/__init__.py",
27 "tests/store/tracking/test_file_store.py",
28 ],
29 )
30 )
31
32
33 def _should_ignore(path: str):
34 return path in IGNORE_FILES
35
36
37 class UnittestAssertRaises(BaseChecker):
38 __implements__ = IAstroidChecker
39
40 name = "unittest-assert-raises"
41 msgs = {
42 "W0003": (
43 "`assertRaises` must be replaced with `assertRaisesRegex`",
44 name,
45 "Use `assertRaisesRegex` instead",
46 ),
47 }
48 priority = -1
49
50 def visit_call(self, node: astroid.Call):
51 if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):
52 self.add_message(self.name, node=node)
53
[end of pylint_plugins/unittest_assert_raises.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py
--- a/pylint_plugins/unittest_assert_raises.py
+++ b/pylint_plugins/unittest_assert_raises.py
@@ -1,5 +1,3 @@
-import os
-
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
@@ -9,31 +7,6 @@
return isinstance(node.func, astroid.Attribute) and node.func.as_string() == "self.assertRaises"
-IGNORE_FILES = list(
- map(
- os.path.abspath,
- [
- # Instructions
- # ============
- # 1. Select a file in the list below and remove it.
- # 2. Run pylint and confirm it fails.
- # 3. Fix the lines printed out in the previous step.
- # 4. Run pylint again and confirm it succeeds now.
- # 5. Run pytest and confirm the changed lines don't fail.
- # 6. Open a PR.
- "tests/entities/test_run_status.py",
- "tests/store/db/test_utils.py",
- "tests/store/tracking/__init__.py",
- "tests/store/tracking/test_file_store.py",
- ],
- )
-)
-
-
-def _should_ignore(path: str):
- return path in IGNORE_FILES
-
-
class UnittestAssertRaises(BaseChecker):
__implements__ = IAstroidChecker
@@ -48,5 +21,5 @@
priority = -1
def visit_call(self, node: astroid.Call):
- if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):
+ if _is_unittest_assert_raises(node):
self.add_message(self.name, node=node)
| {"golden_diff": "diff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py\n--- a/pylint_plugins/unittest_assert_raises.py\n+++ b/pylint_plugins/unittest_assert_raises.py\n@@ -1,5 +1,3 @@\n-import os\n-\n import astroid\n from pylint.interfaces import IAstroidChecker\n from pylint.checkers import BaseChecker\n@@ -9,31 +7,6 @@\n return isinstance(node.func, astroid.Attribute) and node.func.as_string() == \"self.assertRaises\"\n \n \n-IGNORE_FILES = list(\n- map(\n- os.path.abspath,\n- [\n- # Instructions\n- # ============\n- # 1. Select a file in the list below and remove it.\n- # 2. Run pylint and confirm it fails.\n- # 3. Fix the lines printed out in the previous step.\n- # 4. Run pylint again and confirm it succeeds now.\n- # 5. Run pytest and confirm the changed lines don't fail.\n- # 6. Open a PR.\n- \"tests/entities/test_run_status.py\",\n- \"tests/store/db/test_utils.py\",\n- \"tests/store/tracking/__init__.py\",\n- \"tests/store/tracking/test_file_store.py\",\n- ],\n- )\n-)\n-\n-\n-def _should_ignore(path: str):\n- return path in IGNORE_FILES\n-\n-\n class UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n \n@@ -48,5 +21,5 @@\n priority = -1\n \n def visit_call(self, node: astroid.Call):\n- if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):\n+ if _is_unittest_assert_raises(node):\n self.add_message(self.name, node=node)\n", "issue": "Replace `unittest.TestCase.assertRaises` with `unittest.TestCase.assertRaisesRegex`\nSome tests use `unittest.TestCase.assertRaises` to test an exception is raised for illegal operations, but they need to be replaces with `unittest.TestCase.assertRaisesRegex`.\r\n\r\n### Why do we need this change?\r\n\r\nLet's say we have a function that raises an exception:\r\n\r\n```python\r\ndef throw_exception(...):\r\n if condition_1:\r\n raise TypeError(\"condition_1\")\r\n if condition_2:\r\n raise TypeError(\"condition_2\")\r\n ...\r\n```\r\n\r\nIf we test this function using `assertRaises`:\r\n\r\n```python\r\nclass MyTest(unittest.TestCase):\r\n def test_throw_exception(self):\r\n # Does `throw_exception` really raise the second TypeError?\r\n # It might throw the first TypeError, then the test will pass.\r\n with self.assertRaises(TypeError):\r\n throw_exception(...) # should raise TypeError(\"condition_2\")\r\n```\r\n\r\nIf we test this function using `assertRaisesRegex`:\r\n\r\n```python\r\nclass MyTest(unittest.TestCase):\r\n def test_throw_exception(self):\r\n # This test fails when `throw_exception` raises the first TypeError.\r\n with self. assertRaisesRegex(TypeError, \"condition_b\"):\r\n throw_exception(...) 
# should raise TypeError(\"condition_2\")\r\n```\r\n\r\n### Example\r\n\r\nhttps://github.com/mlflow/mlflow/blob/fe6618823a2e6038149ee0da675503d2764552ca/tests/store/tracking/test_sqlalchemy_store.py#L107\r\n\r\nThe code above needs to be fixed to the following:\r\n\r\n```python\r\n # \"<string that matches the error message>\" must be replaced\r\n with self.assertRaisesRegex(MlflowException, \"<string that matches the error message>\") as e:\r\n```\r\n\r\n### References\r\n\r\n- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaises\r\n- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegex\r\n\r\n### Instructions\r\n\r\nhttps://github.com/mlflow/mlflow/blob/101ad6e8eb383c769178df0df83d1d2a1cea6b4a/pylint_plugins/assert_raises_without_msg.py#L20-L33\r\n\r\nPing me with the file you want to work on :)\r\n\r\n| File | Assignee | PR | Done |\r\n| :---------------------------------------------------- | :---------- | :---- | :--- |\r\n| `tests/entities/test_run_status.py` | @Sumanth077 | | |\r\n| `tests/store/model_registry/test_sqlalchemy_store.py` | @ognis1205 | #5875 | \u2705 |\r\n| `tests/store/db/test_utils.py` | @erich-db | | |\r\n| `tests/store/tracking/__init__.py` | @Sumanth077 | | |\r\n| `tests/store/tracking/test_file_store.py` | @andy1122 | | |\r\n| `tests/store/tracking/test_sqlalchemy_store.py` | @ognis1205 | #5875 | \u2705 |\r\n\n", "before_files": [{"content": "import os\n\nimport astroid\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\n\n\ndef _is_unittest_assert_raises(node: astroid.Call):\n return isinstance(node.func, astroid.Attribute) and node.func.as_string() == \"self.assertRaises\"\n\n\nIGNORE_FILES = list(\n map(\n os.path.abspath,\n [\n # Instructions\n # ============\n # 1. Select a file in the list below and remove it.\n # 2. Run pylint and confirm it fails.\n # 3. Fix the lines printed out in the previous step.\n # 4. Run pylint again and confirm it succeeds now.\n # 5. Run pytest and confirm the changed lines don't fail.\n # 6. Open a PR.\n \"tests/entities/test_run_status.py\",\n \"tests/store/db/test_utils.py\",\n \"tests/store/tracking/__init__.py\",\n \"tests/store/tracking/test_file_store.py\",\n ],\n )\n)\n\n\ndef _should_ignore(path: str):\n return path in IGNORE_FILES\n\n\nclass UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"unittest-assert-raises\"\n msgs = {\n \"W0003\": (\n \"`assertRaises` must be replaced with `assertRaisesRegex`\",\n name,\n \"Use `assertRaisesRegex` instead\",\n ),\n }\n priority = -1\n\n def visit_call(self, node: astroid.Call):\n if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):\n self.add_message(self.name, node=node)\n", "path": "pylint_plugins/unittest_assert_raises.py"}]} | 1,661 | 392 |
gh_patches_debug_40242 | rasdani/github-patches | git_diff | SCons__scons-3862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add timestamp override to Zip builder
Zip should have an option to override the timestamp on files in the archive instead of taking it from the file system. This can be useful for repeatable builds or for anonymizing the archive.
</issue>
<code>
[start of SCons/Tool/zip.py]
1 """SCons.Tool.zip
2
3 Tool-specific initialization for zip.
4
5 There normally shouldn't be any need to import this module directly.
6 It will usually be imported through the generic SCons.Tool.Tool()
7 selection method.
8
9 """
10
11 #
12 # __COPYRIGHT__
13 #
14 # Permission is hereby granted, free of charge, to any person obtaining
15 # a copy of this software and associated documentation files (the
16 # "Software"), to deal in the Software without restriction, including
17 # without limitation the rights to use, copy, modify, merge, publish,
18 # distribute, sublicense, and/or sell copies of the Software, and to
19 # permit persons to whom the Software is furnished to do so, subject to
20 # the following conditions:
21 #
22 # The above copyright notice and this permission notice shall be included
23 # in all copies or substantial portions of the Software.
24 #
25 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
26 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
27 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
29 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
30 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
31 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32 #
33
34 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
35
36 import os.path
37
38 import SCons.Builder
39 import SCons.Defaults
40 import SCons.Node.FS
41 import SCons.Util
42
43 import zipfile
44
45 zip_compression = zipfile.ZIP_DEFLATED
46
47
48 def zip(target, source, env):
49 compression = env.get('ZIPCOMPRESSION', 0)
50 zf = zipfile.ZipFile(str(target[0]), 'w', compression)
51 for s in source:
52 if s.isdir():
53 for dirpath, dirnames, filenames in os.walk(str(s)):
54 for fname in filenames:
55 path = os.path.join(dirpath, fname)
56 if os.path.isfile(path):
57 zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
58 else:
59 zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
60 zf.close()
61
62 # Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating
63 # env, then it will ignore ZIPCOMSTR set afterwards.
64 zipAction = SCons.Action.Action(zip, "$ZIPCOMSTR", varlist=['ZIPCOMPRESSION'])
65
66 ZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
67 source_factory=SCons.Node.FS.Entry,
68 source_scanner=SCons.Defaults.DirScanner,
69 suffix='$ZIPSUFFIX',
70 multi=1)
71
72
73 def generate(env):
74 """Add Builders and construction variables for zip to an Environment."""
75 try:
76 bld = env['BUILDERS']['Zip']
77 except KeyError:
78 bld = ZipBuilder
79 env['BUILDERS']['Zip'] = bld
80
81 env['ZIP'] = 'zip'
82 env['ZIPFLAGS'] = SCons.Util.CLVar('')
83 env['ZIPCOM'] = zipAction
84 env['ZIPCOMPRESSION'] = zip_compression
85 env['ZIPSUFFIX'] = '.zip'
86 env['ZIPROOT'] = SCons.Util.CLVar('')
87
88
89 def exists(env):
90 return True
91
92 # Local Variables:
93 # tab-width:4
94 # indent-tabs-mode:nil
95 # End:
96 # vim: set expandtab tabstop=4 shiftwidth=4:
97
[end of SCons/Tool/zip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/SCons/Tool/zip.py b/SCons/Tool/zip.py
--- a/SCons/Tool/zip.py
+++ b/SCons/Tool/zip.py
@@ -8,8 +8,9 @@
"""
+# MIT License
#
-# __COPYRIGHT__
+# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
@@ -29,39 +30,62 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import os.path
+import os
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
+import time
import zipfile
+
zip_compression = zipfile.ZIP_DEFLATED
-def zip(target, source, env):
- compression = env.get('ZIPCOMPRESSION', 0)
- zf = zipfile.ZipFile(str(target[0]), 'w', compression)
+def _create_zipinfo_for_file(fname, arcname, date_time, compression):
+ st = os.stat(fname)
+ if not date_time:
+ mtime = time.localtime(st.st_mtime)
+ date_time = mtime[0:6]
+ zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time)
+ zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
+ zinfo.compress_type = compression
+ zinfo.file_size = st.st_size
+ return zinfo
+
+
+def zip_builder(target, source, env):
+ compression = env.get('ZIPCOMPRESSION', zipfile.ZIP_STORED)
+ zip_root = str(env.get('ZIPROOT', ''))
+ date_time = env.get('ZIP_OVERRIDE_TIMESTAMP')
+
+ files = []
for s in source:
if s.isdir():
for dirpath, dirnames, filenames in os.walk(str(s)):
for fname in filenames:
path = os.path.join(dirpath, fname)
if os.path.isfile(path):
- zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
+ files.append(path)
else:
- zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
- zf.close()
+ files.append(str(s))
+
+ with zipfile.ZipFile(str(target[0]), 'w', compression) as zf:
+ for fname in files:
+ arcname = os.path.relpath(fname, zip_root)
+ # TODO: Switch to ZipInfo.from_file when 3.6 becomes the base python version
+ zinfo = _create_zipinfo_for_file(fname, arcname, date_time, compression)
+ with open(fname, "rb") as f:
+ zf.writestr(zinfo, f.read())
+
# Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating
# env, then it will ignore ZIPCOMSTR set afterwards.
-zipAction = SCons.Action.Action(zip, "$ZIPCOMSTR", varlist=['ZIPCOMPRESSION'])
+zipAction = SCons.Action.Action(zip_builder, "$ZIPCOMSTR",
+ varlist=['ZIPCOMPRESSION', 'ZIPROOT', 'ZIP_OVERRIDE_TIMESTAMP'])
ZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory=SCons.Node.FS.Entry,
| {"golden_diff": "diff --git a/SCons/Tool/zip.py b/SCons/Tool/zip.py\n--- a/SCons/Tool/zip.py\n+++ b/SCons/Tool/zip.py\n@@ -8,8 +8,9 @@\n \n \"\"\"\n \n+# MIT License\n #\n-# __COPYRIGHT__\n+# Copyright The SCons Foundation\n #\n # Permission is hereby granted, free of charge, to any person obtaining\n # a copy of this software and associated documentation files (the\n@@ -29,39 +30,62 @@\n # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-#\n-\n-__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n \n-import os.path\n+import os\n \n import SCons.Builder\n import SCons.Defaults\n import SCons.Node.FS\n import SCons.Util\n \n+import time\n import zipfile\n \n+\n zip_compression = zipfile.ZIP_DEFLATED\n \n \n-def zip(target, source, env):\n- compression = env.get('ZIPCOMPRESSION', 0)\n- zf = zipfile.ZipFile(str(target[0]), 'w', compression)\n+def _create_zipinfo_for_file(fname, arcname, date_time, compression):\n+ st = os.stat(fname)\n+ if not date_time:\n+ mtime = time.localtime(st.st_mtime)\n+ date_time = mtime[0:6]\n+ zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time)\n+ zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes\n+ zinfo.compress_type = compression\n+ zinfo.file_size = st.st_size\n+ return zinfo\n+\n+\n+def zip_builder(target, source, env):\n+ compression = env.get('ZIPCOMPRESSION', zipfile.ZIP_STORED)\n+ zip_root = str(env.get('ZIPROOT', ''))\n+ date_time = env.get('ZIP_OVERRIDE_TIMESTAMP')\n+\n+ files = []\n for s in source:\n if s.isdir():\n for dirpath, dirnames, filenames in os.walk(str(s)):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n if os.path.isfile(path):\n- zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))\n+ files.append(path)\n else:\n- zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))\n- zf.close()\n+ files.append(str(s))\n+\n+ with zipfile.ZipFile(str(target[0]), 'w', compression) as zf:\n+ for fname in files:\n+ arcname = os.path.relpath(fname, zip_root)\n+ # TODO: Switch to ZipInfo.from_file when 3.6 becomes the base python version\n+ zinfo = _create_zipinfo_for_file(fname, arcname, date_time, compression)\n+ with open(fname, \"rb\") as f:\n+ zf.writestr(zinfo, f.read())\n+\n \n # Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating\n # env, then it will ignore ZIPCOMSTR set afterwards.\n-zipAction = SCons.Action.Action(zip, \"$ZIPCOMSTR\", varlist=['ZIPCOMPRESSION'])\n+zipAction = SCons.Action.Action(zip_builder, \"$ZIPCOMSTR\",\n+ varlist=['ZIPCOMPRESSION', 'ZIPROOT', 'ZIP_OVERRIDE_TIMESTAMP'])\n \n ZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),\n source_factory=SCons.Node.FS.Entry,\n", "issue": "Add timestamp override to Zip builder\nZip should have an option to override the timestamp on files in the archive instead of taking it from the file system. 
This can be useful for repeatable builds or for anonymizing the archive.\n", "before_files": [{"content": "\"\"\"SCons.Tool.zip\n\nTool-specific initialization for zip.\n\nThere normally shouldn't be any need to import this module directly.\nIt will usually be imported through the generic SCons.Tool.Tool()\nselection method.\n\n\"\"\"\n\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport os.path\n\nimport SCons.Builder\nimport SCons.Defaults\nimport SCons.Node.FS\nimport SCons.Util\n\nimport zipfile\n\nzip_compression = zipfile.ZIP_DEFLATED\n\n\ndef zip(target, source, env):\n compression = env.get('ZIPCOMPRESSION', 0)\n zf = zipfile.ZipFile(str(target[0]), 'w', compression)\n for s in source:\n if s.isdir():\n for dirpath, dirnames, filenames in os.walk(str(s)):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n if os.path.isfile(path):\n zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))\n else:\n zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))\n zf.close()\n\n# Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating\n# env, then it will ignore ZIPCOMSTR set afterwards.\nzipAction = SCons.Action.Action(zip, \"$ZIPCOMSTR\", varlist=['ZIPCOMPRESSION'])\n\nZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),\n source_factory=SCons.Node.FS.Entry,\n source_scanner=SCons.Defaults.DirScanner,\n suffix='$ZIPSUFFIX',\n multi=1)\n\n\ndef generate(env):\n \"\"\"Add Builders and construction variables for zip to an Environment.\"\"\"\n try:\n bld = env['BUILDERS']['Zip']\n except KeyError:\n bld = ZipBuilder\n env['BUILDERS']['Zip'] = bld\n\n env['ZIP'] = 'zip'\n env['ZIPFLAGS'] = SCons.Util.CLVar('')\n env['ZIPCOM'] = zipAction\n env['ZIPCOMPRESSION'] = zip_compression\n env['ZIPSUFFIX'] = '.zip'\n env['ZIPROOT'] = SCons.Util.CLVar('')\n\n\ndef exists(env):\n return True\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "SCons/Tool/zip.py"}]} | 1,533 | 811 |
gh_patches_debug_23861 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1669 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IL: Capture "Subject Matter" for `event`s
IL events have a Subject Matter field on the legislature's website, which we should capture, probably as the `description` value for that `event` object.
For example, look at one of the hearings linked from [here](http://www.ilga.gov/senate/committees/hearing.asp?CommitteeID=1927), like this:
<img width="864" alt="screen shot 2017-04-23 at 00 19 37" src="https://cloud.githubusercontent.com/assets/4959135/25310740/bb6f24b8-27ba-11e7-96e3-9b102819581c.png">
cc @jonrogoff, @justgosh
</issue>
<code>
[start of openstates/il/events.py]
1 import datetime as dt
2 import re
3
4 from openstates.utils import LXMLMixin
5 from billy.scrape.events import Event, EventScraper
6
7 import lxml.html
8 import pytz
9
10 urls = {
11 "upper": "http://www.ilga.gov/senate/schedules/weeklyhearings.asp",
12 "lower": "http://www.ilga.gov/house/schedules/weeklyhearings.asp"
13 }
14
15
16 class ILEventScraper(EventScraper, LXMLMixin):
17 jurisdiction = 'il'
18 _tz = pytz.timezone('US/Eastern')
19
20 def scrape_page(self, url, session, chamber):
21 page = self.lxmlize(url)
22
23 ctty_name = page.xpath("//span[@class='heading']")[0].text_content()
24
25 tables = page.xpath("//table[@cellpadding='3']")
26 info = tables[0]
27 rows = info.xpath(".//tr")
28 metainf = {}
29 for row in rows:
30 tds = row.xpath(".//td")
31 key = tds[0].text_content().strip()
32 value = tds[1].text_content().strip()
33 metainf[key] = value
34
35 where = metainf['Location:']
36 description = ctty_name
37
38 datetime = metainf['Scheduled Date:']
39 datetime = re.sub("\s+", " ", datetime)
40 repl = {
41 "AM": " AM",
42 "PM": " PM" # Space shim.
43 }
44 for r in repl:
45 datetime = datetime.replace(r, repl[r])
46 datetime = dt.datetime.strptime(datetime, "%b %d, %Y %I:%M %p")
47
48 event = Event(session, datetime, 'committee:meeting',
49 description, location=where)
50 event.add_source(url)
51
52 if ctty_name.startswith('Hearing Notice For'):
53 ctty_name.replace('Hearing Notice For', '')
54 event.add_participant('host', ctty_name, 'committee', chamber=chamber)
55
56 bills = tables[1]
57 for bill in bills.xpath(".//tr")[1:]:
58 tds = bill.xpath(".//td")
59 if len(tds) < 4:
60 continue
61 # First, let's get the bill ID:
62 bill_id = tds[0].text_content()
63 event.add_related_bill(bill_id,
64 description=description,
65 type='consideration')
66
67 self.save_event(event)
68
69 def scrape(self, chamber, session):
70 try:
71 url = urls[chamber]
72 except KeyError:
73 return # Not for us.
74 page = self.lxmlize(url)
75 tables = page.xpath("//table[@width='550']")
76 for table in tables:
77 meetings = table.xpath(".//a")
78 for meeting in meetings:
79 self.scrape_page(meeting.attrib['href'],
80 session, chamber)
81
[end of openstates/il/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/il/events.py b/openstates/il/events.py
--- a/openstates/il/events.py
+++ b/openstates/il/events.py
@@ -4,7 +4,6 @@
from openstates.utils import LXMLMixin
from billy.scrape.events import Event, EventScraper
-import lxml.html
import pytz
urls = {
@@ -20,8 +19,8 @@
def scrape_page(self, url, session, chamber):
page = self.lxmlize(url)
- ctty_name = page.xpath("//span[@class='heading']")[0].text_content()
-
+ ctty_name = page.xpath("//span[@class='heading']")[0].text_content().replace(
+ "Hearing Notice For ", "")
tables = page.xpath("//table[@cellpadding='3']")
info = tables[0]
rows = info.xpath(".//tr")
@@ -33,7 +32,8 @@
metainf[key] = value
where = metainf['Location:']
- description = ctty_name
+ subject_matter = metainf['Subject Matter:']
+ description = "{}, {}".format(ctty_name, subject_matter)
datetime = metainf['Scheduled Date:']
datetime = re.sub("\s+", " ", datetime)
| {"golden_diff": "diff --git a/openstates/il/events.py b/openstates/il/events.py\n--- a/openstates/il/events.py\n+++ b/openstates/il/events.py\n@@ -4,7 +4,6 @@\n from openstates.utils import LXMLMixin\n from billy.scrape.events import Event, EventScraper\n \n-import lxml.html\n import pytz\n \n urls = {\n@@ -20,8 +19,8 @@\n def scrape_page(self, url, session, chamber):\n page = self.lxmlize(url)\n \n- ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content()\n-\n+ ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content().replace(\n+ \"Hearing Notice For \", \"\")\n tables = page.xpath(\"//table[@cellpadding='3']\")\n info = tables[0]\n rows = info.xpath(\".//tr\")\n@@ -33,7 +32,8 @@\n metainf[key] = value\n \n where = metainf['Location:']\n- description = ctty_name\n+ subject_matter = metainf['Subject Matter:']\n+ description = \"{}, {}\".format(ctty_name, subject_matter)\n \n datetime = metainf['Scheduled Date:']\n datetime = re.sub(\"\\s+\", \" \", datetime)\n", "issue": "IL: Capture \"Subject Matter\" for `event`s\nIL events have a Subject Matter field on the legislature's website, which we should capture, probably as the `description` value for that `event` object.\r\n\r\nFor example, look at one of the hearings linked from [here](http://www.ilga.gov/senate/committees/hearing.asp?CommitteeID=1927), like this:\r\n\r\n<img width=\"864\" alt=\"screen shot 2017-04-23 at 00 19 37\" src=\"https://cloud.githubusercontent.com/assets/4959135/25310740/bb6f24b8-27ba-11e7-96e3-9b102819581c.png\">\r\n\r\ncc @jonrogoff, @justgosh\n", "before_files": [{"content": "import datetime as dt\nimport re\n\nfrom openstates.utils import LXMLMixin\nfrom billy.scrape.events import Event, EventScraper\n\nimport lxml.html\nimport pytz\n\nurls = {\n \"upper\": \"http://www.ilga.gov/senate/schedules/weeklyhearings.asp\",\n \"lower\": \"http://www.ilga.gov/house/schedules/weeklyhearings.asp\"\n}\n\n\nclass ILEventScraper(EventScraper, LXMLMixin):\n jurisdiction = 'il'\n _tz = pytz.timezone('US/Eastern')\n\n def scrape_page(self, url, session, chamber):\n page = self.lxmlize(url)\n\n ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content()\n\n tables = page.xpath(\"//table[@cellpadding='3']\")\n info = tables[0]\n rows = info.xpath(\".//tr\")\n metainf = {}\n for row in rows:\n tds = row.xpath(\".//td\")\n key = tds[0].text_content().strip()\n value = tds[1].text_content().strip()\n metainf[key] = value\n\n where = metainf['Location:']\n description = ctty_name\n\n datetime = metainf['Scheduled Date:']\n datetime = re.sub(\"\\s+\", \" \", datetime)\n repl = {\n \"AM\": \" AM\",\n \"PM\": \" PM\" # Space shim.\n }\n for r in repl:\n datetime = datetime.replace(r, repl[r])\n datetime = dt.datetime.strptime(datetime, \"%b %d, %Y %I:%M %p\")\n\n event = Event(session, datetime, 'committee:meeting',\n description, location=where)\n event.add_source(url)\n\n if ctty_name.startswith('Hearing Notice For'):\n ctty_name.replace('Hearing Notice For', '')\n event.add_participant('host', ctty_name, 'committee', chamber=chamber)\n\n bills = tables[1]\n for bill in bills.xpath(\".//tr\")[1:]:\n tds = bill.xpath(\".//td\")\n if len(tds) < 4:\n continue\n # First, let's get the bill ID:\n bill_id = tds[0].text_content()\n event.add_related_bill(bill_id,\n description=description,\n type='consideration')\n\n self.save_event(event)\n\n def scrape(self, chamber, session):\n try:\n url = urls[chamber]\n except KeyError:\n return # Not for us.\n page = self.lxmlize(url)\n tables = 
page.xpath(\"//table[@width='550']\")\n for table in tables:\n meetings = table.xpath(\".//a\")\n for meeting in meetings:\n self.scrape_page(meeting.attrib['href'],\n session, chamber)\n", "path": "openstates/il/events.py"}]} | 1,494 | 286 |
gh_patches_debug_16602 | rasdani/github-patches | git_diff | svthalia__concrexit-2500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Partner page partner blocks keep showing the loading animation
### Describe the bug
Partner page partner blocks keep showing the loading animation.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to [the Thalia partner page](https://thalia.nu/career/).
2. Scroll down and see the loading animation on the partner blocks.
### Expected behaviour
The loading animation should stop when the partners have been loaded.
</issue>
<code>
[start of website/thaliawebsite/templatetags/grid_item.py]
1 from django import template
2
3 register = template.Library()
4
5
6 @register.inclusion_tag("includes/grid_item.html")
7 def grid_item(
8 title=None,
9 meta_text="",
10 url=None,
11 image_url=None,
12 ribbon=None,
13 class_name="",
14 anchor_attrs="",
15 ):
16 return {
17 "title": title,
18 "url": url,
19 "image_url": image_url,
20 "meta_text": meta_text,
21 "ribbon": ribbon,
22 "class_name": class_name,
23 "anchor_attrs": anchor_attrs,
24 }
25
[end of website/thaliawebsite/templatetags/grid_item.py]
[start of website/partners/templatetags/partner_cards.py]
1 from django import template
2 from django.conf import settings
3 from django.template.defaultfilters import striptags, truncatechars
4
5 from thaliawebsite.templatetags.bleach_tags import bleach
6 from thaliawebsite.templatetags.grid_item import grid_item
7 from utils.media.services import get_thumbnail_url
8 from partners.models import Vacancy
9
10 register = template.Library()
11
12
13 @register.inclusion_tag("includes/grid_item.html")
14 def partner_card(partner):
15 """Return grid item showing partner."""
16 image_url = ""
17 if partner.logo:
18 image_url = get_thumbnail_url(
19 partner.logo, settings.THUMBNAIL_SIZES["medium"], fit=False
20 )
21
22 meta_text = truncatechars(bleach(striptags(partner.company_profile)), 80)
23
24 return grid_item(
25 title=partner.name,
26 meta_text='<p class="px-2 d-none d-md-block">{}</p>'.format(meta_text),
27 url=partner.get_absolute_url,
28 image_url=image_url,
29 class_name="partner-card contain-logo",
30 )
31
32
33 @register.inclusion_tag("includes/grid_item.html")
34 def partner_image_card(image):
35 """Return grid item showing partner image."""
36 class_name = "partner-image-card"
37 image_url = get_thumbnail_url(image, settings.THUMBNAIL_SIZES["medium"])
38
39 return grid_item(
40 title="",
41 url=get_thumbnail_url(image, settings.THUMBNAIL_SIZES["large"], fit=False),
42 image_url=image_url,
43 class_name=class_name,
44 anchor_attrs='data-fancybox="gallery"',
45 )
46
47
48 @register.inclusion_tag("partners/vacancy_card.html")
49 def vacancy_card(vacancy):
50 """Return grid item showing vacancy."""
51 image_url = None
52 if vacancy.get_company_logo():
53 image_url = get_thumbnail_url(
54 vacancy.get_company_logo(), settings.THUMBNAIL_SIZES["medium"], fit=False
55 )
56
57 description = truncatechars(bleach(striptags(vacancy.description)), 300)
58 extra_class = "external-vacancy"
59 url = "#vacancy-{}".format(vacancy.id)
60 keywords = vacancy.keywords.split(",")
61 location = vacancy.location
62 if vacancy.partner and vacancy.partner.is_active:
63 url = "{}#vacancy-{}".format(vacancy.partner.get_absolute_url(), vacancy.id)
64 extra_class = ""
65
66 return {
67 "title": vacancy.title,
68 "company_name": vacancy.get_company_name(),
69 "image_url": image_url,
70 "description": description,
71 "location": location,
72 "keywords": keywords,
73 "url": url,
74 "extra_class": extra_class,
75 }
76
[end of website/partners/templatetags/partner_cards.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/partners/templatetags/partner_cards.py b/website/partners/templatetags/partner_cards.py
--- a/website/partners/templatetags/partner_cards.py
+++ b/website/partners/templatetags/partner_cards.py
@@ -27,6 +27,7 @@
url=partner.get_absolute_url,
image_url=image_url,
class_name="partner-card contain-logo",
+ show_loading_animation=False,
)
diff --git a/website/thaliawebsite/templatetags/grid_item.py b/website/thaliawebsite/templatetags/grid_item.py
--- a/website/thaliawebsite/templatetags/grid_item.py
+++ b/website/thaliawebsite/templatetags/grid_item.py
@@ -12,6 +12,7 @@
ribbon=None,
class_name="",
anchor_attrs="",
+ show_loading_animation=True,
):
return {
"title": title,
@@ -21,4 +22,5 @@
"ribbon": ribbon,
"class_name": class_name,
"anchor_attrs": anchor_attrs,
+ "show_loading_animation": show_loading_animation,
}
| {"golden_diff": "diff --git a/website/partners/templatetags/partner_cards.py b/website/partners/templatetags/partner_cards.py\n--- a/website/partners/templatetags/partner_cards.py\n+++ b/website/partners/templatetags/partner_cards.py\n@@ -27,6 +27,7 @@\n url=partner.get_absolute_url,\n image_url=image_url,\n class_name=\"partner-card contain-logo\",\n+ show_loading_animation=False,\n )\n \n \ndiff --git a/website/thaliawebsite/templatetags/grid_item.py b/website/thaliawebsite/templatetags/grid_item.py\n--- a/website/thaliawebsite/templatetags/grid_item.py\n+++ b/website/thaliawebsite/templatetags/grid_item.py\n@@ -12,6 +12,7 @@\n ribbon=None,\n class_name=\"\",\n anchor_attrs=\"\",\n+ show_loading_animation=True,\n ):\n return {\n \"title\": title,\n@@ -21,4 +22,5 @@\n \"ribbon\": ribbon,\n \"class_name\": class_name,\n \"anchor_attrs\": anchor_attrs,\n+ \"show_loading_animation\": show_loading_animation,\n }\n", "issue": "Partner page partner blocks keep showing the loading animation\n### Describe the bug\r\nPartner page partner blocks keep showing the loading animation.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to [the Thalia partner page](https://thalia.nu/career/).\r\n2. Scroll down and see the loading animation on the partner blocks.\r\n\r\n### Expected behaviour\r\nThe loading animation should stop when the partners have been loaded.\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef grid_item(\n title=None,\n meta_text=\"\",\n url=None,\n image_url=None,\n ribbon=None,\n class_name=\"\",\n anchor_attrs=\"\",\n):\n return {\n \"title\": title,\n \"url\": url,\n \"image_url\": image_url,\n \"meta_text\": meta_text,\n \"ribbon\": ribbon,\n \"class_name\": class_name,\n \"anchor_attrs\": anchor_attrs,\n }\n", "path": "website/thaliawebsite/templatetags/grid_item.py"}, {"content": "from django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import striptags, truncatechars\n\nfrom thaliawebsite.templatetags.bleach_tags import bleach\nfrom thaliawebsite.templatetags.grid_item import grid_item\nfrom utils.media.services import get_thumbnail_url\nfrom partners.models import Vacancy\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef partner_card(partner):\n \"\"\"Return grid item showing partner.\"\"\"\n image_url = \"\"\n if partner.logo:\n image_url = get_thumbnail_url(\n partner.logo, settings.THUMBNAIL_SIZES[\"medium\"], fit=False\n )\n\n meta_text = truncatechars(bleach(striptags(partner.company_profile)), 80)\n\n return grid_item(\n title=partner.name,\n meta_text='<p class=\"px-2 d-none d-md-block\">{}</p>'.format(meta_text),\n url=partner.get_absolute_url,\n image_url=image_url,\n class_name=\"partner-card contain-logo\",\n )\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef partner_image_card(image):\n \"\"\"Return grid item showing partner image.\"\"\"\n class_name = \"partner-image-card\"\n image_url = get_thumbnail_url(image, settings.THUMBNAIL_SIZES[\"medium\"])\n\n return grid_item(\n title=\"\",\n url=get_thumbnail_url(image, settings.THUMBNAIL_SIZES[\"large\"], fit=False),\n image_url=image_url,\n class_name=class_name,\n anchor_attrs='data-fancybox=\"gallery\"',\n )\n\n\[email protected]_tag(\"partners/vacancy_card.html\")\ndef vacancy_card(vacancy):\n \"\"\"Return grid item showing vacancy.\"\"\"\n image_url = None\n if 
vacancy.get_company_logo():\n image_url = get_thumbnail_url(\n vacancy.get_company_logo(), settings.THUMBNAIL_SIZES[\"medium\"], fit=False\n )\n\n description = truncatechars(bleach(striptags(vacancy.description)), 300)\n extra_class = \"external-vacancy\"\n url = \"#vacancy-{}\".format(vacancy.id)\n keywords = vacancy.keywords.split(\",\")\n location = vacancy.location\n if vacancy.partner and vacancy.partner.is_active:\n url = \"{}#vacancy-{}\".format(vacancy.partner.get_absolute_url(), vacancy.id)\n extra_class = \"\"\n\n return {\n \"title\": vacancy.title,\n \"company_name\": vacancy.get_company_name(),\n \"image_url\": image_url,\n \"description\": description,\n \"location\": location,\n \"keywords\": keywords,\n \"url\": url,\n \"extra_class\": extra_class,\n }\n", "path": "website/partners/templatetags/partner_cards.py"}]} | 1,542 | 268 |
gh_patches_debug_19699 | rasdani/github-patches | git_diff | ibis-project__ibis-3990 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: comparing bool expr to bool literal generates invalid sql
It looks like one of the recent refactorings may have broken comparisons of boolean to boolean:
This test:
```python
def test_bool_bool():
import ibis
from ibis.backends.base.sql.compiler import Compiler
t = ibis.table(
[('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')],
'airlines',
)
x = ibis.literal(True)
top = t[(t.dest.cast('int64') == 0) == x]
result = Compiler.to_sql(top)
print(result)
```
produces this SQL:
```sql
SELECT *
FROM airlines
WHERE CAST(`dest` AS bigint) = 0 = TRUE
```
</issue>
<code>
[start of ibis/backends/base/sql/registry/helpers.py]
1 import ibis.common.exceptions as com
2 import ibis.expr.datatypes as dt
3 import ibis.expr.operations as ops
4 import ibis.expr.types as ir
5 from ibis.backends.base.sql.registry import identifiers
6
7
8 def format_call(translator, func, *args):
9 formatted_args = []
10 for arg in args:
11 fmt_arg = translator.translate(arg)
12 formatted_args.append(fmt_arg)
13
14 return '{}({})'.format(func, ', '.join(formatted_args))
15
16
17 def quote_identifier(name, quotechar='`', force=False):
18 """Add quotes to the `name` identifier if needed."""
19 if force or name.count(' ') or name in identifiers.base_identifiers:
20 return '{0}{1}{0}'.format(quotechar, name)
21 else:
22 return name
23
24
25 def needs_parens(op):
26 if isinstance(op, ir.Expr):
27 op = op.op()
28 op_klass = type(op)
29 # function calls don't need parens
30 return op_klass in {
31 ops.Negate,
32 ops.IsNull,
33 ops.NotNull,
34 ops.Add,
35 ops.Subtract,
36 ops.Multiply,
37 ops.Divide,
38 ops.Power,
39 ops.Modulus,
40 ops.Equals,
41 ops.NotEquals,
42 ops.GreaterEqual,
43 ops.Greater,
44 ops.LessEqual,
45 ops.Less,
46 ops.IdenticalTo,
47 ops.And,
48 ops.Or,
49 ops.Xor,
50 }
51
52
53 parenthesize = '({})'.format
54
55
56 sql_type_names = {
57 'int8': 'tinyint',
58 'int16': 'smallint',
59 'int32': 'int',
60 'int64': 'bigint',
61 'float': 'float',
62 'float32': 'float',
63 'double': 'double',
64 'float64': 'double',
65 'string': 'string',
66 'boolean': 'boolean',
67 'timestamp': 'timestamp',
68 'decimal': 'decimal',
69 }
70
71
72 def type_to_sql_string(tval):
73 if isinstance(tval, dt.Decimal):
74 return f'decimal({tval.precision}, {tval.scale})'
75 name = tval.name.lower()
76 try:
77 return sql_type_names[name]
78 except KeyError:
79 raise com.UnsupportedBackendType(name)
80
[end of ibis/backends/base/sql/registry/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/backends/base/sql/registry/helpers.py b/ibis/backends/base/sql/registry/helpers.py
--- a/ibis/backends/base/sql/registry/helpers.py
+++ b/ibis/backends/base/sql/registry/helpers.py
@@ -22,32 +22,34 @@
return name
-def needs_parens(op):
- if isinstance(op, ir.Expr):
- op = op.op()
- op_klass = type(op)
- # function calls don't need parens
- return op_klass in {
- ops.Negate,
- ops.IsNull,
- ops.NotNull,
- ops.Add,
- ops.Subtract,
- ops.Multiply,
- ops.Divide,
- ops.Power,
- ops.Modulus,
- ops.Equals,
- ops.NotEquals,
- ops.GreaterEqual,
- ops.Greater,
- ops.LessEqual,
- ops.Less,
- ops.IdenticalTo,
- ops.And,
- ops.Or,
- ops.Xor,
- }
+_NEEDS_PARENS_OPS = (
+ ops.Negate,
+ ops.IsNull,
+ ops.NotNull,
+ ops.Add,
+ ops.Subtract,
+ ops.Multiply,
+ ops.Divide,
+ ops.Power,
+ ops.Modulus,
+ ops.Equals,
+ ops.NotEquals,
+ ops.GreaterEqual,
+ ops.Greater,
+ ops.LessEqual,
+ ops.Less,
+ ops.IdenticalTo,
+ ops.And,
+ ops.Or,
+ ops.Xor,
+)
+
+
+def needs_parens(expr: ir.Expr):
+ op = expr.op()
+ if isinstance(op, ops.Alias):
+ op = op.arg.op()
+ return isinstance(op, _NEEDS_PARENS_OPS)
parenthesize = '({})'.format
| {"golden_diff": "diff --git a/ibis/backends/base/sql/registry/helpers.py b/ibis/backends/base/sql/registry/helpers.py\n--- a/ibis/backends/base/sql/registry/helpers.py\n+++ b/ibis/backends/base/sql/registry/helpers.py\n@@ -22,32 +22,34 @@\n return name\n \n \n-def needs_parens(op):\n- if isinstance(op, ir.Expr):\n- op = op.op()\n- op_klass = type(op)\n- # function calls don't need parens\n- return op_klass in {\n- ops.Negate,\n- ops.IsNull,\n- ops.NotNull,\n- ops.Add,\n- ops.Subtract,\n- ops.Multiply,\n- ops.Divide,\n- ops.Power,\n- ops.Modulus,\n- ops.Equals,\n- ops.NotEquals,\n- ops.GreaterEqual,\n- ops.Greater,\n- ops.LessEqual,\n- ops.Less,\n- ops.IdenticalTo,\n- ops.And,\n- ops.Or,\n- ops.Xor,\n- }\n+_NEEDS_PARENS_OPS = (\n+ ops.Negate,\n+ ops.IsNull,\n+ ops.NotNull,\n+ ops.Add,\n+ ops.Subtract,\n+ ops.Multiply,\n+ ops.Divide,\n+ ops.Power,\n+ ops.Modulus,\n+ ops.Equals,\n+ ops.NotEquals,\n+ ops.GreaterEqual,\n+ ops.Greater,\n+ ops.LessEqual,\n+ ops.Less,\n+ ops.IdenticalTo,\n+ ops.And,\n+ ops.Or,\n+ ops.Xor,\n+)\n+\n+\n+def needs_parens(expr: ir.Expr):\n+ op = expr.op()\n+ if isinstance(op, ops.Alias):\n+ op = op.arg.op()\n+ return isinstance(op, _NEEDS_PARENS_OPS)\n \n \n parenthesize = '({})'.format\n", "issue": "bug: comparing bool expr to bool literal generates invalid sql\nIt looks like one of the recent refactorings may have broken comparisons of boolean to boolean:\r\n\r\nThis test:\r\n\r\n```python\r\ndef test_bool_bool():\r\n import ibis\r\n from ibis.backends.base.sql.compiler import Compiler\r\n\r\n t = ibis.table(\r\n [('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')],\r\n 'airlines',\r\n )\r\n\r\n x = ibis.literal(True)\r\n top = t[(t.dest.cast('int64') == 0) == x]\r\n\r\n result = Compiler.to_sql(top)\r\n print(result)\r\n```\r\n\r\nproduces this SQL:\r\n\r\n```sql\r\nSELECT *\r\nFROM airlines\r\nWHERE CAST(`dest` AS bigint) = 0 = TRUE\r\n```\n", "before_files": [{"content": "import ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nfrom ibis.backends.base.sql.registry import identifiers\n\n\ndef format_call(translator, func, *args):\n formatted_args = []\n for arg in args:\n fmt_arg = translator.translate(arg)\n formatted_args.append(fmt_arg)\n\n return '{}({})'.format(func, ', '.join(formatted_args))\n\n\ndef quote_identifier(name, quotechar='`', force=False):\n \"\"\"Add quotes to the `name` identifier if needed.\"\"\"\n if force or name.count(' ') or name in identifiers.base_identifiers:\n return '{0}{1}{0}'.format(quotechar, name)\n else:\n return name\n\n\ndef needs_parens(op):\n if isinstance(op, ir.Expr):\n op = op.op()\n op_klass = type(op)\n # function calls don't need parens\n return op_klass in {\n ops.Negate,\n ops.IsNull,\n ops.NotNull,\n ops.Add,\n ops.Subtract,\n ops.Multiply,\n ops.Divide,\n ops.Power,\n ops.Modulus,\n ops.Equals,\n ops.NotEquals,\n ops.GreaterEqual,\n ops.Greater,\n ops.LessEqual,\n ops.Less,\n ops.IdenticalTo,\n ops.And,\n ops.Or,\n ops.Xor,\n }\n\n\nparenthesize = '({})'.format\n\n\nsql_type_names = {\n 'int8': 'tinyint',\n 'int16': 'smallint',\n 'int32': 'int',\n 'int64': 'bigint',\n 'float': 'float',\n 'float32': 'float',\n 'double': 'double',\n 'float64': 'double',\n 'string': 'string',\n 'boolean': 'boolean',\n 'timestamp': 'timestamp',\n 'decimal': 'decimal',\n}\n\n\ndef type_to_sql_string(tval):\n if isinstance(tval, dt.Decimal):\n return f'decimal({tval.precision}, {tval.scale})'\n name = tval.name.lower()\n try:\n return 
sql_type_names[name]\n except KeyError:\n raise com.UnsupportedBackendType(name)\n", "path": "ibis/backends/base/sql/registry/helpers.py"}]} | 1,352 | 431 |
gh_patches_debug_5310 | rasdani/github-patches | git_diff | pantsbuild__pants-6499
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow `compatibility` attribute to be passed through to generated python targets
To do this:
- Override `_copy_target_attributes` in `ApacheThriftPyGen` to include 'compatibility', so it looks like this:
```
@property
def _copy_target_attributes(self):
"""Propagate these attributes to the synthetic python_library() target."""
return ['provides', 'tags', 'scope', 'compatibility']
```
- See https://github.com/pantsbuild/pants/blob/039051735542d29ae02f4faa09c0c51c47292bf0/contrib/jax_ws/src/python/pants/contrib/jax_ws/tasks/jax_ws_gen.py#L90 as an example
</issue>
<code>
[start of src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py]
1 # coding=utf-8
2 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8 from builtins import open
9
10 from pants.backend.codegen.thrift.lib.apache_thrift_gen_base import ApacheThriftGenBase
11 from pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary
12 from pants.backend.python.targets.python_library import PythonLibrary
13 from pants.util.dirutil import safe_delete, safe_walk
14
15
16 class ApacheThriftPyGen(ApacheThriftGenBase):
17 """Generate Python source files from thrift IDL files."""
18 gentarget_type = PythonThriftLibrary
19 thrift_generator = 'py'
20 default_gen_options_map = {
21 'new_style': None
22 }
23
24 sources_globs = ('**/*',)
25
26 def synthetic_target_type(self, target):
27 return PythonLibrary
28
29 def execute_codegen(self, target, target_workdir):
30 super(ApacheThriftPyGen, self).execute_codegen(target, target_workdir)
31
32 # Thrift generates code with all parent namespaces with empty __init__.py's. Since pants allows
33 # splitting a thrift namespace hierarchy across multiple packages, we explicitly insert
34 # namespace packages to allow for consumption of 2 or more of these packages in the same
35 # PYTHONPATH.
36 for root, _, files in safe_walk(target_workdir):
37 if '__init__.py' not in files: # skip non-packages
38 continue
39
40 init_py_abspath = os.path.join(root, '__init__.py')
41
42 # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,
43 # and it confuses some import mechanisms).
44 if root == target_workdir:
45 safe_delete(init_py_abspath)
46 elif os.path.getsize(init_py_abspath) == 0: # empty __init__, translate to namespace package
47 with open(init_py_abspath, 'wb') as f:
48 f.write(b"__import__('pkg_resources').declare_namespace(__name__)")
49 else:
50 # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we
51 # leave as-is.
52 pass
53
54 def ignore_dup(self, tgt1, tgt2, rel_src):
55 # Thrift generates all the intermediate __init__.py files, and they shouldn't
56 # count as dups.
57 return os.path.basename(rel_src) == '__init__.py'
58
[end of src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py
--- a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py
+++ b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py
@@ -51,6 +51,10 @@
# leave as-is.
pass
+ @property
+ def _copy_target_attributes(self):
+ return super(ApacheThriftPyGen, self)._copy_target_attributes + ['compatibility']
+
def ignore_dup(self, tgt1, tgt2, rel_src):
# Thrift generates all the intermediate __init__.py files, and they shouldn't
# count as dups.
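For context, a hypothetical BUILD entry that exercises the propagated attribute once this patch is applied — the target name and compatibility constraint below are illustrative assumptions, not taken from the repository:

```python
# BUILD file (Pants v1 Python DSL); names and constraint are placeholders.
python_thrift_library(
    name="interfaces",
    sources=["interfaces.thrift"],
    # With the patch, this value is now copied onto the synthetic
    # python_library() that the thrift codegen produces.
    compatibility=["CPython>=2.7,<4"],
)
```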
| {"golden_diff": "diff --git a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py\n--- a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py\n+++ b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py\n@@ -51,6 +51,10 @@\n # leave as-is.\n pass\n \n+ @property\n+ def _copy_target_attributes(self):\n+ return super(ApacheThriftPyGen, self)._copy_target_attributes + ['compatibility']\n+\n def ignore_dup(self, tgt1, tgt2, rel_src):\n # Thrift generates all the intermediate __init__.py files, and they shouldn't\n # count as dups.\n", "issue": "Allow `compatibility` attribute to be passed through to generated python targets\nTo do this:\r\n\r\n- Override ` _copy_target_attributes` in `ApacheThriftPyGen` to include 'compatibility', so it looks like this:\r\n\r\n```\r\n @property\r\n def _copy_target_attributes(self):\r\n \"\"\"Propagate these attributes to the synthetic python_library() target.\"\"\"\r\n return ['provides', 'tags', 'scope', 'compatibility']\r\n```\r\n\r\n- See https://github.com/pantsbuild/pants/blob/039051735542d29ae02f4faa09c0c51c47292bf0/contrib/jax_ws/src/python/pants/contrib/jax_ws/tasks/jax_ws_gen.py#L90 as an example\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import open\n\nfrom pants.backend.codegen.thrift.lib.apache_thrift_gen_base import ApacheThriftGenBase\nfrom pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary\nfrom pants.backend.python.targets.python_library import PythonLibrary\nfrom pants.util.dirutil import safe_delete, safe_walk\n\n\nclass ApacheThriftPyGen(ApacheThriftGenBase):\n \"\"\"Generate Python source files from thrift IDL files.\"\"\"\n gentarget_type = PythonThriftLibrary\n thrift_generator = 'py'\n default_gen_options_map = {\n 'new_style': None\n }\n\n sources_globs = ('**/*',)\n\n def synthetic_target_type(self, target):\n return PythonLibrary\n\n def execute_codegen(self, target, target_workdir):\n super(ApacheThriftPyGen, self).execute_codegen(target, target_workdir)\n\n # Thrift generates code with all parent namespaces with empty __init__.py's. 
Since pants allows\n # splitting a thrift namespace hierarchy across multiple packages, we explicitly insert\n # namespace packages to allow for consumption of 2 or more of these packages in the same\n # PYTHONPATH.\n for root, _, files in safe_walk(target_workdir):\n if '__init__.py' not in files: # skip non-packages\n continue\n\n init_py_abspath = os.path.join(root, '__init__.py')\n\n # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,\n # and it confuses some import mechanisms).\n if root == target_workdir:\n safe_delete(init_py_abspath)\n elif os.path.getsize(init_py_abspath) == 0: # empty __init__, translate to namespace package\n with open(init_py_abspath, 'wb') as f:\n f.write(b\"__import__('pkg_resources').declare_namespace(__name__)\")\n else:\n # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we\n # leave as-is.\n pass\n\n def ignore_dup(self, tgt1, tgt2, rel_src):\n # Thrift generates all the intermediate __init__.py files, and they shouldn't\n # count as dups.\n return os.path.basename(rel_src) == '__init__.py'\n", "path": "src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py"}]} | 1,402 | 177 |
gh_patches_debug_1357 | rasdani/github-patches | git_diff | aws__aws-cli-4334
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken docutils==0.15
Hi community,
Today docutils were updated to 0.15 (https://pypi.org/project/docutils/#history) and it breaks awscli running on Python 2.
```
# aws --version
Traceback (most recent call last):
File "/bin/aws", line 19, in <module>
import awscli.clidriver
File "/usr/lib/python2.7/site-packages/awscli/clidriver.py", line 36, in <module>
from awscli.help import ProviderHelpCommand
File "/usr/lib/python2.7/site-packages/awscli/help.py", line 20, in <module>
from docutils.core import publish_string
File "/usr/lib/python2.7/site-packages/docutils/core.py", line 246
print('\n::: Runtime settings:', file=self._stderr)
^
SyntaxError: invalid syntax
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 requires = ['botocore==1.12.191',
27 'colorama>=0.2.5,<=0.3.9',
28 'docutils>=0.10',
29 'rsa>=3.1.2,<=3.5.0',
30 's3transfer>=0.2.0,<0.3.0']
31
32
33 if sys.version_info[:2] == (2, 6):
34 # For python2.6 we have to require argparse since it
35 # was not in stdlib until 2.7.
36 requires.append('argparse>=1.1')
37
38 # For Python 2.6, we have to require a different verion of PyYAML since the latest
39 # versions dropped support for Python 2.6.
40 requires.append('PyYAML>=3.10,<=3.13')
41 else:
42 requires.append('PyYAML>=3.10,<=5.1')
43
44
45 setup_options = dict(
46 name='awscli',
47 version=find_version("awscli", "__init__.py"),
48 description='Universal Command Line Environment for AWS.',
49 long_description=read('README.rst'),
50 author='Amazon Web Services',
51 url='http://aws.amazon.com/cli/',
52 scripts=['bin/aws', 'bin/aws.cmd',
53 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
54 'bin/aws_bash_completer'],
55 packages=find_packages(exclude=['tests*']),
56 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
57 'examples/*/*.txt', 'examples/*/*/*.txt',
58 'examples/*/*/*.rst', 'topics/*.rst',
59 'topics/*.json']},
60 install_requires=requires,
61 extras_require={
62 ':python_version=="2.6"': [
63 'argparse>=1.1',
64 ]
65 },
66 license="Apache License 2.0",
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Intended Audience :: Developers',
70 'Intended Audience :: System Administrators',
71 'Natural Language :: English',
72 'License :: OSI Approved :: Apache Software License',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 2',
75 'Programming Language :: Python :: 2.6',
76 'Programming Language :: Python :: 2.7',
77 'Programming Language :: Python :: 3',
78 'Programming Language :: Python :: 3.3',
79 'Programming Language :: Python :: 3.4',
80 'Programming Language :: Python :: 3.5',
81 'Programming Language :: Python :: 3.6',
82 'Programming Language :: Python :: 3.7',
83 ],
84 )
85
86 if 'py2exe' in sys.argv:
87 # This will actually give us a py2exe command.
88 import py2exe
89 # And we have some py2exe specific options.
90 setup_options['options'] = {
91 'py2exe': {
92 'optimize': 0,
93 'skip_archive': True,
94 'dll_excludes': ['crypt32.dll'],
95 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
96 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
97 }
98 }
99 setup_options['console'] = ['bin/aws']
100
101
102 setup(**setup_options)
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
requires = ['botocore==1.12.191',
'colorama>=0.2.5,<=0.3.9',
- 'docutils>=0.10',
+ 'docutils>=0.10,<0.15',
'rsa>=3.1.2,<=3.5.0',
's3transfer>=0.2.0,<0.3.0']
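For context on the traceback in the issue: `print(..., file=...)` is Python-3-only syntax unless a `__future__` import is present, so Python 2 fails with a `SyntaxError` at import time. A minimal, self-contained illustration — using `sys.stderr` in place of docutils' internal stream, which is an assumption for the example:

```python
from __future__ import print_function  # without this line, Python 2 raises
import sys                             # SyntaxError on the print() below

# Python 3 style print with a file= keyword, as in docutils 0.15's core.py.
print('\n::: Runtime settings:', file=sys.stderr)
```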
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,7 +25,7 @@\n \n requires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n- 'docutils>=0.10',\n+ 'docutils>=0.10,<0.15',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n", "issue": "Broken docutils==0.15\nHi community,\r\n\r\nToday docutils were updated to 0.15 (https://pypi.org/project/docutils/#history) and it breaks awscli running on Python 2.\r\n\r\n```\r\n# aws --version\r\nTraceback (most recent call last):\r\n File \"/bin/aws\", line 19, in <module>\r\n import awscli.clidriver\r\n File \"/usr/lib/python2.7/site-packages/awscli/clidriver.py\", line 36, in <module>\r\n from awscli.help import ProviderHelpCommand\r\n File \"/usr/lib/python2.7/site-packages/awscli/help.py\", line 20, in <module>\r\n from docutils.core import publish_string\r\n File \"/usr/lib/python2.7/site-packages/docutils/core.py\", line 246\r\n print('\\n::: Runtime settings:', file=self._stderr)\r\n ^\r\nSyntaxError: invalid syntax\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n requires.append('PyYAML>=3.10,<=3.13')\nelse:\n requires.append('PyYAML>=3.10,<=5.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in 
sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,795 | 130 |
gh_patches_debug_29112 | rasdani/github-patches | git_diff | crytic__slither-2239
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[False-Positive]:`msg.value` in a loop when `msg.value` is not transferred
### Describe the issue:
In the following code snippet, Slither detects the following:
<img width="1309" alt="image" src="https://github.com/crytic/slither/assets/31145285/eacf2441-3e2d-464a-97c3-635498e43120">
This seems invalid as `msg.value` here is just checked as a conditional to ensure no value is sent to this function. In any case, no logic inside this function (including inside the internal `_transfer` function) forwards `msg.value`.
### Code example to reproduce the issue:
```solidity
modifier noNativeTokens() {
require(msg.value == 0, "Not aimed to receive native tokens");
_;
}
function executeRelayCallBatch(
bytes[] calldata signatures,
uint256[] calldata nonces,
uint256[] calldata validityTimestamps,
uint256[] calldata values,
bytes[] calldata payloads
) public payable noNativeTokens returns (bytes[] memory) {
if (
signatures.length != nonces.length ||
nonces.length != validityTimestamps.length ||
validityTimestamps.length != values.length ||
values.length != payloads.length
) {
revert("Batch ExecuteRelayCall Params Length Mismatch");
}
bytes[] memory castedVotes = new bytes[](payloads.length);
for (uint256 ii; ii < payloads.length; ++ii) {
require(values[ii] == 0, "Batch entry cannot contain value");
// cast each votes one by one
castedVotes[ii] = executeRelayCall(
signatures[ii],
nonces[ii],
validityTimestamps[ii],
payloads[ii]
);
}
return castedVotes;
}
```
### Version:
0.10.0
### Relevant log output:
_No response_
</issue>
<code>
[start of slither/detectors/statements/msg_value_in_loop.py]
1 from typing import List, Optional
2 from slither.core.cfg.node import NodeType, Node
3 from slither.detectors.abstract_detector import (
4 AbstractDetector,
5 DetectorClassification,
6 DETECTOR_INFO,
7 )
8 from slither.slithir.operations import InternalCall
9 from slither.core.declarations import SolidityVariableComposed, Contract
10 from slither.utils.output import Output
11
12
13 def detect_msg_value_in_loop(contract: Contract) -> List[Node]:
14 results: List[Node] = []
15 for f in contract.functions_entry_points:
16 if f.is_implemented and f.payable:
17 msg_value_in_loop(f.entry_point, 0, [], results)
18 return results
19
20
21 def msg_value_in_loop(
22 node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]
23 ) -> None:
24
25 if node is None:
26 return
27
28 if node in visited:
29 return
30 # shared visited
31 visited.append(node)
32
33 if node.type == NodeType.STARTLOOP:
34 in_loop_counter += 1
35 elif node.type == NodeType.ENDLOOP:
36 in_loop_counter -= 1
37
38 for ir in node.all_slithir_operations():
39 if in_loop_counter > 0 and SolidityVariableComposed("msg.value") in ir.read:
40 results.append(ir.node)
41 if isinstance(ir, (InternalCall)):
42 msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)
43
44 for son in node.sons:
45 msg_value_in_loop(son, in_loop_counter, visited, results)
46
47
48 class MsgValueInLoop(AbstractDetector):
49 """
50 Detect the use of msg.value inside a loop
51 """
52
53 ARGUMENT = "msg-value-loop"
54 HELP = "msg.value inside a loop"
55 IMPACT = DetectorClassification.HIGH
56 CONFIDENCE = DetectorClassification.MEDIUM
57
58 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop"
59
60 WIKI_TITLE = "`msg.value` inside a loop"
61 WIKI_DESCRIPTION = "Detect the use of `msg.value` inside a loop."
62
63 # region wiki_exploit_scenario
64 WIKI_EXPLOIT_SCENARIO = """
65 ```solidity
66 contract MsgValueInLoop{
67
68 mapping (address => uint256) balances;
69
70 function bad(address[] memory receivers) public payable {
71 for (uint256 i=0; i < receivers.length; i++) {
72 balances[receivers[i]] += msg.value;
73 }
74 }
75
76 }
77 ```
78 """
79 # endregion wiki_exploit_scenario
80
81 WIKI_RECOMMENDATION = """
82 Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.
83 """
84
85 def _detect(self) -> List[Output]:
86 """"""
87 results: List[Output] = []
88 for c in self.compilation_unit.contracts_derived:
89 values = detect_msg_value_in_loop(c)
90 for node in values:
91 func = node.function
92
93 info: DETECTOR_INFO = [func, " use msg.value in a loop: ", node, "\n"]
94 res = self.generate_result(info)
95 results.append(res)
96
97 return results
98
[end of slither/detectors/statements/msg_value_in_loop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py
--- a/slither/detectors/statements/msg_value_in_loop.py
+++ b/slither/detectors/statements/msg_value_in_loop.py
@@ -8,6 +8,9 @@
from slither.slithir.operations import InternalCall
from slither.core.declarations import SolidityVariableComposed, Contract
from slither.utils.output import Output
+from slither.slithir.variables.constant import Constant
+from slither.core.variables import Variable
+from slither.core.expressions.literal import Literal
def detect_msg_value_in_loop(contract: Contract) -> List[Node]:
@@ -37,6 +40,21 @@
for ir in node.all_slithir_operations():
if in_loop_counter > 0 and SolidityVariableComposed("msg.value") in ir.read:
+ # If we find a conditional expression with msg.value and is compared to 0 we don't report it
+ if ir.node.is_conditional() and SolidityVariableComposed("msg.value") in ir.read:
+ compared_to = (
+ ir.read[1]
+ if ir.read[0] == SolidityVariableComposed("msg.value")
+ else ir.read[0]
+ )
+ if (
+ isinstance(compared_to, Constant)
+ and compared_to.value == 0
+ or isinstance(compared_to, Variable)
+ and isinstance(compared_to.expression, Literal)
+ and str(compared_to.expression.value) == "0"
+ ):
+ continue
results.append(ir.node)
if isinstance(ir, (InternalCall)):
msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)
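A quick way to exercise the patched detector against the contract from the issue — a sketch that assumes Slither's Python API and a locally saved `Batch.sol`, neither of which comes from the snippet above:

```python
from slither import Slither
from slither.detectors.statements.msg_value_in_loop import MsgValueInLoop

sl = Slither("Batch.sol")  # the issue's contract, saved locally (assumed path)
sl.register_detector(MsgValueInLoop)
for detector_results in sl.run_detectors():
    for result in detector_results:
        # After the patch, the require(values[ii] == 0, ...) loop above
        # should no longer be reported here.
        print(result["description"])
```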
| {"golden_diff": "diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py\n--- a/slither/detectors/statements/msg_value_in_loop.py\n+++ b/slither/detectors/statements/msg_value_in_loop.py\n@@ -8,6 +8,9 @@\n from slither.slithir.operations import InternalCall\n from slither.core.declarations import SolidityVariableComposed, Contract\n from slither.utils.output import Output\n+from slither.slithir.variables.constant import Constant\n+from slither.core.variables import Variable\n+from slither.core.expressions.literal import Literal\n \n \n def detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n@@ -37,6 +40,21 @@\n \n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n+ # If we find a conditional expression with msg.value and is compared to 0 we don't report it\n+ if ir.node.is_conditional() and SolidityVariableComposed(\"msg.value\") in ir.read:\n+ compared_to = (\n+ ir.read[1]\n+ if ir.read[0] == SolidityVariableComposed(\"msg.value\")\n+ else ir.read[0]\n+ )\n+ if (\n+ isinstance(compared_to, Constant)\n+ and compared_to.value == 0\n+ or isinstance(compared_to, Variable)\n+ and isinstance(compared_to.expression, Literal)\n+ and str(compared_to.expression.value) == \"0\"\n+ ):\n+ continue\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n", "issue": "[False-Positive]:`msg.value` in a loop when `msg.value` is not transferred\n### Describe the issue:\n\nIn the following code snippet, Slither detects the following:\r\n\r\n<img width=\"1309\" alt=\"image\" src=\"https://github.com/crytic/slither/assets/31145285/eacf2441-3e2d-464a-97c3-635498e43120\">\r\n\r\nThis seems invalid as `msg.value` here is just checked as a conditional to ensure no value is sent to this function. 
In any case, no logic inside this function (including inside the internal `_transfer` function) forwards `msg.value`.\n\n### Code example to reproduce the issue:\n\n```solidity\r\n\r\n modifier noNativeTokens() {\r\n require(msg.value == 0, \"Not aimed to receive native tokens\");\r\n _;\r\n }\r\n \r\n function executeRelayCallBatch(\r\n bytes[] calldata signatures,\r\n uint256[] calldata nonces,\r\n uint256[] calldata validityTimestamps,\r\n uint256[] calldata values,\r\n bytes[] calldata payloads\r\n ) public payable noNativeTokens returns (bytes[] memory) {\r\n if (\r\n signatures.length != nonces.length ||\r\n nonces.length != validityTimestamps.length ||\r\n validityTimestamps.length != values.length ||\r\n values.length != payloads.length\r\n ) {\r\n revert(\"Batch ExecuteRelayCall Params Length Mismatch\");\r\n }\r\n\r\n bytes[] memory castedVotes = new bytes[](payloads.length);\r\n\r\n for (uint256 ii; ii < payloads.length; ++ii) {\r\n require(values[ii] == 0, \"Batch entry cannot contain value\");\r\n\r\n // cast each votes one by one\r\n castedVotes[ii] = executeRelayCall(\r\n signatures[ii],\r\n nonces[ii],\r\n validityTimestamps[ii],\r\n payloads[ii]\r\n );\r\n }\r\n\r\n return castedVotes;\r\n }\r\n```\n\n### Version:\n\n0.10.0\n\n### Relevant log output:\n\n_No response_\n", "before_files": [{"content": "from typing import List, Optional\nfrom slither.core.cfg.node import NodeType, Node\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import InternalCall\nfrom slither.core.declarations import SolidityVariableComposed, Contract\nfrom slither.utils.output import Output\n\n\ndef detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n results: List[Node] = []\n for f in contract.functions_entry_points:\n if f.is_implemented and f.payable:\n msg_value_in_loop(f.entry_point, 0, [], results)\n return results\n\n\ndef msg_value_in_loop(\n node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]\n) -> None:\n\n if node is None:\n return\n\n if node in visited:\n return\n # shared visited\n visited.append(node)\n\n if node.type == NodeType.STARTLOOP:\n in_loop_counter += 1\n elif node.type == NodeType.ENDLOOP:\n in_loop_counter -= 1\n\n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n\n for son in node.sons:\n msg_value_in_loop(son, in_loop_counter, visited, results)\n\n\nclass MsgValueInLoop(AbstractDetector):\n \"\"\"\n Detect the use of msg.value inside a loop\n \"\"\"\n\n ARGUMENT = \"msg-value-loop\"\n HELP = \"msg.value inside a loop\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop\"\n\n WIKI_TITLE = \"`msg.value` inside a loop\"\n WIKI_DESCRIPTION = \"Detect the use of `msg.value` inside a loop.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract MsgValueInLoop{\n\n mapping (address => uint256) balances;\n\n function bad(address[] memory receivers) public payable {\n for (uint256 i=0; i < receivers.length; i++) {\n balances[receivers[i]] += msg.value;\n }\n }\n\n}\n```\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"\"\"\nProvide an 
explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.\n\"\"\"\n\n def _detect(self) -> List[Output]:\n \"\"\"\"\"\"\n results: List[Output] = []\n for c in self.compilation_unit.contracts_derived:\n values = detect_msg_value_in_loop(c)\n for node in values:\n func = node.function\n\n info: DETECTOR_INFO = [func, \" use msg.value in a loop: \", node, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n return results\n", "path": "slither/detectors/statements/msg_value_in_loop.py"}]} | 1,880 | 387 |
gh_patches_debug_8866 | rasdani/github-patches | git_diff | tensorflow__addons-618
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use package manager to pin TF dependency
Per https://github.com/tensorflow/community/pull/135 we should be using pip/conda to specify which Addons releases work with which TF versions.
This is blocked until the `tensorflow` and `tensorflow-gpu` consolidation scheduled for the 2.1 release.
</issue>
<code>
[start of setup.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 from __future__ import absolute_import
28 from __future__ import division
29 from __future__ import print_function
30
31 import os
32 import platform
33 import sys
34
35 from datetime import datetime
36 from setuptools import find_packages
37 from setuptools import setup
38 from setuptools.dist import Distribution
39 from setuptools import Extension
40
41 DOCLINES = __doc__.split('\n')
42
43 TFA_NIGHTLY = 'tfa-nightly'
44 TFA_RELEASE = 'tensorflow-addons'
45
46 if '--nightly' in sys.argv:
47 project_name = TFA_NIGHTLY
48 nightly_idx = sys.argv.index('--nightly')
49 sys.argv.pop(nightly_idx)
50 else:
51 project_name = TFA_RELEASE
52
53 # Version
54 version = {}
55 base_dir = os.path.dirname(os.path.abspath(__file__))
56 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
57 # yapf: disable
58 exec(fp.read(), version)
59 # yapf: enable
60
61 if project_name == TFA_NIGHTLY:
62 version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
63
64 # Dependencies
65 REQUIRED_PACKAGES = [
66 'six >= 1.10.0',
67 ]
68
69 if project_name == TFA_RELEASE:
70 # TODO: remove if-else condition when tf supports package consolidation.
71 if platform.system() == 'Linux':
72 REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
73 else:
74 REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
75 elif project_name == TFA_NIGHTLY:
76 REQUIRED_PACKAGES.append('tf-nightly')
77
78
79 class BinaryDistribution(Distribution):
80 """This class is needed in order to create OS specific wheels."""
81
82 def has_ext_modules(self):
83 return True
84
85
86 setup(
87 name=project_name,
88 version=version['__version__'],
89 description=DOCLINES[0],
90 long_description='\n'.join(DOCLINES[2:]),
91 author='Google Inc.',
92 author_email='[email protected]',
93 packages=find_packages(),
94 ext_modules=[Extension('_foo', ['stub.cc'])],
95 install_requires=REQUIRED_PACKAGES,
96 include_package_data=True,
97 zip_safe=False,
98 distclass=BinaryDistribution,
99 classifiers=[
100 'Development Status :: 4 - Beta',
101 'Intended Audience :: Developers',
102 'Intended Audience :: Education',
103 'Intended Audience :: Science/Research',
104 'License :: OSI Approved :: Apache Software License',
105 'Programming Language :: Python :: 2.7',
106 'Programming Language :: Python :: 3.5',
107 'Programming Language :: Python :: 3.6',
108 'Programming Language :: Python :: 3.7',
109 'Topic :: Scientific/Engineering :: Mathematics',
110 'Topic :: Software Development :: Libraries :: Python Modules',
111 'Topic :: Software Development :: Libraries',
112 ],
113 license='Apache 2.0',
114 keywords='tensorflow addons machine learning',
115 )
116
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,9 +69,9 @@
if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
+ REQUIRED_PACKAGES.append('tensorflow-gpu >= 2.0.0')
else:
- REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
+ REQUIRED_PACKAGES.append('tensorflow >= 2.0.0')
elif project_name == TFA_NIGHTLY:
REQUIRED_PACKAGES.append('tf-nightly')
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,9 +69,9 @@\n if project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n+ REQUIRED_PACKAGES.append('tensorflow-gpu >= 2.0.0')\n else:\n- REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\n+ REQUIRED_PACKAGES.append('tensorflow >= 2.0.0')\n elif project_name == TFA_NIGHTLY:\n REQUIRED_PACKAGES.append('tf-nightly')\n", "issue": "Use package manager to pin TF dependency\nPer https://github.com/tensorflow/community/pull/135 we should be using pip/conda to specify which Addons releases work with which TF versions. \r\n\r\nThis is blocked until the `tensorflow` and `tensorflow-gpu` consolidation scheduled for 2.1 release\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. 
However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n else:\n REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\nelif project_name == TFA_NIGHTLY:\n REQUIRED_PACKAGES.append('tf-nightly')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]} | 1,708 | 156 |
gh_patches_debug_5541 | rasdani/github-patches | git_diff | conda__conda-build-3118
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't get conda index to work with channel_name
From the updated docs on 3.14.0, `channel_name` is now a positional argument. However, when I do
```bash
$ conda index <build_dir> <channel_name>
```
I get
```
(...)
FileNotFoundError: [Errno 2] No such file or directory: '<path_prefix>/<channel_name>'
```
instead of a custom channel name in the outputted html files.
Am I forgetting some special `argparse` magic or did #3091 introduce a bug?
I'm on conda 4.5.11 and conda-build 3.14.0
</issue>
<code>
[start of conda_build/cli/main_index.py]
1 from __future__ import absolute_import, division, print_function
2
3 import logging
4 import os
5 import sys
6
7 from conda_build.conda_interface import ArgumentParser
8
9 from conda_build import api
10 from conda_build.index import DEFAULT_SUBDIRS, MAX_THREADS_DEFAULT
11
12 logging.basicConfig(level=logging.INFO)
13
14
15 def parse_args(args):
16 p = ArgumentParser(
17 description="Update package index metadata files in given directories.")
18
19 p.add_argument(
20 'dir',
21 help='Directory that contains an index to be updated.',
22 nargs='*',
23 default=[os.getcwd()],
24 )
25
26 p.add_argument(
27 '-c', "--check-md5",
28 action="store_true",
29 help="""Use hash values instead of file modification times for determining if a
30 package's metadata needs to be updated.""",
31 )
32 p.add_argument(
33 'channel_name',
34 help='Adding a channel name will create an index.html file within the subdir.',
35 nargs='?',
36 default=None,
37 )
38 p.add_argument(
39 '-s', '--subdir',
40 action='append',
41 help='Optional. The subdir to index. Can be given multiple times. If not provided, will '
42 'default to all of %s. If provided, will not create channeldata.json for the channel.'
43 '' % ', '.join(DEFAULT_SUBDIRS),
44 )
45 p.add_argument(
46 '-t', '--threads',
47 default=MAX_THREADS_DEFAULT,
48 type=int,
49 )
50 p.add_argument(
51 "-p", "--patch-generator",
52 help="Path to Python file that outputs metadata patch instructions"
53 )
54
55 args = p.parse_args(args)
56 return p, args
57
58
59 def execute(args):
60 _, args = parse_args(args)
61 api.update_index(args.dir, check_md5=args.check_md5, channel_name=args.channel_name,
62 threads=args.threads, subdir=args.subdir, patch_generator=args.patch_generator)
63
64
65 def main():
66 return execute(sys.argv[1:])
67
[end of conda_build/cli/main_index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_build/cli/main_index.py b/conda_build/cli/main_index.py
--- a/conda_build/cli/main_index.py
+++ b/conda_build/cli/main_index.py
@@ -30,10 +30,8 @@
package's metadata needs to be updated.""",
)
p.add_argument(
- 'channel_name',
- help='Adding a channel name will create an index.html file within the subdir.',
- nargs='?',
- default=None,
+ "-n", "--channel-name",
+ help="Customize the channel name listed in each channel's index.html.",
)
p.add_argument(
'-s', '--subdir',
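A minimal sketch of the patched parser's behaviour — it assumes the patched `main_index.py` is importable, and the directory and channel names are placeholders:

```python
from conda_build.cli.main_index import parse_args

_, args = parse_args(["./build_dir", "--channel-name", "my-channel"])
assert args.dir == ["./build_dir"]        # positional dirs are unaffected
assert args.channel_name == "my-channel"  # no longer parsed as a directory
```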
| {"golden_diff": "diff --git a/conda_build/cli/main_index.py b/conda_build/cli/main_index.py\n--- a/conda_build/cli/main_index.py\n+++ b/conda_build/cli/main_index.py\n@@ -30,10 +30,8 @@\n package's metadata needs to be updated.\"\"\",\n )\n p.add_argument(\n- 'channel_name',\n- help='Adding a channel name will create an index.html file within the subdir.',\n- nargs='?',\n- default=None,\n+ \"-n\", \"--channel-name\",\n+ help=\"Customize the channel name listed in each channel's index.html.\",\n )\n p.add_argument(\n '-s', '--subdir',\n", "issue": "Can't get conda index to work with channel_name\nFrom the updated docs on 3.14.0, `channel_name` is now a positional argument. However, when I do\r\n```bash\r\n$ conda index <build_dir> <channel_name>\r\n```\r\nI get\r\n```\r\n(...)\r\nFileNotFoundError: [Errno 2] No such file or directory: '<path_prefix>/<channel_name>'\r\n```\r\ninstead of a custom channel name in the outputted html files. \r\n\r\nAm I forgetting some special `argparse` magic or did #3091 introduce a bug?\r\n\r\nI'm on conda 4.5.11 and conda-build 3.14.0\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport sys\n\nfrom conda_build.conda_interface import ArgumentParser\n\nfrom conda_build import api\nfrom conda_build.index import DEFAULT_SUBDIRS, MAX_THREADS_DEFAULT\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef parse_args(args):\n p = ArgumentParser(\n description=\"Update package index metadata files in given directories.\")\n\n p.add_argument(\n 'dir',\n help='Directory that contains an index to be updated.',\n nargs='*',\n default=[os.getcwd()],\n )\n\n p.add_argument(\n '-c', \"--check-md5\",\n action=\"store_true\",\n help=\"\"\"Use hash values instead of file modification times for determining if a\n package's metadata needs to be updated.\"\"\",\n )\n p.add_argument(\n 'channel_name',\n help='Adding a channel name will create an index.html file within the subdir.',\n nargs='?',\n default=None,\n )\n p.add_argument(\n '-s', '--subdir',\n action='append',\n help='Optional. The subdir to index. Can be given multiple times. If not provided, will '\n 'default to all of %s. If provided, will not create channeldata.json for the channel.'\n '' % ', '.join(DEFAULT_SUBDIRS),\n )\n p.add_argument(\n '-t', '--threads',\n default=MAX_THREADS_DEFAULT,\n type=int,\n )\n p.add_argument(\n \"-p\", \"--patch-generator\",\n help=\"Path to Python file that outputs metadata patch instructions\"\n )\n\n args = p.parse_args(args)\n return p, args\n\n\ndef execute(args):\n _, args = parse_args(args)\n api.update_index(args.dir, check_md5=args.check_md5, channel_name=args.channel_name,\n threads=args.threads, subdir=args.subdir, patch_generator=args.patch_generator)\n\n\ndef main():\n return execute(sys.argv[1:])\n", "path": "conda_build/cli/main_index.py"}]} | 1,233 | 146 |
gh_patches_debug_14395 | rasdani/github-patches | git_diff | ethereum__web3.py-3027
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix logger for AsyncHTTPProvider
AsyncHTTPProvider is getting the logger for `web3.providers.HTTPProvider` but should instead use `web3.providers.AsyncHTTPProvider`.
</issue>
<code>
[start of web3/providers/__init__.py]
1 from .async_base import ( # noqa: F401
2 AsyncBaseProvider,
3 )
4 from .base import ( # noqa: F401
5 BaseProvider,
6 JSONBaseProvider,
7 )
8 from .ipc import ( # noqa: F401,
9 IPCProvider,
10 )
11 from .rpc import ( # noqa: F401,
12 HTTPProvider,
13 )
14 from .websocket import ( # noqa: F401,
15 WebsocketProvider,
16 )
17 from .auto import ( # noqa: F401,
18 AutoProvider,
19 )
20
[end of web3/providers/__init__.py]
[start of web3/providers/async_rpc.py]
1 import logging
2 from typing import (
3 Any,
4 Dict,
5 Iterable,
6 Optional,
7 Tuple,
8 Union,
9 )
10
11 from aiohttp import (
12 ClientSession,
13 )
14 from eth_typing import (
15 URI,
16 )
17 from eth_utils import (
18 to_dict,
19 )
20
21 from web3._utils.http import (
22 construct_user_agent,
23 )
24 from web3._utils.request import (
25 async_cache_and_return_session as _async_cache_and_return_session,
26 async_make_post_request,
27 get_default_http_endpoint,
28 )
29 from web3.types import (
30 AsyncMiddleware,
31 RPCEndpoint,
32 RPCResponse,
33 )
34
35 from ..datastructures import (
36 NamedElementOnion,
37 )
38 from ..middleware.exception_retry_request import (
39 async_http_retry_request_middleware,
40 )
41 from .async_base import (
42 AsyncJSONBaseProvider,
43 )
44
45
46 class AsyncHTTPProvider(AsyncJSONBaseProvider):
47 logger = logging.getLogger("web3.providers.HTTPProvider")
48 endpoint_uri = None
49 _request_kwargs = None
50 # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider
51 _middlewares: Tuple[AsyncMiddleware, ...] = NamedElementOnion([(async_http_retry_request_middleware, "http_retry_request")]) # type: ignore # noqa: E501
52
53 def __init__(
54 self,
55 endpoint_uri: Optional[Union[URI, str]] = None,
56 request_kwargs: Optional[Any] = None,
57 ) -> None:
58 if endpoint_uri is None:
59 self.endpoint_uri = get_default_http_endpoint()
60 else:
61 self.endpoint_uri = URI(endpoint_uri)
62
63 self._request_kwargs = request_kwargs or {}
64
65 super().__init__()
66
67 async def cache_async_session(self, session: ClientSession) -> ClientSession:
68 return await _async_cache_and_return_session(self.endpoint_uri, session)
69
70 def __str__(self) -> str:
71 return f"RPC connection {self.endpoint_uri}"
72
73 @to_dict
74 def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:
75 if "headers" not in self._request_kwargs:
76 yield "headers", self.get_request_headers()
77 for key, value in self._request_kwargs.items():
78 yield key, value
79
80 def get_request_headers(self) -> Dict[str, str]:
81 return {
82 "Content-Type": "application/json",
83 "User-Agent": construct_user_agent(str(type(self))),
84 }
85
86 async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
87 self.logger.debug(
88 f"Making request HTTP. URI: {self.endpoint_uri}, Method: {method}"
89 )
90 request_data = self.encode_rpc_request(method, params)
91 raw_response = await async_make_post_request(
92 self.endpoint_uri, request_data, **self.get_request_kwargs()
93 )
94 response = self.decode_rpc_response(raw_response)
95 self.logger.debug(
96 f"Getting response HTTP. URI: {self.endpoint_uri}, "
97 f"Method: {method}, Response: {response}"
98 )
99 return response
100
[end of web3/providers/async_rpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/providers/__init__.py b/web3/providers/__init__.py
--- a/web3/providers/__init__.py
+++ b/web3/providers/__init__.py
@@ -1,6 +1,9 @@
from .async_base import ( # noqa: F401
AsyncBaseProvider,
)
+from .async_rpc import ( # noqa: F401
+ AsyncHTTPProvider,
+)
from .base import ( # noqa: F401
BaseProvider,
JSONBaseProvider,
diff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py
--- a/web3/providers/async_rpc.py
+++ b/web3/providers/async_rpc.py
@@ -44,7 +44,7 @@
class AsyncHTTPProvider(AsyncJSONBaseProvider):
- logger = logging.getLogger("web3.providers.HTTPProvider")
+ logger = logging.getLogger("web3.providers.AsyncHTTPProvider")
endpoint_uri = None
_request_kwargs = None
# type ignored b/c conflict with _middlewares attr on AsyncBaseProvider
| {"golden_diff": "diff --git a/web3/providers/__init__.py b/web3/providers/__init__.py\n--- a/web3/providers/__init__.py\n+++ b/web3/providers/__init__.py\n@@ -1,6 +1,9 @@\n from .async_base import ( # noqa: F401\n AsyncBaseProvider,\n )\n+from .async_rpc import ( # noqa: F401\n+ AsyncHTTPProvider,\n+)\n from .base import ( # noqa: F401\n BaseProvider,\n JSONBaseProvider,\ndiff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py\n--- a/web3/providers/async_rpc.py\n+++ b/web3/providers/async_rpc.py\n@@ -44,7 +44,7 @@\n \n \n class AsyncHTTPProvider(AsyncJSONBaseProvider):\n- logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n+ logger = logging.getLogger(\"web3.providers.AsyncHTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider\n", "issue": "Fix logger for AsyncHTTPProvider\nAsyncHTTPProvider is getting the logger for `web3.providers.HTTPProvider` but should instead use `web3.providers.AsyncHTTPProvider`\n", "before_files": [{"content": "from .async_base import ( # noqa: F401\n AsyncBaseProvider,\n)\nfrom .base import ( # noqa: F401\n BaseProvider,\n JSONBaseProvider,\n)\nfrom .ipc import ( # noqa: F401,\n IPCProvider,\n)\nfrom .rpc import ( # noqa: F401,\n HTTPProvider,\n)\nfrom .websocket import ( # noqa: F401,\n WebsocketProvider,\n)\nfrom .auto import ( # noqa: F401,\n AutoProvider,\n)\n", "path": "web3/providers/__init__.py"}, {"content": "import logging\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Optional,\n Tuple,\n Union,\n)\n\nfrom aiohttp import (\n ClientSession,\n)\nfrom eth_typing import (\n URI,\n)\nfrom eth_utils import (\n to_dict,\n)\n\nfrom web3._utils.http import (\n construct_user_agent,\n)\nfrom web3._utils.request import (\n async_cache_and_return_session as _async_cache_and_return_session,\n async_make_post_request,\n get_default_http_endpoint,\n)\nfrom web3.types import (\n AsyncMiddleware,\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom ..datastructures import (\n NamedElementOnion,\n)\nfrom ..middleware.exception_retry_request import (\n async_http_retry_request_middleware,\n)\nfrom .async_base import (\n AsyncJSONBaseProvider,\n)\n\n\nclass AsyncHTTPProvider(AsyncJSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider\n _middlewares: Tuple[AsyncMiddleware, ...] 
= NamedElementOnion([(async_http_retry_request_middleware, \"http_retry_request\")]) # type: ignore # noqa: E501\n\n def __init__(\n self,\n endpoint_uri: Optional[Union[URI, str]] = None,\n request_kwargs: Optional[Any] = None,\n ) -> None:\n if endpoint_uri is None:\n self.endpoint_uri = get_default_http_endpoint()\n else:\n self.endpoint_uri = URI(endpoint_uri)\n\n self._request_kwargs = request_kwargs or {}\n\n super().__init__()\n\n async def cache_async_session(self, session: ClientSession) -> ClientSession:\n return await _async_cache_and_return_session(self.endpoint_uri, session)\n\n def __str__(self) -> str:\n return f\"RPC connection {self.endpoint_uri}\"\n\n @to_dict\n def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:\n if \"headers\" not in self._request_kwargs:\n yield \"headers\", self.get_request_headers()\n for key, value in self._request_kwargs.items():\n yield key, value\n\n def get_request_headers(self) -> Dict[str, str]:\n return {\n \"Content-Type\": \"application/json\",\n \"User-Agent\": construct_user_agent(str(type(self))),\n }\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request HTTP. URI: {self.endpoint_uri}, Method: {method}\"\n )\n request_data = self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri, request_data, **self.get_request_kwargs()\n )\n response = self.decode_rpc_response(raw_response)\n self.logger.debug(\n f\"Getting response HTTP. URI: {self.endpoint_uri}, \"\n f\"Method: {method}, Response: {response}\"\n )\n return response\n", "path": "web3/providers/async_rpc.py"}]} | 1,585 | 238 |
gh_patches_debug_700 | rasdani/github-patches | git_diff | saulpw__visidata-1304 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[undo develop] undoing a reload blanks the entire sheet
Since v2.5, undo for reload has been removed and replaced with a quitguard + confirm. However, in that case an undo entry should not be recorded at all.
Current behaviour is that undoing a reload blanks the entire sheet.
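A minimal sketch of one possible fix, assuming the reload command's longname is `reload-sheet`: extend the `nonUndo` prefix list that `isUndoableCommand` consults in `visidata/undo.py`, so no undofunc is ever attached to reloads.

```python
# Hypothetical one-line change: commands whose longname starts with any of
# these prefixes never get undo functions recorded for them.
nonUndo = '''commit open-file reload-sheet'''.split()
```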
</issue>
<code>
[start of visidata/undo.py]
1 import itertools
2 from copy import copy
3
4 from visidata import vd, options, VisiData, BaseSheet, UNLOADED
5
6 BaseSheet.init('undone', list) # list of CommandLogRow for redo after undo
7
8 vd.option('undo', True, 'enable undo/redo')
9
10 nonUndo = '''commit open-file'''.split()
11
12 def isUndoableCommand(longname):
13 for n in nonUndo:
14 if longname.startswith(n):
15 return False
16 return True
17
18 @VisiData.api
19 def addUndo(vd, undofunc, *args, **kwargs):
20 'On undo of latest command, call ``undofunc(*args, **kwargs)``.'
21 if options.undo:
22 # occurs when VisiData is just starting up
23 if getattr(vd, 'activeCommand', UNLOADED) is UNLOADED:
24 return
25 r = vd.modifyCommand
26 # some special commands, like open-file, do not have an undofuncs set
27 if not r or not isUndoableCommand(r.longname):
28 return
29 if not r.undofuncs:
30 r.undofuncs = []
31 r.undofuncs.append((undofunc, args, kwargs))
32
33
34 @VisiData.api
35 def undo(vd, sheet):
36 if not options.undo:
37 vd.fail("options.undo not enabled")
38
39 # don't allow undo of first command on a sheet, which is always the command that created the sheet.
40 for cmdlogrow in sheet.cmdlog_sheet.rows[:0:-1]:
41 if cmdlogrow.undofuncs:
42 for undofunc, args, kwargs, in cmdlogrow.undofuncs[::-1]:
43 undofunc(*args, **kwargs)
44 sheet.undone.append(cmdlogrow)
45 sheet.cmdlog_sheet.rows.remove(cmdlogrow)
46
47 vd.clearCaches() # undofunc can invalidate the drawcache
48
49 vd.moveToReplayContext(cmdlogrow, sheet)
50 vd.status("%s undone" % cmdlogrow.longname)
51 return
52
53 vd.fail("nothing to undo on current sheet")
54
55
56 @VisiData.api
57 def redo(vd, sheet):
58 sheet.undone or vd.fail("nothing to redo")
59 cmdlogrow = sheet.undone.pop()
60 vd.replayOne(cmdlogrow)
61 vd.status("%s redone" % cmdlogrow.longname)
62
63 # undoers
64 def undoAttrFunc(objs, attrname):
65 'Return closure that sets attrname on each obj to its former value.'
66 oldvals = [(o, getattr(o, attrname)) for o in objs]
67 def _undofunc():
68 for o, v in oldvals:
69 setattr(o, attrname, v)
70 return _undofunc
71
72
73 class Fanout(list):
74 'Fan out attribute changes to every element in a list.'
75 def __getattr__(self, k):
76 return Fanout([getattr(o, k) for o in self])
77
78 def __setattr__(self, k, v):
79 vd.addUndo(undoAttrFunc(self, k))
80 for o in self:
81 setattr(o, k, v)
82
83 def __call__(self, *args, **kwargs):
84 return Fanout([o(*args, **kwargs) for o in self])
85
86
87 def undoAttrCopyFunc(objs, attrname):
88 'Return closure that sets attrname on each obj to its former value.'
89 oldvals = [(o, copy(getattr(o, attrname))) for o in objs]
90 def _undofunc():
91 for o, v in oldvals:
92 setattr(o, attrname, v)
93 return _undofunc
94
95
96 @VisiData.api
97 def addUndoSetValues(vd, cols, rows):
98 'Add undo function to reset values for *rows* in *cols*.'
99 oldvals = [(c, r, c.getValue(r)) for c,r in itertools.product(cols, vd.Progress(rows, gerund='doing'))]
100 def _undo():
101 for c, r, v in oldvals:
102 c.setValue(r, v)
103 vd.addUndo(_undo)
104
105 @VisiData.api
106 def addUndoColNames(vd, cols):
107 oldnames = [(c, c.name) for c in cols]
108 def _undo():
109 for c, name in oldnames:
110 c.name = name
111 vd.addUndo(_undo)
112
113
114 BaseSheet.addCommand('U', 'undo-last', 'vd.undo(sheet)', 'Undo the most recent change (options.undo must be enabled)')
115 BaseSheet.addCommand('R', 'redo-last', 'vd.redo(sheet)', 'Redo the most recent undo (options.undo must be enabled)')
116
[end of visidata/undo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/visidata/undo.py b/visidata/undo.py
--- a/visidata/undo.py
+++ b/visidata/undo.py
@@ -7,7 +7,7 @@
vd.option('undo', True, 'enable undo/redo')
-nonUndo = '''commit open-file'''.split()
+nonUndo = '''commit open-file reload-sheet'''.split()
def isUndoableCommand(longname):
for n in nonUndo:
| {"golden_diff": "diff --git a/visidata/undo.py b/visidata/undo.py\n--- a/visidata/undo.py\n+++ b/visidata/undo.py\n@@ -7,7 +7,7 @@\n \n vd.option('undo', True, 'enable undo/redo')\n \n-nonUndo = '''commit open-file'''.split()\n+nonUndo = '''commit open-file reload-sheet'''.split()\n \n def isUndoableCommand(longname):\n for n in nonUndo:\n", "issue": "[undo develop] undoing a reload blanks the entire sheet\nSince v2.5 undo for reload has been removed, and replaced with quitguard+confirm! However, in that case an undo should not be set.\r\n\r\nCurrent behaviour is that it blanks the sheet.\n", "before_files": [{"content": "import itertools\nfrom copy import copy\n\nfrom visidata import vd, options, VisiData, BaseSheet, UNLOADED\n\nBaseSheet.init('undone', list) # list of CommandLogRow for redo after undo\n\nvd.option('undo', True, 'enable undo/redo')\n\nnonUndo = '''commit open-file'''.split()\n\ndef isUndoableCommand(longname):\n for n in nonUndo:\n if longname.startswith(n):\n return False\n return True\n\[email protected]\ndef addUndo(vd, undofunc, *args, **kwargs):\n 'On undo of latest command, call ``undofunc(*args, **kwargs)``.'\n if options.undo:\n # occurs when VisiData is just starting up\n if getattr(vd, 'activeCommand', UNLOADED) is UNLOADED:\n return\n r = vd.modifyCommand\n # some special commands, like open-file, do not have an undofuncs set\n if not r or not isUndoableCommand(r.longname):\n return\n if not r.undofuncs:\n r.undofuncs = []\n r.undofuncs.append((undofunc, args, kwargs))\n\n\[email protected]\ndef undo(vd, sheet):\n if not options.undo:\n vd.fail(\"options.undo not enabled\")\n\n # don't allow undo of first command on a sheet, which is always the command that created the sheet.\n for cmdlogrow in sheet.cmdlog_sheet.rows[:0:-1]:\n if cmdlogrow.undofuncs:\n for undofunc, args, kwargs, in cmdlogrow.undofuncs[::-1]:\n undofunc(*args, **kwargs)\n sheet.undone.append(cmdlogrow)\n sheet.cmdlog_sheet.rows.remove(cmdlogrow)\n\n vd.clearCaches() # undofunc can invalidate the drawcache\n\n vd.moveToReplayContext(cmdlogrow, sheet)\n vd.status(\"%s undone\" % cmdlogrow.longname)\n return\n\n vd.fail(\"nothing to undo on current sheet\")\n\n\[email protected]\ndef redo(vd, sheet):\n sheet.undone or vd.fail(\"nothing to redo\")\n cmdlogrow = sheet.undone.pop()\n vd.replayOne(cmdlogrow)\n vd.status(\"%s redone\" % cmdlogrow.longname)\n\n# undoers\ndef undoAttrFunc(objs, attrname):\n 'Return closure that sets attrname on each obj to its former value.'\n oldvals = [(o, getattr(o, attrname)) for o in objs]\n def _undofunc():\n for o, v in oldvals:\n setattr(o, attrname, v)\n return _undofunc\n\n\nclass Fanout(list):\n 'Fan out attribute changes to every element in a list.'\n def __getattr__(self, k):\n return Fanout([getattr(o, k) for o in self])\n\n def __setattr__(self, k, v):\n vd.addUndo(undoAttrFunc(self, k))\n for o in self:\n setattr(o, k, v)\n\n def __call__(self, *args, **kwargs):\n return Fanout([o(*args, **kwargs) for o in self])\n\n\ndef undoAttrCopyFunc(objs, attrname):\n 'Return closure that sets attrname on each obj to its former value.'\n oldvals = [(o, copy(getattr(o, attrname))) for o in objs]\n def _undofunc():\n for o, v in oldvals:\n setattr(o, attrname, v)\n return _undofunc\n\n\[email protected]\ndef addUndoSetValues(vd, cols, rows):\n 'Add undo function to reset values for *rows* in *cols*.'\n oldvals = [(c, r, c.getValue(r)) for c,r in itertools.product(cols, vd.Progress(rows, gerund='doing'))]\n def _undo():\n for c, r, v in oldvals:\n c.setValue(r, v)\n 
vd.addUndo(_undo)\n\[email protected]\ndef addUndoColNames(vd, cols):\n oldnames = [(c, c.name) for c in cols]\n def _undo():\n for c, name in oldnames:\n c.name = name\n vd.addUndo(_undo)\n\n\nBaseSheet.addCommand('U', 'undo-last', 'vd.undo(sheet)', 'Undo the most recent change (options.undo must be enabled)')\nBaseSheet.addCommand('R', 'redo-last', 'vd.redo(sheet)', 'Redo the most recent undo (options.undo must be enabled)')\n", "path": "visidata/undo.py"}]} | 1,851 | 103 |
gh_patches_debug_22422 | rasdani/github-patches | git_diff | pypi__warehouse-1335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle Translation of the <title> tag
Currently we can't really translate the `<title>` tag because our block doesn't allow a template to add an l20n ID to it.
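For context, the `l20n` Jinja2 global defined below only renders an attribute string, so the base template's title block would need a hook to receive it. A quick sketch of what the helper emits (the `page-title` identifier is invented for this example):

```python
from warehouse.i18n.l20n import l20n

l20n("page-title")
# -> data-l10n-id="page-title"
l20n("page-title", name="Warehouse")
# -> data-l10n-id="page-title" data-l10n-args="..."  (kwargs JSON-encoded, HTML-escaped)
```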
</issue>
<code>
[start of warehouse/i18n/l20n.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import jinja2
14
15 from markupsafe import Markup as M # noqa
16
17 from warehouse.filters import tojson
18
19
20 _L20N_TEMPLATE = jinja2.Template(
21 'data-l10n-id="{{ tid }}"'
22 '{% if data %} data-l10n-args="{{ data }}"{% endif %}',
23 autoescape=True,
24 )
25
26
27 def l20n(tid, **kwargs):
28 data = tojson(kwargs) if kwargs else None
29 return M(_L20N_TEMPLATE.render(tid=tid, data=data))
30
[end of warehouse/i18n/l20n.py]
[start of warehouse/i18n/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from babel.core import Locale
14
15
16 def _locale(request):
17 """
18 Computes a babel.core:Locale() object for this request.
19 """
20 return Locale.parse(request.locale_name)
21
22
23 def includeme(config):
24 # Add the request attributes
25 config.add_request_method(_locale, name="locale", reify=True)
26
27 # Register our i18n/l10n filters for Jinja2
28 filters = config.get_settings().setdefault("jinja2.filters", {})
29 filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
30 filters.setdefault(
31 "format_datetime",
32 "warehouse.i18n.filters:format_datetime",
33 )
34 filters.setdefault(
35 "format_rfc822_datetime",
36 "warehouse.i18n.filters:format_rfc822_datetime",
37 )
38
39 # Register our utility functions with Jinja2
40 jglobals = config.get_settings().setdefault("jinja2.globals", {})
41 jglobals.setdefault("l20n", "warehouse.i18n.l20n:l20n")
42
[end of warehouse/i18n/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py
--- a/warehouse/i18n/__init__.py
+++ b/warehouse/i18n/__init__.py
@@ -35,7 +35,3 @@
"format_rfc822_datetime",
"warehouse.i18n.filters:format_rfc822_datetime",
)
-
- # Register our utility functions with Jinja2
- jglobals = config.get_settings().setdefault("jinja2.globals", {})
- jglobals.setdefault("l20n", "warehouse.i18n.l20n:l20n")
diff --git a/warehouse/i18n/l20n.py b/warehouse/i18n/l20n.py
deleted file mode 100644
--- a/warehouse/i18n/l20n.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import jinja2
-
-from markupsafe import Markup as M # noqa
-
-from warehouse.filters import tojson
-
-
-_L20N_TEMPLATE = jinja2.Template(
- 'data-l10n-id="{{ tid }}"'
- '{% if data %} data-l10n-args="{{ data }}"{% endif %}',
- autoescape=True,
-)
-
-
-def l20n(tid, **kwargs):
- data = tojson(kwargs) if kwargs else None
- return M(_L20N_TEMPLATE.render(tid=tid, data=data))
| {"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -35,7 +35,3 @@\n \"format_rfc822_datetime\",\n \"warehouse.i18n.filters:format_rfc822_datetime\",\n )\n-\n- # Register our utility functions with Jinja2\n- jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n- jglobals.setdefault(\"l20n\", \"warehouse.i18n.l20n:l20n\")\ndiff --git a/warehouse/i18n/l20n.py b/warehouse/i18n/l20n.py\ndeleted file mode 100644\n--- a/warehouse/i18n/l20n.py\n+++ /dev/null\n@@ -1,29 +0,0 @@\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import jinja2\n-\n-from markupsafe import Markup as M # noqa\n-\n-from warehouse.filters import tojson\n-\n-\n-_L20N_TEMPLATE = jinja2.Template(\n- 'data-l10n-id=\"{{ tid }}\"'\n- '{% if data %} data-l10n-args=\"{{ data }}\"{% endif %}',\n- autoescape=True,\n-)\n-\n-\n-def l20n(tid, **kwargs):\n- data = tojson(kwargs) if kwargs else None\n- return M(_L20N_TEMPLATE.render(tid=tid, data=data))\n", "issue": "Handle Translation of the <title> tag\nCurrently we can't really translate the `<title>` tag because our block doesn't allow a template to add a l20n ID to it.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport jinja2\n\nfrom markupsafe import Markup as M # noqa\n\nfrom warehouse.filters import tojson\n\n\n_L20N_TEMPLATE = jinja2.Template(\n 'data-l10n-id=\"{{ tid }}\"'\n '{% if data %} data-l10n-args=\"{{ data }}\"{% endif %}',\n autoescape=True,\n)\n\n\ndef l20n(tid, **kwargs):\n data = tojson(kwargs) if kwargs else None\n return M(_L20N_TEMPLATE.render(tid=tid, data=data))\n", "path": "warehouse/i18n/l20n.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name)\n\n\ndef includeme(config):\n # Add the request 
attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\n \"format_datetime\",\n \"warehouse.i18n.filters:format_datetime\",\n )\n filters.setdefault(\n \"format_rfc822_datetime\",\n \"warehouse.i18n.filters:format_rfc822_datetime\",\n )\n\n # Register our utility functions with Jinja2\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"l20n\", \"warehouse.i18n.l20n:l20n\")\n", "path": "warehouse/i18n/__init__.py"}]} | 1,327 | 478 |
gh_patches_debug_28516 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Avoid sending typing activity when bot is invoked as skill
We should port this once the C# PR is merged.
See [parent](https://github.com/microsoft/botframework-sdk/issues/6049)
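A sketch of the skill check the port needs, assuming the Python SDK mirrors the C# change and reuses the existing auth helpers in `botframework.connector.auth`:

```python
from botbuilder.core import BotAdapter, TurnContext
from botframework.connector.auth import ClaimsIdentity, SkillValidation

def is_skill_bot(context: TurnContext) -> bool:
    # True when the turn's claims identity carries skill claims, i.e. the bot
    # was invoked as a skill and should suppress typing activities.
    claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY)
    return isinstance(claims_identity, ClaimsIdentity) and SkillValidation.is_skill_claim(
        claims_identity.claims
    )
```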
</issue>
<code>
[start of libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 import asyncio
4 from typing import Awaitable, Callable
5
6 from botbuilder.schema import Activity, ActivityTypes
7
8 from .middleware_set import Middleware
9 from .turn_context import TurnContext
10
11
12 class Timer:
13 clear_timer = False
14
15 def set_timeout(self, func, span):
16 async def some_fn(): # pylint: disable=function-redefined
17 await asyncio.sleep(span)
18 if not self.clear_timer:
19 await func()
20
21 asyncio.ensure_future(some_fn())
22
23 def set_clear_timer(self):
24 self.clear_timer = True
25
26
27 class ShowTypingMiddleware(Middleware):
28 """
29 When added, this middleware will send typing activities back to the user when a Message activity
30 is received to let them know that the bot has received the message and is working on the response.
31 You can specify a delay before the first typing activity is sent and then a frequency, which
32 determines how often another typing activity is sent. Typing activities will continue to be sent
33 until your bot sends another message back to the user.
34 """
35
36 def __init__(self, delay: float = 0.5, period: float = 2.0):
37 """
38 Initializes the middleware.
39
40 :param delay: Delay in seconds for the first typing indicator to be sent.
41 :param period: Delay in seconds for subsequent typing indicators.
42 """
43
44 if delay < 0:
45 raise ValueError("Delay must be greater than or equal to zero")
46
47 if period <= 0:
48 raise ValueError("Repeat period must be greater than zero")
49
50 self._delay = delay
51 self._period = period
52
53 async def on_turn(
54 self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
55 ):
56 timer = Timer()
57
58 def start_interval(context: TurnContext, delay, period):
59 async def aux():
60 typing_activity = Activity(
61 type=ActivityTypes.typing, relates_to=context.activity.relates_to,
62 )
63
64 conversation_reference = TurnContext.get_conversation_reference(
65 context.activity
66 )
67
68 typing_activity = TurnContext.apply_conversation_reference(
69 typing_activity, conversation_reference
70 )
71
72 asyncio.ensure_future(
73 context.adapter.send_activities(context, [typing_activity])
74 )
75
76 # restart the timer, with the 'period' value for the delay
77 timer.set_timeout(aux, period)
78
79 # first time through we use the 'delay' value for the timer.
80 timer.set_timeout(aux, delay)
81
82 def stop_interval():
83 timer.set_clear_timer()
84
85 # if it's a message, start sending typing activities until the
86 # bot logic is done.
87 if context.activity.type == ActivityTypes.message:
88 start_interval(context, self._delay, self._period)
89
90 # call the bot logic
91 result = await logic()
92
93 stop_interval()
94
95 return result
96
[end of libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
@@ -4,7 +4,9 @@
from typing import Awaitable, Callable
from botbuilder.schema import Activity, ActivityTypes
+from botframework.connector.auth import ClaimsIdentity, SkillValidation
+from .bot_adapter import BotAdapter
from .middleware_set import Middleware
from .turn_context import TurnContext
@@ -82,9 +84,12 @@
def stop_interval():
timer.set_clear_timer()
- # if it's a message, start sending typing activities until the
- # bot logic is done.
- if context.activity.type == ActivityTypes.message:
+ # Start a timer to periodically send the typing activity
+ # (bots running as skills should not send typing activity)
+ if (
+ context.activity.type == ActivityTypes.message
+ and not ShowTypingMiddleware._is_skill_bot(context)
+ ):
start_interval(context, self._delay, self._period)
# call the bot logic
@@ -93,3 +98,10 @@
stop_interval()
return result
+
+ @staticmethod
+ def _is_skill_bot(context: TurnContext) -> bool:
+ claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY)
+ return isinstance(
+ claims_identity, ClaimsIdentity
+ ) and SkillValidation.is_skill_claim(claims_identity.claims)
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n@@ -4,7 +4,9 @@\n from typing import Awaitable, Callable\r\n \r\n from botbuilder.schema import Activity, ActivityTypes\r\n+from botframework.connector.auth import ClaimsIdentity, SkillValidation\r\n \r\n+from .bot_adapter import BotAdapter\r\n from .middleware_set import Middleware\r\n from .turn_context import TurnContext\r\n \r\n@@ -82,9 +84,12 @@\n def stop_interval():\r\n timer.set_clear_timer()\r\n \r\n- # if it's a message, start sending typing activities until the\r\n- # bot logic is done.\r\n- if context.activity.type == ActivityTypes.message:\r\n+ # Start a timer to periodically send the typing activity\r\n+ # (bots running as skills should not send typing activity)\r\n+ if (\r\n+ context.activity.type == ActivityTypes.message\r\n+ and not ShowTypingMiddleware._is_skill_bot(context)\r\n+ ):\r\n start_interval(context, self._delay, self._period)\r\n \r\n # call the bot logic\r\n@@ -93,3 +98,10 @@\n stop_interval()\r\n \r\n return result\r\n+\r\n+ @staticmethod\r\n+ def _is_skill_bot(context: TurnContext) -> bool:\r\n+ claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY)\r\n+ return isinstance(\r\n+ claims_identity, ClaimsIdentity\r\n+ ) and SkillValidation.is_skill_claim(claims_identity.claims)\n", "issue": "Avoid sending typing activity when bot is invoked as skill\nWe should port this once the C# PR is merged.\r\nSee [parent](https://github.com/microsoft/botframework-sdk/issues/6049)\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\nimport asyncio\r\nfrom typing import Awaitable, Callable\r\n\r\nfrom botbuilder.schema import Activity, ActivityTypes\r\n\r\nfrom .middleware_set import Middleware\r\nfrom .turn_context import TurnContext\r\n\r\n\r\nclass Timer:\r\n clear_timer = False\r\n\r\n def set_timeout(self, func, span):\r\n async def some_fn(): # pylint: disable=function-redefined\r\n await asyncio.sleep(span)\r\n if not self.clear_timer:\r\n await func()\r\n\r\n asyncio.ensure_future(some_fn())\r\n\r\n def set_clear_timer(self):\r\n self.clear_timer = True\r\n\r\n\r\nclass ShowTypingMiddleware(Middleware):\r\n \"\"\"\r\n When added, this middleware will send typing activities back to the user when a Message activity\r\n is received to let them know that the bot has received the message and is working on the response.\r\n You can specify a delay before the first typing activity is sent and then a frequency, which\r\n determines how often another typing activity is sent. 
Typing activities will continue to be sent\r\n until your bot sends another message back to the user.\r\n \"\"\"\r\n\r\n def __init__(self, delay: float = 0.5, period: float = 2.0):\r\n \"\"\"\r\n Initializes the middleware.\r\n\r\n :param delay: Delay in seconds for the first typing indicator to be sent.\r\n :param period: Delay in seconds for subsequent typing indicators.\r\n \"\"\"\r\n\r\n if delay < 0:\r\n raise ValueError(\"Delay must be greater than or equal to zero\")\r\n\r\n if period <= 0:\r\n raise ValueError(\"Repeat period must be greater than zero\")\r\n\r\n self._delay = delay\r\n self._period = period\r\n\r\n async def on_turn(\r\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\r\n ):\r\n timer = Timer()\r\n\r\n def start_interval(context: TurnContext, delay, period):\r\n async def aux():\r\n typing_activity = Activity(\r\n type=ActivityTypes.typing, relates_to=context.activity.relates_to,\r\n )\r\n\r\n conversation_reference = TurnContext.get_conversation_reference(\r\n context.activity\r\n )\r\n\r\n typing_activity = TurnContext.apply_conversation_reference(\r\n typing_activity, conversation_reference\r\n )\r\n\r\n asyncio.ensure_future(\r\n context.adapter.send_activities(context, [typing_activity])\r\n )\r\n\r\n # restart the timer, with the 'period' value for the delay\r\n timer.set_timeout(aux, period)\r\n\r\n # first time through we use the 'delay' value for the timer.\r\n timer.set_timeout(aux, delay)\r\n\r\n def stop_interval():\r\n timer.set_clear_timer()\r\n\r\n # if it's a message, start sending typing activities until the\r\n # bot logic is done.\r\n if context.activity.type == ActivityTypes.message:\r\n start_interval(context, self._delay, self._period)\r\n\r\n # call the bot logic\r\n result = await logic()\r\n\r\n stop_interval()\r\n\r\n return result\r\n", "path": "libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py"}]} | 1,412 | 375 |
gh_patches_debug_1724 | rasdani/github-patches | git_diff | translate__pootle-5621 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stats are wrong for children where the child name is repeated in the child's descendants
This causes the aggregated stats for those children to be computed incorrectly.
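A minimal illustration of the failure mode, with paths invented for the example: `str.replace` removes every occurrence of the prefix, not just the leading one, so a repeated child name corrupts the derived key.

```python
tp_path = "/a/"
child_path = "/a/x/a/y"   # child "x" has a descendant directory also named "a"

child_path.replace(tp_path, "").split("/")[0]   # -> "xy"  (wrong root child)
child_path[len(tp_path):].split("/")[0]         # -> "x"   (prefix-only, correct)
```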
</issue>
<code>
[start of pootle/apps/pootle_data/directory_data.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models import Max
10
11 from pootle_translationproject.models import TranslationProject
12
13 from .utils import RelatedStoresDataTool
14
15
16 class DirectoryDataTool(RelatedStoresDataTool):
17 """Retrieves aggregate stats for a Directory"""
18
19 group_by = ("store__parent__tp_path", )
20 cache_key_name = "directory"
21
22 @property
23 def context_name(self):
24 return self.context.pootle_path
25
26 @property
27 def max_unit_revision(self):
28 try:
29 return self.context.translationproject.data_tool.max_unit_revision
30 except TranslationProject.DoesNotExist:
31 return self.all_stat_data.aggregate(rev=Max("max_unit_revision"))["rev"]
32
33 def filter_data(self, qs):
34 return (
35 qs.filter(
36 store__translation_project=self.context.translation_project,
37 store__parent__tp_path__startswith=self.context.tp_path)
38 .exclude(store__parent=self.context))
39
40 def get_children_stats(self, qs):
41 children = {}
42 for child in qs.iterator():
43 self.add_child_stats(children, child)
44 child_stores = self.data_model.filter(store__parent=self.context).values(
45 *("store__name", ) + self.max_fields + self.sum_fields)
46 for child in child_stores:
47 self.add_child_stats(
48 children,
49 child,
50 root=child["store__name"],
51 use_aggregates=False)
52 self.add_submission_info(self.stat_data, children)
53 self.add_last_created_info(child_stores, children)
54 return children
55
56 def get_root_child_path(self, child):
57 return (
58 child["store__parent__tp_path"].replace(
59 self.context.tp_path, "").split("/")[0])
60
[end of pootle/apps/pootle_data/directory_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_data/directory_data.py b/pootle/apps/pootle_data/directory_data.py
--- a/pootle/apps/pootle_data/directory_data.py
+++ b/pootle/apps/pootle_data/directory_data.py
@@ -54,6 +54,5 @@
return children
def get_root_child_path(self, child):
- return (
- child["store__parent__tp_path"].replace(
- self.context.tp_path, "").split("/")[0])
+ return child["store__parent__tp_path"][
+ len(self.context.tp_path):].split("/")[0]
| {"golden_diff": "diff --git a/pootle/apps/pootle_data/directory_data.py b/pootle/apps/pootle_data/directory_data.py\n--- a/pootle/apps/pootle_data/directory_data.py\n+++ b/pootle/apps/pootle_data/directory_data.py\n@@ -54,6 +54,5 @@\n return children\n \n def get_root_child_path(self, child):\n- return (\n- child[\"store__parent__tp_path\"].replace(\n- self.context.tp_path, \"\").split(\"/\")[0])\n+ return child[\"store__parent__tp_path\"][\n+ len(self.context.tp_path):].split(\"/\")[0]\n", "issue": "Stats are wrong for children where the child name is repeated in the childs descendants\nthis is causing stats to foo\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import Max\n\nfrom pootle_translationproject.models import TranslationProject\n\nfrom .utils import RelatedStoresDataTool\n\n\nclass DirectoryDataTool(RelatedStoresDataTool):\n \"\"\"Retrieves aggregate stats for a Directory\"\"\"\n\n group_by = (\"store__parent__tp_path\", )\n cache_key_name = \"directory\"\n\n @property\n def context_name(self):\n return self.context.pootle_path\n\n @property\n def max_unit_revision(self):\n try:\n return self.context.translationproject.data_tool.max_unit_revision\n except TranslationProject.DoesNotExist:\n return self.all_stat_data.aggregate(rev=Max(\"max_unit_revision\"))[\"rev\"]\n\n def filter_data(self, qs):\n return (\n qs.filter(\n store__translation_project=self.context.translation_project,\n store__parent__tp_path__startswith=self.context.tp_path)\n .exclude(store__parent=self.context))\n\n def get_children_stats(self, qs):\n children = {}\n for child in qs.iterator():\n self.add_child_stats(children, child)\n child_stores = self.data_model.filter(store__parent=self.context).values(\n *(\"store__name\", ) + self.max_fields + self.sum_fields)\n for child in child_stores:\n self.add_child_stats(\n children,\n child,\n root=child[\"store__name\"],\n use_aggregates=False)\n self.add_submission_info(self.stat_data, children)\n self.add_last_created_info(child_stores, children)\n return children\n\n def get_root_child_path(self, child):\n return (\n child[\"store__parent__tp_path\"].replace(\n self.context.tp_path, \"\").split(\"/\")[0])\n", "path": "pootle/apps/pootle_data/directory_data.py"}]} | 1,115 | 144 |
gh_patches_debug_15810 | rasdani/github-patches | git_diff | zulip__zulip-9272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the "Delete streams" administrative tab
Now that we have a "Delete stream" button in the main streams UI, which has lots more context on description/traffic/subscribers, this page is useless. We should just remove it.
We should make sure to remove the documentation in /help/ linking to this as well.
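Concretely, the removal would cover this `link_mapping` entry in `zerver/lib/bugdown/help_settings_links.py` (shown below), plus any help articles that reference it through the macro form the file's `REGEXP` matches:

```python
# Entry slated for removal; /help/ articles reference it as
# {settings_tab|streams-list-admin}
'streams-list-admin': ['Manage organization', 'Delete streams',
                       '/#organization/streams-list-admin'],
```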
</issue>
<code>
[start of zerver/lib/bugdown/help_settings_links.py]
1 import re
2 import markdown
3 from typing import Any, Dict, List, Optional, Union, Text
4 from typing.re import Match
5 from markdown.preprocessors import Preprocessor
6
7 REGEXP = re.compile(r'\{settings_tab\|(?P<setting_identifier>.*?)\}')
8
9 link_mapping = {
10 # a mapping from the setting identifier that is the same as the final URL
11 # breadcrumb to that setting to the name of its setting type, the setting
12 # name as it appears in the user interface, and a relative link that can
13 # be used to get to that setting
14 'your-account': ['Settings', 'Your account', '/#settings/your-account'],
15 'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],
16 'notifications': ['Settings', 'Notifications', '/#settings/notifications'],
17 'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],
18 'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],
19 'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],
20 'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],
21
22 'organization-profile': ['Manage organization', 'Organization profile',
23 '/#organization/organization-profile'],
24 'organization-settings': ['Manage organization', 'Organization settings',
25 '/#organization/organization-settings'],
26 'organization-permissions': ['Manage organization', 'Organization permissions',
27 '/#organization/organization-permissions'],
28 'emoji-settings': ['Manage organization', 'Custom emoji',
29 '/#organization/emoji-settings'],
30 'auth-methods': ['Manage organization', 'Authentication methods',
31 '/#organization/auth-methods'],
32 'user-groups-admin': ['Manage organization', 'User groups',
33 '/#organization/user-groups-admin'],
34 'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],
35 'deactivated-users-admin': ['Manage organization', 'Deactivated users',
36 '/#organization/deactivated-users-admin'],
37 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],
38 'streams-list-admin': ['Manage organization', 'Delete streams',
39 '/#organization/streams-list-admin'],
40 'default-streams-list': ['Manage organization', 'Default streams',
41 '/#organization/default-streams-list'],
42 'filter-settings': ['Manage organization', 'Filter settings',
43 '/#organization/filter-settings'],
44 'profile-field-settings': ['Manage organization', 'Custom profile fields',
45 '/#organization/profile-field-settings'],
46 'invites-list-admin': ['Manage organization', 'Invitations',
47 '/#organization/invites-list-admin'],
48 }
49
50 settings_markdown = """
51 1. From your desktop, click on the **gear**
52 (<i class="icon-vector-cog"></i>) in the upper right corner.
53
54 1. Select **%(setting_type_name)s**.
55
56 1. On the left, click %(setting_reference)s.
57 """
58
59
60 class SettingHelpExtension(markdown.Extension):
61 def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
62 """ Add SettingHelpExtension to the Markdown instance. """
63 md.registerExtension(self)
64 md.preprocessors.add('setting', Setting(), '_begin')
65
66 relative_settings_links = None # type: Optional[bool]
67
68 def set_relative_settings_links(value: bool) -> None:
69 global relative_settings_links
70 relative_settings_links = value
71
72 class Setting(Preprocessor):
73 def run(self, lines: List[str]) -> List[str]:
74 done = False
75 while not done:
76 for line in lines:
77 loc = lines.index(line)
78 match = REGEXP.search(line)
79
80 if match:
81 text = [self.handleMatch(match)]
82 # The line that contains the directive to include the macro
83 # may be preceded or followed by text or tags, in that case
84 # we need to make sure that any preceding or following text
85 # stays the same.
86 line_split = REGEXP.split(line, maxsplit=0)
87 preceding = line_split[0]
88 following = line_split[-1]
89 text = [preceding] + text + [following]
90 lines = lines[:loc] + text + lines[loc+1:]
91 break
92 else:
93 done = True
94 return lines
95
96 def handleMatch(self, match: Match[Text]) -> Text:
97 setting_identifier = match.group('setting_identifier')
98 setting_type_name = link_mapping[setting_identifier][0]
99 setting_name = link_mapping[setting_identifier][1]
100 setting_link = link_mapping[setting_identifier][2]
101 if relative_settings_links:
102 setting_reference = "[%s](%s)" % (setting_name, setting_link)
103 else:
104 setting_reference = "**%s**" % (setting_name,)
105 instructions = settings_markdown % {'setting_type_name': setting_type_name,
106 'setting_reference': setting_reference}
107 return instructions
108
109
110 def makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:
111 return SettingHelpExtension(*args, **kwargs)
112
[end of zerver/lib/bugdown/help_settings_links.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/bugdown/help_settings_links.py b/zerver/lib/bugdown/help_settings_links.py
--- a/zerver/lib/bugdown/help_settings_links.py
+++ b/zerver/lib/bugdown/help_settings_links.py
@@ -35,8 +35,6 @@
'deactivated-users-admin': ['Manage organization', 'Deactivated users',
'/#organization/deactivated-users-admin'],
'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],
- 'streams-list-admin': ['Manage organization', 'Delete streams',
- '/#organization/streams-list-admin'],
'default-streams-list': ['Manage organization', 'Default streams',
'/#organization/default-streams-list'],
'filter-settings': ['Manage organization', 'Filter settings',
| {"golden_diff": "diff --git a/zerver/lib/bugdown/help_settings_links.py b/zerver/lib/bugdown/help_settings_links.py\n--- a/zerver/lib/bugdown/help_settings_links.py\n+++ b/zerver/lib/bugdown/help_settings_links.py\n@@ -35,8 +35,6 @@\n 'deactivated-users-admin': ['Manage organization', 'Deactivated users',\n '/#organization/deactivated-users-admin'],\n 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],\n- 'streams-list-admin': ['Manage organization', 'Delete streams',\n- '/#organization/streams-list-admin'],\n 'default-streams-list': ['Manage organization', 'Default streams',\n '/#organization/default-streams-list'],\n 'filter-settings': ['Manage organization', 'Filter settings',\n", "issue": "Remove the \"Delete streams\" administrative tab\nNow that we have a \"Delete stream\" button in the main streams UI, which has lots more context on description/traffic/subscribers, this page is useless. We should just remove it.\r\n\r\nWe should make sure to remove the documentation in /help/ linking to this as well.\n", "before_files": [{"content": "import re\nimport markdown\nfrom typing import Any, Dict, List, Optional, Union, Text\nfrom typing.re import Match\nfrom markdown.preprocessors import Preprocessor\n\nREGEXP = re.compile(r'\\{settings_tab\\|(?P<setting_identifier>.*?)\\}')\n\nlink_mapping = {\n # a mapping from the setting identifier that is the same as the final URL\n # breadcrumb to that setting to the name of its setting type, the setting\n # name as it appears in the user interface, and a relative link that can\n # be used to get to that setting\n 'your-account': ['Settings', 'Your account', '/#settings/your-account'],\n 'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],\n 'notifications': ['Settings', 'Notifications', '/#settings/notifications'],\n 'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],\n 'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],\n 'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],\n 'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],\n\n 'organization-profile': ['Manage organization', 'Organization profile',\n '/#organization/organization-profile'],\n 'organization-settings': ['Manage organization', 'Organization settings',\n '/#organization/organization-settings'],\n 'organization-permissions': ['Manage organization', 'Organization permissions',\n '/#organization/organization-permissions'],\n 'emoji-settings': ['Manage organization', 'Custom emoji',\n '/#organization/emoji-settings'],\n 'auth-methods': ['Manage organization', 'Authentication methods',\n '/#organization/auth-methods'],\n 'user-groups-admin': ['Manage organization', 'User groups',\n '/#organization/user-groups-admin'],\n 'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],\n 'deactivated-users-admin': ['Manage organization', 'Deactivated users',\n '/#organization/deactivated-users-admin'],\n 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],\n 'streams-list-admin': ['Manage organization', 'Delete streams',\n '/#organization/streams-list-admin'],\n 'default-streams-list': ['Manage organization', 'Default streams',\n '/#organization/default-streams-list'],\n 'filter-settings': ['Manage organization', 'Filter settings',\n '/#organization/filter-settings'],\n 'profile-field-settings': ['Manage organization', 'Custom profile fields',\n '/#organization/profile-field-settings'],\n 
'invites-list-admin': ['Manage organization', 'Invitations',\n '/#organization/invites-list-admin'],\n}\n\nsettings_markdown = \"\"\"\n1. From your desktop, click on the **gear**\n (<i class=\"icon-vector-cog\"></i>) in the upper right corner.\n\n1. Select **%(setting_type_name)s**.\n\n1. On the left, click %(setting_reference)s.\n\"\"\"\n\n\nclass SettingHelpExtension(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add SettingHelpExtension to the Markdown instance. \"\"\"\n md.registerExtension(self)\n md.preprocessors.add('setting', Setting(), '_begin')\n\nrelative_settings_links = None # type: Optional[bool]\n\ndef set_relative_settings_links(value: bool) -> None:\n global relative_settings_links\n relative_settings_links = value\n\nclass Setting(Preprocessor):\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if match:\n text = [self.handleMatch(match)]\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def handleMatch(self, match: Match[Text]) -> Text:\n setting_identifier = match.group('setting_identifier')\n setting_type_name = link_mapping[setting_identifier][0]\n setting_name = link_mapping[setting_identifier][1]\n setting_link = link_mapping[setting_identifier][2]\n if relative_settings_links:\n setting_reference = \"[%s](%s)\" % (setting_name, setting_link)\n else:\n setting_reference = \"**%s**\" % (setting_name,)\n instructions = settings_markdown % {'setting_type_name': setting_type_name,\n 'setting_reference': setting_reference}\n return instructions\n\n\ndef makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:\n return SettingHelpExtension(*args, **kwargs)\n", "path": "zerver/lib/bugdown/help_settings_links.py"}]} | 1,935 | 174 |
gh_patches_debug_25523 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feat: add support for terraform AKS role_based_access_control_enabled
**Describe the issue**
`CKV_AZURE_5`
Recently, azurerm [2.99.0](https://github.com/hashicorp/terraform-provider-azurerm/releases/tag/v2.99.0) was published, which, in preparation for 3.0, brought some syntax changes, one of which is listed below:
***
"Data Source: `azurerm_kubernetes_cluster` - deprecated the `role_based_access_control` block in favour of `azure_active_directory_role_based_access_control` and `role_based_access_control_enabled` properties (https://github.com/hashicorp/terraform-provider-azurerm/issues/15584)"
***
**Examples**
terraform:
```hcl
role_based_access_control_enabled = true
```
This code should pass `CKV_AZURE_5`.
**Version (please complete the following information):**
- Checkov Version 2.0.970
**Additional context**
PR that adds the required support: https://github.com/bridgecrewio/checkov/pull/2648
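For illustration, a sketch of a check that accepts both syntaxes; the dpath-based lookup and key paths follow checkov's flattened-HCL convention, but the function shown here is a simplified stand-in, not the actual patch:

```python
import dpath.util
from checkov.common.models.enums import CheckResult

def scan_resource_conf(conf):
    # Pass when either the legacy block or the new flat property enables RBAC.
    for key in ("role_based_access_control/[0]/enabled",  # azurerm < 2.99.0
                "role_based_access_control_enabled"):     # azurerm >= 2.99.0
        if dpath.util.search(conf, key) and dpath.util.get(conf, key)[0]:
            return CheckResult.PASSED
    return CheckResult.FAILED
```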
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/AKSRbacEnabled.py]
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class AKSRbacEnabled(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure RBAC is enabled on AKS clusters"
8 id = "CKV_AZURE_5"
9 supported_resources = ['azurerm_kubernetes_cluster']
10 categories = [CheckCategories.KUBERNETES]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'role_based_access_control/[0]/enabled'
15
16
17 check = AKSRbacEnabled()
18
[end of checkov/terraform/checks/resource/azure/AKSRbacEnabled.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
--- a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
+++ b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
@@ -1,17 +1,32 @@
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+import dpath.util
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-class AKSRbacEnabled(BaseResourceValueCheck):
+class AKSRbacEnabled(BaseResourceCheck):
def __init__(self):
name = "Ensure RBAC is enabled on AKS clusters"
id = "CKV_AZURE_5"
- supported_resources = ['azurerm_kubernetes_cluster']
+ supported_resources = ["azurerm_kubernetes_cluster"]
categories = [CheckCategories.KUBERNETES]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+ super().__init__(
+ name=name,
+ id=id,
+ categories=categories,
+ supported_resources=supported_resources,
+ )
- def get_inspected_key(self):
- return 'role_based_access_control/[0]/enabled'
+ def scan_resource_conf(self, conf):
+ self.evaluated_keys = [
+ "role_based_access_control/[0]/enabled", # azurerm < 2.99.0
+ "role_based_access_control_enabled", # azurerm >= 2.99.0
+ ]
+
+ for key in self.evaluated_keys:
+ if dpath.search(conf, key) and dpath.get(conf, key)[0]:
+ return CheckResult.PASSED
+
+ return CheckResult.FAILED
check = AKSRbacEnabled()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n--- a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n+++ b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n@@ -1,17 +1,32 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+import dpath.util\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n-class AKSRbacEnabled(BaseResourceValueCheck):\n+class AKSRbacEnabled(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure RBAC is enabled on AKS clusters\"\n id = \"CKV_AZURE_5\"\n- supported_resources = ['azurerm_kubernetes_cluster']\n+ supported_resources = [\"azurerm_kubernetes_cluster\"]\n categories = [CheckCategories.KUBERNETES]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n+ super().__init__(\n+ name=name,\n+ id=id,\n+ categories=categories,\n+ supported_resources=supported_resources,\n+ )\n \n- def get_inspected_key(self):\n- return 'role_based_access_control/[0]/enabled'\n+ def scan_resource_conf(self, conf):\n+ self.evaluated_keys = [\n+ \"role_based_access_control/[0]/enabled\", # azurerm < 2.99.0\n+ \"role_based_access_control_enabled\", # azurerm >= 2.99.0\n+ ]\n+\n+ for key in self.evaluated_keys:\n+ if dpath.search(conf, key) and dpath.get(conf, key)[0]:\n+ return CheckResult.PASSED\n+\n+ return CheckResult.FAILED\n \n \n check = AKSRbacEnabled()\n", "issue": "feat: add support for terraform AKS role_based_access_control_enabled\n**Describe the issue**\r\n`CKV_AZURE_5`\r\nRecently, azurerm [2.99.0](https://github.com/hashicorp/terraform-provider-azurerm/releases/tag/v2.99.0) was published, which, in preparation for 3.0, brought some syntax changes, one of which is listed below:\r\n***\r\n\"Data Source: `azurerm_kubernetes_cluster` - deprecated the `role_based_access_control` block in favour of `azure_active_directory_role_based_access_control` and `role_based_access_control_enabled` properties (https://github.com/hashicorp/terraform-provider-azurerm/issues/15584)\"\r\n***\r\n\r\n**Examples**\r\n\r\nterraform:\r\n```hcl\r\nrole_based_access_control_enabled: true\r\n```\r\nThis code should pass `CKV_AZURE_5`.\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.0.970\r\n\r\n**Additional context**\r\nPR that adds the required support: https://github.com/bridgecrewio/checkov/pull/2648\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSRbacEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure RBAC is enabled on AKS clusters\"\n id = \"CKV_AZURE_5\"\n supported_resources = ['azurerm_kubernetes_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'role_based_access_control/[0]/enabled'\n\n\ncheck = AKSRbacEnabled()\n", "path": "checkov/terraform/checks/resource/azure/AKSRbacEnabled.py"}]} | 981 | 448 |
gh_patches_debug_57408 | rasdani/github-patches | git_diff | kornia__kornia-1861 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bugs when using RandomRGBShift
### Describe the bug
When running the RandomRGBShift augmentation on GPUs, I came across this error:
```
File "/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py", line 100, in apply_transform
return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])
File "/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py", line 17, in shift_rgb
shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
ValueError: only one element tensors can be converted to Python scalars
```
After checking the code, I think the problem is that
```
shifts = [params['r_shift'], params['g_shift'], params['b_shift']]
shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
```
I am also not sure about `.view(1,3,1,1)`: shouldn't it be `.view(-1,3,1,1)`?
### Reproduction steps
```bash
1. Set up the RandomRGBShift augmentation
2. Use the RandomRGBShift augmentation on GPUs with batch size > 1
```
### Expected behavior
```
File "/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py", line 100, in apply_transform
return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])
File "/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py", line 17, in shift_rgb
shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
ValueError: only one element tensors can be converted to Python scalars
```
### Environment
```shell
- PyTorch Version : 1.10
- Linux
- How you installed PyTorch (`pip`):
- Python version: 3.8
- cuda:11.3.0-cudnn8
```
### Additional context
_No response_
</issue>
<code>
[start of kornia/enhance/shift_rgb.py]
1 import torch
2
3 from kornia.testing import KORNIA_CHECK_IS_COLOR, KORNIA_CHECK_IS_TENSOR
4
5
6 def shift_rgb(image: torch.Tensor, r_shift: torch.Tensor, g_shift: torch.Tensor, b_shift: torch.Tensor) -> torch.Tensor:
7 """Shift rgb channels.
8
9 Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.
10 """
11
12 KORNIA_CHECK_IS_TENSOR(image)
13 KORNIA_CHECK_IS_COLOR(image, f"with shape {image.shape}")
14
15 shifts = [r_shift, g_shift, b_shift]
16
17 shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
18
19 return shifted
20
[end of kornia/enhance/shift_rgb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/enhance/shift_rgb.py b/kornia/enhance/shift_rgb.py
--- a/kornia/enhance/shift_rgb.py
+++ b/kornia/enhance/shift_rgb.py
@@ -14,6 +14,6 @@
shifts = [r_shift, g_shift, b_shift]
- shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
+ shifted = (image + torch.stack(shifts).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)
return shifted
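A small, hedged sketch of why this two-part change matters follows; it is not kornia code, and the batch size and shift shapes are assumptions inferred from the traceback in the issue.
```python
import torch

B = 2  # assumed batch size
r_shift, g_shift, b_shift = (torch.rand(B) for _ in range(3))

# torch.Tensor([...]) converts each list element to a Python scalar, which
# fails once each shift holds more than one element: that is the
# "only one element tensors can be converted to Python scalars" error above.
# torch.Tensor([r_shift, g_shift, b_shift])  # would raise ValueError

# torch.stack stays in tensor operations, and view(-1, 3, 1, 1) produces a
# (B, 3, 1, 1) tensor that broadcasts against a (B, 3, H, W) image batch.
shifts = torch.stack([r_shift, g_shift, b_shift]).view(-1, 3, 1, 1)
print(shifts.shape)  # torch.Size([2, 3, 1, 1])
```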
| {"golden_diff": "diff --git a/kornia/enhance/shift_rgb.py b/kornia/enhance/shift_rgb.py\n--- a/kornia/enhance/shift_rgb.py\n+++ b/kornia/enhance/shift_rgb.py\n@@ -14,6 +14,6 @@\n \n shifts = [r_shift, g_shift, b_shift]\n \n- shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n+ shifted = (image + torch.stack(shifts).view(-1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n \n return shifted\n", "issue": "Bugs when using RandomRGBShift\n### Describe the bug\r\n\r\nWhen running RandomRGBShift augmentation on gpus I came across this error \r\n\r\n```\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py\", line 100, in apply_transform\r\n return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py\", line 17, in shift_rgb\r\n shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\r\nValueError: only one element tensors can be converted to Python scalars\r\n```\r\nafter checking the code I think the problem is that \r\n```\r\nshifts = [params['r_shift'], params['g_shift'], params['b_shift']]\r\nshifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\r\n```\r\nand also I am not sure about `.view(1,3,1,1)` shouldn't it be `.view(-1,3,1,1)`\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\n1. setting up RandomRGBShift augmentation\r\n2. Using RandomRGBShift augmentation with gpus and batchsize > 1\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\n```\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/augmentation/_2d/intensity/random_rgb_shift.py\", line 100, in apply_transform\r\n return shift_rgb(inp, params['r_shift'], params['g_shift'], params['b_shift'])\r\n File \"/usr/local/lib/python3.8/dist-packages/kornia/enhance/shift_rgb.py\", line 17, in shift_rgb\r\n shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\r\nValueError: only one element tensors can be converted to Python scalars\r\n```\r\n\r\n### Environment\r\n\r\n```shell\r\n- PyTorch Version : 1.10\r\n- Linux\r\n- How you installed PyTorch (`pip`):\r\n- Python version: 3.8\r\n- cuda:11.3.0-cudnn8\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import torch\n\nfrom kornia.testing import KORNIA_CHECK_IS_COLOR, KORNIA_CHECK_IS_TENSOR\n\n\ndef shift_rgb(image: torch.Tensor, r_shift: torch.Tensor, g_shift: torch.Tensor, b_shift: torch.Tensor) -> torch.Tensor:\n \"\"\"Shift rgb channels.\n\n Shift each image's channel by either r_shift for red, g_shift for green and b_shift for blue channels.\n \"\"\"\n\n KORNIA_CHECK_IS_TENSOR(image)\n KORNIA_CHECK_IS_COLOR(image, f\"with shape {image.shape}\")\n\n shifts = [r_shift, g_shift, b_shift]\n\n shifted = (image + torch.Tensor(shifts).view(1, 3, 1, 1).to(image)).clamp_(min=0, max=1)\n\n return shifted\n", "path": "kornia/enhance/shift_rgb.py"}]} | 1,284 | 161 |
gh_patches_debug_51453 | rasdani/github-patches | git_diff | lutris__lutris-402 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running game changes to default X cursor in main window

I think [this is the code](https://github.com/lutris/lutris/blob/cbe6f5d5d982543a66f95aa62f0d8e26dd462470/lutris/util/display.py#L10) that sets the cursor. This was introduced in the commit https://github.com/lutris/lutris/commit/4bb2d78e5c3626057f9dc10dcc27f74ae67853b9
Right now it's using [GdkCursorType](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#GdkCursorType) to set the cursor, but it seems like we should be using [gdk_cursor_new_from_name()](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#gdk-cursor-new-from-name) instead, which is the recommended way.
</issue>
<code>
[start of lutris/util/display.py]
1 import subprocess
2 from gi.repository import Gdk
3
4 from lutris.util.log import logger
5
6
7 def set_cursor(name, window, display=None):
8 """Set a named mouse cursor for the given window."""
9 cursors = {
10 'default': Gdk.CursorType.ARROW,
11 'wait': Gdk.CursorType.WATCH,
12 }
13
14 if not display:
15 display = Gdk.Display.get_default()
16 cursor = Gdk.Cursor.new_for_display(display, cursors[name])
17 window.set_cursor(cursor)
18
19
20 def get_vidmodes():
21 xrandr_output = subprocess.Popen(["xrandr"],
22 stdout=subprocess.PIPE).communicate()[0]
23 return list([line for line in xrandr_output.decode().split("\n")])
24
25
26 def get_outputs():
27 """Return list of tuples containing output name and geometry."""
28 outputs = []
29 vid_modes = get_vidmodes()
30 if not vid_modes:
31 logger.error("xrandr didn't return anything")
32 return []
33 for line in vid_modes:
34 parts = line.split()
35 if len(parts) < 2:
36 continue
37 if parts[1] == 'connected':
38 if len(parts) == 2:
39 continue
40 geom = parts[2] if parts[2] != 'primary' else parts[3]
41 if geom.startswith('('): # Screen turned off, no geometry
42 continue
43 outputs.append((parts[0], geom))
44 return outputs
45
46
47 def get_output_names():
48 return [output[0] for output in get_outputs()]
49
50
51 def turn_off_except(display):
52 for output in get_outputs():
53 if output[0] != display:
54 subprocess.Popen(["xrandr", "--output", output[0], "--off"])
55
56
57 def get_resolutions():
58 """Return the list of supported screen resolutions."""
59 resolution_list = []
60 for line in get_vidmodes():
61 if line.startswith(" "):
62 resolution_list.append(line.split()[0])
63 return resolution_list
64
65
66 def get_current_resolution(monitor=0):
67 """Return the current resolution for the desktop."""
68 resolution = list()
69 for line in get_vidmodes():
70 if line.startswith(" ") and "*" in line:
71 resolution.append(line.split()[0])
72 if monitor == 'all':
73 return resolution
74 else:
75 return resolution[monitor]
76
77
78 def change_resolution(resolution):
79 """Change display resolution.
80
81 Takes a string for single monitors or a list of displays as returned
82 by get_outputs().
83 """
84 if not resolution:
85 logger.warning("No resolution provided")
86 return
87 if isinstance(resolution, str):
88 logger.debug("Switching resolution to %s", resolution)
89
90 if resolution not in get_resolutions():
91 logger.warning("Resolution %s doesn't exist." % resolution)
92 else:
93 subprocess.Popen(["xrandr", "-s", resolution])
94 else:
95 for display in resolution:
96 display_name = display[0]
97 logger.debug("Switching to %s on %s", display[1], display[0])
98 display_geom = display[1].split('+')
99 display_resolution = display_geom[0]
100 position = (display_geom[1], display_geom[2])
101
102 subprocess.Popen([
103 "xrandr",
104 "--output", display_name,
105 "--mode", display_resolution,
106 "--pos", "{}x{}".format(position[0], position[1])
107 ]).communicate()
108
109
110 def restore_gamma():
111 """Restores gamma to a normal level."""
112 subprocess.Popen(["xgamma", "-gamma", "1.0"])
113
[end of lutris/util/display.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/util/display.py b/lutris/util/display.py
--- a/lutris/util/display.py
+++ b/lutris/util/display.py
@@ -6,14 +6,10 @@
def set_cursor(name, window, display=None):
"""Set a named mouse cursor for the given window."""
- cursors = {
- 'default': Gdk.CursorType.ARROW,
- 'wait': Gdk.CursorType.WATCH,
- }
if not display:
display = Gdk.Display.get_default()
- cursor = Gdk.Cursor.new_for_display(display, cursors[name])
+ cursor = Gdk.Cursor.new_from_name(display, name)
window.set_cursor(cursor)
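As a usage sketch next to this fix, the snippet below shows the named-cursor API in isolation. It assumes a GTK 3 session with PyGObject available and is illustrative rather than lutris code.
```python
import gi

gi.require_version("Gdk", "3.0")
from gi.repository import Gdk

display = Gdk.Display.get_default()
# new_from_name() resolves freedesktop cursor names ("default", "wait", ...)
# against the active cursor theme instead of the legacy GdkCursorType enum,
# and returns None when the theme has no cursor of that name.
cursor = Gdk.Cursor.new_from_name(display, "wait")
if cursor is not None:
    print("got themed cursor:", cursor)
```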
| {"golden_diff": "diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -6,14 +6,10 @@\n \n def set_cursor(name, window, display=None):\n \"\"\"Set a named mouse cursor for the given window.\"\"\"\n- cursors = {\n- 'default': Gdk.CursorType.ARROW,\n- 'wait': Gdk.CursorType.WATCH,\n- }\n \n if not display:\n display = Gdk.Display.get_default()\n- cursor = Gdk.Cursor.new_for_display(display, cursors[name])\n+ cursor = Gdk.Cursor.new_from_name(display, name)\n window.set_cursor(cursor)\n", "issue": "Running game changes to default X cursor in main window\n\n\nI think [this is the code](https://github.com/lutris/lutris/blob/cbe6f5d5d982543a66f95aa62f0d8e26dd462470/lutris/util/display.py#L10) that sets the cursor. This was introduced in the commit https://github.com/lutris/lutris/commit/4bb2d78e5c3626057f9dc10dcc27f74ae67853b9\n\nRight now it's using [GdkCursorType](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#GdkCursorType) to set cursor, but it seems like we should be using [gdk_cursor_new_from_name()](https://developer.gnome.org/gdk3/stable/gdk3-Cursors.html#gdk-cursor-new-from-name) instead which is the recommended way.\n\n", "before_files": [{"content": "import subprocess\nfrom gi.repository import Gdk\n\nfrom lutris.util.log import logger\n\n\ndef set_cursor(name, window, display=None):\n \"\"\"Set a named mouse cursor for the given window.\"\"\"\n cursors = {\n 'default': Gdk.CursorType.ARROW,\n 'wait': Gdk.CursorType.WATCH,\n }\n\n if not display:\n display = Gdk.Display.get_default()\n cursor = Gdk.Cursor.new_for_display(display, cursors[name])\n window.set_cursor(cursor)\n\n\ndef get_vidmodes():\n xrandr_output = subprocess.Popen([\"xrandr\"],\n stdout=subprocess.PIPE).communicate()[0]\n return list([line for line in xrandr_output.decode().split(\"\\n\")])\n\n\ndef get_outputs():\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n for line in vid_modes:\n parts = line.split()\n if len(parts) < 2:\n continue\n if parts[1] == 'connected':\n if len(parts) == 2:\n continue\n geom = parts[2] if parts[2] != 'primary' else parts[3]\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n outputs.append((parts[0], geom))\n return outputs\n\n\ndef get_output_names():\n return [output[0] for output in get_outputs()]\n\n\ndef turn_off_except(display):\n for output in get_outputs():\n if output[0] != display:\n subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n\n\ndef get_resolutions():\n \"\"\"Return the list of supported screen resolutions.\"\"\"\n resolution_list = []\n for line in get_vidmodes():\n if line.startswith(\" \"):\n resolution_list.append(line.split()[0])\n return resolution_list\n\n\ndef get_current_resolution(monitor=0):\n \"\"\"Return the current resolution for the desktop.\"\"\"\n resolution = list()\n for line in get_vidmodes():\n if line.startswith(\" \") and \"*\" in line:\n resolution.append(line.split()[0])\n if monitor == 'all':\n return resolution\n else:\n return resolution[monitor]\n\n\ndef change_resolution(resolution):\n \"\"\"Change display resolution.\n\n Takes a string for single monitors or a list of displays as returned\n by get_outputs().\n \"\"\"\n if not resolution:\n logger.warning(\"No resolution provided\")\n return\n if isinstance(resolution, str):\n logger.debug(\"Switching resolution to 
%s\", resolution)\n\n if resolution not in get_resolutions():\n logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n else:\n subprocess.Popen([\"xrandr\", \"-s\", resolution])\n else:\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n display_geom = display[1].split('+')\n display_resolution = display_geom[0]\n position = (display_geom[1], display_geom[2])\n\n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n \"--mode\", display_resolution,\n \"--pos\", \"{}x{}\".format(position[0], position[1])\n ]).communicate()\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n", "path": "lutris/util/display.py"}]} | 1,765 | 151 |
gh_patches_debug_35621 | rasdani/github-patches | git_diff | streamlink__streamlink-5443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.rtvs: No playable streams found on this URL
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
rtvs plugin: the stream does not work
### Debug log
```text
PS C:\Users\My> streamlink https://www.rtvs.sk/televizia/live-24 --loglevel debug
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.2
[cli][debug] Streamlink: 5.3.1
[cli][debug] Dependencies:
[cli][debug] certifi: 2022.12.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.2
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.2
[cli][debug] urllib3: 1.26.14
[cli][debug] websocket-client: 1.5.1
[cli][debug] Arguments:
[cli][debug] url=https://www.rtvs.sk/televizia/live-24
[cli][debug] --loglevel=debug
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin rtvs for URL https://www.rtvs.sk/televizia/live-24
error: No playable streams found on this URL: https://www.rtvs.sk/televizia/live-24
```
</issue>
<code>
[start of src/streamlink/plugins/rtvs.py]
1 """
2 $description Live TV channels from RTVS, a Slovak public, state-owned broadcaster.
3 $url rtvs.sk
4 $type live
5 $region Slovakia
6 """
7
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13 from streamlink.utils.parse import parse_json
14
15
16 @pluginmatcher(re.compile(
17 r"https?://www\.rtvs\.sk/televizia/live-[\w-]+",
18 ))
19 class Rtvs(Plugin):
20 _re_channel_id = re.compile(r"'stream':\s*'live-(\d+)'")
21
22 def _get_streams(self):
23 res = self.session.http.get(self.url)
24 m = self._re_channel_id.search(res.text)
25 if not m:
26 return
27
28 res = self.session.http.get(
29 "https://www.rtvs.sk/json/live5f.json",
30 params={
31 "c": m.group(1),
32 "b": "mozilla",
33 "p": "win",
34 "f": "0",
35 "d": "1",
36 },
37 )
38 videos = parse_json(res.text, schema=validate.Schema({
39 "clip": {
40 "sources": [{
41 "src": validate.url(),
42 "type": str,
43 }],
44 }},
45 validate.get(("clip", "sources")),
46 validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
47 ))
48 for video in videos:
49 yield from HLSStream.parse_variant_playlist(self.session, video["src"]).items()
50
51
52 __plugin__ = Rtvs
53
[end of src/streamlink/plugins/rtvs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/rtvs.py b/src/streamlink/plugins/rtvs.py
--- a/src/streamlink/plugins/rtvs.py
+++ b/src/streamlink/plugins/rtvs.py
@@ -6,47 +6,52 @@
"""
import re
+from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
-from streamlink.utils.parse import parse_json
@pluginmatcher(re.compile(
- r"https?://www\.rtvs\.sk/televizia/live-[\w-]+",
+ r"https?://www\.rtvs\.sk/televizia/(?:live-|sport)",
))
class Rtvs(Plugin):
- _re_channel_id = re.compile(r"'stream':\s*'live-(\d+)'")
-
def _get_streams(self):
- res = self.session.http.get(self.url)
- m = self._re_channel_id.search(res.text)
- if not m:
+ channel = self.session.http.get(self.url, schema=validate.Schema(
+ validate.parse_html(),
+ validate.xml_xpath_string(".//iframe[@id='player_live']//@src"),
+ validate.url(path=validate.startswith("/embed/live/")),
+ validate.transform(lambda embed: urlparse(embed).path[len("/embed/live/"):]),
+ ))
+ if not channel:
return
- res = self.session.http.get(
+ videos = self.session.http.get(
"https://www.rtvs.sk/json/live5f.json",
params={
- "c": m.group(1),
+ "c": channel,
"b": "mozilla",
"p": "win",
"f": "0",
"d": "1",
},
+ schema=validate.Schema(
+ validate.parse_json(),
+ {
+ "clip": {
+ "sources": [{
+ "src": validate.url(),
+ "type": str,
+ }],
+ },
+ },
+ validate.get(("clip", "sources")),
+ validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
+ ),
)
- videos = parse_json(res.text, schema=validate.Schema({
- "clip": {
- "sources": [{
- "src": validate.url(),
- "type": str,
- }],
- }},
- validate.get(("clip", "sources")),
- validate.filter(lambda n: n["type"] == "application/x-mpegurl"),
- ))
for video in videos:
- yield from HLSStream.parse_variant_playlist(self.session, video["src"]).items()
+ return HLSStream.parse_variant_playlist(self.session, video["src"])
__plugin__ = Rtvs
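The channel-id extraction added by this fix can be shown standalone; the embed URL below is a hypothetical example, not taken from the site.
```python
from urllib.parse import urlparse

# The rewritten plugin reads the iframe src from the live page, checks that
# its path starts with /embed/live/, and keeps the remainder as the channel
# id that becomes the "c" parameter of the live5f.json request.
embed = "https://www.rtvs.sk/embed/live/3"  # hypothetical embed URL
prefix = "/embed/live/"
path = urlparse(embed).path
channel = path[len(prefix):] if path.startswith(prefix) else None
print(channel)  # prints "3"
```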
| {"golden_diff": "diff --git a/src/streamlink/plugins/rtvs.py b/src/streamlink/plugins/rtvs.py\n--- a/src/streamlink/plugins/rtvs.py\n+++ b/src/streamlink/plugins/rtvs.py\n@@ -6,47 +6,52 @@\n \"\"\"\n \n import re\n+from urllib.parse import urlparse\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.hls import HLSStream\n-from streamlink.utils.parse import parse_json\n \n \n @pluginmatcher(re.compile(\n- r\"https?://www\\.rtvs\\.sk/televizia/live-[\\w-]+\",\n+ r\"https?://www\\.rtvs\\.sk/televizia/(?:live-|sport)\",\n ))\n class Rtvs(Plugin):\n- _re_channel_id = re.compile(r\"'stream':\\s*'live-(\\d+)'\")\n-\n def _get_streams(self):\n- res = self.session.http.get(self.url)\n- m = self._re_channel_id.search(res.text)\n- if not m:\n+ channel = self.session.http.get(self.url, schema=validate.Schema(\n+ validate.parse_html(),\n+ validate.xml_xpath_string(\".//iframe[@id='player_live']//@src\"),\n+ validate.url(path=validate.startswith(\"/embed/live/\")),\n+ validate.transform(lambda embed: urlparse(embed).path[len(\"/embed/live/\"):]),\n+ ))\n+ if not channel:\n return\n \n- res = self.session.http.get(\n+ videos = self.session.http.get(\n \"https://www.rtvs.sk/json/live5f.json\",\n params={\n- \"c\": m.group(1),\n+ \"c\": channel,\n \"b\": \"mozilla\",\n \"p\": \"win\",\n \"f\": \"0\",\n \"d\": \"1\",\n },\n+ schema=validate.Schema(\n+ validate.parse_json(),\n+ {\n+ \"clip\": {\n+ \"sources\": [{\n+ \"src\": validate.url(),\n+ \"type\": str,\n+ }],\n+ },\n+ },\n+ validate.get((\"clip\", \"sources\")),\n+ validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n+ ),\n )\n- videos = parse_json(res.text, schema=validate.Schema({\n- \"clip\": {\n- \"sources\": [{\n- \"src\": validate.url(),\n- \"type\": str,\n- }],\n- }},\n- validate.get((\"clip\", \"sources\")),\n- validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n- ))\n for video in videos:\n- yield from HLSStream.parse_variant_playlist(self.session, video[\"src\"]).items()\n+ return HLSStream.parse_variant_playlist(self.session, video[\"src\"])\n \n \n __plugin__ = Rtvs\n", "issue": "plugins.rtvs: No playable streams found on\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nrtvs plugin - stream not work\n\n### Debug log\n\n```text\nPS C:\\Users\\My> streamlink https://www.rtvs.sk/televizia/live-24 --loglevel debug\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.2\r\n[cli][debug] Streamlink: 5.3.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.12.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.2\r\n[cli][debug] urllib3: 1.26.14\r\n[cli][debug] websocket-client: 1.5.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.rtvs.sk/televizia/live-24\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program 
Files\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin rtvs for URL https://www.rtvs.sk/televizia/live-24\r\nerror: No playable streams found on this URL: https://www.rtvs.sk/televizia/live-24\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from RTVS, a Slovak public, state-owned broadcaster.\n$url rtvs.sk\n$type live\n$region Slovakia\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.parse import parse_json\n\n\n@pluginmatcher(re.compile(\n r\"https?://www\\.rtvs\\.sk/televizia/live-[\\w-]+\",\n))\nclass Rtvs(Plugin):\n _re_channel_id = re.compile(r\"'stream':\\s*'live-(\\d+)'\")\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n m = self._re_channel_id.search(res.text)\n if not m:\n return\n\n res = self.session.http.get(\n \"https://www.rtvs.sk/json/live5f.json\",\n params={\n \"c\": m.group(1),\n \"b\": \"mozilla\",\n \"p\": \"win\",\n \"f\": \"0\",\n \"d\": \"1\",\n },\n )\n videos = parse_json(res.text, schema=validate.Schema({\n \"clip\": {\n \"sources\": [{\n \"src\": validate.url(),\n \"type\": str,\n }],\n }},\n validate.get((\"clip\", \"sources\")),\n validate.filter(lambda n: n[\"type\"] == \"application/x-mpegurl\"),\n ))\n for video in videos:\n yield from HLSStream.parse_variant_playlist(self.session, video[\"src\"]).items()\n\n\n__plugin__ = Rtvs\n", "path": "src/streamlink/plugins/rtvs.py"}]} | 1,502 | 604 |
gh_patches_debug_7581 | rasdani/github-patches | git_diff | ivy-llc__ivy-18346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sort
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/search.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_supported_dtypes(
10 {"2.5.0 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
11 "paddle",
12 )
13 @to_ivy_arrays_and_back
14 def argmax(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
15 return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)
16
17
18 @with_supported_dtypes(
19 {"2.5.0 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
20 "paddle",
21 )
22 @to_ivy_arrays_and_back
23 def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
24 return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
25
26
27 @with_supported_dtypes(
28 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
29 "paddle",
30 )
31 @to_ivy_arrays_and_back
32 def argsort(x, /, *, axis=-1, descending=False, name=None):
33 return ivy.argsort(x, axis=axis, descending=descending)
34
35
36 @with_supported_dtypes(
37 {"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
38 "paddle",
39 )
40 @to_ivy_arrays_and_back
41 def nonzero(input, /, *, as_tuple=False):
42 ret = ivy.nonzero(input)
43 if as_tuple is False:
44 ret = ivy.matrix_transpose(ivy.stack(ret))
45 return ret
46
47
48 @with_supported_dtypes(
49 {"2.5.0 and below": ("float32", "float64", "int32", "int64")},
50 "paddle",
51 )
52 @to_ivy_arrays_and_back
53 def searchsorted(sorted_sequence, values, out_int32=False, right=False, name=None):
54 if right:
55 side = "right"
56 else:
57 side = "left"
58 ret = ivy.searchsorted(sorted_sequence, values, side=side)
59 if out_int32:
60 ret = ivy.astype(ret, "int32")
61 return ret
62
[end of ivy/functional/frontends/paddle/tensor/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py
--- a/ivy/functional/frontends/paddle/tensor/search.py
+++ b/ivy/functional/frontends/paddle/tensor/search.py
@@ -33,6 +33,15 @@
return ivy.argsort(x, axis=axis, descending=descending)
+@with_supported_dtypes(
+ {"2.5.0 and below": ("float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def sort(x, /, *, axis=-1, descending=False, name=None):
+ return ivy.sort(x, axis=axis, descending=descending)
+
+
@with_supported_dtypes(
{"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
"paddle",
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/search.py b/ivy/functional/frontends/paddle/tensor/search.py\n--- a/ivy/functional/frontends/paddle/tensor/search.py\n+++ b/ivy/functional/frontends/paddle/tensor/search.py\n@@ -33,6 +33,15 @@\n return ivy.argsort(x, axis=axis, descending=descending)\n \n \n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def sort(x, /, *, axis=-1, descending=False, name=None):\n+ return ivy.sort(x, axis=axis, descending=descending)\n+\n+\n @with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n", "issue": "Sort\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmax(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argmin(x, /, *, axis=None, keepdim=False, dtype=\"int64\", name=None):\n return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef argsort(x, /, *, axis=-1, descending=False, name=None):\n return ivy.argsort(x, axis=axis, descending=descending)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int16\", \"int32\", \"int64\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef nonzero(input, /, *, as_tuple=False):\n ret = ivy.nonzero(input)\n if as_tuple is False:\n ret = ivy.matrix_transpose(ivy.stack(ret))\n return ret\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef searchsorted(sorted_sequence, values, out_int32=False, right=False, name=None):\n if right:\n side = \"right\"\n else:\n side = \"left\"\n ret = ivy.searchsorted(sorted_sequence, values, side=side)\n if out_int32:\n ret = ivy.astype(ret, \"int32\")\n return ret\n", "path": "ivy/functional/frontends/paddle/tensor/search.py"}]} | 1,249 | 231 |
gh_patches_debug_43422 | rasdani/github-patches | git_diff | encode__starlette-92 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `allow_origin_regex` to CORSMiddleware.
It'd be helpful if `CORSMiddleware` supported an `allow_origin_regex`, so that users could do...
```python
# Enforce a subdomain CORS policy
app.add_middleware(CORSMiddleware, allow_origin_regex="(http|https)://*.example.com")
```
Or...
```python
# Enforce an HTTPS-only CORS policy.
app.add_middleware(CORSMiddleware, allow_origin_regex="https://*")
```
The string should be compiled to a regex by the middleware and matches should be anchored to the start/end of the origin string.
</issue>
<code>
[start of starlette/middleware/cors.py]
1 from starlette.datastructures import Headers, MutableHeaders, URL
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import functools
5 import typing
6
7
8 ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
9
10
11 class CORSMiddleware:
12 def __init__(
13 self,
14 app: ASGIApp,
15 allow_origins: typing.Sequence[str] = (),
16 allow_methods: typing.Sequence[str] = ("GET",),
17 allow_headers: typing.Sequence[str] = (),
18 allow_credentials: bool = False,
19 expose_headers: typing.Sequence[str] = (),
20 max_age: int = 600,
21 ):
22
23 if "*" in allow_methods:
24 allow_methods = ALL_METHODS
25
26 simple_headers = {}
27 if "*" in allow_origins:
28 simple_headers["Access-Control-Allow-Origin"] = "*"
29 if allow_credentials:
30 simple_headers["Access-Control-Allow-Credentials"] = "true"
31 if expose_headers:
32 simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
33
34 preflight_headers = {}
35 if "*" in allow_origins:
36 preflight_headers["Access-Control-Allow-Origin"] = "*"
37 else:
38 preflight_headers["Vary"] = "Origin"
39 preflight_headers.update(
40 {
41 "Access-Control-Allow-Methods": ", ".join(allow_methods),
42 "Access-Control-Max-Age": str(max_age),
43 }
44 )
45 if allow_headers and "*" not in allow_headers:
46 preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
47 if allow_credentials:
48 preflight_headers["Access-Control-Allow-Credentials"] = "true"
49
50 self.app = app
51 self.allow_origins = allow_origins
52 self.allow_methods = allow_methods
53 self.allow_headers = allow_headers
54 self.allow_all_origins = "*" in allow_origins
55 self.allow_all_headers = "*" in allow_headers
56 self.simple_headers = simple_headers
57 self.preflight_headers = preflight_headers
58
59 def __call__(self, scope: Scope):
60 if scope["type"] == "http":
61 method = scope["method"]
62 headers = Headers(scope["headers"])
63 origin = headers.get("origin")
64
65 if origin is not None:
66 if method == "OPTIONS" and "access-control-request-method" in headers:
67 return self.preflight_response(request_headers=headers)
68 else:
69 return functools.partial(
70 self.simple_response, scope=scope, origin=origin
71 )
72
73 return self.app(scope)
74
75 def preflight_response(self, request_headers):
76 requested_origin = request_headers["origin"]
77 requested_method = request_headers["access-control-request-method"]
78 requested_headers = request_headers.get("access-control-request-headers")
79 requested_cookie = "cookie" in request_headers
80
81 headers = dict(self.preflight_headers)
82 failures = []
83
84 # If we only allow specific origins, then we have to mirror back
85 # the Origin header in the response.
86 if not self.allow_all_origins:
87 if requested_origin in self.allow_origins:
88 headers["Access-Control-Allow-Origin"] = requested_origin
89 else:
90 failures.append("origin")
91
92 if requested_method not in self.allow_methods:
93 failures.append("method")
94
95 # If we allow all headers, then we have to mirror back any requested
96 # headers in the response.
97 if self.allow_all_headers and requested_headers is not None:
98 headers["Access-Control-Allow-Headers"] = requested_headers
99 elif requested_headers is not None:
100 for header in requested_headers.split(","):
101 if header.strip() not in self.allow_headers:
102 failures.append("headers")
103
104 # We don't strictly need to use 400 responses here, since its up to
105 # the browser to enforce the CORS policy, but its more informative
106 # if we do.
107 if failures:
108 failure_text = "Disallowed CORS " + ", ".join(failures)
109 return PlainTextResponse(failure_text, status_code=400, headers=headers)
110
111 return PlainTextResponse("OK", status_code=200, headers=headers)
112
113 async def simple_response(self, receive, send, scope=None, origin=None):
114 inner = self.app(scope)
115 send = functools.partial(self.send, send=send, origin=origin)
116 await inner(receive, send)
117
118 async def send(self, message, send=None, origin=None):
119 if message["type"] != "http.response.start":
120 await send(message)
121 return
122
123 message.setdefault("headers", [])
124 headers = MutableHeaders(message["headers"])
125
126 # If we only allow specific origins, then we have to mirror back
127 # the Origin header in the response.
128 if not self.allow_all_origins and origin in self.allow_origins:
129 headers["Access-Control-Allow-Origin"] = origin
130 headers.update(self.simple_headers)
131 await send(message)
132
[end of starlette/middleware/cors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -3,6 +3,7 @@
from starlette.types import ASGIApp, ASGIInstance, Scope
import functools
import typing
+import re
ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
@@ -16,6 +17,7 @@
allow_methods: typing.Sequence[str] = ("GET",),
allow_headers: typing.Sequence[str] = (),
allow_credentials: bool = False,
+ allow_origin_regex: str = None,
expose_headers: typing.Sequence[str] = (),
max_age: int = 600,
):
@@ -23,6 +25,10 @@
if "*" in allow_methods:
allow_methods = ALL_METHODS
+ if allow_origin_regex is not None:
+ regex = re.compile(allow_origin_regex)
+ allow_origin_regex = regex
+
simple_headers = {}
if "*" in allow_origins:
simple_headers["Access-Control-Allow-Origin"] = "*"
@@ -53,6 +59,7 @@
self.allow_headers = allow_headers
self.allow_all_origins = "*" in allow_origins
self.allow_all_headers = "*" in allow_headers
+ self.allow_origin_regex = allow_origin_regex
self.simple_headers = simple_headers
self.preflight_headers = preflight_headers
@@ -66,12 +73,22 @@
if method == "OPTIONS" and "access-control-request-method" in headers:
return self.preflight_response(request_headers=headers)
else:
- return functools.partial(
- self.simple_response, scope=scope, origin=origin
- )
+ if self.is_allowed_origin(origin=origin):
+ return functools.partial(
+ self.simple_response, scope=scope, origin=origin
+ )
+ return PlainTextResponse("Disallowed CORS origin", status_code=400)
return self.app(scope)
+ def is_allowed_origin(self, origin):
+ if self.allow_origin_regex:
+ return self.allow_origin_regex.match(origin)
+ if self.allow_all_origins:
+ return True
+
+ return origin in self.allow_origins
+
def preflight_response(self, request_headers):
requested_origin = request_headers["origin"]
requested_method = request_headers["access-control-request-method"]
@@ -84,7 +101,7 @@
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
if not self.allow_all_origins:
- if requested_origin in self.allow_origins:
+ if self.is_allowed_origin(origin=requested_origin):
headers["Access-Control-Allow-Origin"] = requested_origin
else:
failures.append("origin")
@@ -125,7 +142,7 @@
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
- if not self.allow_all_origins and origin in self.allow_origins:
+ if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
headers["Access-Control-Allow-Origin"] = origin
headers.update(self.simple_headers)
await send(message)
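A hedged usage sketch for the new option, in the spirit of the issue's examples: the app boilerplate is assumed, and the trailing `$` is added because the diff matches with `re.match()`, which anchors only the start of the origin string.
```python
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware

app = Starlette()
# Allow any HTTPS subdomain of example.com. Since match() anchors only the
# start of the origin, the trailing $ makes the match effectively
# full-string, as the issue requested.
app.add_middleware(
    CORSMiddleware,
    allow_origin_regex=r"https://.*\.example\.com$",
)
```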
| {"golden_diff": "diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py\n--- a/starlette/middleware/cors.py\n+++ b/starlette/middleware/cors.py\n@@ -3,6 +3,7 @@\n from starlette.types import ASGIApp, ASGIInstance, Scope\n import functools\n import typing\n+import re\n \n \n ALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n@@ -16,6 +17,7 @@\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n+ allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n@@ -23,6 +25,10 @@\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n \n+ if allow_origin_regex is not None:\n+ regex = re.compile(allow_origin_regex)\n+ allow_origin_regex = regex\n+\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n@@ -53,6 +59,7 @@\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n+ self.allow_origin_regex = allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n \n@@ -66,12 +73,22 @@\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n- return functools.partial(\n- self.simple_response, scope=scope, origin=origin\n- )\n+ if self.is_allowed_origin(origin=origin):\n+ return functools.partial(\n+ self.simple_response, scope=scope, origin=origin\n+ )\n+ return PlainTextResponse(\"Disallowed CORS origin\", status_code=400)\n \n return self.app(scope)\n \n+ def is_allowed_origin(self, origin):\n+ if self.allow_origin_regex:\n+ return self.allow_origin_regex.match(origin)\n+ if self.allow_all_origins:\n+ return True\n+\n+ return origin in self.allow_origins\n+\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n@@ -84,7 +101,7 @@\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n- if requested_origin in self.allow_origins:\n+ if self.is_allowed_origin(origin=requested_origin):\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n@@ -125,7 +142,7 @@\n \n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n- if not self.allow_all_origins and origin in self.allow_origins:\n+ if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "issue": "Add `allow_origin_regex` to CORSMiddleware.\nIt'd be helpful if `CORSMiddleware` supported an `allow_origin_regex`, so that users could do...\r\n\r\n```python\r\n# Enforce a subdomain CORS policy\r\napp.add_middleware(CORSMiddleware, allow_origin_regex=\"(http|https)://*.example.com\")\r\n```\r\n\r\nOr...\r\n\r\n```python\r\n# Enforce an HTTPS-only CORS policy.\r\napp.add_middleware(CORSMiddleware, allow_origin_regex=\"https://*\")\r\n```\r\n\r\nThe string should be compiled to a regex by the middleware and matches should be anchored to the start/end of the origin string.\n", "before_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses 
import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n if requested_origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return 
PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}]} | 2,018 | 733 |
gh_patches_debug_12814 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Run tests with warnings as errors
This is harder than it sounds, because practically everything in a Python stack emits warnings. See #1124, #1149, and #1169 for previous work on this.
One lead is to check whether we can put e.g. `,default:::setuptools,default:::pip` at the end of the [`PYTHONWARNINGS`](https://docs.python.org/3/using/cmdline.html#cmdoption-w) variable, which would exempt build-time dependencies (that we can't fix) from `=error,` at the front. Unclear from the docs whether we can use one entry per package, or if it's one per file.
Internally, we get a `ResourceWarning` from failing to close a [`branch-check` file](https://github.com/HypothesisWorks/hypothesis-python/blob/06a6ce681b8f13676ae2b674c559c413f0f3dbac/src/hypothesis/internal/coverage.py#L60) in the coverage job (fixable with a minor refactor to use `mode='a'`). There are probably more warnings that we simply don't notice at the moment.
</issue>
<code>
[start of src/hypothesis/internal/coverage.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22 import json
23 from contextlib import contextmanager
24
25 from hypothesis.internal.reflection import proxies
26
27 """
28 This module implements a custom coverage system that records conditions and
29 then validates that every condition has been seen to be both True and False
30 during the execution of our tests.
31
32 The only thing we use it for at present is our argument validation functions,
33 where we assert that every validation function has been seen to both pass and
34 fail in the course of testing.
35
36 When not running with a magic environment variable set, this module disables
37 itself and has essentially no overhead.
38 """
39
40 pretty_file_name_cache = {}
41
42
43 def pretty_file_name(f):
44 try:
45 return pretty_file_name_cache[f]
46 except KeyError:
47 pass
48
49 parts = f.split(os.path.sep)
50 parts = parts[parts.index('hypothesis'):]
51 result = os.path.sep.join(parts)
52 pretty_file_name_cache[f] = result
53 return result
54
55
56 IN_COVERAGE_TESTS = os.getenv('HYPOTHESIS_INTERNAL_COVERAGE') == 'true'
57
58
59 if IN_COVERAGE_TESTS:
60 log = open('branch-check', 'w')
61 written = set()
62
63 def record_branch(name, value):
64 key = (name, value)
65 if key in written:
66 return
67 written.add(key)
68 log.write(
69 json.dumps({'name': name, 'value': value})
70 )
71 log.write('\n')
72 log.flush()
73
74 description_stack = []
75
76 @contextmanager
77 def check_block(name, depth):
78 # We add an extra two callers to the stack: One for the contextmanager
79 # function, one for our actual caller, so we want to go two extra
80 # stack frames up.
81 caller = sys._getframe(depth + 2)
82 local_description = '%s at %s:%d' % (
83 name,
84 pretty_file_name(caller.f_code.co_filename),
85 caller.f_lineno,
86 )
87 try:
88 description_stack.append(local_description)
89 description = ' in '.join(reversed(description_stack)) + ' passed'
90 yield
91 record_branch(description, True)
92 except BaseException:
93 record_branch(description, False)
94 raise
95 finally:
96 description_stack.pop()
97
98 @contextmanager
99 def check(name):
100 with check_block(name, 2):
101 yield
102
103 def check_function(f):
104 @proxies(f)
105 def accept(*args, **kwargs):
106 # depth of 2 because of the proxy function calling us.
107 with check_block(f.__name__, 2):
108 return f(*args, **kwargs)
109 return accept
110 else:
111 def check_function(f):
112 return f
113
114 @contextmanager
115 def check(name):
116 yield
117
118
119 class suppress_tracing(object):
120 def __enter__(self):
121 self.__original_trace = sys.gettrace()
122 sys.settrace(None)
123
124 def __exit__(self, exc_type, exc_value, traceback):
125 sys.settrace(self.__original_trace)
126
[end of src/hypothesis/internal/coverage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/hypothesis/internal/coverage.py b/src/hypothesis/internal/coverage.py
--- a/src/hypothesis/internal/coverage.py
+++ b/src/hypothesis/internal/coverage.py
@@ -57,7 +57,8 @@
if IN_COVERAGE_TESTS:
- log = open('branch-check', 'w')
+ with open('branch-check', 'w'):
+ pass
written = set()
def record_branch(name, value):
@@ -65,11 +66,8 @@
if key in written:
return
written.add(key)
- log.write(
- json.dumps({'name': name, 'value': value})
- )
- log.write('\n')
- log.flush()
+ with open('branch-check', 'a') as log:
+ log.write(json.dumps({'name': name, 'value': value}) + '\n')
description_stack = []
| {"golden_diff": "diff --git a/src/hypothesis/internal/coverage.py b/src/hypothesis/internal/coverage.py\n--- a/src/hypothesis/internal/coverage.py\n+++ b/src/hypothesis/internal/coverage.py\n@@ -57,7 +57,8 @@\n \n \n if IN_COVERAGE_TESTS:\n- log = open('branch-check', 'w')\n+ with open('branch-check', 'w'):\n+ pass\n written = set()\n \n def record_branch(name, value):\n@@ -65,11 +66,8 @@\n if key in written:\n return\n written.add(key)\n- log.write(\n- json.dumps({'name': name, 'value': value})\n- )\n- log.write('\\n')\n- log.flush()\n+ with open('branch-check', 'a') as log:\n+ log.write(json.dumps({'name': name, 'value': value}) + '\\n')\n \n description_stack = []\n", "issue": "Run tests with warnings as errors\nThis is harder than it sounds, because practically everything in a Python stack emits warnings. See #1124, #1149, and #1169 for previous work on this.\r\n\r\nOne lead is to check whether we can put e.g. `,default:::setuptools,default:::pip` at the end of the [`PYTHONWARNINGS`](https://docs.python.org/3/using/cmdline.html#cmdoption-w) variable, which would exempt build-time dependencies (that we can't fix) from `=error,` at the front. Unclear from the docs whether we can use one entry per package, or if it's one per file.\r\n\r\nInternally, we get a `ResourceWarning` from failing to close a [`branch-check` file](https://github.com/HypothesisWorks/hypothesis-python/blob/06a6ce681b8f13676ae2b674c559c413f0f3dbac/src/hypothesis/internal/coverage.py#L60) in the coverage job (fixable with a minor refactor to use `mode='a'`). There are probably more warnings that we simply don't notice at the moment.\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport json\nfrom contextlib import contextmanager\n\nfrom hypothesis.internal.reflection import proxies\n\n\"\"\"\nThis module implements a custom coverage system that records conditions and\nthen validates that every condition has been seen to be both True and False\nduring the execution of our tests.\n\nThe only thing we use it for at present is our argument validation functions,\nwhere we assert that every validation function has been seen to both pass and\nfail in the course of testing.\n\nWhen not running with a magic environment variable set, this module disables\nitself and has essentially no overhead.\n\"\"\"\n\npretty_file_name_cache = {}\n\n\ndef pretty_file_name(f):\n try:\n return pretty_file_name_cache[f]\n except KeyError:\n pass\n\n parts = f.split(os.path.sep)\n parts = parts[parts.index('hypothesis'):]\n result = os.path.sep.join(parts)\n pretty_file_name_cache[f] = result\n return result\n\n\nIN_COVERAGE_TESTS = os.getenv('HYPOTHESIS_INTERNAL_COVERAGE') == 'true'\n\n\nif IN_COVERAGE_TESTS:\n log = open('branch-check', 'w')\n written = set()\n\n def record_branch(name, value):\n key = (name, value)\n if key in written:\n return\n written.add(key)\n log.write(\n json.dumps({'name': name, 'value': value})\n )\n log.write('\\n')\n log.flush()\n\n description_stack = []\n\n @contextmanager\n def check_block(name, depth):\n # We add an extra two callers to the stack: One for the contextmanager\n # function, one for our actual caller, so we want to go two extra\n # stack frames up.\n caller = sys._getframe(depth + 2)\n local_description = '%s at %s:%d' % (\n name,\n pretty_file_name(caller.f_code.co_filename),\n caller.f_lineno,\n )\n try:\n description_stack.append(local_description)\n description = ' in '.join(reversed(description_stack)) + ' passed'\n yield\n record_branch(description, True)\n except BaseException:\n record_branch(description, False)\n raise\n finally:\n description_stack.pop()\n\n @contextmanager\n def check(name):\n with check_block(name, 2):\n yield\n\n def check_function(f):\n @proxies(f)\n def accept(*args, **kwargs):\n # depth of 2 because of the proxy function calling us.\n with check_block(f.__name__, 2):\n return f(*args, **kwargs)\n return accept\nelse:\n def check_function(f):\n return f\n\n @contextmanager\n def check(name):\n yield\n\n\nclass suppress_tracing(object):\n def __enter__(self):\n self.__original_trace = sys.gettrace()\n sys.settrace(None)\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.settrace(self.__original_trace)\n", "path": "src/hypothesis/internal/coverage.py"}]} | 1,914 | 206 |
gh_patches_debug_18261 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing template translations confirm-email
**URL:** https://meinberlin-dev.liqd.net/accounts/confirm-email/
**user:** user who wants to register
**expected behaviour:**
Translation is shown https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/locale/de_DE/LC_MESSAGES/django.po#L2180
From mail-override text https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/meinberlin/apps/contrib/django_standard_messages.py#L13
**behaviour:** django-allauth text is used and not translated https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/verified_email_required.html#L16
**important screensize:**
**device & browser:**
**Comment/Question:**
The same is true for https://meinberlin-dev.liqd.net/accounts/password/reset/done/ which also shows the django-allauth text https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/password_reset_done.html#L15
But this is the same issue, as the override text and its translation are also already present in our repo.
Screenshot?

</issue>
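For context, a hedged sketch of the override mechanism involved (the file shown below): the no-op `_` marker keeps these msgids extractable by `makemessages` without translating anything at import time; the actual lookup happens later via gettext. The `msgid` variable is illustrative, and this assumes a configured Django project with compiled locale catalogues.

```python
# Hypothetical illustration of the no-op marker pattern used below.
from django.utils.translation import gettext

def _(s):
    return s  # marker only: makemessages sees the string, nothing is translated yet

msgid = _("Verify Your E-mail Address")   # stored verbatim in the override list
translated = gettext(msgid)               # resolved at runtime from the .po catalogue
```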
<code>
[start of meinberlin/apps/contrib/django_standard_messages.py]
1 def _(s):
2 return s
3
4
5 django_standard_messages_to_override = [
6 _("You have signed out."),
7 _("Verify Your E-mail Address"),
8 _("You must type the same password each time."),
9 _("You have confirmed %(email)s."),
10 _("You cannot remove your primary e-mail address (%(email)s)."),
11 _("We have sent you an e-mail. Please contact us if "
12 "you do not receive it within a few minutes."),
13 _("We have sent an e-mail to you for verification. "
14 "Follow the link provided to finalize the signup process. "
15 "Please contact us if you do not receive it within a few minutes."),
16 _(u'You must select a minimum of %(limit_value)d choices.'),
17 _(u'You must select a maximum of %(limit_value)d choices.'),
18 _('Enter a valid email address.')
19 ]
20
[end of meinberlin/apps/contrib/django_standard_messages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py
--- a/meinberlin/apps/contrib/django_standard_messages.py
+++ b/meinberlin/apps/contrib/django_standard_messages.py
@@ -12,7 +12,13 @@
"you do not receive it within a few minutes."),
_("We have sent an e-mail to you for verification. "
"Follow the link provided to finalize the signup process. "
- "Please contact us if you do not receive it within a few minutes."),
+ "If you do not see the verification e-mail in your main inbox, "
+ "check your spam folder. "
+ "Please contact us if you do not receive the verification e-mail "
+ "within a few minutes."),
+ _("We have sent you an e-mail. If you have not received it "
+ "please check your spam folder. Otherwise contact us if you "
+ "do not receive it in a few minutes."),
_(u'You must select a minimum of %(limit_value)d choices.'),
_(u'You must select a maximum of %(limit_value)d choices.'),
_('Enter a valid email address.')
| {"golden_diff": "diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py\n--- a/meinberlin/apps/contrib/django_standard_messages.py\n+++ b/meinberlin/apps/contrib/django_standard_messages.py\n@@ -12,7 +12,13 @@\n \"you do not receive it within a few minutes.\"),\n _(\"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n- \"Please contact us if you do not receive it within a few minutes.\"),\n+ \"If you do not see the verification e-mail in your main inbox, \"\n+ \"check your spam folder. \"\n+ \"Please contact us if you do not receive the verification e-mail \"\n+ \"within a few minutes.\"),\n+ _(\"We have sent you an e-mail. If you have not received it \"\n+ \"please check your spam folder. Otherwise contact us if you \"\n+ \"do not receive it in a few minutes.\"),\n _(u'You must select a minimum of %(limit_value)d choices.'),\n _(u'You must select a maximum of %(limit_value)d choices.'),\n _('Enter a valid email address.')\n", "issue": "Missing template translations confirm-email\n**URL:** https://meinberlin-dev.liqd.net/accounts/confirm-email/\r\n**user:** user who want to register\r\n**expected behaviour:** \r\nTranslation is shown https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/locale/de_DE/LC_MESSAGES/django.po#L2180\r\nFrom mail-override text https://github.com/liqd/a4-meinberlin/blob/80ac3005df15322a330c2ce98833a11fb35d8bc8/meinberlin/apps/contrib/django_standard_messages.py#L13\r\n**behaviour:** django-allauth text is used and not translated https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/verified_email_required.html#L16\r\n**important screensize:**\r\n**device & browser:** \r\n\r\n**Comment/Question:** \r\nThe same is true for https://meinberlin-dev.liqd.net/accounts/password/reset/done/ which also shows the django-allauth text https://github.com/pennersr/django-allauth/blob/672507e517eb762f76afae0ec3670d96c6afc143/allauth/templates/account/password_reset_done.html#L15\r\nBut this is the same issue, as override text and translation are also actually in our repo. \r\n\r\nScreenshot?\r\n\r\n\r\n\n", "before_files": [{"content": "def _(s):\n return s\n\n\ndjango_standard_messages_to_override = [\n _(\"You have signed out.\"),\n _(\"Verify Your E-mail Address\"),\n _(\"You must type the same password each time.\"),\n _(\"You have confirmed %(email)s.\"),\n _(\"You cannot remove your primary e-mail address (%(email)s).\"),\n _(\"We have sent you an e-mail. Please contact us if \"\n \"you do not receive it within a few minutes.\"),\n _(\"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n \"Please contact us if you do not receive it within a few minutes.\"),\n _(u'You must select a minimum of %(limit_value)d choices.'),\n _(u'You must select a maximum of %(limit_value)d choices.'),\n _('Enter a valid email address.')\n]\n", "path": "meinberlin/apps/contrib/django_standard_messages.py"}]} | 1,208 | 268 |
gh_patches_debug_9637 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2029 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`check-useless-excludes` meta hook doesn't seem to work well with broken symlinks
Repro steps
1. Add a submodule
2. Add a symlink to somewhere inside this submodule
3. Use `check-symlinks`
4. Deinit submodule (to simulate the case when people clone without `--recursive`)
5. Notice that it now complains about the symlink created in step 2
6. Add an exclude entry with the path to that symlink
7. Use `check-useless-excludes`
8. Notice that it complains that the added exclude entry doesn't apply.
For a working example, check out https://github.com/pkoch/test-check-useless-excludes (without `--recursive`, or run `git submodule deinit --all`) and run `pre-commit run --all`.
</issue>
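To make the failure mode concrete, a hedged sketch: a dangling symlink is not a regular file, so the default `types: [file]` applied from `MANIFEST_HOOK_DICT` in the code below filters it out before any `exclude` pattern is consulted. The paths here are invented.

```python
# Hypothetical demonstration that a dangling symlink is not a "file".
import os

os.symlink('submodule/never-checked-out', 'broken-link')  # dangling target
print(os.path.islink('broken-link'))   # True
print(os.path.isfile('broken-link'))   # False -- the target does not exist
```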
<code>
[start of pre_commit/meta_hooks/check_useless_excludes.py]
1 import argparse
2 import re
3 from typing import Optional
4 from typing import Sequence
5
6 from cfgv import apply_defaults
7
8 import pre_commit.constants as C
9 from pre_commit import git
10 from pre_commit.clientlib import load_config
11 from pre_commit.clientlib import MANIFEST_HOOK_DICT
12 from pre_commit.commands.run import Classifier
13
14
15 def exclude_matches_any(
16 filenames: Sequence[str],
17 include: str,
18 exclude: str,
19 ) -> bool:
20 if exclude == '^$':
21 return True
22 include_re, exclude_re = re.compile(include), re.compile(exclude)
23 for filename in filenames:
24 if include_re.search(filename) and exclude_re.search(filename):
25 return True
26 return False
27
28
29 def check_useless_excludes(config_file: str) -> int:
30 config = load_config(config_file)
31 filenames = git.get_all_files()
32 classifier = Classifier.from_config(
33 filenames, config['files'], config['exclude'],
34 )
35 retv = 0
36
37 exclude = config['exclude']
38 if not exclude_matches_any(filenames, '', exclude):
39 print(
40 f'The global exclude pattern {exclude!r} does not match any files',
41 )
42 retv = 1
43
44 for repo in config['repos']:
45 for hook in repo['hooks']:
46 # Not actually a manifest dict, but this more accurately reflects
47 # the defaults applied during runtime
48 hook = apply_defaults(hook, MANIFEST_HOOK_DICT)
49 names = classifier.filenames
50 types = hook['types']
51 types_or = hook['types_or']
52 exclude_types = hook['exclude_types']
53 names = classifier.by_types(names, types, types_or, exclude_types)
54 include, exclude = hook['files'], hook['exclude']
55 if not exclude_matches_any(names, include, exclude):
56 print(
57 f'The exclude pattern {exclude!r} for {hook["id"]} does '
58 f'not match any files',
59 )
60 retv = 1
61
62 return retv
63
64
65 def main(argv: Optional[Sequence[str]] = None) -> int:
66 parser = argparse.ArgumentParser()
67 parser.add_argument('filenames', nargs='*', default=[C.CONFIG_FILE])
68 args = parser.parse_args(argv)
69
70 retv = 0
71 for filename in args.filenames:
72 retv |= check_useless_excludes(filename)
73 return retv
74
75
76 if __name__ == '__main__':
77 exit(main())
78
[end of pre_commit/meta_hooks/check_useless_excludes.py]
</code>
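A quick, hypothetical call to the `exclude_matches_any` helper above, showing what counts as a "useless" exclude (filenames invented):

```python
filenames = ['docs/index.md', 'src/app.py']
exclude_matches_any(filenames, include='', exclude=r'^docs/')     # True: useful
exclude_matches_any(filenames, include='', exclude=r'^missing/')  # False: reported
exclude_matches_any(filenames, include='', exclude='^$')          # True: the default
```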
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/meta_hooks/check_useless_excludes.py b/pre_commit/meta_hooks/check_useless_excludes.py
--- a/pre_commit/meta_hooks/check_useless_excludes.py
+++ b/pre_commit/meta_hooks/check_useless_excludes.py
@@ -43,6 +43,9 @@
for repo in config['repos']:
for hook in repo['hooks']:
+ # the default of manifest hooks is `types: [file]` but we may
+ # be configuring a symlink hook while there's a broken symlink
+ hook.setdefault('types', [])
# Not actually a manifest dict, but this more accurately reflects
# the defaults applied during runtime
hook = apply_defaults(hook, MANIFEST_HOOK_DICT)
| {"golden_diff": "diff --git a/pre_commit/meta_hooks/check_useless_excludes.py b/pre_commit/meta_hooks/check_useless_excludes.py\n--- a/pre_commit/meta_hooks/check_useless_excludes.py\n+++ b/pre_commit/meta_hooks/check_useless_excludes.py\n@@ -43,6 +43,9 @@\n \n for repo in config['repos']:\n for hook in repo['hooks']:\n+ # the default of manifest hooks is `types: [file]` but we may\n+ # be configuring a symlink hook while there's a broken symlink\n+ hook.setdefault('types', [])\n # Not actually a manifest dict, but this more accurately reflects\n # the defaults applied during runtime\n hook = apply_defaults(hook, MANIFEST_HOOK_DICT)\n", "issue": "`check-useless-excludes` meta hook doesn't seem to work well with broken symlinks\nRepro steps\r\n1. Add a submodule\r\n2. Add a symlink to somewhere inside this submodule\r\n3. Use `check-symlinks`\r\n4. Deinit submodule (to simulate the case when people clone without `--recursive`)\r\n4. Notice that it now complains about the symlink created on 2\r\n5. Add an exclude entry with the path to that symlink\r\n6. Use `check-useless-excludes`\r\n7. Notice that it complains that the added exclude entry doesn't apply.\r\n\r\nFor a working example, check out https://github.com/pkoch/test-check-useless-excludes (without `--recursive`, or run `git submodule deinit --all`) and run `pre-commit run --all`.\n", "before_files": [{"content": "import argparse\nimport re\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom cfgv import apply_defaults\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import MANIFEST_HOOK_DICT\nfrom pre_commit.commands.run import Classifier\n\n\ndef exclude_matches_any(\n filenames: Sequence[str],\n include: str,\n exclude: str,\n) -> bool:\n if exclude == '^$':\n return True\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n for filename in filenames:\n if include_re.search(filename) and exclude_re.search(filename):\n return True\n return False\n\n\ndef check_useless_excludes(config_file: str) -> int:\n config = load_config(config_file)\n filenames = git.get_all_files()\n classifier = Classifier.from_config(\n filenames, config['files'], config['exclude'],\n )\n retv = 0\n\n exclude = config['exclude']\n if not exclude_matches_any(filenames, '', exclude):\n print(\n f'The global exclude pattern {exclude!r} does not match any files',\n )\n retv = 1\n\n for repo in config['repos']:\n for hook in repo['hooks']:\n # Not actually a manifest dict, but this more accurately reflects\n # the defaults applied during runtime\n hook = apply_defaults(hook, MANIFEST_HOOK_DICT)\n names = classifier.filenames\n types = hook['types']\n types_or = hook['types_or']\n exclude_types = hook['exclude_types']\n names = classifier.by_types(names, types, types_or, exclude_types)\n include, exclude = hook['files'], hook['exclude']\n if not exclude_matches_any(names, include, exclude):\n print(\n f'The exclude pattern {exclude!r} for {hook[\"id\"]} does '\n f'not match any files',\n )\n retv = 1\n\n return retv\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', default=[C.CONFIG_FILE])\n args = parser.parse_args(argv)\n\n retv = 0\n for filename in args.filenames:\n retv |= check_useless_excludes(filename)\n return retv\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/meta_hooks/check_useless_excludes.py"}]} | 1,382 | 160 |
gh_patches_debug_24181 | rasdani/github-patches | git_diff | ansible__awx-13528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception handling for Conjur Secrets Manager Lookup not working properly
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
When using the Conjur Secrets Manager Lookup credential type against Conjur Cloud, the exception handling does not correctly fall back to the Conjur Cloud URI endpoint automatically. All attempts currently fail with HTTP 404.
### AWX version
0.1.dev32763+g3d73b80
### Select the relevant components
- [X] UI
- [ ] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
1. Select "CyberArk Conjur Secrets Manager Lookup" as the credential type for a new credential object in AWX.
2. Configure for a Conjur Cloud tenant.
3. Run "Test" and a failure with "HTTP 404" is returned.
### Expected results
The test should pass.
### Actual results
The test fails with an HTTP 404 error code.
### Additional information
This fix is being implemented in a PR within moments.
</issue>
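For clarity on the failure mode, a hedged sketch: `requests` raises `ConnectionError` only for transport-level failures, so an HTTP 404 from the non-Cloud endpoint never enters the `except` branch. Calling `raise_for_status()` is what converts a 4xx/5xx response into `requests.exceptions.HTTPError`. The URL below is illustrative.

```python
# Hypothetical illustration: a 404 response does not raise by itself.
import requests

resp = requests.post('https://conjur.example.test/authn/acct/user/authenticate')
print(resp.status_code)   # e.g. 404 -- no exception has been raised yet
resp.raise_for_status()   # now raises requests.exceptions.HTTPError
```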
<code>
[start of awx/main/credential_plugins/conjur.py]
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import urljoin, quote
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8
9 conjur_inputs = {
10 'fields': [
11 {
12 'id': 'url',
13 'label': _('Conjur URL'),
14 'type': 'string',
15 'format': 'url',
16 },
17 {
18 'id': 'api_key',
19 'label': _('API Key'),
20 'type': 'string',
21 'secret': True,
22 },
23 {
24 'id': 'account',
25 'label': _('Account'),
26 'type': 'string',
27 },
28 {
29 'id': 'username',
30 'label': _('Username'),
31 'type': 'string',
32 },
33 {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},
34 ],
35 'metadata': [
36 {
37 'id': 'secret_path',
38 'label': _('Secret Identifier'),
39 'type': 'string',
40 'help_text': _('The identifier for the secret e.g., /some/identifier'),
41 },
42 {
43 'id': 'secret_version',
44 'label': _('Secret Version'),
45 'type': 'string',
46 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
47 },
48 ],
49 'required': ['url', 'api_key', 'account', 'username'],
50 }
51
52
53 def conjur_backend(**kwargs):
54 url = kwargs['url']
55 api_key = kwargs['api_key']
56 account = quote(kwargs['account'], safe='')
57 username = quote(kwargs['username'], safe='')
58 secret_path = quote(kwargs['secret_path'], safe='')
59 version = kwargs.get('secret_version')
60 cacert = kwargs.get('cacert', None)
61
62 auth_kwargs = {
63 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
64 'data': api_key,
65 'allow_redirects': False,
66 }
67
68 with CertFiles(cacert) as cert:
69 # https://www.conjur.org/api.html#authentication-authenticate-post
70 auth_kwargs['verify'] = cert
71 try:
72 resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
73 except requests.exceptions.ConnectionError:
74 resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
75 raise_for_status(resp)
76 token = resp.content.decode('utf-8')
77
78 lookup_kwargs = {
79 'headers': {'Authorization': 'Token token="{}"'.format(token)},
80 'allow_redirects': False,
81 }
82
83 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
84 path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
85 path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
86 if version:
87 ver = "version={}".format(version)
88 path = '?'.join([path, ver])
89 path_conjurcloud = '?'.join([path_conjurcloud, ver])
90
91 with CertFiles(cacert) as cert:
92 lookup_kwargs['verify'] = cert
93 try:
94 resp = requests.get(path, timeout=30, **lookup_kwargs)
95 except requests.exceptions.ConnectionError:
96 resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
97 raise_for_status(resp)
98 return resp.text
99
100
101 conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
102
[end of awx/main/credential_plugins/conjur.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py
--- a/awx/main/credential_plugins/conjur.py
+++ b/awx/main/credential_plugins/conjur.py
@@ -70,7 +70,8 @@
auth_kwargs['verify'] = cert
try:
resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
- except requests.exceptions.ConnectionError:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError:
resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
raise_for_status(resp)
token = resp.content.decode('utf-8')
@@ -92,7 +93,8 @@
lookup_kwargs['verify'] = cert
try:
resp = requests.get(path, timeout=30, **lookup_kwargs)
- except requests.exceptions.ConnectionError:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError:
resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
raise_for_status(resp)
return resp.text
| {"golden_diff": "diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py\n--- a/awx/main/credential_plugins/conjur.py\n+++ b/awx/main/credential_plugins/conjur.py\n@@ -70,7 +70,8 @@\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n- except requests.exceptions.ConnectionError:\n+ resp.raise_for_status()\n+ except requests.exceptions.HTTPError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n@@ -92,7 +93,8 @@\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n- except requests.exceptions.ConnectionError:\n+ resp.raise_for_status()\n+ except requests.exceptions.HTTPError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n", "issue": "Exception handling for Conjur Secrets Manager Lookup not working properly\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Bug Summary\n\nWhen using the Conjur Secrets Manager Lookup credential type against Conjur Cloud, exception handling does not properly switch to the proper URI endpoint automatically. Any attempts currently fail with HTTP 404.\n\n### AWX version\n\n0.1.dev32763+g3d73b80\n\n### Select the relevant components\n\n- [X] UI\n- [ ] API\n- [ ] Docs\n- [ ] Collection\n- [ ] CLI\n- [ ] Other\n\n### Installation method\n\ndocker development environment\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n1. Select \"CyberArk Conjur Secrets Manager Lookup\" as the credential type for a new credential object in AWX.\r\n2. Configure for a Conjur Cloud tenant.\r\n3. 
Run \"Test\" and a failure with \"HTTP 404\" is returned.\n\n### Expected results\n\nThe test should pass.\n\n### Actual results\n\nThe test fails with an HTTP 404 error code.\n\n### Additional information\n\nThis fix is being implemented in a PR within moments.\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': {'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n path_conjurcloud = '?'.join([path_conjurcloud, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}]} | 1,902 | 269 |
gh_patches_debug_15683 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2556 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ME failing since at least 2018-10-25
ME has been failing since 2018-10-25
Based on automated runs it appears that ME has not run successfully in 2 days (2018-10-25).
```
loaded Open States pupa settings...
me (scrape, import)
bills: {}
Traceback (most recent call last):
File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module>
load_entry_point('pupa', 'console_scripts', 'pupa')()
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 68, in main
subcommands[args.subcommand].handle(args, other)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle
return self.do_handle(args, other, juris)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 301, in do_handle
self.check_session_list(juris)
File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 228, in check_session_list
scraped_sessions = juris.get_session_list()
File "/opt/openstates/openstates/openstates/me/__init__.py", line 98, in get_session_list
sessions.remove('jb-Test')
ValueError: list.remove(x): x not in list
```
Visit http://bobsled.openstates.org for more info.
</issue>
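To pin down the traceback, a minimal sketch of the failing pattern (session strings invented): `list.remove` raises `ValueError` when the value is absent, so hard-coded removals break as soon as the upstream page changes. Filtering, or the `ignored_scraped_sessions` mechanism used in the eventual fix, tolerates absence.

```python
sessions = ['128th Legislature', '2001-2002']   # hypothetical scrape result
try:
    sessions.remove('jb-Test')                  # ValueError: x not in list
except ValueError:
    pass

# A tolerant alternative filters instead of removing:
sessions = [s for s in sessions if s not in ('jb-Test', '2001-2002')]
```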
<code>
[start of openstates/me/__init__.py]
1 from pupa.scrape import Jurisdiction, Organization
2 from openstates.utils import url_xpath
3
4 from .bills import MEBillScraper
5 # from .people import MEPersonScraper
6 # from .committees import MECommitteeScraper
7
8
9 class Maine(Jurisdiction):
10 division_id = "ocd-division/country:us/state:me"
11 classification = "government"
12 name = "Maine"
13 url = "http://legislature.maine.gov"
14 scrapers = {
15 'bills': MEBillScraper,
16 # 'people': MEPersonScraper,
17 # 'committees': MECommitteeScraper,
18 }
19 legislative_sessions = [
20 {
21 "_scraped_name": "121st Legislature",
22 "identifier": "121",
23 "name": "121st Legislature (2003-2004)"
24 },
25 {
26 "_scraped_name": "122nd Legislature",
27 "identifier": "122",
28 "name": "122nd Legislature (2005-2006)"
29 },
30 {
31 "_scraped_name": "123rd Legislature",
32 "identifier": "123",
33 "name": "123rd Legislature (2007-2008)"
34 },
35 {
36 "_scraped_name": "124th Legislature",
37 "identifier": "124",
38 "name": "124th Legislature (2009-2010)"
39 },
40 {
41 "_scraped_name": "125th Legislature",
42 "identifier": "125",
43 "name": "125th Legislature (2011-2012)"
44 },
45 {
46 "_scraped_name": "126th Legislature",
47 "identifier": "126",
48 "name": "126th Legislature (2013-2014)"
49 },
50 {
51 "_scraped_name": "127th Legislature",
52 "identifier": "127",
53 "name": "127th Legislature (2015-2016)"
54 },
55 {
56 "_scraped_name": "128th Legislature",
57 "identifier": "128",
58 "name": "128th Legislature (2017-2018)",
59 "start_date": "2016-12-07",
60 "end_date": "2017-06-14",
61 }
62 ]
63 ignored_scraped_sessions = []
64
65 def get_organizations(self):
66 legislature_name = "Maine Legislature"
67 lower_chamber_name = "House"
68 lower_seats = 151
69 lower_title = "Representative"
70 upper_chamber_name = "Senate"
71 upper_seats = 35
72 upper_title = "Senator"
73
74 legislature = Organization(name=legislature_name,
75 classification="legislature")
76 upper = Organization(upper_chamber_name, classification='upper',
77 parent_id=legislature._id)
78 lower = Organization(lower_chamber_name, classification='lower',
79 parent_id=legislature._id)
80
81 for n in range(1, upper_seats + 1):
82 upper.add_post(
83 label=str(n), role=upper_title,
84 division_id='{}/sldu:{}'.format(self.division_id, n))
85 for n in range(1, lower_seats + 1):
86 lower.add_post(
87 label=str(n), role=lower_title,
88 division_id='{}/sldl:{}'.format(self.division_id, n))
89
90 yield legislature
91 yield Organization(name='Office of the Governor', classification='executive')
92 yield upper
93 yield lower
94
95 def get_session_list(self):
96 sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',
97 '//select[@name="LegSession"]/option/text()')
98 sessions.remove('jb-Test')
99 sessions.remove('2001-2002')
100 return sessions
101
[end of openstates/me/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/me/__init__.py b/openstates/me/__init__.py
--- a/openstates/me/__init__.py
+++ b/openstates/me/__init__.py
@@ -60,7 +60,9 @@
"end_date": "2017-06-14",
}
]
- ignored_scraped_sessions = []
+ ignored_scraped_sessions = [
+ '2001-2002'
+ ]
def get_organizations(self):
legislature_name = "Maine Legislature"
@@ -95,6 +97,4 @@
def get_session_list(self):
sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',
'//select[@name="LegSession"]/option/text()')
- sessions.remove('jb-Test')
- sessions.remove('2001-2002')
return sessions
| {"golden_diff": "diff --git a/openstates/me/__init__.py b/openstates/me/__init__.py\n--- a/openstates/me/__init__.py\n+++ b/openstates/me/__init__.py\n@@ -60,7 +60,9 @@\n \"end_date\": \"2017-06-14\",\n }\n ]\n- ignored_scraped_sessions = []\n+ ignored_scraped_sessions = [\n+ '2001-2002'\n+ ]\n \n def get_organizations(self):\n legislature_name = \"Maine Legislature\"\n@@ -95,6 +97,4 @@\n def get_session_list(self):\n sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',\n '//select[@name=\"LegSession\"]/option/text()')\n- sessions.remove('jb-Test')\n- sessions.remove('2001-2002')\n return sessions\n", "issue": "ME failing since at least 2018-10-25\nME has been failing since 2018-10-25\n\nBased on automated runs it appears that ME has not run successfully in 2 days (2018-10-25).\n\n\n```\n loaded Open States pupa settings...\nme (scrape, import)\n bills: {}\nTraceback (most recent call last):\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 68, in main\n subcommands[args.subcommand].handle(args, other)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 301, in do_handle\n self.check_session_list(juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 228, in check_session_list\n scraped_sessions = juris.get_session_list()\n File \"/opt/openstates/openstates/openstates/me/__init__.py\", line 98, in get_session_list\n sessions.remove('jb-Test')\nValueError: list.remove(x): x not in list\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .bills import MEBillScraper\n# from .people import MEPersonScraper\n# from .committees import MECommitteeScraper\n\n\nclass Maine(Jurisdiction):\n division_id = \"ocd-division/country:us/state:me\"\n classification = \"government\"\n name = \"Maine\"\n url = \"http://legislature.maine.gov\"\n scrapers = {\n 'bills': MEBillScraper,\n # 'people': MEPersonScraper,\n # 'committees': MECommitteeScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"121st Legislature\",\n \"identifier\": \"121\",\n \"name\": \"121st Legislature (2003-2004)\"\n },\n {\n \"_scraped_name\": \"122nd Legislature\",\n \"identifier\": \"122\",\n \"name\": \"122nd Legislature (2005-2006)\"\n },\n {\n \"_scraped_name\": \"123rd Legislature\",\n \"identifier\": \"123\",\n \"name\": \"123rd Legislature (2007-2008)\"\n },\n {\n \"_scraped_name\": \"124th Legislature\",\n \"identifier\": \"124\",\n \"name\": \"124th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"125th Legislature\",\n \"identifier\": \"125\",\n \"name\": \"125th Legislature (2011-2012)\"\n },\n {\n \"_scraped_name\": \"126th Legislature\",\n \"identifier\": \"126\",\n \"name\": \"126th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"127th Legislature\",\n \"identifier\": \"127\",\n \"name\": \"127th Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"128th Legislature\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2017-2018)\",\n \"start_date\": \"2016-12-07\",\n \"end_date\": \"2017-06-14\",\n }\n ]\n ignored_scraped_sessions = []\n\n def 
get_organizations(self):\n legislature_name = \"Maine Legislature\"\n lower_chamber_name = \"House\"\n lower_seats = 151\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 35\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield Organization(name='Office of the Governor', classification='executive')\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = url_xpath('http://www.mainelegislature.org/LawMakerWeb/advancedsearch.asp',\n '//select[@name=\"LegSession\"]/option/text()')\n sessions.remove('jb-Test')\n sessions.remove('2001-2002')\n return sessions\n", "path": "openstates/me/__init__.py"}]} | 2,004 | 209 |
gh_patches_debug_3041 | rasdani/github-patches | git_diff | ethereum__web3.py-1107 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Backport 1094 to v4 branch
### What was wrong?
https://github.com/ethereum/web3.py/issues/1094#issuecomment-428259232 needs to be backported to the v4 branch.
</issue>
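As a hedged illustration of what the pin from #1094 changes: `python_requires` is enforced by pip at install time, so tightening it to `>=3.5.3,<4` keeps web3 4.x off early 3.5 patch releases. The runtime guard below only mirrors that metadata for illustration; it is not part of the package.

```python
# Hypothetical runtime mirror of python_requires='>=3.5.3,<4'.
import sys

if not ((3, 5, 3) <= sys.version_info[:3] < (4, 0, 0)):
    raise RuntimeError('web3 4.x supports Python >=3.5.3,<4 only')
```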
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8 extras_require = {
9 'tester': [
10 "eth-tester[py-evm]==0.1.0-beta.33",
11 "py-geth>=2.0.1,<3.0.0",
12 ],
13 'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
14 'linter': [
15 "flake8==3.4.1",
16 "isort>=4.2.15,<5",
17 ],
18 'docs': [
19 "mock",
20 "sphinx-better-theme>=0.1.4",
21 "click>=5.1",
22 "configparser==3.5.0",
23 "contextlib2>=0.5.4",
24 #"eth-testrpc>=0.8.0",
25 #"ethereum-tester-client>=1.1.0",
26 "ethtoken",
27 "py-geth>=1.4.0",
28 "py-solc>=0.4.0",
29 "pytest>=2.7.2",
30 "sphinx",
31 "sphinx_rtd_theme>=0.1.9",
32 "toposort>=1.4",
33 "urllib3",
34 "web3>=2.1.0",
35 "wheel"
36 ],
37 'dev': [
38 "bumpversion",
39 "flaky>=3.3.0",
40 "hypothesis>=3.31.2",
41 "pytest>=3.5.0,<4",
42 "pytest-mock==1.*",
43 "pytest-pythonpath>=0.3",
44 "pytest-watch==4.*",
45 "pytest-xdist==1.*",
46 "tox>=1.8.0",
47 "tqdm",
48 "when-changed"
49 ]
50 }
51
52 extras_require['dev'] = (
53 extras_require['tester'] +
54 extras_require['linter'] +
55 extras_require['docs'] +
56 extras_require['dev']
57 )
58
59 setup(
60 name='web3',
61 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
62 version='4.7.2',
63 description="""Web3.py""",
64 long_description_markdown_filename='README.md',
65 author='Piper Merriam',
66 author_email='[email protected]',
67 url='https://github.com/ethereum/web3.py',
68 include_package_data=True,
69 install_requires=[
70 "toolz>=0.9.0,<1.0.0;implementation_name=='pypy'",
71 "cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'",
72 "eth-abi>=1.2.0,<2.0.0",
73 "eth-account>=0.2.1,<0.4.0",
74 "eth-utils>=1.2.0,<2.0.0",
75 "hexbytes>=0.1.0,<1.0.0",
76 "lru-dict>=1.1.6,<2.0.0",
77 "eth-hash[pycryptodome]>=0.2.0,<1.0.0",
78 "requests>=2.16.0,<3.0.0",
79 "websockets>=6.0.0,<7.0.0",
80 "pypiwin32>=223;platform_system=='Windows'",
81 ],
82 setup_requires=['setuptools-markdown'],
83 python_requires='>=3.5, <4',
84 extras_require=extras_require,
85 py_modules=['web3', 'ens'],
86 license="MIT",
87 zip_safe=False,
88 keywords='ethereum',
89 packages=find_packages(exclude=["tests", "tests.*"]),
90 classifiers=[
91 'Development Status :: 5 - Production/Stable',
92 'Intended Audience :: Developers',
93 'License :: OSI Approved :: MIT License',
94 'Natural Language :: English',
95 'Programming Language :: Python :: 3',
96 'Programming Language :: Python :: 3.5',
97 'Programming Language :: Python :: 3.6',
98 ],
99 )
100
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
"pypiwin32>=223;platform_system=='Windows'",
],
setup_requires=['setuptools-markdown'],
- python_requires='>=3.5, <4',
+ python_requires='>=3.5.3,<4',
extras_require=extras_require,
py_modules=['web3', 'ens'],
license="MIT",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n- python_requires='>=3.5, <4',\n+ python_requires='>=3.5.3,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n", "issue": "Backport 1094 to v4 branch\n### What was wrong?\r\n\r\nhttps://github.com/ethereum/web3.py/issues/1094#issuecomment-428259232 needs to be backported to the v4 branch.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.33\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n #\"eth-testrpc>=0.8.0\",\n #\"ethereum-tester-client>=1.1.0\",\n \"ethtoken\",\n \"py-geth>=1.4.0\",\n \"py-solc>=0.4.0\",\n \"pytest>=2.7.2\",\n \"sphinx\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"urllib3\",\n \"web3>=2.1.0\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.3.0\",\n \"hypothesis>=3.31.2\",\n \"pytest>=3.5.0,<4\",\n \"pytest-mock==1.*\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch==4.*\",\n \"pytest-xdist==1.*\",\n \"tox>=1.8.0\",\n \"tqdm\",\n \"when-changed\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester'] +\n extras_require['linter'] +\n extras_require['docs'] +\n extras_require['dev']\n)\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.7.2',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.2.0,<2.0.0\",\n \"eth-account>=0.2.1,<0.4.0\",\n \"eth-utils>=1.2.0,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=6.0.0,<7.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require=extras_require,\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} | 1,683 | 110 |
gh_patches_debug_11266 | rasdani/github-patches | git_diff | marshmallow-code__webargs-943 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Testsuite fails under pytest==8.2.0 with `'AsyncHTTPTestCase' has no attribute 'runTest'`
We currently have some test failures in basically all Python versions, starting ~2 days ago. At first glance, it looks like an issue with `tornado`'s `AsyncHTTPTestCase`, but `tornado` doesn't have a recent release.
Looking at what projects updated recently, I flagged `pytest` as a good candidate for investigation, and testing with `pytest==8.1.2` works fine. So something related to unittest TestCases changed in 8.2.0 in a way that breaks tornado tests.
For reference, here's one of the error traces:
```
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'AsyncHTTPTestCase' object has no attribute 'runTest'
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'TestApp' object has no attribute 'runTest'
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'TestValidateApp' object has no attribute 'runTest'
```
`runTest` is the default `methodName` that unittest passes to `TestCase.__init__` when no explicit test method name is given.
I started looking at changes to `pytest`'s unittest module ( https://github.com/pytest-dev/pytest/blame/main/src/_pytest/unittest.py ), but I'm out of time for digging into this right now.
</issue>
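A minimal, hypothetical reproduction of the collection error, independent of tornado: the base `TestCase.__init__` tolerates a missing `runTest`, but a subclass that eagerly wraps the test method in `__init__` (as tornado's `AsyncTestCase` does with `_TestMethodWrapper`) raises `AttributeError` when instantiated with the default method name — which appears to be what pytest 8.2.0 does during collection.

```python
# Hypothetical sketch of tornado's eager-wrapping pattern.
import unittest

class EagerWrapCase(unittest.TestCase):
    def __init__(self, methodName='runTest'):
        super().__init__(methodName)              # tolerated by unittest itself
        setattr(self, methodName, getattr(self, methodName))  # AttributeError here

EagerWrapCase()  # AttributeError: 'EagerWrapCase' object has no attribute 'runTest'
```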
<code>
[start of src/webargs/__init__.py]
1 from __future__ import annotations
2
3 import importlib.metadata
4
5 # Make marshmallow's validation functions importable from webargs
6 from marshmallow import validate
7 from marshmallow.utils import missing
8 from packaging.version import Version
9
10 from webargs import fields
11 from webargs.core import ValidationError
12
13 # TODO: Deprecate __version__ et al.
14 __version__ = importlib.metadata.version("webargs")
15 __parsed_version__ = Version(__version__)
16 __version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (
17 __parsed_version__.release
18 ) # type: ignore[assignment]
19 if __parsed_version__.pre:
20 __version_info__ += __parsed_version__.pre # type: ignore[assignment]
21 __all__ = ("ValidationError", "fields", "missing", "validate")
22
[end of src/webargs/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/webargs/__init__.py b/src/webargs/__init__.py
--- a/src/webargs/__init__.py
+++ b/src/webargs/__init__.py
@@ -14,8 +14,8 @@
__version__ = importlib.metadata.version("webargs")
__parsed_version__ = Version(__version__)
__version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (
- __parsed_version__.release
-) # type: ignore[assignment]
+ __parsed_version__.release # type: ignore[assignment]
+)
if __parsed_version__.pre:
__version_info__ += __parsed_version__.pre # type: ignore[assignment]
__all__ = ("ValidationError", "fields", "missing", "validate")
| {"golden_diff": "diff --git a/src/webargs/__init__.py b/src/webargs/__init__.py\n--- a/src/webargs/__init__.py\n+++ b/src/webargs/__init__.py\n@@ -14,8 +14,8 @@\n __version__ = importlib.metadata.version(\"webargs\")\n __parsed_version__ = Version(__version__)\n __version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (\n- __parsed_version__.release\n-) # type: ignore[assignment]\n+ __parsed_version__.release # type: ignore[assignment]\n+)\n if __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n __all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "issue": "Testsuite fails under pytest==8.2.0 with `'AsyncHTTPTestCase' has no attribute 'runTest'`\nWe currently have some test failures in basically all python versions, starting ~2 days ago. At first glance, it looks like an issue with `tornado`'s `AsyncHTTPTestCase`, but `tornado` doesn't have a recent release.\r\n\r\nLooking at what projects updated recently, I flagged `pytest` as a good candidate for investigation, and testing with `pytest=8.1.2` works fine. So something related to unittest TestCases changed in 8.2.0 in a way that breaks tornado tests.\r\n\r\nFor reference, here's one of the error traces:\r\n```\r\n____________________ ERROR collecting tests/test_tornadoparser.py ____________________\r\n.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__\r\n setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))\r\nE AttributeError: 'AsyncHTTPTestCase' object has no attribute 'runTest'\r\n____________________ ERROR collecting tests/test_tornadoparser.py ____________________\r\n.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__\r\n setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))\r\nE AttributeError: 'TestApp' object has no attribute 'runTest'\r\n____________________ ERROR collecting tests/test_tornadoparser.py ____________________\r\n.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__\r\n setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))\r\nE AttributeError: 'TestValidateApp' object has no attribute 'runTest'\r\n```\r\n\r\n`runTest` looks like a dynamically created method in unittest, whose name gets passed on init to TestCase.\r\nI started looking at changes to `pytest`'s unittest module ( https://github.com/pytest-dev/pytest/blame/main/src/_pytest/unittest.py ), but I'm out of time for digging into this right now.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport importlib.metadata\n\n# Make marshmallow's validation functions importable from webargs\nfrom marshmallow import validate\nfrom marshmallow.utils import missing\nfrom packaging.version import Version\n\nfrom webargs import fields\nfrom webargs.core import ValidationError\n\n# TODO: Deprecate __version__ et al.\n__version__ = importlib.metadata.version(\"webargs\")\n__parsed_version__ = Version(__version__)\n__version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (\n __parsed_version__.release\n) # type: ignore[assignment]\nif __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n__all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "path": "src/webargs/__init__.py"}]} | 1,186 | 175 |
gh_patches_debug_22777 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1370 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NC - Committee assignments missing for a house member
State: NC
There may be more; I just happened to find this one today. Jay Adams is missing committee assignments. The API does not show any in roles.
https://openstates.org/api/v1/legislators/NCL000338/
The NCGA site shows committee assignments here: http://www.ncleg.net/gascripts/members/reports/committeeAssignments.pl?nUserid=697&Chamber=H
There may be something broken and it may affect more members in NC.
I see assignments for other members, but I have not determined what is different between Adams and the rest.
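A plausible root cause, mirroring the fix in the golden diff below: the committee pages list members with a chamber title ("Rep. Adams"), while billy's NameMatcher expects the bare name. A minimal normalization sketch (`clean_member_name` is hypothetical):
```python
def clean_member_name(name):
    # Strip a leading chamber title so "Rep. Adams" matches the legislator
    # record stored as "Adams"; names without a prefix pass through unchanged.
    for prefix in ("Rep. ", "Sen. "):
        if name.startswith(prefix):
            return name[len(prefix):]
    return name


print(clean_member_name("Rep. Adams"))  # Adams
print(clean_member_name("Adams"))       # Adams
```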
</issue>
<code>
[start of openstates/nc/committees.py]
1 from billy.scrape.committees import CommitteeScraper, Committee
2
3 import lxml.html
4
5 class NCCommitteeScraper(CommitteeScraper):
6 jurisdiction = 'nc'
7
8 def scrape_committee(self, committee, url):
9 url = url.replace(' ', '%20') + '&bPrintable=true'
10 data = self.get(url).text
11 doc = lxml.html.fromstring(data)
12 for row in doc.xpath('//table/tr'):
13 children = row.getchildren()
14 if len(children) != 2:
15 self.log('skipping members for ' + committee['committee'])
16 continue
17 mtype, members = row.getchildren()
18 if mtype.text == 'Members':
19 for m in members.getchildren():
20 committee.add_member(m.text)
21 else:
22 committee.add_member(members.text_content(), mtype.text)
23
24
25 def scrape(self, term, chambers):
26 base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='
27
28 chamber_slugs = {'upper': ['Senate%20Standing', 'Senate%20Select'],
29 'lower': ['House%20Standing', 'House%20Select']}
30
31 for chamber in chambers:
32 for ctype in chamber_slugs[chamber]:
33 data = self.get(base_url + ctype).text
34 doc = lxml.html.fromstring(data)
35 doc.make_links_absolute(base_url+ctype)
36 for comm in doc.xpath('//ul/li/a'):
37 name = comm.text
38 # skip committee of whole Senate
39 if 'Whole Senate' in name:
40 continue
41 url = comm.get('href')
42 committee = Committee(chamber, name)
43 self.scrape_committee(committee, url)
44 committee.add_source(url)
45 if not committee['members']:
46 self.warning('empty committee: %s', name)
47 else:
48 self.save_committee(committee)
49
50
[end of openstates/nc/committees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/nc/committees.py b/openstates/nc/committees.py
--- a/openstates/nc/committees.py
+++ b/openstates/nc/committees.py
@@ -17,10 +17,23 @@
mtype, members = row.getchildren()
if mtype.text == 'Members':
for m in members.getchildren():
- committee.add_member(m.text)
+ member_name = self._clean_member_name(m.text)
+ committee.add_member(member_name)
else:
- committee.add_member(members.text_content(), mtype.text)
+ member_name = self._clean_member_name(members.text_content())
+ committee.add_member(member_name, mtype.text)
+ def _clean_member_name(self, name):
+ """Names are displayed as "Office. LastName", e.g. "Rep. Adamsa". This strips the "Office. "
+
+ This helps the NameMatcher in billy link this to the correct legislator.
+ """
+ for prefix in ['Rep. ', 'Sen. ']:
+ if name.startswith(prefix):
+ return name.replace(prefix, '')
+
+ # If none hit, return the name as is
+ return name
def scrape(self, term, chambers):
base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='
| {"golden_diff": "diff --git a/openstates/nc/committees.py b/openstates/nc/committees.py\n--- a/openstates/nc/committees.py\n+++ b/openstates/nc/committees.py\n@@ -17,10 +17,23 @@\n mtype, members = row.getchildren()\n if mtype.text == 'Members':\n for m in members.getchildren():\n- committee.add_member(m.text)\n+ member_name = self._clean_member_name(m.text)\n+ committee.add_member(member_name)\n else:\n- committee.add_member(members.text_content(), mtype.text)\n+ member_name = self._clean_member_name(members.text_content())\n+ committee.add_member(member_name, mtype.text)\n \n+ def _clean_member_name(self, name):\n+ \"\"\"Names are displayed as \"Office. LastName\", e.g. \"Rep. Adamsa\". This strips the \"Office. \"\n+\n+ This helps the NameMatcher in billy link this to the correct legislator.\n+ \"\"\"\n+ for prefix in ['Rep. ', 'Sen. ']:\n+ if name.startswith(prefix):\n+ return name.replace(prefix, '')\n+\n+ # If none hit, return the name as is\n+ return name\n \n def scrape(self, term, chambers):\n base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='\n", "issue": "NC - Committee assignments missing for a house member\nState: NC\r\n\r\nThere may be more, I just happen to find this today. Jay Adams is missing committee assignments. The API does not show any in roles. \r\n\r\nhttps://openstates.org/api/v1/legislators/NCL000338/\r\n\r\nThe NCGA site shows committee assignments here: http://www.ncleg.net/gascripts/members/reports/committeeAssignments.pl?nUserid=697&Chamber=H\r\n\r\nThere may be something broken and it may affect more members in NC.\r\n\r\nI have assignments for others. Have not determined what is different between Adams and the others\n", "before_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\nclass NCCommitteeScraper(CommitteeScraper):\n jurisdiction = 'nc'\n\n def scrape_committee(self, committee, url):\n url = url.replace(' ', '%20') + '&bPrintable=true'\n data = self.get(url).text\n doc = lxml.html.fromstring(data)\n for row in doc.xpath('//table/tr'):\n children = row.getchildren()\n if len(children) != 2:\n self.log('skipping members for ' + committee['committee'])\n continue\n mtype, members = row.getchildren()\n if mtype.text == 'Members':\n for m in members.getchildren():\n committee.add_member(m.text)\n else:\n committee.add_member(members.text_content(), mtype.text)\n\n\n def scrape(self, term, chambers):\n base_url = 'http://www.ncga.state.nc.us/gascripts/Committees/Committees.asp?bPrintable=true&sAction=ViewCommitteeType&sActionDetails='\n\n chamber_slugs = {'upper': ['Senate%20Standing', 'Senate%20Select'],\n 'lower': ['House%20Standing', 'House%20Select']}\n\n for chamber in chambers:\n for ctype in chamber_slugs[chamber]:\n data = self.get(base_url + ctype).text\n doc = lxml.html.fromstring(data)\n doc.make_links_absolute(base_url+ctype)\n for comm in doc.xpath('//ul/li/a'):\n name = comm.text\n # skip committee of whole Senate\n if 'Whole Senate' in name:\n continue\n url = comm.get('href')\n committee = Committee(chamber, name)\n self.scrape_committee(committee, url)\n committee.add_source(url)\n if not committee['members']:\n self.warning('empty committee: %s', name)\n else:\n self.save_committee(committee)\n\n", "path": "openstates/nc/committees.py"}]} | 1,193 | 318 |
gh_patches_debug_858 | rasdani/github-patches | git_diff | pypi__warehouse-3292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warehouse file order differs from legacy PyPI file list
Tonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed.
The specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list.
At the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.
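A minimal sketch with hypothetical records of why the old key is unstable: both sdists share one `packagetype`, so a `(version, packagetype)` key ties and Python's stable sort just preserves whatever order the database returned, whereas keying on `filename` (as the fix below does) is total:
```python
from packaging.version import parse

files = [
    {"version": "0.12.0", "packagetype": "sdist", "filename": "pandas-0.12.0.zip"},
    {"version": "0.12.0", "packagetype": "sdist", "filename": "pandas-0.12.0.tar.gz"},
]

by_type = sorted(files, key=lambda f: (parse(f["version"]), f["packagetype"]))
by_name = sorted(files, key=lambda f: (parse(f["version"]), f["filename"]))

print([f["filename"] for f in by_type])  # tie on the key: database order leaks through
print([f["filename"] for f in by_name])  # ['pandas-0.12.0.tar.gz', 'pandas-0.12.0.zip']
```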
</issue>
<code>
[start of warehouse/legacy/api/simple.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13
14 from packaging.version import parse
15 from pyramid.httpexceptions import HTTPMovedPermanently
16 from pyramid.view import view_config
17 from sqlalchemy import func
18 from sqlalchemy.orm import joinedload
19
20 from warehouse.cache.http import cache_control
21 from warehouse.cache.origin import origin_cache
22 from warehouse.packaging.models import JournalEntry, File, Project, Release
23
24
25 @view_config(
26 route_name="legacy.api.simple.index",
27 renderer="legacy/api/simple/index.html",
28 decorator=[
29 cache_control(10 * 60), # 10 minutes
30 origin_cache(
31 1 * 24 * 60 * 60, # 1 day
32 stale_while_revalidate=5 * 60, # 5 minutes
33 stale_if_error=1 * 24 * 60 * 60, # 1 day
34 ),
35 ],
36 )
37 def simple_index(request):
38 # Get the latest serial number
39 serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0
40 request.response.headers["X-PyPI-Last-Serial"] = str(serial)
41
42 # Fetch the name and normalized name for all of our projects
43 projects = (
44 request.db.query(Project.name, Project.normalized_name)
45 .order_by(Project.normalized_name)
46 .all()
47 )
48
49 return {"projects": projects}
50
51
52 @view_config(
53 route_name="legacy.api.simple.detail",
54 renderer="legacy/api/simple/detail.html",
55 decorator=[
56 cache_control(10 * 60), # 10 minutes
57 origin_cache(
58 1 * 24 * 60 * 60, # 1 day
59 stale_while_revalidate=5 * 60, # 5 minutes
60 stale_if_error=1 * 24 * 60 * 60, # 1 day
61 ),
62 ],
63 )
64 def simple_detail(project, request):
65 # TODO: Handle files which are not hosted on PyPI
66
67 # Make sure that we're using the normalized version of the URL.
68 if (project.normalized_name !=
69 request.matchdict.get("name", project.normalized_name)):
70 return HTTPMovedPermanently(
71 request.current_route_path(name=project.normalized_name),
72 )
73
74 # Get the latest serial number for this project.
75 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
76
77 # Get all of the files for this project.
78 files = sorted(
79 request.db.query(File)
80 .options(joinedload(File.release))
81 .filter(
82 File.name == project.name,
83 File.version.in_(
84 request.db.query(Release)
85 .filter(Release.project == project)
86 .with_entities(Release.version)
87 )
88 )
89 .all(),
90 key=lambda f: (parse(f.version), f.packagetype)
91 )
92
93 return {"project": project, "files": files}
94
[end of warehouse/legacy/api/simple.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py
--- a/warehouse/legacy/api/simple.py
+++ b/warehouse/legacy/api/simple.py
@@ -87,7 +87,7 @@
)
)
.all(),
- key=lambda f: (parse(f.version), f.packagetype)
+ key=lambda f: (parse(f.version), f.filename)
)
return {"project": project, "files": files}
| {"golden_diff": "diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py\n--- a/warehouse/legacy/api/simple.py\n+++ b/warehouse/legacy/api/simple.py\n@@ -87,7 +87,7 @@\n )\n )\n .all(),\n- key=lambda f: (parse(f.version), f.packagetype)\n+ key=lambda f: (parse(f.version), f.filename)\n )\n \n return {\"project\": project, \"files\": files}\n", "issue": "Warehouse file order differs from legacy PyPI file list\nTonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed.\r\n\r\nThe specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list.\r\n\r\nAt the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number 
for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.packagetype)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}]} | 1,681 | 106 |
gh_patches_debug_34797 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-657 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
precommit flake8 check is missing files
elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py is missed by the checker.
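Incidentally, the accompanying fix also corrects a non-string `__all__` in the codec package; a sketch (assumption: Python 3 semantics) of why `__all__` entries must be strings:
```python
class BytesCodec: ...
class TFExampleCodec: ...

__all__ = [BytesCodec, TFExampleCodec]      # class objects, not names:
                                            # `from ... import *` fails with TypeError
__all__ = ["BytesCodec", "TFExampleCodec"]  # string names, the form the fix adopts
```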
</issue>
<code>
[start of elasticdl/python/data/codec/__init__.py]
1 from elasticdl.python.data.codec.bytes_codec import BytesCodec
2 from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec
3
4 __all__ = [BytesCodec, TFExampleCodec]
5
[end of elasticdl/python/data/codec/__init__.py]
[start of elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py]
1 import argparse
2 import os
3
4 from pyspark import SparkContext
5 from pyspark import TaskContext
6
7 import numpy as np
8 from elasticdl.python.elasticdl.common.model_helper import load_user_model
9 from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \
10 convert_numpy_to_recordio
11
12
13 def process_data(
14 feature_label_columns,
15 single_file_preparation_func,
16 output_dir,
17 records_per_file,
18 codec_type,
19 ):
20 def _process_data(file_list):
21 ctx = TaskContext()
22 feature_list = []
23 label_list = []
24 for file in file_list:
25 feature_label_tuple = single_file_preparation_func(file)
26 assert len(feature_label_tuple) == 2
27 feature_list.append(feature_label_tuple[0])
28 label_list.append(feature_label_tuple[1])
29 convert_numpy_to_recordio(
30 output_dir,
31 np.array(feature_list),
32 np.array(label_list),
33 feature_label_columns,
34 records_per_file,
35 codec_type,
36 str(ctx.partitionId()),
37 )
38 return file_list
39 return _process_data
40
41
42 def main():
43 parser = argparse.ArgumentParser(
44 description="Spark job to convert training data to RecordIO format"
45 )
46 parser.add_argument(
47 "--training_data_dir",
48 help="Directory that contains training data and will be traversed \
49 recursively",
50 required=True,
51 )
52 parser.add_argument(
53 "--output_dir",
54 help="Directory of output RecordIO data",
55 required=True,
56 )
57 parser.add_argument(
58 "--model_file",
59 required=True,
60 help="User-defined model file which data processing logic is in",
61 )
62 parser.add_argument(
63 "--records_per_file", default=1024, type=int, help="Record per file"
64 )
65 parser.add_argument(
66 "--codec_type",
67 default="tf_example",
68 choices=["tf_example", "bytes"],
69 help="Type of codec(tf_example or bytes)",
70 )
71 parser.add_argument(
72 "--num_workers",
73 default=2,
74 type=int,
75 help="Number of workers of Spark job",
76 )
77
78 args = parser.parse_args()
79
80 # Get training data files recursively from training_data_dir
81 file_list = []
82 for dir_name, subdir_list, files in os.walk(args.training_data_dir):
83 for fname in files:
84 if not fname.startswith('.'):
85 file_list.append(os.path.join(dir_name, fname))
86
87 # Load user-defined model
88 model_module = load_user_model(args.model_file)
89
90 if not os.path.exists(args.output_dir):
91 os.makedirs(args.output_dir)
92
93 # Start the Spark job
94 sc = SparkContext()
95 rdd = sc.parallelize(file_list, args.num_workers)
96 rdd.mapPartitions(
97 process_data(
98 model_module.feature_columns() + model_module.label_columns(),
99 model_module.prepare_data_for_a_single_file,
100 args.output_dir,
101 args.records_per_file,
102 args.codec_type,
103 )
104 ).collect()
105
106
107 if __name__ == "__main__":
108 main()
109
[end of elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/data/codec/__init__.py b/elasticdl/python/data/codec/__init__.py
--- a/elasticdl/python/data/codec/__init__.py
+++ b/elasticdl/python/data/codec/__init__.py
@@ -1,4 +1,4 @@
from elasticdl.python.data.codec.bytes_codec import BytesCodec
from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec
-__all__ = [BytesCodec, TFExampleCodec]
+__all__ = ["BytesCodec", "TFExampleCodec"]
diff --git a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
--- a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
+++ b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
@@ -6,8 +6,9 @@
import numpy as np
from elasticdl.python.elasticdl.common.model_helper import load_user_model
-from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \
- convert_numpy_to_recordio
+from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (
+ convert_numpy_to_recordio,
+)
def process_data(
@@ -36,6 +37,7 @@
str(ctx.partitionId()),
)
return file_list
+
return _process_data
@@ -50,9 +52,7 @@
required=True,
)
parser.add_argument(
- "--output_dir",
- help="Directory of output RecordIO data",
- required=True,
+ "--output_dir", help="Directory of output RecordIO data", required=True
)
parser.add_argument(
"--model_file",
@@ -81,12 +81,12 @@
file_list = []
for dir_name, subdir_list, files in os.walk(args.training_data_dir):
for fname in files:
- if not fname.startswith('.'):
+ if not fname.startswith("."):
file_list.append(os.path.join(dir_name, fname))
# Load user-defined model
model_module = load_user_model(args.model_file)
-
+
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
| {"golden_diff": "diff --git a/elasticdl/python/data/codec/__init__.py b/elasticdl/python/data/codec/__init__.py\n--- a/elasticdl/python/data/codec/__init__.py\n+++ b/elasticdl/python/data/codec/__init__.py\n@@ -1,4 +1,4 @@\n from elasticdl.python.data.codec.bytes_codec import BytesCodec\n from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n \n-__all__ = [BytesCodec, TFExampleCodec]\n+__all__ = [\"BytesCodec\", \"TFExampleCodec\"]\ndiff --git a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n--- a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n+++ b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n@@ -6,8 +6,9 @@\n \n import numpy as np\n from elasticdl.python.elasticdl.common.model_helper import load_user_model\n-from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \\\n- convert_numpy_to_recordio\n+from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (\n+ convert_numpy_to_recordio,\n+)\n \n \n def process_data(\n@@ -36,6 +37,7 @@\n str(ctx.partitionId()),\n )\n return file_list\n+\n return _process_data\n \n \n@@ -50,9 +52,7 @@\n required=True,\n )\n parser.add_argument(\n- \"--output_dir\",\n- help=\"Directory of output RecordIO data\",\n- required=True,\n+ \"--output_dir\", help=\"Directory of output RecordIO data\", required=True\n )\n parser.add_argument(\n \"--model_file\",\n@@ -81,12 +81,12 @@\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n- if not fname.startswith('.'):\n+ if not fname.startswith(\".\"):\n file_list.append(os.path.join(dir_name, fname))\n \n # Load user-defined model\n model_module = load_user_model(args.model_file)\n- \n+\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n", "issue": "precommit flake8 check is missing files\nelasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py is missed by the checker.\n", "before_files": [{"content": "from elasticdl.python.data.codec.bytes_codec import BytesCodec\nfrom elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n\n__all__ = [BytesCodec, TFExampleCodec]\n", "path": "elasticdl/python/data/codec/__init__.py"}, {"content": "import argparse\nimport os\n\nfrom pyspark import SparkContext\nfrom pyspark import TaskContext\n\nimport numpy as np\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model\nfrom elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \\\n convert_numpy_to_recordio\n\n\ndef process_data(\n feature_label_columns,\n single_file_preparation_func,\n output_dir,\n records_per_file,\n codec_type,\n):\n def _process_data(file_list):\n ctx = TaskContext()\n feature_list = []\n label_list = []\n for file in file_list:\n feature_label_tuple = single_file_preparation_func(file)\n assert len(feature_label_tuple) == 2\n feature_list.append(feature_label_tuple[0])\n label_list.append(feature_label_tuple[1])\n convert_numpy_to_recordio(\n output_dir,\n np.array(feature_list),\n np.array(label_list),\n feature_label_columns,\n records_per_file,\n codec_type,\n str(ctx.partitionId()),\n )\n return file_list\n return _process_data\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Spark job to convert training data to RecordIO format\"\n )\n parser.add_argument(\n 
\"--training_data_dir\",\n help=\"Directory that contains training data and will be traversed \\\n recursively\",\n required=True,\n )\n parser.add_argument(\n \"--output_dir\",\n help=\"Directory of output RecordIO data\",\n required=True,\n )\n parser.add_argument(\n \"--model_file\",\n required=True,\n help=\"User-defined model file which data processing logic is in\",\n )\n parser.add_argument(\n \"--records_per_file\", default=1024, type=int, help=\"Record per file\"\n )\n parser.add_argument(\n \"--codec_type\",\n default=\"tf_example\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=2,\n type=int,\n help=\"Number of workers of Spark job\",\n )\n\n args = parser.parse_args()\n\n # Get training data files recursively from training_data_dir\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n if not fname.startswith('.'):\n file_list.append(os.path.join(dir_name, fname))\n\n # Load user-defined model\n model_module = load_user_model(args.model_file)\n \n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Start the Spark job\n sc = SparkContext()\n rdd = sc.parallelize(file_list, args.num_workers)\n rdd.mapPartitions(\n process_data(\n model_module.feature_columns() + model_module.label_columns(),\n model_module.prepare_data_for_a_single_file,\n args.output_dir,\n args.records_per_file,\n args.codec_type,\n )\n ).collect()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py"}]} | 1,537 | 522 |
gh_patches_debug_16462 | rasdani/github-patches | git_diff | napari__napari-6268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG `changing` event in `EventedDict` not emitted
## 🐛 Bug
The `changing` event in [`EventedDict`](https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L9) is created but not documented or emitted:
https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L75-L77
It seems to have gone unnoticed; should the emission be added?
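A standalone sketch (`MiniEventedDict` is a toy, not napari's API) of the pairing the docstring implies, with `print` standing in for event emission:
```python
class MiniEventedDict(dict):
    def __setitem__(self, key, value):
        if key in self:
            old = self[key]
            print(f"changing: key={key!r}")  # the emission the report says is missing
            super().__setitem__(key, value)
            print(f"changed: key={key!r} {old!r} -> {value!r}")
        else:
            print(f"adding: key={key!r}")
            super().__setitem__(key, value)
            print(f"added: key={key!r} value={value!r}")


d = MiniEventedDict()
d["a"] = 1  # adding / added
d["a"] = 2  # changing / changed
```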
</issue>
<code>
[start of napari/utils/events/containers/_evented_dict.py]
1 """MutableMapping that emits events when altered."""
2 from typing import Mapping, Optional, Sequence, Type, Union
3
4 from napari.utils.events.containers._dict import _K, _T, TypedMutableMapping
5 from napari.utils.events.event import EmitterGroup, Event
6 from napari.utils.events.types import SupportsEvents
7
8
9 class EventedDict(TypedMutableMapping[_K, _T]):
10 """Mutable dictionary that emits events when altered.
11
12 This class is designed to behave exactly like builtin ``dict``, but
13 will emit events before and after all mutations (addition, removal, and
14 changing).
15
16 Parameters
17 ----------
18 data : Mapping, optional
19 Dictionary to initialize the class with.
20 basetype : type of sequence of types, optional
21 Type of the element in the dictionary.
22
23 Events
24 ------
25 changed (key: K, old_value: T, value: T)
26 emitted when item at ``key`` is changed from ``old_value`` to ``value``
27 adding (key: K)
28 emitted before an item is added to the dictionary with ``key``
29 added (key: K, value: T)
30 emitted after ``value`` was added to the dictionary with ``key``
31 removing (key: K)
32 emitted before ``key`` is removed from the dictionary
33 removed (key: K, value: T)
34 emitted after ``key`` was removed from the dictionary
35 updated (key, K, value: T)
36 emitted after ``value`` of ``key`` was changed. Only implemented by
37 subclasses to give them an option to trigger some update after ``value``
38 was changed and this class did not register it. This can be useful if
39 the ``basetype`` is not an evented object.
40 """
41
42 events: EmitterGroup
43
44 def __init__(
45 self,
46 data: Optional[Mapping[_K, _T]] = None,
47 basetype: Union[Type[_T], Sequence[Type[_T]]] = (),
48 ) -> None:
49 _events = {
50 "changing": None,
51 "changed": None,
52 "adding": None,
53 "added": None,
54 "removing": None,
55 "removed": None,
56 "updated": None,
57 }
58 # For inheritance: If the mro already provides an EmitterGroup, add...
59 if hasattr(self, "events") and isinstance(self.events, EmitterGroup):
60 self.events.add(**_events)
61 else:
62 # otherwise create a new one
63 self.events = EmitterGroup(
64 source=self, auto_connect=False, **_events
65 )
66 super().__init__(data, basetype)
67
68 def __setitem__(self, key: _K, value: _T):
69 old = self._dict.get(key)
70 if value is old or value == old:
71 return
72 if old is None:
73 self.events.adding(key=key)
74 super().__setitem__(key, value)
75 self.events.added(key=key, value=value)
76 self._connect_child_emitters(value)
77 else:
78 super().__setitem__(key, value)
79 self.events.changed(key=key, old_value=old, value=value)
80
81 def __delitem__(self, key: _K):
82 self.events.removing(key=key)
83 self._disconnect_child_emitters(self[key])
84 item = self._dict.pop(key)
85 self.events.removed(key=key, value=item)
86
87 def _reemit_child_event(self, event: Event):
88 """An item in the dict emitted an event. Re-emit with key"""
89 if not hasattr(event, "key"):
90 event.key = self.key(event.source)
91
92 # re-emit with this object's EventEmitter
93 self.events(event)
94
95 def _disconnect_child_emitters(self, child: _T):
96 """Disconnect all events from the child from the re-emitter."""
97 if isinstance(child, SupportsEvents):
98 child.events.disconnect(self._reemit_child_event)
99
100 def _connect_child_emitters(self, child: _T):
101 """Connect all events from the child to be re-emitted."""
102 if isinstance(child, SupportsEvents):
103 # make sure the event source has been set on the child
104 if child.events.source is None:
105 child.events.source = child
106 child.events.connect(self._reemit_child_event)
107
108 def key(self, value: _T):
109 """Return first instance of value."""
110 for k, v in self._dict.items():
111 if v is value or v == value:
112 return k
113 return None
114
[end of napari/utils/events/containers/_evented_dict.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/utils/events/containers/_evented_dict.py b/napari/utils/events/containers/_evented_dict.py
--- a/napari/utils/events/containers/_evented_dict.py
+++ b/napari/utils/events/containers/_evented_dict.py
@@ -22,6 +22,8 @@
Events
------
+ changing (key: K)
+ emitted before an item at ``key`` is changed
changed (key: K, old_value: T, value: T)
emitted when item at ``key`` is changed from ``old_value`` to ``value``
adding (key: K)
@@ -75,6 +77,7 @@
self.events.added(key=key, value=value)
self._connect_child_emitters(value)
else:
+ self.events.changing(key=key)
super().__setitem__(key, value)
self.events.changed(key=key, old_value=old, value=value)
| {"golden_diff": "diff --git a/napari/utils/events/containers/_evented_dict.py b/napari/utils/events/containers/_evented_dict.py\n--- a/napari/utils/events/containers/_evented_dict.py\n+++ b/napari/utils/events/containers/_evented_dict.py\n@@ -22,6 +22,8 @@\n \n Events\n ------\n+ changing (key: K)\n+ emitted before an item at ``key`` is changed\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n@@ -75,6 +77,7 @@\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n+ self.events.changing(key=key)\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n", "issue": "BUG `changing` event in `EventedDict` not emitted\n## \ud83d\udc1b Bug\r\n\r\nThe `changing` event in [`EventedDict`](https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L9) is created but not documented or emitted:\r\n\r\nhttps://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L75-L77\r\n\r\nIt seems not to be missed, should it be added?\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n\n", "before_files": [{"content": "\"\"\"MutableMapping that emits events when altered.\"\"\"\nfrom typing import Mapping, Optional, Sequence, Type, Union\n\nfrom napari.utils.events.containers._dict import _K, _T, TypedMutableMapping\nfrom napari.utils.events.event import EmitterGroup, Event\nfrom napari.utils.events.types import SupportsEvents\n\n\nclass EventedDict(TypedMutableMapping[_K, _T]):\n \"\"\"Mutable dictionary that emits events when altered.\n\n This class is designed to behave exactly like builtin ``dict``, but\n will emit events before and after all mutations (addition, removal, and\n changing).\n\n Parameters\n ----------\n data : Mapping, optional\n Dictionary to initialize the class with.\n basetype : type of sequence of types, optional\n Type of the element in the dictionary.\n\n Events\n ------\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n emitted before an item is added to the dictionary with ``key``\n added (key: K, value: T)\n emitted after ``value`` was added to the dictionary with ``key``\n removing (key: K)\n emitted before ``key`` is removed from the dictionary\n removed (key: K, value: T)\n emitted after ``key`` was removed from the dictionary\n updated (key, K, value: T)\n emitted after ``value`` of ``key`` was changed. Only implemented by\n subclasses to give them an option to trigger some update after ``value``\n was changed and this class did not register it. 
This can be useful if\n the ``basetype`` is not an evented object.\n \"\"\"\n\n events: EmitterGroup\n\n def __init__(\n self,\n data: Optional[Mapping[_K, _T]] = None,\n basetype: Union[Type[_T], Sequence[Type[_T]]] = (),\n ) -> None:\n _events = {\n \"changing\": None,\n \"changed\": None,\n \"adding\": None,\n \"added\": None,\n \"removing\": None,\n \"removed\": None,\n \"updated\": None,\n }\n # For inheritance: If the mro already provides an EmitterGroup, add...\n if hasattr(self, \"events\") and isinstance(self.events, EmitterGroup):\n self.events.add(**_events)\n else:\n # otherwise create a new one\n self.events = EmitterGroup(\n source=self, auto_connect=False, **_events\n )\n super().__init__(data, basetype)\n\n def __setitem__(self, key: _K, value: _T):\n old = self._dict.get(key)\n if value is old or value == old:\n return\n if old is None:\n self.events.adding(key=key)\n super().__setitem__(key, value)\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n\n def __delitem__(self, key: _K):\n self.events.removing(key=key)\n self._disconnect_child_emitters(self[key])\n item = self._dict.pop(key)\n self.events.removed(key=key, value=item)\n\n def _reemit_child_event(self, event: Event):\n \"\"\"An item in the dict emitted an event. Re-emit with key\"\"\"\n if not hasattr(event, \"key\"):\n event.key = self.key(event.source)\n\n # re-emit with this object's EventEmitter\n self.events(event)\n\n def _disconnect_child_emitters(self, child: _T):\n \"\"\"Disconnect all events from the child from the re-emitter.\"\"\"\n if isinstance(child, SupportsEvents):\n child.events.disconnect(self._reemit_child_event)\n\n def _connect_child_emitters(self, child: _T):\n \"\"\"Connect all events from the child to be re-emitted.\"\"\"\n if isinstance(child, SupportsEvents):\n # make sure the event source has been set on the child\n if child.events.source is None:\n child.events.source = child\n child.events.connect(self._reemit_child_event)\n\n def key(self, value: _T):\n \"\"\"Return first instance of value.\"\"\"\n for k, v in self._dict.items():\n if v is value or v == value:\n return k\n return None\n", "path": "napari/utils/events/containers/_evented_dict.py"}]} | 1,955 | 212 |
gh_patches_debug_11353 | rasdani/github-patches | git_diff | iterative__dvc-951 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Testing against Python 3.6 and 3.7
https://github.com/pyinstaller/pyinstaller#main-advantages says that pyinstaller supports these versions of Python but https://github.com/iterative/dvc/blob/master/.travis.yml#L9 says the opposite.
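For what it's worth, one concrete breakage on newer Pythons in this codebase: `os.errno` was an accidental re-export that CPython removed around 3.7, so the `EEXIST` check needs the `errno` module directly (`makedirs_quiet` below is a hypothetical extraction of `_makedirs`):
```python
import errno
import os


def makedirs_quiet(dname):
    # Portable EEXIST check: use errno.EEXIST, never os.errno.EEXIST, which
    # raises AttributeError on Python 3.7+.
    try:
        os.makedirs(dname)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


makedirs_quiet("demo_dir")
makedirs_quiet("demo_dir")  # second call hits EEXIST and is swallowed
```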
</issue>
<code>
[start of dvc/remote/base.py]
1 import os
2 import re
3
4 from dvc.config import Config
5 from dvc.logger import Logger
6 from dvc.exceptions import DvcException
7
8
9 STATUS_OK = 1
10 STATUS_NEW = 3
11 STATUS_DELETED = 4
12
13
14 STATUS_MAP = {
15 # (local_exists, remote_exists)
16 (True, True): STATUS_OK,
17 (False, False): STATUS_OK,
18 (True, False): STATUS_NEW,
19 (False, True): STATUS_DELETED,
20 }
21
22
23 class DataCloudError(DvcException):
24 """ Data Cloud exception """
25 def __init__(self, msg):
26 super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))
27
28
29 class RemoteBase(object):
30 REGEX = None
31 REQUIRES = {}
32
33 def __init__(self, project, config):
34 pass
35
36 @classmethod
37 def supported(cls, config):
38 url = config[Config.SECTION_REMOTE_URL]
39 url_ok = cls.match(url)
40 deps_ok = all(cls.REQUIRES.values())
41 if url_ok and not deps_ok:
42 missing = [k for k, v in cls.REQUIRES.items() if v is None]
43 msg = "URL \'{}\' is supported but requires " \
44 "these missing dependencies: {}"
45 Logger.warn(msg.format(url, str(missing)))
46 return url_ok and deps_ok
47
48 @classmethod
49 def match(cls, url):
50 return re.match(cls.REGEX, url)
51
52 def group(self, name):
53 m = self.match(self.url)
54 if not m:
55 return None
56 return m.group(name)
57
58 @staticmethod
59 def tmp_file(fname):
60 """ Temporary name for a partial download """
61 # FIXME probably better use uuid()
62 return fname + '.part'
63
64 def save_info(self, path_info):
65 raise NotImplementedError
66
67 def save(self, path_info):
68 raise NotImplementedError
69
70 def checkout(self, path_info, checksum_info):
71 raise NotImplementedError
72
73 def download(self, from_infos, to_infos, no_progress_bar=False, name=None):
74 raise NotImplementedError
75
76 def upload(self, from_infos, to_infos, path_info, name=None):
77 raise NotImplementedError
78
79 def remove(self, path_info):
80 raise NotImplementedError
81
82 def move(self, path_info):
83 raise NotImplementedError
84
85 def _makedirs(self, fname):
86 dname = os.path.dirname(fname)
87 try:
88 os.makedirs(dname)
89 except OSError as e:
90 if e.errno != os.errno.EEXIST:
91 raise
92
93 def md5s_to_path_infos(self, md5s):
94 raise NotImplementedError
95
96 def exists(self, path_infos):
97 raise NotImplementedError
98
99 @classmethod
100 def _verify_path_args(cls, from_infos, to_infos, names=None):
101 assert isinstance(from_infos, list)
102 assert isinstance(to_infos, list)
103 assert len(from_infos) == len(to_infos)
104
105 if not names:
106 names = len(to_infos) * [None]
107 else:
108 assert isinstance(names, list)
109 assert len(names) == len(to_infos)
110
111 return names
112
[end of dvc/remote/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/remote/base.py b/dvc/remote/base.py
--- a/dvc/remote/base.py
+++ b/dvc/remote/base.py
@@ -1,5 +1,6 @@
import os
import re
+import errno
from dvc.config import Config
from dvc.logger import Logger
@@ -84,10 +85,11 @@
def _makedirs(self, fname):
dname = os.path.dirname(fname)
+
try:
os.makedirs(dname)
except OSError as e:
- if e.errno != os.errno.EEXIST:
+ if e.errno != errno.EEXIST:
raise
def md5s_to_path_infos(self, md5s):
| {"golden_diff": "diff --git a/dvc/remote/base.py b/dvc/remote/base.py\n--- a/dvc/remote/base.py\n+++ b/dvc/remote/base.py\n@@ -1,5 +1,6 @@\n import os\n import re\n+import errno\n \n from dvc.config import Config\n from dvc.logger import Logger\n@@ -84,10 +85,11 @@\n \n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n+\n try:\n os.makedirs(dname)\n except OSError as e:\n- if e.errno != os.errno.EEXIST:\n+ if e.errno != errno.EEXIST:\n raise\n \n def md5s_to_path_infos(self, md5s):\n", "issue": "Testing against Python 3.6 and 3.7\nhttps://github.com/pyinstaller/pyinstaller#main-advantages says that pyinstaller supports these versions of Python but https://github.com/iterative/dvc/blob/master/.travis.yml#L9 says the opposite.\n", "before_files": [{"content": "import os\nimport re\n\nfrom dvc.config import Config\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\n\n\nSTATUS_OK = 1\nSTATUS_NEW = 3\nSTATUS_DELETED = 4\n\n\nSTATUS_MAP = {\n # (local_exists, remote_exists)\n (True, True): STATUS_OK,\n (False, False): STATUS_OK,\n (True, False): STATUS_NEW,\n (False, True): STATUS_DELETED,\n}\n\n\nclass DataCloudError(DvcException):\n \"\"\" Data Cloud exception \"\"\"\n def __init__(self, msg):\n super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))\n\n\nclass RemoteBase(object):\n REGEX = None\n REQUIRES = {}\n\n def __init__(self, project, config):\n pass\n\n @classmethod\n def supported(cls, config):\n url = config[Config.SECTION_REMOTE_URL]\n url_ok = cls.match(url)\n deps_ok = all(cls.REQUIRES.values())\n if url_ok and not deps_ok:\n missing = [k for k, v in cls.REQUIRES.items() if v is None]\n msg = \"URL \\'{}\\' is supported but requires \" \\\n \"these missing dependencies: {}\"\n Logger.warn(msg.format(url, str(missing)))\n return url_ok and deps_ok\n\n @classmethod\n def match(cls, url):\n return re.match(cls.REGEX, url)\n\n def group(self, name):\n m = self.match(self.url)\n if not m:\n return None\n return m.group(name)\n\n @staticmethod\n def tmp_file(fname):\n \"\"\" Temporary name for a partial download \"\"\"\n # FIXME probably better use uuid()\n return fname + '.part'\n\n def save_info(self, path_info):\n raise NotImplementedError\n\n def save(self, path_info):\n raise NotImplementedError\n\n def checkout(self, path_info, checksum_info):\n raise NotImplementedError\n\n def download(self, from_infos, to_infos, no_progress_bar=False, name=None):\n raise NotImplementedError\n\n def upload(self, from_infos, to_infos, path_info, name=None):\n raise NotImplementedError\n\n def remove(self, path_info):\n raise NotImplementedError\n\n def move(self, path_info):\n raise NotImplementedError\n\n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n try:\n os.makedirs(dname)\n except OSError as e:\n if e.errno != os.errno.EEXIST:\n raise\n\n def md5s_to_path_infos(self, md5s):\n raise NotImplementedError\n\n def exists(self, path_infos):\n raise NotImplementedError\n\n @classmethod\n def _verify_path_args(cls, from_infos, to_infos, names=None):\n assert isinstance(from_infos, list)\n assert isinstance(to_infos, list)\n assert len(from_infos) == len(to_infos)\n\n if not names:\n names = len(to_infos) * [None]\n else:\n assert isinstance(names, list)\n assert len(names) == len(to_infos)\n\n return names\n", "path": "dvc/remote/base.py"}]} | 1,488 | 161 |
gh_patches_debug_4392 | rasdani/github-patches | git_diff | encode__starlette-1940 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
middleware causes exceptions to not be raised/handled silently
### Discussed in https://github.com/encode/starlette/discussions/1976
<div type='discussions-op-text'>
<sup>Originally posted by **fraser-langton** December 6, 2022</sup>
From [issue in FastAPI](https://github.com/tiangolo/fastapi/issues/5173) - issue was said to be from starlette
Was initially noticed in FastAPI 0.74.0, was fixed after 0.79.0 but has since regressed (FastAPI 0.88.0 it isn't working)
When exceptions are raised on a subapp, the exceptions are not propagated all the way to see in console
In the example code
/info raises an exception and the full stacktrace is seen in console
/private/info does not raise the exception and only `INFO: 127.0.0.1:56308 - "GET /info HTTP/1.1" 500 Internal Server Error` is shown in console
```python
import uvicorn
from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware
app = FastAPI()
@app.get("/info")
def info():
# raises Exception as expected, the traceback is seen in console
raise Exception
private_api = FastAPI()
@private_api.get("/info")
def info():
# exception is handled silently, no traceback is seen in console
raise Exception
app.mount("/private", private_api)
class Middleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
return await call_next(request)
app.add_middleware(Middleware) # when this is removed, the exceptions are raised for all routes
if __name__ == '__main__':
uvicorn.run(app, port=8000)
```
</div>
</issue>
<code>
[start of starlette/middleware/base.py]
1 import typing
2
3 import anyio
4
5 from starlette.requests import Request
6 from starlette.responses import Response, StreamingResponse
7 from starlette.types import ASGIApp, Message, Receive, Scope, Send
8
9 RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]
10 DispatchFunction = typing.Callable[
11 [Request, RequestResponseEndpoint], typing.Awaitable[Response]
12 ]
13 T = typing.TypeVar("T")
14
15
16 class BaseHTTPMiddleware:
17 def __init__(
18 self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None
19 ) -> None:
20 self.app = app
21 self.dispatch_func = self.dispatch if dispatch is None else dispatch
22
23 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
24 if scope["type"] != "http":
25 await self.app(scope, receive, send)
26 return
27
28 response_sent = anyio.Event()
29
30 async def call_next(request: Request) -> Response:
31 app_exc: typing.Optional[Exception] = None
32 send_stream, recv_stream = anyio.create_memory_object_stream()
33
34 async def receive_or_disconnect() -> Message:
35 if response_sent.is_set():
36 return {"type": "http.disconnect"}
37
38 async with anyio.create_task_group() as task_group:
39
40 async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:
41 result = await func()
42 task_group.cancel_scope.cancel()
43 return result
44
45 task_group.start_soon(wrap, response_sent.wait)
46 message = await wrap(request.receive)
47
48 if response_sent.is_set():
49 return {"type": "http.disconnect"}
50
51 return message
52
53 async def close_recv_stream_on_response_sent() -> None:
54 await response_sent.wait()
55 recv_stream.close()
56
57 async def send_no_error(message: Message) -> None:
58 try:
59 await send_stream.send(message)
60 except anyio.BrokenResourceError:
61 # recv_stream has been closed, i.e. response_sent has been set.
62 return
63
64 async def coro() -> None:
65 nonlocal app_exc
66
67 async with send_stream:
68 try:
69 await self.app(scope, receive_or_disconnect, send_no_error)
70 except Exception as exc:
71 app_exc = exc
72
73 task_group.start_soon(close_recv_stream_on_response_sent)
74 task_group.start_soon(coro)
75
76 try:
77 message = await recv_stream.receive()
78 except anyio.EndOfStream:
79 if app_exc is not None:
80 raise app_exc
81 raise RuntimeError("No response returned.")
82
83 assert message["type"] == "http.response.start"
84
85 async def body_stream() -> typing.AsyncGenerator[bytes, None]:
86 async with recv_stream:
87 async for message in recv_stream:
88 assert message["type"] == "http.response.body"
89 body = message.get("body", b"")
90 if body:
91 yield body
92 if not message.get("more_body", False):
93 break
94
95 if app_exc is not None:
96 raise app_exc
97
98 response = StreamingResponse(
99 status_code=message["status"], content=body_stream()
100 )
101 response.raw_headers = message["headers"]
102 return response
103
104 async with anyio.create_task_group() as task_group:
105 request = Request(scope, receive=receive)
106 response = await self.dispatch_func(request, call_next)
107 await response(scope, receive, send)
108 response_sent.set()
109
110 async def dispatch(
111 self, request: Request, call_next: RequestResponseEndpoint
112 ) -> Response:
113 raise NotImplementedError() # pragma: no cover
114
[end of starlette/middleware/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py
--- a/starlette/middleware/base.py
+++ b/starlette/middleware/base.py
@@ -89,8 +89,6 @@
body = message.get("body", b"")
if body:
yield body
- if not message.get("more_body", False):
- break
if app_exc is not None:
raise app_exc
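
The two removed lines are the crux of the fix. In the original `body_stream`, the generator stopped as soon as a message arrived with `more_body` unset, i.e. on the final body chunk, even though the task running the wrapped app could still be executing and about to store an exception in `app_exc`. A rough before/after sketch (an annotated excerpt from the code above, not a standalone program; comments are added for illustration):

```python
# before: stop on the final chunk, possibly while the app task is still running
async for message in recv_stream:
    body = message.get("body", b"")
    if body:
        yield body
    if not message.get("more_body", False):
        break  # exits early; an exception stored in app_exc afterwards is dropped

# after: keep draining until the sending side closes the stream,
# which only happens once the app task has actually finished
async for message in recv_stream:
    body = message.get("body", b"")
    if body:
        yield body
```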
| {"golden_diff": "diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py\n--- a/starlette/middleware/base.py\n+++ b/starlette/middleware/base.py\n@@ -89,8 +89,6 @@\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n- if not message.get(\"more_body\", False):\n- break\n \n if app_exc is not None:\n raise app_exc\n", "issue": "middleware causes exceptions to not be raised/handled silently\n### Discussed in https://github.com/encode/starlette/discussions/1976\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **fraser-langton** December 6, 2022</sup>\r\nFrom [issue in FastAPI](https://github.com/tiangolo/fastapi/issues/5173) - issue was said to be from starlette\r\n\r\nWas initially noticed in FastAPI 0.74.0, was fixed after 0.79.0 but has since regressed (FastAPI 0.88.0 it isn't working)\r\n\r\nWhen exceptions are raised on a subapp, the exceptions are not propagated all the way to see in console\r\nIn the example code\r\n/info raises an exception and the full stacktrace is seen in console\r\n/private/info does not raise the exception and only `INFO: 127.0.0.1:56308 - \"GET /info HTTP/1.1\" 500 Internal Server Error` is shown in console\r\n\r\n```python\r\nimport uvicorn\r\nfrom fastapi import FastAPI\r\nfrom starlette.middleware.base import BaseHTTPMiddleware\r\n\r\n\r\napp = FastAPI()\r\n\r\n\r\[email protected](\"/info\")\r\ndef info():\r\n # raises Exception as expected, the traceback is seen in console\r\n raise Exception\r\n\r\n\r\nprivate_api = FastAPI()\r\n\r\n\r\n@private_api.get(\"/info\")\r\ndef info():\r\n # exception is handled silently, no traceback is seen in console\r\n raise Exception\r\n\r\n\r\napp.mount(\"/private\", private_api)\r\n\r\n\r\nclass Middleware(BaseHTTPMiddleware):\r\n\r\n async def dispatch(self, request, call_next):\r\n return await call_next(request)\r\n\r\n\r\napp.add_middleware(Middleware) # when this is removed, the exceptions are raised for all routes\r\n\r\n\r\nif __name__ == '__main__':\r\n uvicorn.run(app, port=8000)\r\n```\r\n</div>\n", "before_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\nT = typing.TypeVar(\"T\")\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_sent = anyio.Event()\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def receive_or_disconnect() -> Message:\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n async with anyio.create_task_group() as task_group:\n\n async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:\n result = await func()\n task_group.cancel_scope.cancel()\n return result\n\n task_group.start_soon(wrap, response_sent.wait)\n message = await wrap(request.receive)\n\n if response_sent.is_set():\n return 
{\"type\": \"http.disconnect\"}\n\n return message\n\n async def close_recv_stream_on_response_sent() -> None:\n await response_sent.wait()\n recv_stream.close()\n\n async def send_no_error(message: Message) -> None:\n try:\n await send_stream.send(message)\n except anyio.BrokenResourceError:\n # recv_stream has been closed, i.e. response_sent has been set.\n return\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, receive_or_disconnect, send_no_error)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(close_recv_stream_on_response_sent)\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n response_sent.set()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}]} | 1,960 | 97 |
gh_patches_debug_59726 | rasdani/github-patches | git_diff | pytorch__audio-755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🚀 Feature Request: Opus audio format support
## 🚀 Feature
Add opus format
## Motivation
Opus is a good and popular lossy audio coding format. A lot of audio files are stored in the Opus format, but it is currently impossible to load them with torchaudio.load().
## Pitch
Please make it possible to do:
audio = torchaudio.load("my_audio.opus")
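
Until native support lands, a common workaround is to transcode Opus files to a format torchaudio already reads. A minimal sketch, assuming `ffmpeg` is installed and on the PATH (the helper name and paths are illustrative, not part of any API):

```python
import subprocess

import torchaudio


def load_opus_via_ffmpeg(path: str):
    # Transcode to WAV first, since torchaudio cannot read .opus directly.
    wav_path = path.rsplit(".", 1)[0] + ".wav"
    subprocess.run(["ffmpeg", "-y", "-i", path, wav_path], check=True)
    return torchaudio.load(wav_path)


waveform, sample_rate = load_opus_via_ffmpeg("my_audio.opus")
```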
</issue>
<code>
[start of build_tools/setup_helpers/extension.py]
1 import os
2 import platform
3 import subprocess
4 from pathlib import Path
5
6 from torch.utils.cpp_extension import (
7 CppExtension,
8 BuildExtension as TorchBuildExtension
9 )
10
11 __all__ = [
12 'get_ext_modules',
13 'BuildExtension',
14 ]
15
16 _THIS_DIR = Path(__file__).parent.resolve()
17 _ROOT_DIR = _THIS_DIR.parent.parent.resolve()
18 _CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'
19 _TP_BASE_DIR = _ROOT_DIR / 'third_party'
20 _TP_INSTALL_DIR = _TP_BASE_DIR / 'install'
21
22
23 def _get_build_sox():
24 val = os.environ.get('BUILD_SOX', '0')
25 trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']
26 falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']
27 if val in trues:
28 return True
29 if val not in falses:
30 print(
31 f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. '
32 f'Expected one of {trues + falses}')
33 return False
34
35
36 _BUILD_SOX = _get_build_sox()
37
38
39 def _get_eca(debug):
40 eca = []
41 if debug:
42 eca += ["-O0", "-g"]
43 else:
44 eca += ["-O3"]
45 return eca
46
47
48 def _get_ela(debug):
49 ela = []
50 if debug:
51 if platform.system() == "Windows":
52 ela += ["/DEBUG:FULL"]
53 else:
54 ela += ["-O0", "-g"]
55 else:
56 ela += ["-O3"]
57 return ela
58
59
60 def _get_srcs():
61 return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]
62
63
64 def _get_include_dirs():
65 dirs = [
66 str(_ROOT_DIR),
67 ]
68 if _BUILD_SOX:
69 dirs.append(str(_TP_INSTALL_DIR / 'include'))
70 return dirs
71
72
73 def _get_extra_objects():
74 objs = []
75 if _BUILD_SOX:
76 # NOTE: The order of the library listed bellow matters.
77 #
78 # (the most important thing is that dependencies come after a library
79 # e.g., sox comes first, flac/vorbis comes before ogg, and
80 # vorbisenc/vorbisfile comes before vorbis
81 libs = [
82 'libsox.a',
83 'libmad.a',
84 'libFLAC.a',
85 'libmp3lame.a',
86 'libvorbisenc.a',
87 'libvorbisfile.a',
88 'libvorbis.a',
89 'libogg.a',
90 ]
91 for lib in libs:
92 objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))
93 return objs
94
95
96 def _get_libraries():
97 return [] if _BUILD_SOX else ['sox']
98
99
100 def _build_third_party():
101 build_dir = str(_TP_BASE_DIR / 'build')
102 os.makedirs(build_dir, exist_ok=True)
103 subprocess.run(
104 args=['cmake', '..'],
105 cwd=build_dir,
106 check=True,
107 )
108 subprocess.run(
109 args=['cmake', '--build', '.'],
110 cwd=build_dir,
111 check=True,
112 )
113
114
115 _EXT_NAME = 'torchaudio._torchaudio'
116
117
118 def get_ext_modules(debug=False):
119 if platform.system() == 'Windows':
120 return None
121 return [
122 CppExtension(
123 _EXT_NAME,
124 _get_srcs(),
125 libraries=_get_libraries(),
126 include_dirs=_get_include_dirs(),
127 extra_compile_args=_get_eca(debug),
128 extra_objects=_get_extra_objects(),
129 extra_link_args=_get_ela(debug),
130 ),
131 ]
132
133
134 class BuildExtension(TorchBuildExtension):
135 def build_extension(self, ext):
136 if ext.name == _EXT_NAME and _BUILD_SOX:
137 _build_third_party()
138 super().build_extension(ext)
139
[end of build_tools/setup_helpers/extension.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py
--- a/build_tools/setup_helpers/extension.py
+++ b/build_tools/setup_helpers/extension.py
@@ -83,6 +83,8 @@
'libmad.a',
'libFLAC.a',
'libmp3lame.a',
+ 'libopusfile.a',
+ 'libopus.a',
'libvorbisenc.a',
'libvorbisfile.a',
'libvorbis.a',
| {"golden_diff": "diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py\n--- a/build_tools/setup_helpers/extension.py\n+++ b/build_tools/setup_helpers/extension.py\n@@ -83,6 +83,8 @@\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n+ 'libopusfile.a',\n+ 'libopus.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n", "issue": "\ud83d\ude80 Feature Request: Opus audio format support\n## \ud83d\ude80 Feature\r\n<!-- -->\r\nAdd opus format \r\n## Motivation\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\nOpus is a good and popular lossy audio coding format. A lot of audio files are stored in opus format but it's impossible to use it with torchaudio.load(). \r\n## Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nPlease, make possible to do \r\naudio = torchaudio.load(\"my_audio.opus\")\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_THIS_DIR = Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'install'\n\n\ndef _get_build_sox():\n val = os.environ.get('BUILD_SOX', '0')\n trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']\n falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']\n if val in trues:\n return True\n if val not in falses:\n print(\n f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. 
'\n f'Expected one of {trues + falses}')\n return False\n\n\n_BUILD_SOX = _get_build_sox()\n\n\ndef _get_eca(debug):\n eca = []\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n eca += [\"-O3\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n dirs = [\n str(_ROOT_DIR),\n ]\n if _BUILD_SOX:\n dirs.append(str(_TP_INSTALL_DIR / 'include'))\n return dirs\n\n\ndef _get_extra_objects():\n objs = []\n if _BUILD_SOX:\n # NOTE: The order of the library listed bellow matters.\n #\n # (the most important thing is that dependencies come after a library\n # e.g., sox comes first, flac/vorbis comes before ogg, and\n # vorbisenc/vorbisfile comes before vorbis\n libs = [\n 'libsox.a',\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n 'libogg.a',\n ]\n for lib in libs:\n objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))\n return objs\n\n\ndef _get_libraries():\n return [] if _BUILD_SOX else ['sox']\n\n\ndef _build_third_party():\n build_dir = str(_TP_BASE_DIR / 'build')\n os.makedirs(build_dir, exist_ok=True)\n subprocess.run(\n args=['cmake', '..'],\n cwd=build_dir,\n check=True,\n )\n subprocess.run(\n args=['cmake', '--build', '.'],\n cwd=build_dir,\n check=True,\n )\n\n\n_EXT_NAME = 'torchaudio._torchaudio'\n\n\ndef get_ext_modules(debug=False):\n if platform.system() == 'Windows':\n return None\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_objects=_get_extra_objects(),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME and _BUILD_SOX:\n _build_third_party()\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}]} | 1,869 | 116 |
gh_patches_debug_39666 | rasdani/github-patches | git_diff | SeldonIO__MLServer-288 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider using `tobytes()` instead of `tolist()` in numpy codec for performance improvement
Our use-case is to make multiple requests to MLServer and all of them will have a 640x640 image data in numpy format as input.
Given that `tobytes()` is way faster than `tolist()` does it make sense to support it by default in the numpy codec here: https://github.com/SeldonIO/MLServer/blob/61d9f078d1f886d14083551f33db6a3146f12745/mlserver/codecs/numpy.py#L54
----
Performance comparison of the two methods:

</issue>
<code>
[start of mlserver/codecs/numpy.py]
1 import numpy as np
2
3 from ..types import RequestInput, ResponseOutput
4
5 from .base import InputCodec, register_input_codec, register_request_codec
6 from .utils import FirstInputRequestCodec
7
8 _DatatypeToNumpy = {
9 "BOOL": "bool",
10 "UINT8": "uint8",
11 "UINT16": "uint16",
12 "UINT32": "uint32",
13 "UINT64": "uint64",
14 "INT8": "int8",
15 "INT16": "int16",
16 "INT32": "int32",
17 "INT64": "int64",
18 "FP16": "float16",
19 "FP32": "float32",
20 "FP64": "float64",
21 "BYTES": "byte",
22 }
23
24 _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
25
26 # NOTE: numpy has more types than v2 protocol
27 _NumpyToDatatype["object"] = "BYTES"
28
29
30 def _to_dtype(datatype: str) -> "np.dtype":
31 dtype = _DatatypeToNumpy[datatype]
32 return np.dtype(dtype)
33
34
35 def _to_datatype(dtype: np.dtype) -> str:
36 as_str = str(dtype)
37 datatype = _NumpyToDatatype[as_str]
38
39 return datatype
40
41
42 @register_input_codec
43 class NumpyCodec(InputCodec):
44 """
45 Encodes a tensor as a numpy array.
46 """
47
48 ContentType = "np"
49
50 @classmethod
51 def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:
52 return ResponseOutput(
53 name=name,
54 datatype=_to_datatype(payload.dtype),
55 shape=list(payload.shape),
56 data=payload.flatten().tolist(),
57 )
58
59 @classmethod
60 def decode(cls, request_input: RequestInput) -> np.ndarray:
61 dtype = _to_dtype(request_input.datatype)
62 data = getattr(request_input.data, "__root__", request_input.data)
63
64 model_data = np.array(data, dtype=dtype)
65
66 # TODO: Check if reshape not valid
67 return model_data.reshape(request_input.shape)
68
69
70 @register_request_codec
71 class NumpyRequestCodec(FirstInputRequestCodec):
72 InputCodec = NumpyCodec
73 ContentType = NumpyCodec.ContentType
74
[end of mlserver/codecs/numpy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py
--- a/mlserver/codecs/numpy.py
+++ b/mlserver/codecs/numpy.py
@@ -1,5 +1,7 @@
import numpy as np
+from typing import Any
+
from ..types import RequestInput, ResponseOutput
from .base import InputCodec, register_input_codec, register_request_codec
@@ -18,27 +20,59 @@
"FP16": "float16",
"FP32": "float32",
"FP64": "float64",
- "BYTES": "byte",
+ "BYTES": "bytes",
}
_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
# NOTE: numpy has more types than v2 protocol
_NumpyToDatatype["object"] = "BYTES"
+_NumpyToDatatype["S"] = "BYTES"
+
+def _to_dtype(request_input: RequestInput) -> "np.dtype":
+ dtype = _DatatypeToNumpy[request_input.datatype]
+
+ if request_input.datatype == "BYTES":
+ # bytes have variable size, so need to specify as part of type
+ # TODO: Make elem size variable (and not just the last dimension)
+ elem_size = request_input.shape[-1]
+ return np.dtype((dtype, elem_size))
-def _to_dtype(datatype: str) -> "np.dtype":
- dtype = _DatatypeToNumpy[datatype]
return np.dtype(dtype)
def _to_datatype(dtype: np.dtype) -> str:
as_str = str(dtype)
+
+ if as_str not in _NumpyToDatatype:
+ # If not present, try with kind
+ as_str = getattr(dtype, "kind")
+
datatype = _NumpyToDatatype[as_str]
return datatype
+def _to_ndarray(request_input: RequestInput) -> np.ndarray:
+ data = getattr(request_input.data, "__root__", request_input.data)
+ dtype = _to_dtype(request_input)
+
+ if request_input.datatype == "BYTES":
+ return np.frombuffer(data, dtype)
+
+ return np.array(data, dtype)
+
+
+def _encode_data(data: np.ndarray, datatype: str) -> Any:
+ if datatype == "BYTES":
+ # tobytes is way faster than tolist, although it's harder to serialise
+ # and only makes sense for actual bytes inputs (#253)
+ return data.tobytes()
+
+ return data.flatten().tolist()
+
+
@register_input_codec
class NumpyCodec(InputCodec):
"""
@@ -49,19 +83,18 @@
@classmethod
def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:
+ datatype = _to_datatype(payload.dtype)
+
return ResponseOutput(
name=name,
- datatype=_to_datatype(payload.dtype),
+ datatype=datatype,
shape=list(payload.shape),
- data=payload.flatten().tolist(),
+ data=_encode_data(payload, datatype),
)
@classmethod
def decode(cls, request_input: RequestInput) -> np.ndarray:
- dtype = _to_dtype(request_input.datatype)
- data = getattr(request_input.data, "__root__", request_input.data)
-
- model_data = np.array(data, dtype=dtype)
+ model_data = _to_ndarray(request_input)
# TODO: Check if reshape not valid
return model_data.reshape(request_input.shape)
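
The BYTES path introduced above relies on fixed-width byte strings: `tobytes()` on the way out, `np.frombuffer()` on the way back in, with the element width taken from the last dimension of the request shape. A minimal sketch of that round trip, where the 5-byte width is purely illustrative:

```python
import numpy as np

arr = np.array([b"hello", b"world"])                             # dtype "S5"
payload = arr.tobytes()                                          # what the BYTES branch emits
restored = np.frombuffer(payload, dtype=np.dtype(("bytes", 5)))  # same dtype trick as the patch
assert (restored == arr).all()
```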
| {"golden_diff": "diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py\n--- a/mlserver/codecs/numpy.py\n+++ b/mlserver/codecs/numpy.py\n@@ -1,5 +1,7 @@\n import numpy as np\n \n+from typing import Any\n+\n from ..types import RequestInput, ResponseOutput\n \n from .base import InputCodec, register_input_codec, register_request_codec\n@@ -18,27 +20,59 @@\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n- \"BYTES\": \"byte\",\n+ \"BYTES\": \"bytes\",\n }\n \n _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n \n # NOTE: numpy has more types than v2 protocol\n _NumpyToDatatype[\"object\"] = \"BYTES\"\n+_NumpyToDatatype[\"S\"] = \"BYTES\"\n+\n \n+def _to_dtype(request_input: RequestInput) -> \"np.dtype\":\n+ dtype = _DatatypeToNumpy[request_input.datatype]\n+\n+ if request_input.datatype == \"BYTES\":\n+ # bytes have variable size, so need to specify as part of type\n+ # TODO: Make elem size variable (and not just the last dimension)\n+ elem_size = request_input.shape[-1]\n+ return np.dtype((dtype, elem_size))\n \n-def _to_dtype(datatype: str) -> \"np.dtype\":\n- dtype = _DatatypeToNumpy[datatype]\n return np.dtype(dtype)\n \n \n def _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n+\n+ if as_str not in _NumpyToDatatype:\n+ # If not present, try with kind\n+ as_str = getattr(dtype, \"kind\")\n+\n datatype = _NumpyToDatatype[as_str]\n \n return datatype\n \n \n+def _to_ndarray(request_input: RequestInput) -> np.ndarray:\n+ data = getattr(request_input.data, \"__root__\", request_input.data)\n+ dtype = _to_dtype(request_input)\n+\n+ if request_input.datatype == \"BYTES\":\n+ return np.frombuffer(data, dtype)\n+\n+ return np.array(data, dtype)\n+\n+\n+def _encode_data(data: np.ndarray, datatype: str) -> Any:\n+ if datatype == \"BYTES\":\n+ # tobytes is way faster than tolist, although it's harder to serialise\n+ # and only makes sense for actual bytes inputs (#253)\n+ return data.tobytes()\n+\n+ return data.flatten().tolist()\n+\n+\n @register_input_codec\n class NumpyCodec(InputCodec):\n \"\"\"\n@@ -49,19 +83,18 @@\n \n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n+ datatype = _to_datatype(payload.dtype)\n+\n return ResponseOutput(\n name=name,\n- datatype=_to_datatype(payload.dtype),\n+ datatype=datatype,\n shape=list(payload.shape),\n- data=payload.flatten().tolist(),\n+ data=_encode_data(payload, datatype),\n )\n \n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n- dtype = _to_dtype(request_input.datatype)\n- data = getattr(request_input.data, \"__root__\", request_input.data)\n-\n- model_data = np.array(data, dtype=dtype)\n+ model_data = _to_ndarray(request_input)\n \n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n", "issue": "Consider using `tobytes()` instead of `tolist()` in numpy codec for performance improvement\nOur use-case is to make multiple requests to MLServer and all of them will have a 640x640 image data in numpy format as input.\r\n\r\nGiven that `tobytes()` is way faster than `tolist()` does it make sense to support it by default in the numpy codec here: https://github.com/SeldonIO/MLServer/blob/61d9f078d1f886d14083551f33db6a3146f12745/mlserver/codecs/numpy.py#L54\r\n\r\n----\r\n\r\nPerformance comparison of the two methods:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom ..types import RequestInput, ResponseOutput\n\nfrom .base import InputCodec, register_input_codec, 
register_request_codec\nfrom .utils import FirstInputRequestCodec\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"byte\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n\n\ndef _to_dtype(datatype: str) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[datatype]\n return np.dtype(dtype)\n\n\ndef _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Encodes a tensor as a numpy array.\n \"\"\"\n\n ContentType = \"np\"\n\n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n return ResponseOutput(\n name=name,\n datatype=_to_datatype(payload.dtype),\n shape=list(payload.shape),\n data=payload.flatten().tolist(),\n )\n\n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n dtype = _to_dtype(request_input.datatype)\n data = getattr(request_input.data, \"__root__\", request_input.data)\n\n model_data = np.array(data, dtype=dtype)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(FirstInputRequestCodec):\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}]} | 1,431 | 797 |
gh_patches_debug_12538 | rasdani/github-patches | git_diff | kivy__python-for-android-3027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix sqlalchemy recipe
Fix sqlalchemy build errors and bump sqlalchemy from 1.3.3 to 2.0+.
</issue>
<code>
[start of pythonforandroid/recipes/sqlalchemy/__init__.py]
1 from pythonforandroid.recipe import CompiledComponentsPythonRecipe
2
3
4 class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):
5 name = 'sqlalchemy'
6 version = '1.3.3'
7 url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'
8 call_hostpython_via_targetpython = False
9
10 depends = ['setuptools']
11
12 patches = ['zipsafe.patch']
13
14
15 recipe = SQLAlchemyRecipe()
16
[end of pythonforandroid/recipes/sqlalchemy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/sqlalchemy/__init__.py b/pythonforandroid/recipes/sqlalchemy/__init__.py
--- a/pythonforandroid/recipes/sqlalchemy/__init__.py
+++ b/pythonforandroid/recipes/sqlalchemy/__init__.py
@@ -1,15 +1,15 @@
-from pythonforandroid.recipe import CompiledComponentsPythonRecipe
+from pythonforandroid.recipe import PyProjectRecipe
-class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):
+class SQLAlchemyRecipe(PyProjectRecipe):
name = 'sqlalchemy'
- version = '1.3.3'
- url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'
- call_hostpython_via_targetpython = False
-
+ version = '2.0.30'
+ url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'
depends = ['setuptools']
- patches = ['zipsafe.patch']
+ @property
+ def versioned_url(self):
+ return self.url.format(self.version.replace(".", "_"))
recipe = SQLAlchemyRecipe()
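
The `versioned_url` property above maps the dotted version onto SQLAlchemy's GitHub tag naming. A quick illustration that mirrors the added property:

```python
version = "2.0.30"
url = "https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz"
print(url.format(version.replace(".", "_")))
# https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_2_0_30.tar.gz
```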
| {"golden_diff": "diff --git a/pythonforandroid/recipes/sqlalchemy/__init__.py b/pythonforandroid/recipes/sqlalchemy/__init__.py\n--- a/pythonforandroid/recipes/sqlalchemy/__init__.py\n+++ b/pythonforandroid/recipes/sqlalchemy/__init__.py\n@@ -1,15 +1,15 @@\n-from pythonforandroid.recipe import CompiledComponentsPythonRecipe\n+from pythonforandroid.recipe import PyProjectRecipe\n \n \n-class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):\n+class SQLAlchemyRecipe(PyProjectRecipe):\n name = 'sqlalchemy'\n- version = '1.3.3'\n- url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'\n- call_hostpython_via_targetpython = False\n-\n+ version = '2.0.30'\n+ url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'\n depends = ['setuptools']\n \n- patches = ['zipsafe.patch']\n+ @property\n+ def versioned_url(self):\n+ return self.url.format(self.version.replace(\".\", \"_\"))\n \n \n recipe = SQLAlchemyRecipe()\n", "issue": "Fix sqlalchemy recipe\nFix sqlalchemy build errors and bump sqlalchemy from 1.3.3 to 2.0+.\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import CompiledComponentsPythonRecipe\n\n\nclass SQLAlchemyRecipe(CompiledComponentsPythonRecipe):\n name = 'sqlalchemy'\n version = '1.3.3'\n url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'\n call_hostpython_via_targetpython = False\n\n depends = ['setuptools']\n\n patches = ['zipsafe.patch']\n\n\nrecipe = SQLAlchemyRecipe()\n", "path": "pythonforandroid/recipes/sqlalchemy/__init__.py"}]} | 696 | 250 |
gh_patches_debug_5816 | rasdani/github-patches | git_diff | pulp__pulpcore-4684 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
file:// sync deletes files from directory
**Version**
Pulpcore 3.39
**Describe the bug**
When syncing file:// repositories, files are disappearing after the sync.
**To Reproduce**
1) Copy these two repositories to the FS:
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2
2) Sync one, then the other
3) See that some files disappeared.
- In my case, file2 lost every file except PULP_MANIFEST
**Expected behavior**
No files disappear.
**Additional context**
This also occurred with RPM content type files.
</issue>
<code>
[start of pulpcore/download/file.py]
1 import os
2
3 from urllib.parse import urlparse
4
5 import aiofiles
6
7 from .base import BaseDownloader, DownloadResult
8
9
10 class FileDownloader(BaseDownloader):
11 """
12 A downloader for downloading files from the filesystem.
13
14 It provides digest and size validation along with computation of the digests needed to save the
15 file as an Artifact. It writes a new file to the disk and the return path is included in the
16 :class:`~pulpcore.plugin.download.DownloadResult`.
17
18 This downloader has all of the attributes of
19 :class:`~pulpcore.plugin.download.BaseDownloader`
20 """
21
22 def __init__(self, url, *args, **kwargs):
23 """
24 Download files from a url that starts with `file://`
25
26 Args:
27 url (str): The url to the file. This is expected to begin with `file://`
28 kwargs (dict): This accepts the parameters of
29 :class:`~pulpcore.plugin.download.BaseDownloader`.
30
31 Raises:
32 ValidationError: When the url starts with `file://`, but is not a subfolder of a path in
33 the ALLOWED_IMPORT_PATH setting.
34 """
35 from pulpcore.app.serializers import RemoteSerializer
36
37 RemoteSerializer().validate_url(url)
38 p = urlparse(url)
39 self._path = os.path.abspath(os.path.join(p.netloc, p.path))
40 super().__init__(url, *args, **kwargs)
41
42 async def _run(self, extra_data=None):
43 """
44 Read, validate, and compute digests on the `url`. This is a coroutine.
45
46 This method provides the same return object type and documented in
47 :meth:`~pulpcore.plugin.download.BaseDownloader._run`.
48
49 Args:
50 extra_data (dict): Extra data passed to the downloader.
51 """
52 async with aiofiles.open(self._path, "rb") as f_handle:
53 while True:
54 chunk = await f_handle.read(1048576) # 1 megabyte
55 if not chunk:
56 await self.finalize()
57 break # the reading is done
58 await self.handle_data(chunk)
59 return DownloadResult(
60 path=self._path,
61 artifact_attributes=self.artifact_attributes,
62 url=self.url,
63 headers=None,
64 )
65
[end of pulpcore/download/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py
--- a/pulpcore/download/file.py
+++ b/pulpcore/download/file.py
@@ -57,7 +57,7 @@
break # the reading is done
await self.handle_data(chunk)
return DownloadResult(
- path=self._path,
+ path=self.path,
artifact_attributes=self.artifact_attributes,
url=self.url,
headers=None,
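
The one-word change matters because the two attributes point at different files: `self._path`, set in `__init__`, is the original `file://` source, while `self.path` appears to be the temporary artifact that the base downloader writes during the download. Returning the source path meant later cleanup of downloaded artifacts could delete the user's own files, which matches the reported symptom. A sketch of the distinction, with names as used in these downloader classes:

```python
# self.path   -> temp copy owned by Pulp, safe for later cleanup (assumed semantics)
# self._path  -> the original file:// source, which must never be removed
return DownloadResult(
    path=self.path,
    artifact_attributes=self.artifact_attributes,
    url=self.url,
    headers=None,
)
```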
| {"golden_diff": "diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py\n--- a/pulpcore/download/file.py\n+++ b/pulpcore/download/file.py\n@@ -57,7 +57,7 @@\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n- path=self._path,\n+ path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n", "issue": "file:// sync deletes files from directory\n**Version**\r\nPulpcore 3.39\r\n\r\n**Describe the bug**\r\nWhen syncing file:// repositories, files are disappearing after the sync.\r\n\r\n**To Reproduce**\r\n1) Copy these two repositories to the FS:\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2\r\n2) Sync one, then the other\r\n3) See that some files disappeared.\r\n - In my case, file2 lost every file except PULP_MANIFEST\r\n\r\n\r\n**Expected behavior**\r\nNo files disappear.\r\n\r\n**Additional context**\r\nThis also occurred with RPM content type files.\r\n\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self._path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}]} | 1,292 | 100 |
gh_patches_debug_3254 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-43 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django: CursorWrapper expects a sequence or None for params
As seen in this partial traceback:
```
File "python3.6/site-packages/scout_apm/django/instruments/sql.py", line 29, in execute
return self.cursor.execute(sql, params)
File "python3.6/site-packages/django/db/backends/utils.py", line 80, in execute
return super(CursorDebugWrapper, self).execute(sql, params)
File "python3.6/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
IndexError: tuple index out of range
```
Currently making a PR to hopefully fix =)
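
For context, Django's cursor wrappers treat `params=None` and an empty sequence differently: with `None` the SQL is passed through untouched, while any sequence, even `()`, makes the backend interpret `%` characters as placeholders, which is where the `IndexError` can come from. A simplified illustration of the distinction, not Django's actual source:

```python
def execute(self, sql, params=None):
    if params is None:
        return self.cursor.execute(sql)       # no parameter substitution at all
    return self.cursor.execute(sql, params)   # '%s' placeholders are expected

# execute("SELECT '100%'")             -> fine when params is None
# execute("SELECT '100%'", params=())  -> IndexError: tuple index out of range
```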
</issue>
<code>
[start of src/scout_apm/django/instruments/sql.py]
1 from __future__ import absolute_import
2 import logging
3
4 from scout_apm.core.monkey import monkeypatch_method
5 from scout_apm.core.tracked_request import TrackedRequest
6
7 try:
8 from django.db.backends.base.base import BaseDatabaseWrapper
9 except ImportError:
10 # Backwards compatibility for Django <1.8
11 from django.db.backends import BaseDatabaseWrapper
12
13 try:
14 from django.db.backends.utils import CursorWrapper
15 except ImportError:
16 # Backwards compatibility for Django <1.9
17 from django.db.backends.util import CursorWrapper
18
19 logger = logging.getLogger(__name__)
20
21
22 class _DetailedTracingCursorWrapper(CursorWrapper):
23 def execute(self, sql, params=()):
24 tr = TrackedRequest.instance()
25 span = tr.start_span(operation='SQL/Query')
26 span.tag('db.statement', sql)
27
28 try:
29 return self.cursor.execute(sql, params)
30 finally:
31 tr.stop_span()
32
33 def executemany(self, sql, param_list):
34 span = TrackedRequest.instance().start_span(operation='SQL/Many')
35 span.tag('db.statement', sql)
36
37 try:
38 return self.cursor.executemany(sql, param_list)
39 finally:
40 TrackedRequest.instance().stop_span()
41
42
43 # pylint: disable=too-few-public-methods
44 class SQLInstrument:
45
46 # The linter thinks the methods we monkeypatch are not used
47 # pylint: disable=W0612
48 # pylint: disable=no-method-argument
49 @staticmethod
50 def install():
51 """
52 Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`
53 method of BaseDatabaseWrapper, to return a wrapper that instruments any
54 calls going through it.
55 """
56 @monkeypatch_method(BaseDatabaseWrapper)
57 def cursor(original, self, *args, **kwargs):
58 result = original(*args, **kwargs)
59 return _DetailedTracingCursorWrapper(result, self)
60
61 logger.debug('Monkey patched SQL')
62
[end of src/scout_apm/django/instruments/sql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/django/instruments/sql.py b/src/scout_apm/django/instruments/sql.py
--- a/src/scout_apm/django/instruments/sql.py
+++ b/src/scout_apm/django/instruments/sql.py
@@ -20,7 +20,7 @@
class _DetailedTracingCursorWrapper(CursorWrapper):
- def execute(self, sql, params=()):
+ def execute(self, sql, params=None):
tr = TrackedRequest.instance()
span = tr.start_span(operation='SQL/Query')
span.tag('db.statement', sql)
| {"golden_diff": "diff --git a/src/scout_apm/django/instruments/sql.py b/src/scout_apm/django/instruments/sql.py\n--- a/src/scout_apm/django/instruments/sql.py\n+++ b/src/scout_apm/django/instruments/sql.py\n@@ -20,7 +20,7 @@\n \n \n class _DetailedTracingCursorWrapper(CursorWrapper):\n- def execute(self, sql, params=()):\n+ def execute(self, sql, params=None):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n", "issue": "Django: CursorWrapper expects a sequence or None for params \nAs seen in this partial traceback:\r\n```\r\n File \"python3.6/site-packages/scout_apm/django/instruments/sql.py\", line 29, in execute\r\n return self.cursor.execute(sql, params)\r\n File \"python3.6/site-packages/django/db/backends/utils.py\", line 80, in execute\r\n return super(CursorDebugWrapper, self).execute(sql, params)\r\n File \"python3.6/site-packages/django/db/backends/utils.py\", line 65, in execute\r\n return self.cursor.execute(sql, params)\r\nIndexError: tuple index out of range\r\n```\r\nCurrently making a PR to hopefully fix =)\n", "before_files": [{"content": "from __future__ import absolute_import\nimport logging\n\nfrom scout_apm.core.monkey import monkeypatch_method\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from django.db.backends.base.base import BaseDatabaseWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.8\n from django.db.backends import BaseDatabaseWrapper\n\ntry:\n from django.db.backends.utils import CursorWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.9\n from django.db.backends.util import CursorWrapper\n\nlogger = logging.getLogger(__name__)\n\n\nclass _DetailedTracingCursorWrapper(CursorWrapper):\n def execute(self, sql, params=()):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.execute(sql, params)\n finally:\n tr.stop_span()\n\n def executemany(self, sql, param_list):\n span = TrackedRequest.instance().start_span(operation='SQL/Many')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.executemany(sql, param_list)\n finally:\n TrackedRequest.instance().stop_span()\n\n\n# pylint: disable=too-few-public-methods\nclass SQLInstrument:\n\n # The linter thinks the methods we monkeypatch are not used\n # pylint: disable=W0612\n # pylint: disable=no-method-argument\n @staticmethod\n def install():\n \"\"\"\n Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`\n method of BaseDatabaseWrapper, to return a wrapper that instruments any\n calls going through it.\n \"\"\"\n @monkeypatch_method(BaseDatabaseWrapper)\n def cursor(original, self, *args, **kwargs):\n result = original(*args, **kwargs)\n return _DetailedTracingCursorWrapper(result, self)\n\n logger.debug('Monkey patched SQL')\n", "path": "src/scout_apm/django/instruments/sql.py"}]} | 1,239 | 133 |
gh_patches_debug_28327 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9482 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods spread across different files. All of these should be moved into one file.
</issue>
<code>
[start of backtracking/minmax.py]
1 """
2 Minimax helps to achieve maximum score in a game by checking all possible moves.
3
4 """
5 from __future__ import annotations
6
7 import math
8
9
10 def minimax(
11 depth: int, node_index: int, is_max: bool, scores: list[int], height: float
12 ) -> int:
13 """
14 depth is current depth in game tree.
15 node_index is index of current node in scores[].
16 scores[] contains the leaves of game tree.
17 height is maximum height of game tree.
18
19 >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
20 >>> height = math.log(len(scores), 2)
21 >>> minimax(0, 0, True, scores, height)
22 65
23 >>> minimax(-1, 0, True, scores, height)
24 Traceback (most recent call last):
25 ...
26 ValueError: Depth cannot be less than 0
27 >>> minimax(0, 0, True, [], 2)
28 Traceback (most recent call last):
29 ...
30 ValueError: Scores cannot be empty
31 >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
32 >>> height = math.log(len(scores), 2)
33 >>> minimax(0, 0, True, scores, height)
34 12
35 """
36
37 if depth < 0:
38 raise ValueError("Depth cannot be less than 0")
39
40 if not scores:
41 raise ValueError("Scores cannot be empty")
42
43 if depth == height:
44 return scores[node_index]
45
46 return (
47 max(
48 minimax(depth + 1, node_index * 2, False, scores, height),
49 minimax(depth + 1, node_index * 2 + 1, False, scores, height),
50 )
51 if is_max
52 else min(
53 minimax(depth + 1, node_index * 2, True, scores, height),
54 minimax(depth + 1, node_index * 2 + 1, True, scores, height),
55 )
56 )
57
58
59 def main() -> None:
60 scores = [90, 23, 6, 33, 21, 65, 123, 34423]
61 height = math.log(len(scores), 2)
62 print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
63
64
65 if __name__ == "__main__":
66 import doctest
67
68 doctest.testmod()
69 main()
70
[end of backtracking/minmax.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backtracking/minmax.py b/backtracking/minmax.py
deleted file mode 100644
--- a/backtracking/minmax.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-Minimax helps to achieve maximum score in a game by checking all possible moves.
-
-"""
-from __future__ import annotations
-
-import math
-
-
-def minimax(
- depth: int, node_index: int, is_max: bool, scores: list[int], height: float
-) -> int:
- """
- depth is current depth in game tree.
- node_index is index of current node in scores[].
- scores[] contains the leaves of game tree.
- height is maximum height of game tree.
-
- >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
- >>> height = math.log(len(scores), 2)
- >>> minimax(0, 0, True, scores, height)
- 65
- >>> minimax(-1, 0, True, scores, height)
- Traceback (most recent call last):
- ...
- ValueError: Depth cannot be less than 0
- >>> minimax(0, 0, True, [], 2)
- Traceback (most recent call last):
- ...
- ValueError: Scores cannot be empty
- >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
- >>> height = math.log(len(scores), 2)
- >>> minimax(0, 0, True, scores, height)
- 12
- """
-
- if depth < 0:
- raise ValueError("Depth cannot be less than 0")
-
- if not scores:
- raise ValueError("Scores cannot be empty")
-
- if depth == height:
- return scores[node_index]
-
- return (
- max(
- minimax(depth + 1, node_index * 2, False, scores, height),
- minimax(depth + 1, node_index * 2 + 1, False, scores, height),
- )
- if is_max
- else min(
- minimax(depth + 1, node_index * 2, True, scores, height),
- minimax(depth + 1, node_index * 2 + 1, True, scores, height),
- )
- )
-
-
-def main() -> None:
- scores = [90, 23, 6, 33, 21, 65, 123, 34423]
- height = math.log(len(scores), 2)
- print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
- main()
| {"golden_diff": "diff --git a/backtracking/minmax.py b/backtracking/minmax.py\ndeleted file mode 100644\n--- a/backtracking/minmax.py\n+++ /dev/null\n@@ -1,69 +0,0 @@\n-\"\"\"\n-Minimax helps to achieve maximum score in a game by checking all possible moves.\n-\n-\"\"\"\n-from __future__ import annotations\n-\n-import math\n-\n-\n-def minimax(\n- depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n-) -> int:\n- \"\"\"\n- depth is current depth in game tree.\n- node_index is index of current node in scores[].\n- scores[] contains the leaves of game tree.\n- height is maximum height of game tree.\n-\n- >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n- >>> height = math.log(len(scores), 2)\n- >>> minimax(0, 0, True, scores, height)\n- 65\n- >>> minimax(-1, 0, True, scores, height)\n- Traceback (most recent call last):\n- ...\n- ValueError: Depth cannot be less than 0\n- >>> minimax(0, 0, True, [], 2)\n- Traceback (most recent call last):\n- ...\n- ValueError: Scores cannot be empty\n- >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]\n- >>> height = math.log(len(scores), 2)\n- >>> minimax(0, 0, True, scores, height)\n- 12\n- \"\"\"\n-\n- if depth < 0:\n- raise ValueError(\"Depth cannot be less than 0\")\n-\n- if not scores:\n- raise ValueError(\"Scores cannot be empty\")\n-\n- if depth == height:\n- return scores[node_index]\n-\n- return (\n- max(\n- minimax(depth + 1, node_index * 2, False, scores, height),\n- minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n- )\n- if is_max\n- else min(\n- minimax(depth + 1, node_index * 2, True, scores, height),\n- minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n- )\n- )\n-\n-\n-def main() -> None:\n- scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n- height = math.log(len(scores), 2)\n- print(f\"Optimal value : {minimax(0, 0, True, scores, height)}\")\n-\n-\n-if __name__ == \"__main__\":\n- import doctest\n-\n- doctest.testmod()\n- main()\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. 
All these should be moved into one file\n", "before_files": [{"content": "\"\"\"\nMinimax helps to achieve maximum score in a game by checking all possible moves.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport math\n\n\ndef minimax(\n depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n) -> int:\n \"\"\"\n depth is current depth in game tree.\n node_index is index of current node in scores[].\n scores[] contains the leaves of game tree.\n height is maximum height of game tree.\n\n >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n >>> height = math.log(len(scores), 2)\n >>> minimax(0, 0, True, scores, height)\n 65\n >>> minimax(-1, 0, True, scores, height)\n Traceback (most recent call last):\n ...\n ValueError: Depth cannot be less than 0\n >>> minimax(0, 0, True, [], 2)\n Traceback (most recent call last):\n ...\n ValueError: Scores cannot be empty\n >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]\n >>> height = math.log(len(scores), 2)\n >>> minimax(0, 0, True, scores, height)\n 12\n \"\"\"\n\n if depth < 0:\n raise ValueError(\"Depth cannot be less than 0\")\n\n if not scores:\n raise ValueError(\"Scores cannot be empty\")\n\n if depth == height:\n return scores[node_index]\n\n return (\n max(\n minimax(depth + 1, node_index * 2, False, scores, height),\n minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n )\n if is_max\n else min(\n minimax(depth + 1, node_index * 2, True, scores, height),\n minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n )\n )\n\n\ndef main() -> None:\n scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n height = math.log(len(scores), 2)\n print(f\"Optimal value : {minimax(0, 0, True, scores, height)}\")\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n main()\n", "path": "backtracking/minmax.py"}]} | 1,290 | 681 |
gh_patches_debug_25689 | rasdani/github-patches | git_diff | searx__searx-542 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reddit: cannot view image
- search: !re wallpaper dark blue
- select some image
- click "View Image" button
Then the image should be shown, but the searx start page is loaded instead. "View Page" shows the image. This is because image == page, but this is an inconsistency (sort of)
</issue>
<code>
[start of searx/engines/reddit.py]
1 """
2 Reddit
3
4 @website https://www.reddit.com/
5 @provide-api yes (https://www.reddit.com/dev/api)
6
7 @using-api yes
8 @results JSON
9 @stable yes
10 @parse url, title, content, thumbnail, publishedDate
11 """
12
13 import json
14 from cgi import escape
15 from urllib import urlencode
16 from urlparse import urlparse
17 from datetime import datetime
18
19 # engine dependent config
20 categories = ['general', 'images', 'news', 'social media']
21 page_size = 25
22
23 # search-url
24 search_url = 'https://www.reddit.com/search.json?{query}'
25
26
27 # do search-request
28 def request(query, params):
29 query = urlencode({'q': query,
30 'limit': page_size})
31 params['url'] = search_url.format(query=query)
32
33 return params
34
35
36 # get response from search-request
37 def response(resp):
38 img_results = []
39 text_results = []
40
41 search_results = json.loads(resp.text)
42
43 # return empty array if there are no results
44 if 'data' not in search_results:
45 return []
46
47 posts = search_results.get('data', {}).get('children', [])
48
49 # process results
50 for post in posts:
51 data = post['data']
52
53 # extract post information
54 params = {
55 'url': data['url'],
56 'title': data['title']
57 }
58
59 # if thumbnail field contains a valid URL, we need to change template
60 thumbnail = data['thumbnail']
61 url_info = urlparse(thumbnail)
62 # netloc & path
63 if url_info[1] != '' and url_info[2] != '':
64 params['thumbnail_src'] = thumbnail
65 params['template'] = 'images.html'
66 img_results.append(params)
67 else:
68 created = datetime.fromtimestamp(data['created_utc'])
69 content = escape(data['selftext'])
70 if len(content) > 500:
71 content = content[:500] + '...'
72 params['content'] = content
73 params['publishedDate'] = created
74 text_results.append(params)
75
76 # show images first and text results second
77 return img_results + text_results
78
[end of searx/engines/reddit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
--- a/searx/engines/reddit.py
+++ b/searx/engines/reddit.py
@@ -13,7 +13,7 @@
import json
from cgi import escape
from urllib import urlencode
-from urlparse import urlparse
+from urlparse import urlparse, urljoin
from datetime import datetime
# engine dependent config
@@ -21,7 +21,8 @@
page_size = 25
# search-url
-search_url = 'https://www.reddit.com/search.json?{query}'
+base_url = 'https://www.reddit.com/'
+search_url = base_url + 'search.json?{query}'
# do search-request
@@ -52,7 +53,7 @@
# extract post information
params = {
- 'url': data['url'],
+ 'url': urljoin(base_url, data['permalink']),
'title': data['title']
}
@@ -61,6 +62,7 @@
url_info = urlparse(thumbnail)
# netloc & path
if url_info[1] != '' and url_info[2] != '':
+ params['img_src'] = data['url']
params['thumbnail_src'] = thumbnail
params['template'] = 'images.html'
img_results.append(params)
| {"golden_diff": "diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py\n--- a/searx/engines/reddit.py\n+++ b/searx/engines/reddit.py\n@@ -13,7 +13,7 @@\n import json\n from cgi import escape\n from urllib import urlencode\n-from urlparse import urlparse\n+from urlparse import urlparse, urljoin\n from datetime import datetime\n \n # engine dependent config\n@@ -21,7 +21,8 @@\n page_size = 25\n \n # search-url\n-search_url = 'https://www.reddit.com/search.json?{query}'\n+base_url = 'https://www.reddit.com/'\n+search_url = base_url + 'search.json?{query}'\n \n \n # do search-request\n@@ -52,7 +53,7 @@\n \n # extract post information\n params = {\n- 'url': data['url'],\n+ 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n \n@@ -61,6 +62,7 @@\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n+ params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n", "issue": "reddit: cannot view image\n- search: !re wallpaper dark blue\n- select some image\n- click \"View Image\" button\n\nThen the image should be shown but the searx start page is loaded. \"View Page\" shows the image. This is because image==page but this is an inconsistency (sort of)\n\n", "before_files": [{"content": "\"\"\"\n Reddit\n\n @website https://www.reddit.com/\n @provide-api yes (https://www.reddit.com/dev/api)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content, thumbnail, publishedDate\n\"\"\"\n\nimport json\nfrom cgi import escape\nfrom urllib import urlencode\nfrom urlparse import urlparse\nfrom datetime import datetime\n\n# engine dependent config\ncategories = ['general', 'images', 'news', 'social media']\npage_size = 25\n\n# search-url\nsearch_url = 'https://www.reddit.com/search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query,\n 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': data['url'],\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = escape(data['selftext'])\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}]} | 1,233 | 305 |
gh_patches_debug_20819 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect insights.components.rhel_version module doc
The [example](https://github.com/RedHatInsights/insights-core/blob/master/insights/components/rhel_version.py#L17) in the insights.components.rhel_version pydoc doesn't work. The objects don't have a `rhel_version` attribute.
</issue>
<code>
[start of insights/components/rhel_version.py]
1 """
2 IsRhel6, IsRhel7 and IsRhel8
3 ===============================
4
5 The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to
6 retrieve the RHEL version information.
7 Each component checks if the release version matches the version it represents,
8 if the version does not match what is expected the class raises ``SkipComponent``
9 so that the dependent component will not fire.
10 Can be added as a dependency of a parser so that the parser only fires if the
11 ``IsRhel*`` dependency is met.
12
13 An example from the following ``/etc/redhat_release`` file output::
14
15 Red Hat Enterprise Linux release 8.0 (Ootpa)
16
17 Example:
18
19 >>> type(IsRhel8)
20 <class 'insights.components.rhel_version.Is_Rhel8'>
21 >>> is_rhel8.rhel_version
22 '8.0'
23 """
24
25 from insights.core.plugins import component
26 from insights.combiners.redhat_release import RedHatRelease
27 from insights.core.dr import SkipComponent
28
29
30 @component(RedHatRelease)
31 class IsRhel6(object):
32 """
33 This component uses ``RedHatRelease`` combiner
34 to determine RHEL version. It checks if RHEL6, if not
35 RHEL6 it raises ``SkipComponent``.
36
37 Raises:
38 SkipComponent: When RHEL version is not RHEL6.
39 """
40 def __init__(self, rhel):
41 if rhel.major != 6:
42 raise SkipComponent('Not RHEL6')
43
44
45 @component(RedHatRelease)
46 class IsRhel7(object):
47 """
48 This component uses ``RedHatRelease`` combiner
49 to determine RHEL version. It checks if RHEL7, if not \
50 RHEL7 it raises ``SkipComponent``.
51
52 Raises:
53 SkipComponent: When RHEL version is not RHEL7.
54 """
55 def __init__(self, rhel):
56 if rhel.major != 7:
57 raise SkipComponent('Not RHEL7')
58
59
60 @component(RedHatRelease)
61 class IsRhel8(object):
62 """
63 This component uses ``RedhatRelease`` combiner
64 to determine RHEL version. It checks if RHEL8, if not
65 RHEL8 it raises ``SkipComponent``.
66
67 Raises:
68 SkipComponent: When RHEL version is not RHEL8.
69 """
70 def __init__(self, rhel):
71 if rhel.major != 8:
72 raise SkipComponent('Not RHEL8')
73
[end of insights/components/rhel_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py
--- a/insights/components/rhel_version.py
+++ b/insights/components/rhel_version.py
@@ -2,24 +2,14 @@
IsRhel6, IsRhel7 and IsRhel8
===============================
-The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to
-retrieve the RHEL version information.
-Each component checks if the release version matches the version it represents,
-if the version does not match what is expected the class raises ``SkipComponent``
-so that the dependent component will not fire.
-Can be added as a dependency of a parser so that the parser only fires if the
-``IsRhel*`` dependency is met.
-
-An example from the following ``/etc/redhat_release`` file output::
-
- Red Hat Enterprise Linux release 8.0 (Ootpa)
-
-Example:
-
- >>> type(IsRhel8)
- <class 'insights.components.rhel_version.Is_Rhel8'>
- >>> is_rhel8.rhel_version
- '8.0'
+An ``IsRhel*`` component is valid if the
+:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates
+the major RHEL version represented by the component. Otherwise, it raises a
+:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from
+executing.
+
+In particular, an ``IsRhel*`` component can be added as a dependency of a
+parser to limit it to a given version.
"""
from insights.core.plugins import component
| {"golden_diff": "diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py\n--- a/insights/components/rhel_version.py\n+++ b/insights/components/rhel_version.py\n@@ -2,24 +2,14 @@\n IsRhel6, IsRhel7 and IsRhel8\n ===============================\n \n-The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to\n-retrieve the RHEL version information.\n-Each component checks if the release version matches the version it represents,\n-if the version does not match what is expected the class raises ``SkipComponent``\n-so that the dependent component will not fire.\n-Can be added as a dependency of a parser so that the parser only fires if the\n-``IsRhel*`` dependency is met.\n-\n-An example from the following ``/etc/redhat_release`` file output::\n-\n- Red Hat Enterprise Linux release 8.0 (Ootpa)\n-\n-Example:\n-\n- >>> type(IsRhel8)\n- <class 'insights.components.rhel_version.Is_Rhel8'>\n- >>> is_rhel8.rhel_version\n- '8.0'\n+An ``IsRhel*`` component is valid if the\n+:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates\n+the major RHEL version represented by the component. Otherwise, it raises a\n+:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from\n+executing.\n+\n+In particular, an ``IsRhel*`` component can be added as a dependency of a\n+parser to limit it to a given version.\n \"\"\"\n \n from insights.core.plugins import component\n", "issue": "Incorrect insights.components.rhel_version module doc\nThe [example](https://github.com/RedHatInsights/insights-core/blob/master/insights/components/rhel_version.py#L17) in the insights.components.rhel_version pydoc doesn't work. The objects don't have a `rhel_version` attribute.\n", "before_files": [{"content": "\"\"\"\nIsRhel6, IsRhel7 and IsRhel8\n===============================\n\nThe ``IsRhel*`` components each use the ``RedhatRelease`` combiner to\nretrieve the RHEL version information.\nEach component checks if the release version matches the version it represents,\nif the version does not match what is expected the class raises ``SkipComponent``\nso that the dependent component will not fire.\nCan be added as a dependency of a parser so that the parser only fires if the\n``IsRhel*`` dependency is met.\n\nAn example from the following ``/etc/redhat_release`` file output::\n\n Red Hat Enterprise Linux release 8.0 (Ootpa)\n\nExample:\n\n >>> type(IsRhel8)\n <class 'insights.components.rhel_version.Is_Rhel8'>\n >>> is_rhel8.rhel_version\n '8.0'\n\"\"\"\n\nfrom insights.core.plugins import component\nfrom insights.combiners.redhat_release import RedHatRelease\nfrom insights.core.dr import SkipComponent\n\n\n@component(RedHatRelease)\nclass IsRhel6(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL6, if not\n RHEL6 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL6.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 6:\n raise SkipComponent('Not RHEL6')\n\n\n@component(RedHatRelease)\nclass IsRhel7(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. 
It checks if RHEL7, if not \\\n RHEL7 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL7.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 7:\n raise SkipComponent('Not RHEL7')\n\n\n@component(RedHatRelease)\nclass IsRhel8(object):\n \"\"\"\n This component uses ``RedhatRelease`` combiner\n to determine RHEL version. It checks if RHEL8, if not\n RHEL8 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL8.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 8:\n raise SkipComponent('Not RHEL8')\n", "path": "insights/components/rhel_version.py"}]} | 1,280 | 366 |
gh_patches_debug_20533 | rasdani/github-patches | git_diff | nilearn__nilearn-2264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strange comment in ICA example
This is the comment:
# XXX: must get the code to run for more than 1 subject
And this is the link to the example:
https://nilearn.github.io/auto_examples/05_advanced/plot_ica_resting_state.html
The comment appears to relate to an internal todo rather than a comment/instruction to users.
</issue>
<code>
[start of examples/05_advanced/plot_ica_resting_state.py]
1 """
2 Multivariate decompositions: Independent component analysis of fMRI
3 ===================================================================
4
5
6 This example is meant to demonstrate nilearn as a low-level tools used to
7 combine feature extraction with a multivariate decomposition algorithm
8 for movie-watching.
9
10 This example is a toy. To apply ICA to fmri timeseries data, it is advised
11 to look at the example
12 :ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.
13
14 The example here applies the scikit-learn ICA to movie watching timeseries data.
15 Note that following the code in the example, any unsupervised
16 decomposition model, or other latent-factor models, can be applied to
17 the data, as the scikit-learn API enables to exchange them as almost
18 black box (though the relevant parameter for brain maps might no longer
19 be given by a call to fit_transform).
20
21 """
22
23 #####################################################################
24 # Load movie watching dataset
25 from nilearn import datasets
26 # Here we use only single subject to get faster-running code. For better
27 # results, simply increase this number
28 # XXX: must get the code to run for more than 1 subject
29 dataset = datasets.fetch_development_fmri(n_subjects=1)
30 func_filename = dataset.func[0]
31
32 # print basic information on the dataset
33 print('First subject functional nifti image (4D) is at: %s' %
34 dataset.func[0]) # 4D data
35
36
37 #####################################################################
38 # Preprocess
39 from nilearn.input_data import NiftiMasker
40
41 # This is fmri timeseries data: the background has not been removed yet,
42 # thus we need to use mask_strategy='epi' to compute the mask from the
43 # EPI images
44 masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,
45 mask_strategy='epi', standardize=True)
46 data_masked = masker.fit_transform(func_filename)
47
48 # Concatenate all the subjects
49 # fmri_data = np.concatenate(data_masked, axis=1)
50 fmri_data = data_masked
51
52
53 #####################################################################
54 # Apply ICA
55
56 from sklearn.decomposition import FastICA
57 n_components = 10
58 ica = FastICA(n_components=n_components, random_state=42)
59 components_masked = ica.fit_transform(data_masked.T).T
60
61 # Normalize estimated components, for thresholding to make sense
62 components_masked -= components_masked.mean(axis=0)
63 components_masked /= components_masked.std(axis=0)
64 # Threshold
65 import numpy as np
66 components_masked[np.abs(components_masked) < .8] = 0
67
68 # Now invert the masking operation, going back to a full 3D
69 # representation
70 component_img = masker.inverse_transform(components_masked)
71
72 #####################################################################
73 # Visualize the results
74
75 # Show some interesting components
76 from nilearn import image
77 from nilearn.plotting import plot_stat_map, show
78
79 # Use the mean as a background
80 mean_img = image.mean_img(func_filename)
81
82 plot_stat_map(image.index_img(component_img, 0), mean_img)
83
84 plot_stat_map(image.index_img(component_img, 1), mean_img)
85
86 show()
87
[end of examples/05_advanced/plot_ica_resting_state.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py
--- a/examples/05_advanced/plot_ica_resting_state.py
+++ b/examples/05_advanced/plot_ica_resting_state.py
@@ -23,9 +23,7 @@
#####################################################################
# Load movie watching dataset
from nilearn import datasets
-# Here we use only single subject to get faster-running code. For better
-# results, simply increase this number
-# XXX: must get the code to run for more than 1 subject
+# Here we use only single subject to get faster-running code.
dataset = datasets.fetch_development_fmri(n_subjects=1)
func_filename = dataset.func[0]
@@ -45,10 +43,6 @@
mask_strategy='epi', standardize=True)
data_masked = masker.fit_transform(func_filename)
-# Concatenate all the subjects
-# fmri_data = np.concatenate(data_masked, axis=1)
-fmri_data = data_masked
-
#####################################################################
# Apply ICA
| {"golden_diff": "diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py\n--- a/examples/05_advanced/plot_ica_resting_state.py\n+++ b/examples/05_advanced/plot_ica_resting_state.py\n@@ -23,9 +23,7 @@\n #####################################################################\n # Load movie watching dataset\n from nilearn import datasets\n-# Here we use only single subject to get faster-running code. For better\n-# results, simply increase this number\n-# XXX: must get the code to run for more than 1 subject\n+# Here we use only single subject to get faster-running code.\n dataset = datasets.fetch_development_fmri(n_subjects=1)\n func_filename = dataset.func[0]\n \n@@ -45,10 +43,6 @@\n mask_strategy='epi', standardize=True)\n data_masked = masker.fit_transform(func_filename)\n \n-# Concatenate all the subjects\n-# fmri_data = np.concatenate(data_masked, axis=1)\n-fmri_data = data_masked\n-\n \n #####################################################################\n # Apply ICA\n", "issue": "Strange comment in ICA example\nThis is the comment:\r\n# XXX: must get the code to run for more than 1 subject\r\nAnd this is the link to the example:\r\nhttps://nilearn.github.io/auto_examples/05_advanced/plot_ica_resting_state.html\r\n\r\nThe comment appears to relate to an internal todo rather than comment/instruction to users.\n", "before_files": [{"content": "\"\"\"\nMultivariate decompositions: Independent component analysis of fMRI\n===================================================================\n\n\nThis example is meant to demonstrate nilearn as a low-level tools used to\ncombine feature extraction with a multivariate decomposition algorithm\nfor movie-watching.\n\nThis example is a toy. To apply ICA to fmri timeseries data, it is advised\nto look at the example\n:ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.\n\nThe example here applies the scikit-learn ICA to movie watching timeseries data.\nNote that following the code in the example, any unsupervised\ndecomposition model, or other latent-factor models, can be applied to\nthe data, as the scikit-learn API enables to exchange them as almost\nblack box (though the relevant parameter for brain maps might no longer\nbe given by a call to fit_transform).\n\n\"\"\"\n\n#####################################################################\n# Load movie watching dataset\nfrom nilearn import datasets\n# Here we use only single subject to get faster-running code. 
For better\n# results, simply increase this number\n# XXX: must get the code to run for more than 1 subject\ndataset = datasets.fetch_development_fmri(n_subjects=1)\nfunc_filename = dataset.func[0]\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n dataset.func[0]) # 4D data\n\n\n#####################################################################\n# Preprocess\nfrom nilearn.input_data import NiftiMasker\n\n# This is fmri timeseries data: the background has not been removed yet,\n# thus we need to use mask_strategy='epi' to compute the mask from the\n# EPI images\nmasker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,\n mask_strategy='epi', standardize=True)\ndata_masked = masker.fit_transform(func_filename)\n\n# Concatenate all the subjects\n# fmri_data = np.concatenate(data_masked, axis=1)\nfmri_data = data_masked\n\n\n#####################################################################\n# Apply ICA\n\nfrom sklearn.decomposition import FastICA\nn_components = 10\nica = FastICA(n_components=n_components, random_state=42)\ncomponents_masked = ica.fit_transform(data_masked.T).T\n\n# Normalize estimated components, for thresholding to make sense\ncomponents_masked -= components_masked.mean(axis=0)\ncomponents_masked /= components_masked.std(axis=0)\n# Threshold\nimport numpy as np\ncomponents_masked[np.abs(components_masked) < .8] = 0\n\n# Now invert the masking operation, going back to a full 3D\n# representation\ncomponent_img = masker.inverse_transform(components_masked)\n\n#####################################################################\n# Visualize the results\n\n# Show some interesting components\nfrom nilearn import image\nfrom nilearn.plotting import plot_stat_map, show\n\n# Use the mean as a background\nmean_img = image.mean_img(func_filename)\n\nplot_stat_map(image.index_img(component_img, 0), mean_img)\n\nplot_stat_map(image.index_img(component_img, 1), mean_img)\n\nshow()\n", "path": "examples/05_advanced/plot_ica_resting_state.py"}]} | 1,482 | 249 |
gh_patches_debug_26036 | rasdani/github-patches | git_diff | python-discord__bot-1293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filter functionality for emoji-spam messages
Now that someone's figured out you can cause havoc by dumping massive amounts of emojis into a channel, we need a filter to automatically manage this ASAP. Detection should be fairly simple: a very high emoji count is required for the effect (>20/message to >250 for varying impact), but emoji variance is not required.
Tangentially: This is also likely related to a Discord update that went out recently and has slowed the emoji loading process.
I think we can safely filter out and autodelete high emoji messages without significant false positives.
We may want to handle both unicode emojis and server emojis.
It's possible for unicode emojis to get a higher "load" because they count as one character for message transmission purposes, but server emojis count for more. However, server emojis may be animated, and will apply significant load by that fact as well.
</issue>
<code>
[start of bot/rules/discord_emojis.py]
1 import re
2 from typing import Dict, Iterable, List, Optional, Tuple
3
4 from discord import Member, Message
5
6
7 DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>")
8 CODE_BLOCK_RE = re.compile(r"```.*?```", flags=re.DOTALL)
9
10
11 async def apply(
12 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
13 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
14 """Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user."""
15 relevant_messages = tuple(
16 msg
17 for msg in recent_messages
18 if msg.author == last_message.author
19 )
20
21 # Get rid of code blocks in the message before searching for emojis.
22 total_emojis = sum(
23 len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub("", msg.content)))
24 for msg in relevant_messages
25 )
26
27 if total_emojis > config['max']:
28 return (
29 f"sent {total_emojis} emojis in {config['interval']}s",
30 (last_message.author,),
31 relevant_messages
32 )
33 return None
34
[end of bot/rules/discord_emojis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/rules/discord_emojis.py b/bot/rules/discord_emojis.py
--- a/bot/rules/discord_emojis.py
+++ b/bot/rules/discord_emojis.py
@@ -2,16 +2,17 @@
from typing import Dict, Iterable, List, Optional, Tuple
from discord import Member, Message
+from emoji import demojize
-DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>")
+DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>|:\w+:")
CODE_BLOCK_RE = re.compile(r"```.*?```", flags=re.DOTALL)
async def apply(
last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
- """Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user."""
+ """Detects total Discord emojis exceeding the limit sent by a single user."""
relevant_messages = tuple(
msg
for msg in recent_messages
@@ -19,8 +20,9 @@
)
# Get rid of code blocks in the message before searching for emojis.
+ # Convert Unicode emojis to :emoji: format to get their count.
total_emojis = sum(
- len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub("", msg.content)))
+ len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub("", msg.content))))
for msg in relevant_messages
)
| {"golden_diff": "diff --git a/bot/rules/discord_emojis.py b/bot/rules/discord_emojis.py\n--- a/bot/rules/discord_emojis.py\n+++ b/bot/rules/discord_emojis.py\n@@ -2,16 +2,17 @@\n from typing import Dict, Iterable, List, Optional, Tuple\n \n from discord import Member, Message\n+from emoji import demojize\n \n \n-DISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>\")\n+DISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>|:\\w+:\")\n CODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n \n \n async def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n- \"\"\"Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user.\"\"\"\n+ \"\"\"Detects total Discord emojis exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n@@ -19,8 +20,9 @@\n )\n \n # Get rid of code blocks in the message before searching for emojis.\n+ # Convert Unicode emojis to :emoji: format to get their count.\n total_emojis = sum(\n- len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub(\"\", msg.content)))\n+ len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub(\"\", msg.content))))\n for msg in relevant_messages\n )\n", "issue": "Filter functionality for emoji-spam messages\nNow that someone's figured out you can cause havok by dumping massive amounts of emojis into a channel, we need a filter to automatically manage this ASAP. Detection should be fairly simple, a very high emoji count is required for the effect (>20/message to >250 for varying impact) but emoji variance is not required. \r\n\r\nTangentially: This is also likely related to a discord update that went out that has slowed the emoji loading process recently.\r\n\r\nI think we can safely filter out and autodelete high emoji messages without significant false positives. \r\n\r\nWe may want to handle both unicode emojis and server emojis. \r\n\r\nIt's possible for unicode emojis to get a higher \"load\" because they could as one character for message transmission purposes but server emojis count for more. However, server emojis may be animated, and will apply significant load by that fact as well.\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nDISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>\")\nCODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if msg.author == last_message.author\n )\n\n # Get rid of code blocks in the message before searching for emojis.\n total_emojis = sum(\n len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub(\"\", msg.content)))\n for msg in relevant_messages\n )\n\n if total_emojis > config['max']:\n return (\n f\"sent {total_emojis} emojis in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/discord_emojis.py"}]} | 1,040 | 352 |
gh_patches_debug_51560 | rasdani/github-patches | git_diff | ray-project__ray-10593 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make the multi-worker feature for Java worker experimental
Right now, the multi-worker feature for the Java worker is enabled by default, but the `ActorHandle::kill()` API doesn't work well if multi-worker is enabled because it will kill the whole process instead of one worker in the process.
To avoid complaints from Java users, we should disable the multi-worker feature by default, but we still enable it in unit tests.
</issue>
<code>
[start of python/ray/job_config.py]
1 import ray
2
3
4 class JobConfig:
5 """A class used to store the configurations of a job.
6
7 Attributes:
8 worker_env (dict): Environment variables to be set on worker
9 processes.
10 num_java_workers_per_process (int): The number of java workers per
11 worker process.
12 jvm_options (str[]): The jvm options for java workers of the job.
13 """
14
15 def __init__(
16 self,
17 worker_env=None,
18 num_java_workers_per_process=10,
19 jvm_options=None,
20 ):
21 if worker_env is None:
22 self.worker_env = dict()
23 else:
24 self.worker_env = worker_env
25 self.num_java_workers_per_process = num_java_workers_per_process
26 if jvm_options is None:
27 self.jvm_options = []
28 else:
29 self.jvm_options = jvm_options
30
31 def serialize(self):
32 job_config = ray.gcs_utils.JobConfig()
33 for key in self.worker_env:
34 job_config.worker_env[key] = self.worker_env[key]
35 job_config.num_java_workers_per_process = (
36 self.num_java_workers_per_process)
37 job_config.jvm_options.extend(self.jvm_options)
38 return job_config.SerializeToString()
39
[end of python/ray/job_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/job_config.py b/python/ray/job_config.py
--- a/python/ray/job_config.py
+++ b/python/ray/job_config.py
@@ -15,7 +15,7 @@
def __init__(
self,
worker_env=None,
- num_java_workers_per_process=10,
+ num_java_workers_per_process=1,
jvm_options=None,
):
if worker_env is None:
| {"golden_diff": "diff --git a/python/ray/job_config.py b/python/ray/job_config.py\n--- a/python/ray/job_config.py\n+++ b/python/ray/job_config.py\n@@ -15,7 +15,7 @@\n def __init__(\n self,\n worker_env=None,\n- num_java_workers_per_process=10,\n+ num_java_workers_per_process=1,\n jvm_options=None,\n ):\n if worker_env is None:\n", "issue": "Make the multi-worker feature for Java worker experimental\nRight now, the multi-worker feature for Java worker is enabled by default, but the `ActorHandle::kill()` API doesn't work well if multi-worker is enabled because it will kill the whole process instead of one worker in the process.\r\n\r\nTo avoid complaints from Java users, we should disable the multi-worker feature by default, but we still enable it in unit test.\n", "before_files": [{"content": "import ray\n\n\nclass JobConfig:\n \"\"\"A class used to store the configurations of a job.\n\n Attributes:\n worker_env (dict): Environment variables to be set on worker\n processes.\n num_java_workers_per_process (int): The number of java workers per\n worker process.\n jvm_options (str[]): The jvm options for java workers of the job.\n \"\"\"\n\n def __init__(\n self,\n worker_env=None,\n num_java_workers_per_process=10,\n jvm_options=None,\n ):\n if worker_env is None:\n self.worker_env = dict()\n else:\n self.worker_env = worker_env\n self.num_java_workers_per_process = num_java_workers_per_process\n if jvm_options is None:\n self.jvm_options = []\n else:\n self.jvm_options = jvm_options\n\n def serialize(self):\n job_config = ray.gcs_utils.JobConfig()\n for key in self.worker_env:\n job_config.worker_env[key] = self.worker_env[key]\n job_config.num_java_workers_per_process = (\n self.num_java_workers_per_process)\n job_config.jvm_options.extend(self.jvm_options)\n return job_config.SerializeToString()\n", "path": "python/ray/job_config.py"}]} | 948 | 98 |
gh_patches_debug_28555 | rasdani/github-patches | git_diff | archlinux__archinstall-418 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AMD graphics driver selection unclear
For AMD, it is by no means obvious what the options actually do.
The recommended choice should, in my opinion, be mesa (packages mesa and vulkan-radeon).
If you want to go by vendor in the first table, then mesa should be moved into the secondary table, i.e. when selecting AMD you get the choice between AMDVLK and mesa. The current situation is just confusing.
</issue>
<code>
[start of archinstall/lib/hardware.py]
1 import os, subprocess, json
2 from .general import sys_command
3 from .networking import list_interfaces, enrichIfaceTypes
4 from typing import Optional
5
6 __packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']
7
8 AVAILABLE_GFX_DRIVERS = {
9 # Sub-dicts are layer-2 options to be selected
10 # and lists are a list of packages to be installed
11 'AMD / ATI' : {
12 'amd' : ['xf86-video-amdgpu'],
13 'ati' : ['xf86-video-ati']
14 },
15 'intel' : ['xf86-video-intel'],
16 'nvidia' : {
17 'open-source' : ['xf86-video-nouveau'],
18 'proprietary' : ['nvidia']
19 },
20 'mesa' : ['mesa'],
21 'fbdev' : ['xf86-video-fbdev'],
22 'vesa' : ['xf86-video-vesa'],
23 'vmware / virtualbox' : ['xf86-video-vmware']
24 }
25
26 def hasWifi()->bool:
27 return 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()
28
29 def hasAMDCPU()->bool:
30 if subprocess.check_output("lscpu | grep AMD", shell=True).strip().decode():
31 return True
32 return False
33 def hasIntelCPU()->bool:
34 if subprocess.check_output("lscpu | grep Intel", shell=True).strip().decode():
35 return True
36 return False
37
38 def hasUEFI()->bool:
39 return os.path.isdir('/sys/firmware/efi')
40
41 def graphicsDevices()->dict:
42 cards = {}
43 for line in sys_command(f"lspci"):
44 if b' VGA ' in line:
45 _, identifier = line.split(b': ',1)
46 cards[identifier.strip().lower().decode('UTF-8')] = line
47 return cards
48
49 def hasNvidiaGraphics()->bool:
50 return any('nvidia' in x for x in graphicsDevices())
51
52 def hasAmdGraphics()->bool:
53 return any('amd' in x for x in graphicsDevices())
54
55 def hasIntelGraphics()->bool:
56 return any('intel' in x for x in graphicsDevices())
57
58
59 def cpuVendor()-> Optional[str]:
60 cpu_info = json.loads(subprocess.check_output("lscpu -J", shell=True).decode('utf-8'))['lscpu']
61 for info in cpu_info:
62 if info.get('field',None):
63 if info.get('field',None) == "Vendor ID:":
64 return info.get('data',None)
65
66 def isVM() -> bool:
67 try:
68 subprocess.check_call(["systemd-detect-virt"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine
69 return True
70 except:
71 return False
72
73 # TODO: Add more identifiers
74
[end of archinstall/lib/hardware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py
--- a/archinstall/lib/hardware.py
+++ b/archinstall/lib/hardware.py
@@ -3,24 +3,53 @@
from .networking import list_interfaces, enrichIfaceTypes
from typing import Optional
-__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']
+__packages__ = [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "xf86-video-nouveau",
+ "xf86-video-vmware",
+ "libva-mesa-driver",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-radeon",
+ "vulkan-intel",
+ "nvidia",
+]
AVAILABLE_GFX_DRIVERS = {
# Sub-dicts are layer-2 options to be selected
# and lists are a list of packages to be installed
- 'AMD / ATI' : {
- 'amd' : ['xf86-video-amdgpu'],
- 'ati' : ['xf86-video-ati']
+ "All open-source (default)": [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "xf86-video-nouveau",
+ "xf86-video-vmware",
+ "libva-mesa-driver",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-radeon",
+ "vulkan-intel",
+ ],
+ "AMD / ATI (open-source)": [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "libva-mesa-driver",
+ "vulkan-radeon",
+ ],
+ "Intel (open-source)": [
+ "mesa",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-intel",
+ ],
+ "Nvidia": {
+ "open-source": ["mesa", "xf86-video-nouveau", "libva-mesa-driver"],
+ "proprietary": ["nvidia"],
},
- 'intel' : ['xf86-video-intel'],
- 'nvidia' : {
- 'open-source' : ['xf86-video-nouveau'],
- 'proprietary' : ['nvidia']
- },
- 'mesa' : ['mesa'],
- 'fbdev' : ['xf86-video-fbdev'],
- 'vesa' : ['xf86-video-vesa'],
- 'vmware / virtualbox' : ['xf86-video-vmware']
+ "VMware / VirtualBox (open-source)": ["mesa", "xf86-video-vmware"],
}
def hasWifi()->bool:
| {"golden_diff": "diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py\n--- a/archinstall/lib/hardware.py\n+++ b/archinstall/lib/hardware.py\n@@ -3,24 +3,53 @@\n from .networking import list_interfaces, enrichIfaceTypes\n from typing import Optional\n \n-__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']\n+__packages__ = [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"xf86-video-nouveau\",\n+\t\t\"xf86-video-vmware\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-radeon\",\n+\t\t\"vulkan-intel\",\n+\t\t\"nvidia\",\n+]\n \n AVAILABLE_GFX_DRIVERS = {\n \t# Sub-dicts are layer-2 options to be selected\n \t# and lists are a list of packages to be installed\n-\t'AMD / ATI' : {\n-\t\t'amd' : ['xf86-video-amdgpu'],\n-\t\t'ati' : ['xf86-video-ati']\n+\t\"All open-source (default)\": [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"xf86-video-nouveau\",\n+\t\t\"xf86-video-vmware\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-radeon\",\n+\t\t\"vulkan-intel\",\n+\t],\n+\t\"AMD / ATI (open-source)\": [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"vulkan-radeon\",\n+\t],\n+\t\"Intel (open-source)\": [\n+\t\t\"mesa\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-intel\",\n+\t],\n+\t\"Nvidia\": {\n+\t\t\"open-source\": [\"mesa\", \"xf86-video-nouveau\", \"libva-mesa-driver\"],\n+\t\t\"proprietary\": [\"nvidia\"],\n \t},\n-\t'intel' : ['xf86-video-intel'],\n-\t'nvidia' : {\n-\t\t'open-source' : ['xf86-video-nouveau'],\n-\t\t'proprietary' : ['nvidia']\n-\t},\n-\t'mesa' : ['mesa'],\n-\t'fbdev' : ['xf86-video-fbdev'],\n-\t'vesa' : ['xf86-video-vesa'],\n-\t'vmware / virtualbox' : ['xf86-video-vmware']\n+\t\"VMware / VirtualBox (open-source)\": [\"mesa\", \"xf86-video-vmware\"],\n }\n \n def hasWifi()->bool:\n", "issue": "AMD graphics driver selection unclear\nFor AMD, the options are by no means obvious in what they actually do.\r\n\r\nThe recommended choice should in my opinion be mesa (packages mesa and radeon-vulkan).\r\nIf you want to go by vendor in the first table, then mesa should be moved into the secondary table, i.e. when selecting AMD you get the choice between AMDVLK and mesa. 
The current situation is just confusing.\n", "before_files": [{"content": "import os, subprocess, json\nfrom .general import sys_command\nfrom .networking import list_interfaces, enrichIfaceTypes\nfrom typing import Optional\n\n__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']\n\nAVAILABLE_GFX_DRIVERS = {\n\t# Sub-dicts are layer-2 options to be selected\n\t# and lists are a list of packages to be installed\n\t'AMD / ATI' : {\n\t\t'amd' : ['xf86-video-amdgpu'],\n\t\t'ati' : ['xf86-video-ati']\n\t},\n\t'intel' : ['xf86-video-intel'],\n\t'nvidia' : {\n\t\t'open-source' : ['xf86-video-nouveau'],\n\t\t'proprietary' : ['nvidia']\n\t},\n\t'mesa' : ['mesa'],\n\t'fbdev' : ['xf86-video-fbdev'],\n\t'vesa' : ['xf86-video-vesa'],\n\t'vmware / virtualbox' : ['xf86-video-vmware']\n}\n\ndef hasWifi()->bool:\n\treturn 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()\n\ndef hasAMDCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep AMD\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\ndef hasIntelCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep Intel\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\n\ndef hasUEFI()->bool:\n\treturn os.path.isdir('/sys/firmware/efi')\n\ndef graphicsDevices()->dict:\n\tcards = {}\n\tfor line in sys_command(f\"lspci\"):\n\t\tif b' VGA ' in line:\n\t\t\t_, identifier = line.split(b': ',1)\n\t\t\tcards[identifier.strip().lower().decode('UTF-8')] = line\n\treturn cards\n\ndef hasNvidiaGraphics()->bool:\n\treturn any('nvidia' in x for x in graphicsDevices())\n\ndef hasAmdGraphics()->bool:\n\treturn any('amd' in x for x in graphicsDevices())\n\ndef hasIntelGraphics()->bool:\n\treturn any('intel' in x for x in graphicsDevices())\n\n\ndef cpuVendor()-> Optional[str]:\n\tcpu_info = json.loads(subprocess.check_output(\"lscpu -J\", shell=True).decode('utf-8'))['lscpu']\n\tfor info in cpu_info:\n\t\tif info.get('field',None):\n\t\t\tif info.get('field',None) == \"Vendor ID:\":\n\t\t\t\treturn info.get('data',None)\n\ndef isVM() -> bool:\n\ttry:\n\t\tsubprocess.check_call([\"systemd-detect-virt\"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine\n\t\treturn True\n\texcept:\n\t\treturn False\n\n# TODO: Add more identifiers\n", "path": "archinstall/lib/hardware.py"}]} | 1,456 | 745 |
gh_patches_debug_47929 | rasdani/github-patches | git_diff | liqd__a4-opin-1835 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sort by section changes automatically to "most recent" on production
</issue>
<code>
[start of euth/ideas/templatetags/idea_tags.py]
1 from django import template
2
3 from euth.ideas.models import Idea
4
5 register = template.Library()
6
7
8 @register.simple_tag
9 def get_range(number, listcount):
10 if number < 3:
11 return range(1, 6)
12 elif number > listcount - 2:
13 return range(listcount - 4, listcount + 1)
14 else:
15 return range(number - 2, number + 3)
16
17
18 @register.simple_tag
19 def is_idea_list(module):
20 return Idea.objects.filter(module=module).count() > 0
21
[end of euth/ideas/templatetags/idea_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/ideas/templatetags/idea_tags.py b/euth/ideas/templatetags/idea_tags.py
--- a/euth/ideas/templatetags/idea_tags.py
+++ b/euth/ideas/templatetags/idea_tags.py
@@ -18,3 +18,12 @@
@register.simple_tag
def is_idea_list(module):
return Idea.objects.filter(module=module).count() > 0
+
+
[email protected]_tag
+def combined_url_parameter(request_query_dict, **kwargs):
+ combined_query_dict = request_query_dict.copy()
+ for key in kwargs:
+ combined_query_dict.setlist(key, [kwargs[key]])
+ encoded_parameter = '?' + combined_query_dict.urlencode()
+ return encoded_parameter
| {"golden_diff": "diff --git a/euth/ideas/templatetags/idea_tags.py b/euth/ideas/templatetags/idea_tags.py\n--- a/euth/ideas/templatetags/idea_tags.py\n+++ b/euth/ideas/templatetags/idea_tags.py\n@@ -18,3 +18,12 @@\n @register.simple_tag\n def is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n+\n+\[email protected]_tag\n+def combined_url_parameter(request_query_dict, **kwargs):\n+ combined_query_dict = request_query_dict.copy()\n+ for key in kwargs:\n+ combined_query_dict.setlist(key, [kwargs[key]])\n+ encoded_parameter = '?' + combined_query_dict.urlencode()\n+ return encoded_parameter\n", "issue": "Sort by section changes automatically to \"most recent\" on productive\n\n", "before_files": [{"content": "from django import template\n\nfrom euth.ideas.models import Idea\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_range(number, listcount):\n if number < 3:\n return range(1, 6)\n elif number > listcount - 2:\n return range(listcount - 4, listcount + 1)\n else:\n return range(number - 2, number + 3)\n\n\[email protected]_tag\ndef is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n", "path": "euth/ideas/templatetags/idea_tags.py"}]} | 717 | 175 |
gh_patches_debug_2515 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2974 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test 2959: redesign mail of new Stellungnahme (statement) in b-plan module
**URL:** mail
**user:** sachbearbeiter
**expected behaviour:** logo is no longer in the email
**behaviour:** logo is in the bottom left corner of the mail, outside the mail layout box
**important screensize:**
**device & browser:** mail on mac
**Comment/Question:**
Screenshot?
<img width="776" alt="Bildschirmfoto 2020-05-25 um 15 44 09" src="https://user-images.githubusercontent.com/35491681/82819838-5e76f900-9ea1-11ea-99a9-9a531588387f.png">
</issue>
<code>
[start of meinberlin/apps/bplan/emails.py]
1 from django.conf import settings
2
3 from meinberlin.apps.contrib.emails import Email
4
5
6 class OfficeWorkerNotification(Email):
7 template_name = 'meinberlin_bplan/emails/office_worker_notification'
8
9 @property
10 def office_worker_email(self):
11 project = self.object.module.project
12 return project.externalproject.bplan.office_worker_email
13
14 @property
15 def bplan_identifier(self):
16 project = self.object.module.project
17 return project.externalproject.bplan.identifier
18
19 def get_receivers(self):
20 return [self.office_worker_email]
21
22 def get_context(self):
23 context = super().get_context()
24 context['module'] = self.object.module
25 context['project'] = self.object.module.project
26 context['contact_email'] = settings.CONTACT_EMAIL
27 context['identifier'] = self.bplan_identifier
28 return context
29
30
31 class SubmitterConfirmation(Email):
32 template_name = 'meinberlin_bplan/emails/submitter_confirmation'
33
34 def get_receivers(self):
35 return [self.object.email]
36
37 def get_context(self):
38 context = super().get_context()
39 context['module'] = self.object.module
40 context['project'] = self.object.module.project
41 context['contact_email'] = settings.CONTACT_EMAIL
42 return context
43
[end of meinberlin/apps/bplan/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/bplan/emails.py b/meinberlin/apps/bplan/emails.py
--- a/meinberlin/apps/bplan/emails.py
+++ b/meinberlin/apps/bplan/emails.py
@@ -27,6 +27,9 @@
context['identifier'] = self.bplan_identifier
return context
+ def get_attachments(self):
+ return []
+
class SubmitterConfirmation(Email):
template_name = 'meinberlin_bplan/emails/submitter_confirmation'
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/emails.py b/meinberlin/apps/bplan/emails.py\n--- a/meinberlin/apps/bplan/emails.py\n+++ b/meinberlin/apps/bplan/emails.py\n@@ -27,6 +27,9 @@\n context['identifier'] = self.bplan_identifier\n return context\n \n+ def get_attachments(self):\n+ return []\n+\n \n class SubmitterConfirmation(Email):\n template_name = 'meinberlin_bplan/emails/submitter_confirmation'\n", "issue": "test 2959: redesign mail of new Stellungnahme in b-plan module\n**URL:** mail\r\n**user:** sachbearbeiter\r\n**expected behaviour:** logo is no longer in the email\r\n**behaviour:** logo is on the bottom left corner of the mail, outside the mail layout box \r\n**important screensize:**\r\n**device & browser:** mail on mac\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n<img width=\"776\" alt=\"Bildschirmfoto 2020-05-25 um 15 44 09\" src=\"https://user-images.githubusercontent.com/35491681/82819838-5e76f900-9ea1-11ea-99a9-9a531588387f.png\">\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\n\nfrom meinberlin.apps.contrib.emails import Email\n\n\nclass OfficeWorkerNotification(Email):\n template_name = 'meinberlin_bplan/emails/office_worker_notification'\n\n @property\n def office_worker_email(self):\n project = self.object.module.project\n return project.externalproject.bplan.office_worker_email\n\n @property\n def bplan_identifier(self):\n project = self.object.module.project\n return project.externalproject.bplan.identifier\n\n def get_receivers(self):\n return [self.office_worker_email]\n\n def get_context(self):\n context = super().get_context()\n context['module'] = self.object.module\n context['project'] = self.object.module.project\n context['contact_email'] = settings.CONTACT_EMAIL\n context['identifier'] = self.bplan_identifier\n return context\n\n\nclass SubmitterConfirmation(Email):\n template_name = 'meinberlin_bplan/emails/submitter_confirmation'\n\n def get_receivers(self):\n return [self.object.email]\n\n def get_context(self):\n context = super().get_context()\n context['module'] = self.object.module\n context['project'] = self.object.module.project\n context['contact_email'] = settings.CONTACT_EMAIL\n return context\n", "path": "meinberlin/apps/bplan/emails.py"}]} | 1,089 | 118 |
gh_patches_debug_11460 | rasdani/github-patches | git_diff | modoboa__modoboa-2495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Robots.txt is missing from urls.py
# Impacted versions
* Modoboa: 1.12.2 and older
* installer used: Yes, but some modifications made
* Webserver: Nginx
# Steps to reproduce
Install modoboa and enable the web interface.
# Current behavior
No robots.txt is defined. Search engines do not know how to index the website. When search engines try to find robots.txt, a 404 is raised and the error is mailed to ADMINS (if configured)
# Expected behavior
Robots.txt defined in urls.py to deny all traffic, as webmail should not be publicly indexed by search engines. Possible fix: add
`path('robots.txt', lambda r: HttpResponse("User-agent: *\nDisAllow: /", content_type="text/plain"), name='robots')`
# Video/Screenshot link (optional)
</issue>
<code>
[start of modoboa/core/urls.py]
1 """Core urls."""
2
3 from django.urls import path
4
5 from . import views
6
7 app_name = "core"
8
9 urlpatterns = [
10 path('', views.RootDispatchView.as_view(), name="root"),
11 path('dashboard/', views.DashboardView.as_view(), name="dashboard"),
12
13 path('accounts/login/', views.dologin, name="login"),
14 path('accounts/logout/', views.dologout, name="logout"),
15 path('accounts/2fa_verify/',
16 views.TwoFactorCodeVerifyView.as_view(),
17 name='2fa_verify'),
18
19 path('core/', views.viewsettings, name="index"),
20 path('core/parameters/', views.parameters, name="parameters"),
21 path('core/info/', views.information, name="information"),
22 path('core/logs/', views.logs, name="log_list"),
23 path('core/logs/page/', views.logs_page, name="logs_page"),
24 path('core/top_notifications/check/',
25 views.check_top_notifications,
26 name="top_notifications_check"),
27
28 path('user/', views.index, name="user_index"),
29 path('user/preferences/', views.preferences,
30 name="user_preferences"),
31 path('user/profile/', views.profile, name="user_profile"),
32 path('user/api/', views.api_access, name="user_api_access"),
33 path('user/security/', views.security, name="user_security"),
34 ]
35
[end of modoboa/core/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modoboa/core/urls.py b/modoboa/core/urls.py
--- a/modoboa/core/urls.py
+++ b/modoboa/core/urls.py
@@ -1,6 +1,7 @@
"""Core urls."""
from django.urls import path
+from django.views.generic.base import TemplateView
from . import views
@@ -31,4 +32,5 @@
path('user/profile/', views.profile, name="user_profile"),
path('user/api/', views.api_access, name="user_api_access"),
path('user/security/', views.security, name="user_security"),
+ path('robots.txt', TemplateView.as_view(template_name="core/robots.txt", content_type="text/plain")),
]
| {"golden_diff": "diff --git a/modoboa/core/urls.py b/modoboa/core/urls.py\n--- a/modoboa/core/urls.py\n+++ b/modoboa/core/urls.py\n@@ -1,6 +1,7 @@\n \"\"\"Core urls.\"\"\"\n \n from django.urls import path\n+from django.views.generic.base import TemplateView\n \n from . import views\n \n@@ -31,4 +32,5 @@\n path('user/profile/', views.profile, name=\"user_profile\"),\n path('user/api/', views.api_access, name=\"user_api_access\"),\n path('user/security/', views.security, name=\"user_security\"),\n+ path('robots.txt', TemplateView.as_view(template_name=\"core/robots.txt\", content_type=\"text/plain\")),\n ]\n", "issue": "Robots.txt is missing from urls.py\n# Impacted versions\r\n\r\n* Modoboa: 1.12.2 and older\r\n* installer used: Yes, but some modifications made\r\n* Webserver: Nginx\r\n\r\n# Steps to reproduce\r\nInstall modoboa and enable webinterface.\r\n\r\n# Current behavior\r\nNo robots.txt is defined. Search engines do not now how to index the website. When search engines try to find robots.txt an 404 is raised and the error is mailed to ADMINS (if configured)\r\n\r\n# Expected behavior\r\nRobots.txt in urls.py defined, to deny all traffic, as webmail should not be publicly indexed by search engines. Possible fix, add:\r\n`path('robots.txt', lambda r: HttpResponse(\"User-agent: *\\nDisAllow: /\", content_type=\"text/plain\"), name='robots')`\r\n\r\n# Video/Screenshot link (optional)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Core urls.\"\"\"\n\nfrom django.urls import path\n\nfrom . import views\n\napp_name = \"core\"\n\nurlpatterns = [\n path('', views.RootDispatchView.as_view(), name=\"root\"),\n path('dashboard/', views.DashboardView.as_view(), name=\"dashboard\"),\n\n path('accounts/login/', views.dologin, name=\"login\"),\n path('accounts/logout/', views.dologout, name=\"logout\"),\n path('accounts/2fa_verify/',\n views.TwoFactorCodeVerifyView.as_view(),\n name='2fa_verify'),\n\n path('core/', views.viewsettings, name=\"index\"),\n path('core/parameters/', views.parameters, name=\"parameters\"),\n path('core/info/', views.information, name=\"information\"),\n path('core/logs/', views.logs, name=\"log_list\"),\n path('core/logs/page/', views.logs_page, name=\"logs_page\"),\n path('core/top_notifications/check/',\n views.check_top_notifications,\n name=\"top_notifications_check\"),\n\n path('user/', views.index, name=\"user_index\"),\n path('user/preferences/', views.preferences,\n name=\"user_preferences\"),\n path('user/profile/', views.profile, name=\"user_profile\"),\n path('user/api/', views.api_access, name=\"user_api_access\"),\n path('user/security/', views.security, name=\"user_security\"),\n]\n", "path": "modoboa/core/urls.py"}]} | 1,071 | 159 |
gh_patches_debug_3849 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2037 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Celery is only using low priority queue
I'm not sure if this is happening for everyone or just bookwyrm.social, but all my celery tasks are going to the `low_priority` queue and it's making everything run super slowly!
(@tofuwabohu are you noticing this in flower?)
</issue>
<code>
[start of celerywyrm/settings.py]
1 """ bookwyrm settings and configuration """
2 # pylint: disable=wildcard-import
3 # pylint: disable=unused-wildcard-import
4 from bookwyrm.settings import *
5
6 # pylint: disable=line-too-long
7 REDIS_BROKER_PASSWORD = requests.utils.quote(env("REDIS_BROKER_PASSWORD", None))
8 REDIS_BROKER_HOST = env("REDIS_BROKER_HOST", "redis_broker")
9 REDIS_BROKER_PORT = env("REDIS_BROKER_PORT", 6379)
10 REDIS_BROKER_DB_INDEX = env("REDIS_BROKER_DB_INDEX", 0)
11
12 CELERY_BROKER_URL = f"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}"
13 CELERY_RESULT_BACKEND = f"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}"
14
15 CELERY_DEFAULT_QUEUE = "low_priority"
16
17 CELERY_ACCEPT_CONTENT = ["json"]
18 CELERY_TASK_SERIALIZER = "json"
19 CELERY_RESULT_SERIALIZER = "json"
20
21 CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
22 CELERY_TIMEZONE = env("TIME_ZONE", "UTC")
23
24 FLOWER_PORT = env("FLOWER_PORT")
25
26 INSTALLED_APPS = INSTALLED_APPS + [
27 "celerywyrm",
28 ]
29
30 ROOT_URLCONF = "celerywyrm.urls"
31
32 WSGI_APPLICATION = "celerywyrm.wsgi.application"
33
[end of celerywyrm/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celerywyrm/settings.py b/celerywyrm/settings.py
--- a/celerywyrm/settings.py
+++ b/celerywyrm/settings.py
@@ -13,6 +13,7 @@
CELERY_RESULT_BACKEND = f"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}"
CELERY_DEFAULT_QUEUE = "low_priority"
+CELERY_CREATE_MISSING_QUEUES = True
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
| {"golden_diff": "diff --git a/celerywyrm/settings.py b/celerywyrm/settings.py\n--- a/celerywyrm/settings.py\n+++ b/celerywyrm/settings.py\n@@ -13,6 +13,7 @@\n CELERY_RESULT_BACKEND = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\n \n CELERY_DEFAULT_QUEUE = \"low_priority\"\n+CELERY_CREATE_MISSING_QUEUES = True\n \n CELERY_ACCEPT_CONTENT = [\"json\"]\n CELERY_TASK_SERIALIZER = \"json\"\n", "issue": "Celery is only using low priority queue\nI'm not sure if this is happening for everyone or just bookwyrm.social, but all my celery tasks are going to the `low_priority` queue and it's making everything run super slowly!\r\n\r\n(@tofuwabohu are you noticing this in flower?)\n", "before_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\n# pylint: disable=wildcard-import\n# pylint: disable=unused-wildcard-import\nfrom bookwyrm.settings import *\n\n# pylint: disable=line-too-long\nREDIS_BROKER_PASSWORD = requests.utils.quote(env(\"REDIS_BROKER_PASSWORD\", None))\nREDIS_BROKER_HOST = env(\"REDIS_BROKER_HOST\", \"redis_broker\")\nREDIS_BROKER_PORT = env(\"REDIS_BROKER_PORT\", 6379)\nREDIS_BROKER_DB_INDEX = env(\"REDIS_BROKER_DB_INDEX\", 0)\n\nCELERY_BROKER_URL = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\nCELERY_RESULT_BACKEND = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\n\nCELERY_DEFAULT_QUEUE = \"low_priority\"\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_RESULT_SERIALIZER = \"json\"\n\nCELERY_BEAT_SCHEDULER = \"django_celery_beat.schedulers:DatabaseScheduler\"\nCELERY_TIMEZONE = env(\"TIME_ZONE\", \"UTC\")\n\nFLOWER_PORT = env(\"FLOWER_PORT\")\n\nINSTALLED_APPS = INSTALLED_APPS + [\n \"celerywyrm\",\n]\n\nROOT_URLCONF = \"celerywyrm.urls\"\n\nWSGI_APPLICATION = \"celerywyrm.wsgi.application\"\n", "path": "celerywyrm/settings.py"}]} | 985 | 128 |
gh_patches_debug_48127 | rasdani/github-patches | git_diff | dynaconf__dynaconf-1010 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] TypeError for older versions of HVAC in read_secret_version method
**Describe the bug**
A combination of newer versions of Dynaconf with older versions of HVAC results in an incompatible mix of expected vs. available arguments. Specifically, you can get the following traceback.
```python
109 try:
110 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
--> 111 data = client.secrets.kv.v2.read_secret_version(
112 path,
113 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
114 raise_on_deleted_version=True, # keep default behavior
115 )
116 else:
117 data = client.secrets.kv.read_secret(
118 "data/" + path,
119 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
120 )
TypeError: KvV2.read_secret_version() got an unexpected keyword argument 'raise_on_deleted_version'
```
The PR introducing this feature was included in HVAC 1.1.0: https://github.com/hvac/hvac/pull/907
**To Reproduce**
Steps to reproduce the behavior:
1. Have a version of HVAC older than 1.1.0
2. Trigger a vault version read
</issue>
<code>
[start of setup.py]
1 from __future__ import annotations
2
3 import os
4
5 from setuptools import find_packages
6 from setuptools import setup
7
8
9 def read(*names, **kwargs):
10 """Read a file."""
11 content = ""
12 with open(
13 os.path.join(os.path.dirname(__file__), *names),
14 encoding=kwargs.get("encoding", "utf8"),
15 ) as open_file:
16 content = open_file.read().strip()
17 return content
18
19
20 test_requirements = [
21 "pytest",
22 "pytest-cov",
23 "pytest-xdist",
24 "pytest-mock",
25 "flake8",
26 "pep8-naming",
27 "flake8-debugger",
28 "flake8-print",
29 "flake8-todo",
30 "radon",
31 "flask>=0.12",
32 "django",
33 "python-dotenv",
34 "toml",
35 "redis",
36 "hvac",
37 "configobj",
38 ]
39
40
41 setup(
42 name="dynaconf",
43 version=read("dynaconf", "VERSION"),
44 url="https://github.com/dynaconf/dynaconf",
45 license="MIT",
46 license_files=["LICENSE", "vendor_licenses/*"],
47 author="Bruno Rocha",
48 author_email="[email protected]",
49 description="The dynamic configurator for your Python Project",
50 long_description=read("README.md"),
51 long_description_content_type="text/markdown",
52 packages=find_packages(
53 exclude=[
54 "tests",
55 "tests.*",
56 "tests_functional",
57 "tests_functional.*",
58 "docs",
59 "legacy_docs",
60 "legacy_docs.*",
61 "docs.*",
62 "build",
63 "build.*",
64 "dynaconf.vendor_src",
65 "dynaconf/vendor_src",
66 "dynaconf.vendor_src.*",
67 "dynaconf/vendor_src/*",
68 ]
69 ),
70 include_package_data=True,
71 zip_safe=False,
72 platforms="any",
73 tests_require=test_requirements,
74 extras_require={
75 "redis": ["redis"],
76 "vault": ["hvac"],
77 "yaml": ["ruamel.yaml"],
78 "toml": ["toml"],
79 "ini": ["configobj"],
80 "configobj": ["configobj"],
81 "all": ["redis", "ruamel.yaml", "configobj", "hvac"],
82 "test": test_requirements,
83 },
84 python_requires=">=3.8",
85 entry_points={"console_scripts": ["dynaconf=dynaconf.cli:main"]},
86 setup_requires=["setuptools>=38.6.0"],
87 classifiers=[
88 "Development Status :: 5 - Production/Stable",
89 "Framework :: Django",
90 "Framework :: Flask",
91 "Intended Audience :: Developers",
92 "License :: OSI Approved :: MIT License",
93 "Natural Language :: English",
94 "Operating System :: OS Independent",
95 "Programming Language :: Python",
96 "Programming Language :: Python :: 3",
97 "Programming Language :: Python :: 3 :: Only",
98 "Programming Language :: Python :: 3.8",
99 "Programming Language :: Python :: 3.9",
100 "Programming Language :: Python :: 3.10",
101 "Programming Language :: Python :: 3.11",
102 "Topic :: Utilities",
103 "Topic :: Software Development :: Libraries",
104 "Topic :: Software Development :: Libraries :: Python Modules",
105 ],
106 )
107
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
"python-dotenv",
"toml",
"redis",
- "hvac",
+ "hvac>=1.1.0",
"configobj",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n \"python-dotenv\",\n \"toml\",\n \"redis\",\n- \"hvac\",\n+ \"hvac>=1.1.0\",\n \"configobj\",\n ]\n", "issue": "[bug] TypeError for older versions of HVAC in read_secret_version method\n**Describe the bug**\r\nA combination of newer versions of Dynaconf with older versions of HVAC result in an incompatible mix of expected vs available arguments. Specifically you can get the following traceback.\r\n\r\n```python\r\n 109 try:\r\n 110 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\r\n--> 111 data = client.secrets.kv.v2.read_secret_version(\r\n 112 path,\r\n 113 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\r\n 114 raise_on_deleted_version=True, # keep default behavior\r\n 115 )\r\n 116 else:\r\n 117 data = client.secrets.kv.read_secret(\r\n 118 \"data/\" + path,\r\n 119 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\r\n 120 )\r\n\r\nTypeError: KvV2.read_secret_version() got an unexpected keyword argument 'raise_on_deleted_version'\r\n```\r\n\r\nThe PR introducing this feature was included in HVAC 1.1.0: https://github.com/hvac/hvac/pull/907 \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Have a version of HVAC older than 1.1.0\r\n2. Trigger a vault version read\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef read(*names, **kwargs):\n \"\"\"Read a file.\"\"\"\n content = \"\"\n with open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as open_file:\n content = open_file.read().strip()\n return content\n\n\ntest_requirements = [\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-mock\",\n \"flake8\",\n \"pep8-naming\",\n \"flake8-debugger\",\n \"flake8-print\",\n \"flake8-todo\",\n \"radon\",\n \"flask>=0.12\",\n \"django\",\n \"python-dotenv\",\n \"toml\",\n \"redis\",\n \"hvac\",\n \"configobj\",\n]\n\n\nsetup(\n name=\"dynaconf\",\n version=read(\"dynaconf\", \"VERSION\"),\n url=\"https://github.com/dynaconf/dynaconf\",\n license=\"MIT\",\n license_files=[\"LICENSE\", \"vendor_licenses/*\"],\n author=\"Bruno Rocha\",\n author_email=\"[email protected]\",\n description=\"The dynamic configurator for your Python Project\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\n \"tests\",\n \"tests.*\",\n \"tests_functional\",\n \"tests_functional.*\",\n \"docs\",\n \"legacy_docs\",\n \"legacy_docs.*\",\n \"docs.*\",\n \"build\",\n \"build.*\",\n \"dynaconf.vendor_src\",\n \"dynaconf/vendor_src\",\n \"dynaconf.vendor_src.*\",\n \"dynaconf/vendor_src/*\",\n ]\n ),\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n tests_require=test_requirements,\n extras_require={\n \"redis\": [\"redis\"],\n \"vault\": [\"hvac\"],\n \"yaml\": [\"ruamel.yaml\"],\n \"toml\": [\"toml\"],\n \"ini\": [\"configobj\"],\n \"configobj\": [\"configobj\"],\n \"all\": [\"redis\", \"ruamel.yaml\", \"configobj\", \"hvac\"],\n \"test\": test_requirements,\n },\n python_requires=\">=3.8\",\n entry_points={\"console_scripts\": [\"dynaconf=dynaconf.cli:main\"]},\n setup_requires=[\"setuptools>=38.6.0\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Django\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural 
Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Utilities\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]} | 1,770 | 72 |
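The merged change simply pins `hvac>=1.1.0`. The other option would have been a runtime guard around the new keyword argument; a sketch of that approach, where `read_secret_compat` is a made-up helper name and `importlib.metadata` plus `packaging` do the version check:

```python
from importlib.metadata import version as installed_version

from packaging.version import Version


def read_secret_compat(client, path, mount_point):
    # Only pass raise_on_deleted_version where hvac understands it (>= 1.1.0).
    kwargs = {"mount_point": mount_point}
    if Version(installed_version("hvac")) >= Version("1.1.0"):
        kwargs["raise_on_deleted_version"] = True  # keep the newer default behavior
    return client.secrets.kv.v2.read_secret_version(path, **kwargs)
```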
gh_patches_debug_23445 | rasdani/github-patches | git_diff | liqd__a4-opin-689 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invite: email address should be independent of letter case
After testing invites for private projects a lot with AEGEE, I finally found out what their problem was. When they invite users, the autocorrect on their Android tablet capitalizes the first letter of email addresses. The users they wanted to invite had their email addresses written in lowercase letters, though. OPIN did not recognize them as the same users. We should change this behaviour ASAP. It should not matter anywhere whether a user inputs email addresses in lowercase or uppercase letters.
</issue>
<code>
[start of euth/memberships/views.py]
1 from django.http import Http404
2 from django.shortcuts import redirect
3 from django.views import generic
4 from rules.compat import access_mixins as mixin
5
6 from adhocracy4.projects import models as prj_models
7 from adhocracy4.projects import views as prj_views
8
9 from . import forms, models
10
11
12 class RequestsProjectDetailView(prj_views.ProjectDetailView):
13
14 def handle_no_permission(self):
15 """
16 Check if user clould join
17 """
18 user = self.request.user
19 is_member = user.is_authenticated() and self.project.has_member(user)
20
21 if is_member:
22 return super().handle_no_permission()
23 else:
24 return self.handle_no_membership()
25
26 def handle_no_membership(self):
27 membership_impossible = (
28 not self.request.user.is_authenticated()
29 or self.project.is_draft
30 or self.project.has_member(self.request.user)
31 )
32
33 if membership_impossible:
34 return super().handle_no_permission()
35 else:
36 return redirect('memberships-request',
37 project_slug=self.project.slug)
38
39
40 class InviteView(mixin.LoginRequiredMixin, generic.UpdateView):
41 model = models.Invite
42 form_class = forms.InviteForm
43 slug_field = 'token'
44 slug_url_kwarg = 'invite_token'
45
46 def get_form_kwargs(self):
47 kwargs = super().get_form_kwargs()
48 kwargs.update({'user': self.request.user})
49 return kwargs
50
51 def form_valid(self, form):
52 if form.is_accepted():
53 form.instance.accept(self.request.user)
54 return redirect(form.instance.project.get_absolute_url())
55 else:
56 form.instance.reject()
57 return redirect('/')
58
59
60 class RequestView(mixin.LoginRequiredMixin, generic.DetailView):
61 """
62 Displays membership request if it exists or allows to create one.
63 """
64 model = models.Request
65 slug_field = 'project__slug'
66 slug_url_kwarg = 'project_slug'
67 context_object_name = 'join_request'
68
69 def get_queryset(self):
70 return self.model.objects.filter(creator=self.request.user)
71
72 def get(self, request, *args, **kwargs):
73 if self.project.has_member(request.user):
74 return redirect(self.project.get_absolute_url())
75 else:
76 return super().get(request, *args, **kwargs)
77
78 def post(self, request, *args, **kwargs):
79 user = request.user
80 project = self.project
81 models.Request.objects.request_membership(project, user)
82 return redirect(self.request.path)
83
84 def get_object(self, queryset=None):
85 try:
86 return super().get_object(queryset)
87 except Http404:
88 return None
89
90 @property
91 def project(self):
92 project_slug = self.kwargs[self.slug_url_kwarg]
93 return prj_models.Project.objects.get(slug=project_slug)
94
[end of euth/memberships/views.py]
[start of euth/memberships/forms.py]
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from . import models
5
6
7 class InviteForm(forms.ModelForm):
8 accept = forms.CharField(required=False)
9 reject = forms.CharField(required=False)
10
11 class Meta:
12 model = models.Invite
13 fields = ['accept', 'reject']
14
15 def __init__(self, user=None, **kwargs):
16 super().__init__(**kwargs)
17 self.user = user
18
19 def clean(self):
20 data = self.data
21 if 'accept' not in data and 'reject' not in data:
22 raise ValidationError('Reject or accept')
23 if 'accept' in data and not self.user.email == self.instance.email:
24 raise ValidationError('This user has another email address than '
25 'the one that received the invitation.')
26 return data
27
28 def is_accepted(self):
29 data = self.data
30 return 'accept' in data and 'reject' not in data
31
[end of euth/memberships/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py
--- a/euth/memberships/forms.py
+++ b/euth/memberships/forms.py
@@ -12,17 +12,10 @@
model = models.Invite
fields = ['accept', 'reject']
- def __init__(self, user=None, **kwargs):
- super().__init__(**kwargs)
- self.user = user
-
def clean(self):
data = self.data
if 'accept' not in data and 'reject' not in data:
raise ValidationError('Reject or accept')
- if 'accept' in data and not self.user.email == self.instance.email:
- raise ValidationError('This user has another email address than '
- 'the one that received the invitation.')
return data
def is_accepted(self):
diff --git a/euth/memberships/views.py b/euth/memberships/views.py
--- a/euth/memberships/views.py
+++ b/euth/memberships/views.py
@@ -43,11 +43,6 @@
slug_field = 'token'
slug_url_kwarg = 'invite_token'
- def get_form_kwargs(self):
- kwargs = super().get_form_kwargs()
- kwargs.update({'user': self.request.user})
- return kwargs
-
def form_valid(self, form):
if form.is_accepted():
form.instance.accept(self.request.user)
| {"golden_diff": "diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py\n--- a/euth/memberships/forms.py\n+++ b/euth/memberships/forms.py\n@@ -12,17 +12,10 @@\n model = models.Invite\n fields = ['accept', 'reject']\n \n- def __init__(self, user=None, **kwargs):\n- super().__init__(**kwargs)\n- self.user = user\n-\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n- if 'accept' in data and not self.user.email == self.instance.email:\n- raise ValidationError('This user has another email address than '\n- 'the one that received the invitation.')\n return data\n \n def is_accepted(self):\ndiff --git a/euth/memberships/views.py b/euth/memberships/views.py\n--- a/euth/memberships/views.py\n+++ b/euth/memberships/views.py\n@@ -43,11 +43,6 @@\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n \n- def get_form_kwargs(self):\n- kwargs = super().get_form_kwargs()\n- kwargs.update({'user': self.request.user})\n- return kwargs\n-\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n", "issue": "Invite: email address should be independent of letter case \nAfter testing invites for private projects a lot with AEGEE, I finally found out what their problem was. When they invite users, the auto correct on their Android tablet lets email addresses start with an uppercase letter. The users they wanted to invite had their email address written in lowercase letters though. OPIN did not recognize them as the same users. We should change this behaviour ASAP. It should not matter anywhere whether a user inputs email addresses in lower or uppercase letters.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . 
import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def __init__(self, user=None, **kwargs):\n super().__init__(**kwargs)\n self.user = user\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n if 'accept' in data and not self.user.email == self.instance.email:\n raise ValidationError('This user has another email address than '\n 'the one that received the invitation.')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}]} | 1,675 | 317 |
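Note that the merged patch resolves the mismatch by dropping the email check (and the `user` kwarg) entirely rather than comparing case-insensitively. The narrower fix the issue hints at would look like this; the helper is an illustration only:

```python
def emails_match(a: str, b: str) -> bool:
    # casefold() is the aggressive lowercasing meant for caseless matching;
    # it covers more Unicode edge cases than lower().
    return a.casefold() == b.casefold()


assert emails_match("[email protected]", "[email protected]")
```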
gh_patches_debug_13444 | rasdani/github-patches | git_diff | iterative__dvc-5425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tests: exp executor teardown is flaky on windows
Looks like there is some race condition on Windows that sometimes happens between cleaning up the test `tmp_dir` and cleaning up the experiments executor temp directory (which is placed in `tmp_dir/.dvc/tmp/...`). Maybe it would be better to go back to running experiments in the system `$TEMP` instead of `.dvc/tmp` (for Windows tests only)?
</issue>
<code>
[start of dvc/repo/experiments/executor/local.py]
1 import logging
2 import os
3 import sys
4 from tempfile import TemporaryDirectory
5 from typing import Optional
6
7 from dvc.utils.fs import remove
8
9 from .base import BaseExecutor
10
11 logger = logging.getLogger(__name__)
12
13
14 class BaseLocalExecutor(BaseExecutor):
15 """Base local machine executor."""
16
17 @property
18 def git_url(self) -> str:
19 root_dir = os.path.abspath(self.root_dir)
20 if os.name == "nt":
21 root_dir = root_dir.replace(os.sep, "/")
22 return f"file://{root_dir}"
23
24
25 class TempDirExecutor(BaseLocalExecutor):
26 """Temp directory experiment executor."""
27
28 # Temp dir executors should warn if untracked files exist (to help with
29 # debugging user code), and suppress other DVC hints (like `git add`
30 # suggestions) that are not applicable outside of workspace runs
31 WARN_UNTRACKED = True
32 QUIET = True
33
34 def __init__(
35 self,
36 *args,
37 tmp_dir: Optional[str] = None,
38 cache_dir: Optional[str] = None,
39 **kwargs,
40 ):
41 self._tmp_dir = TemporaryDirectory(dir=tmp_dir)
42 kwargs["root_dir"] = self._tmp_dir.name
43 super().__init__(*args, **kwargs)
44 if cache_dir:
45 self._config(cache_dir)
46 logger.debug(
47 "Init temp dir executor in dir '%s'", self._tmp_dir,
48 )
49
50 def _config(self, cache_dir):
51 local_config = os.path.join(self.dvc_dir, "config.local")
52 logger.debug("Writing experiments local config '%s'", local_config)
53 with open(local_config, "w") as fobj:
54 fobj.write(f"[cache]\n dir = {cache_dir}")
55
56 def cleanup(self):
57 super().cleanup()
58 logger.debug("Removing tmpdir '%s'", self._tmp_dir)
59 try:
60 self._tmp_dir.cleanup()
61 except PermissionError:
62 if os.name == "nt" and sys.version_info < (3, 8):
63 # see https://bugs.python.org/issue26660
64 remove(self._tmp_dir.name)
65 return
66 raise
67
[end of dvc/repo/experiments/executor/local.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/experiments/executor/local.py b/dvc/repo/experiments/executor/local.py
--- a/dvc/repo/experiments/executor/local.py
+++ b/dvc/repo/experiments/executor/local.py
@@ -1,6 +1,5 @@
import logging
import os
-import sys
from tempfile import TemporaryDirectory
from typing import Optional
@@ -56,11 +55,4 @@
def cleanup(self):
super().cleanup()
logger.debug("Removing tmpdir '%s'", self._tmp_dir)
- try:
- self._tmp_dir.cleanup()
- except PermissionError:
- if os.name == "nt" and sys.version_info < (3, 8):
- # see https://bugs.python.org/issue26660
- remove(self._tmp_dir.name)
- return
- raise
+ remove(self._tmp_dir.name)
| {"golden_diff": "diff --git a/dvc/repo/experiments/executor/local.py b/dvc/repo/experiments/executor/local.py\n--- a/dvc/repo/experiments/executor/local.py\n+++ b/dvc/repo/experiments/executor/local.py\n@@ -1,6 +1,5 @@\n import logging\n import os\n-import sys\n from tempfile import TemporaryDirectory\n from typing import Optional\n \n@@ -56,11 +55,4 @@\n def cleanup(self):\n super().cleanup()\n logger.debug(\"Removing tmpdir '%s'\", self._tmp_dir)\n- try:\n- self._tmp_dir.cleanup()\n- except PermissionError:\n- if os.name == \"nt\" and sys.version_info < (3, 8):\n- # see https://bugs.python.org/issue26660\n- remove(self._tmp_dir.name)\n- return\n- raise\n+ remove(self._tmp_dir.name)\n", "issue": "tests: exp executor teardown is flaky on windows\nLooks like there is some race condition on windows that sometimes happens between cleaning up the test `tmp_dir` and cleaning up the experiments executor temp directory (which is placed in `tmp_dir/.dvc/tmp/...`). May be better to go back to running experiments in system `$TEMP` instead of `.dvc/tmp` (for win tests only)?\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom tempfile import TemporaryDirectory\nfrom typing import Optional\n\nfrom dvc.utils.fs import remove\n\nfrom .base import BaseExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseLocalExecutor(BaseExecutor):\n \"\"\"Base local machine executor.\"\"\"\n\n @property\n def git_url(self) -> str:\n root_dir = os.path.abspath(self.root_dir)\n if os.name == \"nt\":\n root_dir = root_dir.replace(os.sep, \"/\")\n return f\"file://{root_dir}\"\n\n\nclass TempDirExecutor(BaseLocalExecutor):\n \"\"\"Temp directory experiment executor.\"\"\"\n\n # Temp dir executors should warn if untracked files exist (to help with\n # debugging user code), and suppress other DVC hints (like `git add`\n # suggestions) that are not applicable outside of workspace runs\n WARN_UNTRACKED = True\n QUIET = True\n\n def __init__(\n self,\n *args,\n tmp_dir: Optional[str] = None,\n cache_dir: Optional[str] = None,\n **kwargs,\n ):\n self._tmp_dir = TemporaryDirectory(dir=tmp_dir)\n kwargs[\"root_dir\"] = self._tmp_dir.name\n super().__init__(*args, **kwargs)\n if cache_dir:\n self._config(cache_dir)\n logger.debug(\n \"Init temp dir executor in dir '%s'\", self._tmp_dir,\n )\n\n def _config(self, cache_dir):\n local_config = os.path.join(self.dvc_dir, \"config.local\")\n logger.debug(\"Writing experiments local config '%s'\", local_config)\n with open(local_config, \"w\") as fobj:\n fobj.write(f\"[cache]\\n dir = {cache_dir}\")\n\n def cleanup(self):\n super().cleanup()\n logger.debug(\"Removing tmpdir '%s'\", self._tmp_dir)\n try:\n self._tmp_dir.cleanup()\n except PermissionError:\n if os.name == \"nt\" and sys.version_info < (3, 8):\n # see https://bugs.python.org/issue26660\n remove(self._tmp_dir.name)\n return\n raise\n", "path": "dvc/repo/experiments/executor/local.py"}]} | 1,216 | 202 |
gh_patches_debug_2966 | rasdani/github-patches | git_diff | ivy-llc__ivy-16518 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
uniform
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/random.py]
1 # global
2
[end of ivy/functional/frontends/paddle/tensor/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/random.py b/ivy/functional/frontends/paddle/tensor/random.py
--- a/ivy/functional/frontends/paddle/tensor/random.py
+++ b/ivy/functional/frontends/paddle/tensor/random.py
@@ -1 +1,15 @@
# global
+import ivy
+from ivy.func_wrapper import with_supported_dtypes
+from ivy.functional.frontends.paddle.func_wrapper import (
+ to_ivy_arrays_and_back,
+)
+
+
+@with_supported_dtypes(
+ {"2.4.2 and below": ("float32", "float64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
+ return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/random.py b/ivy/functional/frontends/paddle/tensor/random.py\n--- a/ivy/functional/frontends/paddle/tensor/random.py\n+++ b/ivy/functional/frontends/paddle/tensor/random.py\n@@ -1 +1,15 @@\n # global\n+import ivy\n+from ivy.func_wrapper import with_supported_dtypes\n+from ivy.functional.frontends.paddle.func_wrapper import (\n+ to_ivy_arrays_and_back,\n+)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n+ return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n", "issue": "uniform\n\n", "before_files": [{"content": "# global\n", "path": "ivy/functional/frontends/paddle/tensor/random.py"}]} | 557 | 213 |
gh_patches_debug_21213 | rasdani/github-patches | git_diff | crytic__slither-2310 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug-Candidate]: --disable-color ignored, printer produces colored outputs
### Describe the issue:
Flag `--disable-color` seems to be ignored and the printer produces colored output with ANSI escape characters, which is not suitable for capturing into plaintext files
```
slither --help
usage: slither target [flag]
Additional options:
...
--disable-color Disable output colorization
```
Workaround: pass the output through the following sed script:
```
slither . --print function-summary 2>&1 | sed 's/\x1b\[[0-9;]*m//g'
```
### Code example to reproduce the issue:
<img width="1192" alt="image" src="https://github.com/crytic/slither/assets/7992612/850e41d6-e60e-4383-bdb4-c6d6a385c320">
### Version:
slither --version
0.10.0
From docker image `ghcr.io/trailofbits/eth-security-toolbox:nightly`
### Relevant log output:
_No response_
</issue>
<code>
[start of slither/utils/myprettytable.py]
1 from typing import List, Dict, Union
2
3 from prettytable.colortable import ColorTable, Themes
4
5
6 class MyPrettyTable:
7 def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?
8 self._field_names = field_names
9 self._rows: List = []
10 self._options: Dict = {}
11 if pretty_align:
12 self._options["set_alignment"] = []
13 self._options["set_alignment"] += [(field_names[0], "l")]
14 for field_name in field_names[1:]:
15 self._options["set_alignment"] += [(field_name, "r")]
16 else:
17 self._options["set_alignment"] = []
18
19 def add_row(self, row: List[Union[str, List[str]]]) -> None:
20 self._rows.append(row)
21
22 def to_pretty_table(self) -> ColorTable:
23 table = ColorTable(self._field_names, theme=Themes.OCEAN)
24 for row in self._rows:
25 table.add_row(row)
26 if len(self._options["set_alignment"]):
27 for column_header, value in self._options["set_alignment"]:
28 table.align[column_header] = value
29 return table
30
31 def to_json(self) -> Dict:
32 return {"fields_names": self._field_names, "rows": self._rows}
33
34 def __str__(self) -> str:
35 return str(self.to_pretty_table())
36
37
38 # UTILITY FUNCTIONS
39
40
41 def make_pretty_table(
42 headers: list, body: dict, totals: bool = False, total_header="TOTAL"
43 ) -> MyPrettyTable:
44 """
45 Converts a dict to a MyPrettyTable. Dict keys are the row headers.
46 Args:
47 headers: str[] of column names
48 body: dict of row headers with a dict of the values
49 totals: bool optional add Totals row
50 total_header: str optional if totals is set to True this will override the default "TOTAL" header
51 Returns:
52 MyPrettyTable
53 """
54 table = MyPrettyTable(headers)
55 for row in body:
56 table_row = [row] + [body[row][key] for key in headers[1:]]
57 table.add_row(table_row)
58 if totals:
59 table.add_row(
60 [total_header] + [sum([body[row][key] for row in body]) for key in headers[1:]]
61 )
62 return table
63
[end of slither/utils/myprettytable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/utils/myprettytable.py b/slither/utils/myprettytable.py
--- a/slither/utils/myprettytable.py
+++ b/slither/utils/myprettytable.py
@@ -1,7 +1,10 @@
from typing import List, Dict, Union
+from prettytable import PrettyTable
from prettytable.colortable import ColorTable, Themes
+from slither.utils.colors import Colors
+
class MyPrettyTable:
def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?
@@ -19,8 +22,12 @@
def add_row(self, row: List[Union[str, List[str]]]) -> None:
self._rows.append(row)
- def to_pretty_table(self) -> ColorTable:
- table = ColorTable(self._field_names, theme=Themes.OCEAN)
+ def to_pretty_table(self) -> PrettyTable:
+ if Colors.COLORIZATION_ENABLED:
+ table = ColorTable(self._field_names, theme=Themes.OCEAN)
+ else:
+ table = PrettyTable(self._field_names)
+
for row in self._rows:
table.add_row(row)
if len(self._options["set_alignment"]):
| {"golden_diff": "diff --git a/slither/utils/myprettytable.py b/slither/utils/myprettytable.py\n--- a/slither/utils/myprettytable.py\n+++ b/slither/utils/myprettytable.py\n@@ -1,7 +1,10 @@\n from typing import List, Dict, Union\n \n+from prettytable import PrettyTable\n from prettytable.colortable import ColorTable, Themes\n \n+from slither.utils.colors import Colors\n+\n \n class MyPrettyTable:\n def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?\n@@ -19,8 +22,12 @@\n def add_row(self, row: List[Union[str, List[str]]]) -> None:\n self._rows.append(row)\n \n- def to_pretty_table(self) -> ColorTable:\n- table = ColorTable(self._field_names, theme=Themes.OCEAN)\n+ def to_pretty_table(self) -> PrettyTable:\n+ if Colors.COLORIZATION_ENABLED:\n+ table = ColorTable(self._field_names, theme=Themes.OCEAN)\n+ else:\n+ table = PrettyTable(self._field_names)\n+\n for row in self._rows:\n table.add_row(row)\n if len(self._options[\"set_alignment\"]):\n", "issue": "[Bug-Candidate]: --disable-color ignored, printer produces colored outputs\n### Describe the issue:\n\nFlag `--disable-color` seem to be ignored and printer produces colored output with ASCII escape characters not suitable to capture into plaintext files\r\n\r\n```\r\nslither --help \r\nusage: slither target [flag]\r\nAdditional options:\r\n...\r\n --disable-color Disable output colorization\r\n```\r\n\r\nWorkaround: pass the output through the following sed script:\r\n```\r\nslither . --print function-summary 2>&1 | sed 's/\\x1b\\[[0-9;]*m//g'\r\n```\n\n### Code example to reproduce the issue:\n\n<img width=\"1192\" alt=\"image\" src=\"https://github.com/crytic/slither/assets/7992612/850e41d6-e60e-4383-bdb4-c6d6a385c320\">\r\n\n\n### Version:\n\nslither --version\r\n0.10.0\r\n\r\nFrom docker image `ghcr.io/trailofbits/eth-security-toolbox:nightly`\n\n### Relevant log output:\n\n_No response_\n", "before_files": [{"content": "from typing import List, Dict, Union\n\nfrom prettytable.colortable import ColorTable, Themes\n\n\nclass MyPrettyTable:\n def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?\n self._field_names = field_names\n self._rows: List = []\n self._options: Dict = {}\n if pretty_align:\n self._options[\"set_alignment\"] = []\n self._options[\"set_alignment\"] += [(field_names[0], \"l\")]\n for field_name in field_names[1:]:\n self._options[\"set_alignment\"] += [(field_name, \"r\")]\n else:\n self._options[\"set_alignment\"] = []\n\n def add_row(self, row: List[Union[str, List[str]]]) -> None:\n self._rows.append(row)\n\n def to_pretty_table(self) -> ColorTable:\n table = ColorTable(self._field_names, theme=Themes.OCEAN)\n for row in self._rows:\n table.add_row(row)\n if len(self._options[\"set_alignment\"]):\n for column_header, value in self._options[\"set_alignment\"]:\n table.align[column_header] = value\n return table\n\n def to_json(self) -> Dict:\n return {\"fields_names\": self._field_names, \"rows\": self._rows}\n\n def __str__(self) -> str:\n return str(self.to_pretty_table())\n\n\n# UTILITY FUNCTIONS\n\n\ndef make_pretty_table(\n headers: list, body: dict, totals: bool = False, total_header=\"TOTAL\"\n) -> MyPrettyTable:\n \"\"\"\n Converts a dict to a MyPrettyTable. 
Dict keys are the row headers.\n Args:\n headers: str[] of column names\n body: dict of row headers with a dict of the values\n totals: bool optional add Totals row\n total_header: str optional if totals is set to True this will override the default \"TOTAL\" header\n Returns:\n MyPrettyTable\n \"\"\"\n table = MyPrettyTable(headers)\n for row in body:\n table_row = [row] + [body[row][key] for key in headers[1:]]\n table.add_row(table_row)\n if totals:\n table.add_row(\n [total_header] + [sum([body[row][key] for row in body]) for key in headers[1:]]\n )\n return table\n", "path": "slither/utils/myprettytable.py"}]} | 1,428 | 281 |
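To see the fallback path work, one can flip the flag the patched method reads before building a table. Setting the class attribute directly is for illustration only; slither normally toggles it from the `--disable-color` CLI flag:

```python
from slither.utils.colors import Colors
from slither.utils.myprettytable import MyPrettyTable

Colors.COLORIZATION_ENABLED = False  # what --disable-color should end up doing

table = MyPrettyTable(["Function", "Visibility"])
table.add_row(["transfer", "public"])
print(table)  # plain PrettyTable output, no ANSI escape sequences
```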
gh_patches_debug_13123 | rasdani/github-patches | git_diff | ietf-tools__datatracker-3727 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The non-wg list view contains things that do not belong.
The list contains things that do not belong. For example, 'geopriv' is listed as a non-wg list, but it is a concluded wg. Maybe this should be a separate issue.
_Originally posted by @russhousley in https://github.com/ietf-tools/datatracker/issues/3675#issuecomment-1075013354_
</issue>
<code>
[start of ietf/mailinglists/views.py]
1 # Copyright The IETF Trust 2007, All Rights Reserved
2
3 import re
4
5 from django.shortcuts import render
6
7 import debug # pyflakes:ignore
8
9 from ietf.group.models import Group
10 from ietf.mailinglists.models import List
11
12 def groups(request):
13 groups = Group.objects.filter(type__features__acts_like_wg=True, list_archive__startswith='http').exclude(state__in=('bof', 'conclude')).order_by("acronym")
14
15 return render(request, "mailinglists/group_archives.html", { "groups": groups } )
16
17 def nonwg(request):
18 groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by("acronym")
19
20 #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]
21
22 wg_lists = set()
23 for g in groups:
24 wg_lists.add(g.acronym)
25 match = re.search(r'^(https?://mailarchive.ietf.org/arch/(browse/|search/\?email-list=))(?P<name>[^/]*)/?$', g.list_archive)
26 if match:
27 wg_lists.add(match.group('name').lower())
28
29 lists = List.objects.filter(advertised=True)
30 #debug.show('lists.count()')
31 lists = lists.exclude(name__in=wg_lists).order_by('name')
32 #debug.show('lists.count()')
33 return render(request, "mailinglists/nonwg.html", { "lists": lists } )
34
[end of ietf/mailinglists/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/mailinglists/views.py b/ietf/mailinglists/views.py
--- a/ietf/mailinglists/views.py
+++ b/ietf/mailinglists/views.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2007, All Rights Reserved
+# Copyright The IETF Trust 2007-2022, All Rights Reserved
import re
@@ -15,7 +15,7 @@
return render(request, "mailinglists/group_archives.html", { "groups": groups } )
def nonwg(request):
- groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by("acronym")
+ groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof']).order_by("acronym")
#urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]
| {"golden_diff": "diff --git a/ietf/mailinglists/views.py b/ietf/mailinglists/views.py\n--- a/ietf/mailinglists/views.py\n+++ b/ietf/mailinglists/views.py\n@@ -1,4 +1,4 @@\n-# Copyright The IETF Trust 2007, All Rights Reserved\n+# Copyright The IETF Trust 2007-2022, All Rights Reserved\n \n import re\n \n@@ -15,7 +15,7 @@\n return render(request, \"mailinglists/group_archives.html\", { \"groups\": groups } )\n \n def nonwg(request):\n- groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by(\"acronym\")\n+ groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof']).order_by(\"acronym\")\n \n #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]\n", "issue": "The non-wg list view contains things that do not belong.\nThe list contains things that do not belong. For example, 'geopriv' is listed as a non-wg list, but it is a concluded wg. Maybe this should be a separate issue.\r\n\r\n_Originally posted by @russhousley in https://github.com/ietf-tools/datatracker/issues/3675#issuecomment-1075013354_\n", "before_files": [{"content": "# Copyright The IETF Trust 2007, All Rights Reserved\n\nimport re\n\nfrom django.shortcuts import render\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\nfrom ietf.mailinglists.models import List\n\ndef groups(request):\n groups = Group.objects.filter(type__features__acts_like_wg=True, list_archive__startswith='http').exclude(state__in=('bof', 'conclude')).order_by(\"acronym\")\n\n return render(request, \"mailinglists/group_archives.html\", { \"groups\": groups } )\n\ndef nonwg(request):\n groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by(\"acronym\")\n\n #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]\n\n wg_lists = set()\n for g in groups:\n wg_lists.add(g.acronym)\n match = re.search(r'^(https?://mailarchive.ietf.org/arch/(browse/|search/\\?email-list=))(?P<name>[^/]*)/?$', g.list_archive)\n if match:\n wg_lists.add(match.group('name').lower())\n\n lists = List.objects.filter(advertised=True)\n #debug.show('lists.count()')\n lists = lists.exclude(name__in=wg_lists).order_by('name')\n #debug.show('lists.count()')\n return render(request, \"mailinglists/nonwg.html\", { \"lists\": lists } )\n", "path": "ietf/mailinglists/views.py"}]} | 1,033 | 220 |
gh_patches_debug_1637 | rasdani/github-patches | git_diff | pre-commit__pre-commit-67 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError while instantiating LoggingHandler (2.6)
I assume this is new-style vs old-style classes being grumpy?
```
>>> from pre_commit.logging_handler import LoggingHandler
>>> LoggingHandler(True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../py_env/lib/python2.6/site-packages/pre_commit/logging_handler.py", line 19, in __init__
super(LoggingHandler, self).__init__()
TypeError: super() argument 1 must be type, not classobj
```
</issue>
<code>
[start of pre_commit/logging_handler.py]
1
2 from __future__ import print_function
3
4 import logging
5
6 from pre_commit import color
7
8
9 LOG_LEVEL_COLORS = {
10 'DEBUG': '',
11 'INFO': '',
12 'WARNING': color.YELLOW,
13 'ERROR': color.RED,
14 }
15
16
17 class LoggingHandler(logging.Handler):
18 def __init__(self, use_color):
19 super(LoggingHandler, self).__init__()
20 self.use_color = use_color
21
22 def emit(self, record):
23 print(
24 u'{0}{1}'.format(
25 color.format_color(
26 '[{0}]'.format(record.levelname),
27 LOG_LEVEL_COLORS[record.levelname],
28 self.use_color,
29 ) + ' ' if record.levelno >= logging.WARNING else '',
30 record.getMessage(),
31 )
32 )
33
[end of pre_commit/logging_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/logging_handler.py b/pre_commit/logging_handler.py
--- a/pre_commit/logging_handler.py
+++ b/pre_commit/logging_handler.py
@@ -16,7 +16,7 @@
class LoggingHandler(logging.Handler):
def __init__(self, use_color):
- super(LoggingHandler, self).__init__()
+ logging.Handler.__init__(self)
self.use_color = use_color
def emit(self, record):
| {"golden_diff": "diff --git a/pre_commit/logging_handler.py b/pre_commit/logging_handler.py\n--- a/pre_commit/logging_handler.py\n+++ b/pre_commit/logging_handler.py\n@@ -16,7 +16,7 @@\n \n class LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n- super(LoggingHandler, self).__init__()\n+ logging.Handler.__init__(self)\n self.use_color = use_color\n \n def emit(self, record):\n", "issue": "TypeError while instantiating LoggingHandler (2.6)\nI assume this is new-style vs old-style classes being grumpy?\n\n```\n>>> from pre_commit.logging_handler import LoggingHandler\n>>> LoggingHandler(True)\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \".../py_env/lib/python2.6/site-packages/pre_commit/logging_handler.py\", line 19, in __init__\n super(LoggingHandler, self).__init__()\nTypeError: super() argument 1 must be type, not classobj\n```\n\n", "before_files": [{"content": "\nfrom __future__ import print_function\n\nimport logging\n\nfrom pre_commit import color\n\n\nLOG_LEVEL_COLORS = {\n 'DEBUG': '',\n 'INFO': '',\n 'WARNING': color.YELLOW,\n 'ERROR': color.RED,\n}\n\n\nclass LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n super(LoggingHandler, self).__init__()\n self.use_color = use_color\n\n def emit(self, record):\n print(\n u'{0}{1}'.format(\n color.format_color(\n '[{0}]'.format(record.levelname),\n LOG_LEVEL_COLORS[record.levelname],\n self.use_color,\n ) + ' ' if record.levelno >= logging.WARNING else '',\n record.getMessage(),\n )\n )\n", "path": "pre_commit/logging_handler.py"}]} | 871 | 97 |
gh_patches_debug_22883 | rasdani/github-patches | git_diff | getsentry__sentry-3447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Turn the option system.logging-format into an enum.
</issue>
<code>
[start of src/sentry/logging/__init__.py]
1 """
2 sentry.logging
3 ~~~~~~~~~~~~~~
4 :copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.
5 :license: BSD, see LICENSE for more details.
6 """
7
8 from __future__ import absolute_import
9
[end of src/sentry/logging/__init__.py]
[start of src/sentry/options/defaults.py]
1 """
2 sentry.options.defaults
3 ~~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 from sentry.options import (
11 FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,
12 register,
13 )
14 from sentry.utils.types import Dict, String
15
16 # Cache
17 # register('cache.backend', flags=FLAG_NOSTORE)
18 # register('cache.options', type=Dict, flags=FLAG_NOSTORE)
19
20 # System
21 register('system.admin-email', flags=FLAG_REQUIRED)
22 register('system.databases', type=Dict, flags=FLAG_NOSTORE)
23 # register('system.debug', default=False, flags=FLAG_NOSTORE)
24 register('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
25 register('system.secret-key', flags=FLAG_NOSTORE)
26 # Absolute URL to the sentry root directory. Should not include a trailing slash.
27 register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
28 register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)
29 register('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)
30
31 # Redis
32 register(
33 'redis.clusters',
34 type=Dict,
35 default={
36 'default': {
37 'hosts': {
38 0: {
39 'host': '127.0.0.1',
40 'port': 6379,
41 }
42 },
43 },
44 },
45 flags=FLAG_NOSTORE | FLAG_IMMUTABLE
46 )
47 register('redis.options', type=Dict, flags=FLAG_NOSTORE)
48
49 # symbolizer specifics
50 register('dsym.llvm-symbolizer-path', type=String)
51 register('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')
52
53 # Mail
54 register('mail.backend', default='smtp', flags=FLAG_NOSTORE)
55 register('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
56 register('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
57 register('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
58 register('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
59 register('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
60 register('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)
61 register('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
62 register('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)
63 register('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)
64 register('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
65 register('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
66
67 # SMS
68 register('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
69 register('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
70 register('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
71
[end of src/sentry/options/defaults.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/logging/__init__.py b/src/sentry/logging/__init__.py
--- a/src/sentry/logging/__init__.py
+++ b/src/sentry/logging/__init__.py
@@ -6,3 +6,8 @@
"""
from __future__ import absolute_import
+
+
+class LoggingFormat(object):
+ HUMAN = 'human'
+ MACHINE = 'machine'
diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py
--- a/src/sentry/options/defaults.py
+++ b/src/sentry/options/defaults.py
@@ -7,6 +7,7 @@
"""
from __future__ import absolute_import, print_function
+from sentry.logging import LoggingFormat
from sentry.options import (
FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,
register,
@@ -26,7 +27,7 @@
# Absolute URL to the sentry root directory. Should not include a trailing slash.
register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)
-register('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)
+register('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_PRIORITIZE_DISK)
# Redis
register(
| {"golden_diff": "diff --git a/src/sentry/logging/__init__.py b/src/sentry/logging/__init__.py\n--- a/src/sentry/logging/__init__.py\n+++ b/src/sentry/logging/__init__.py\n@@ -6,3 +6,8 @@\n \"\"\"\n \n from __future__ import absolute_import\n+\n+\n+class LoggingFormat(object):\n+ HUMAN = 'human'\n+ MACHINE = 'machine'\ndiff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py\n--- a/src/sentry/options/defaults.py\n+++ b/src/sentry/options/defaults.py\n@@ -7,6 +7,7 @@\n \"\"\"\n from __future__ import absolute_import, print_function\n \n+from sentry.logging import LoggingFormat\n from sentry.options import (\n FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,\n register,\n@@ -26,7 +27,7 @@\n # Absolute URL to the sentry root directory. Should not include a trailing slash.\n register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\n register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)\n-register('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)\n+register('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_PRIORITIZE_DISK)\n \n # Redis\n register(\n", "issue": "Turn the option system.logging-format into an enum.\n\n", "before_files": [{"content": "\"\"\"\nsentry.logging\n~~~~~~~~~~~~~~\n:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n", "path": "src/sentry/logging/__init__.py"}, {"content": "\"\"\"\nsentry.options.defaults\n~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nfrom sentry.options import (\n FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,\n register,\n)\nfrom sentry.utils.types import Dict, String\n\n# Cache\n# register('cache.backend', flags=FLAG_NOSTORE)\n# register('cache.options', type=Dict, flags=FLAG_NOSTORE)\n\n# System\nregister('system.admin-email', flags=FLAG_REQUIRED)\nregister('system.databases', type=Dict, flags=FLAG_NOSTORE)\n# register('system.debug', default=False, flags=FLAG_NOSTORE)\nregister('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('system.secret-key', flags=FLAG_NOSTORE)\n# Absolute URL to the sentry root directory. 
Should not include a trailing slash.\nregister('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)\nregister('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)\n\n# Redis\nregister(\n 'redis.clusters',\n type=Dict,\n default={\n 'default': {\n 'hosts': {\n 0: {\n 'host': '127.0.0.1',\n 'port': 6379,\n }\n },\n },\n },\n flags=FLAG_NOSTORE | FLAG_IMMUTABLE\n)\nregister('redis.options', type=Dict, flags=FLAG_NOSTORE)\n\n# symbolizer specifics\nregister('dsym.llvm-symbolizer-path', type=String)\nregister('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')\n\n# Mail\nregister('mail.backend', default='smtp', flags=FLAG_NOSTORE)\nregister('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)\nregister('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)\nregister('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)\nregister('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\n\n# SMS\nregister('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\n", "path": "src/sentry/options/defaults.py"}]} | 1,528 | 311 |
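Note that the patch's `LoggingFormat` is not a stdlib `enum.Enum` but a plain namespace class of string constants, so its members still compare equal to the raw strings already stored in config while giving call sites a typo-proof name. A small illustrative sketch of why that property matters (the `pick_formatter` helper is invented for the example):
```
class LoggingFormat(object):
    HUMAN = 'human'
    MACHINE = 'machine'


# Members are ordinary strings, so values persisted before the change
# keep matching after it.
assert LoggingFormat.HUMAN == 'human'


def pick_formatter(fmt):
    # Hypothetical consumer of the option value.
    if fmt == LoggingFormat.MACHINE:
        return 'machine-readable formatter'
    return 'human-readable formatter'


print(pick_formatter(LoggingFormat.HUMAN))
print(pick_formatter('machine'))  # raw config strings still work
```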
gh_patches_debug_46 | rasdani/github-patches | git_diff | archlinux__archinstall-1300 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Archinstall discover shop non-functional.
Hello,
I have installed Arch with archinstall twice now, selected the desktop option and then KDE, but I noticed that by default the "Discover" shop does not function; I have to download the packagekit-qt5 package first, and then it works. Just wanted to let you know.
</issue>
<code>
[start of profiles/kde.py]
1 # A desktop environment using "KDE".
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "plasma-meta",
9 "konsole",
10 "kwrite",
11 "dolphin",
12 "ark",
13 "sddm",
14 "plasma-wayland-session",
15 "egl-wayland",
16 ]
17
18
19 # TODO: Remove hard dependency of bash (due to .bash_profile)
20
21
22 def _prep_function(*args, **kwargs):
23 """
24 Magic function called by the importing installer
25 before continuing any further. It also avoids executing any
26 other code in this stage. So it's a safe way to ask the user
27 for more input before any other installer steps start.
28 """
29
30 # KDE requires a functioning Xorg installation.
31 profile = archinstall.Profile(None, 'xorg')
32 with profile.load_instructions(namespace='xorg.py') as imported:
33 if hasattr(imported, '_prep_function'):
34 return imported._prep_function()
35 else:
36 print('Deprecated (??): xorg profile has no _prep_function() anymore')
37
38
39 """
40 def _post_install(*args, **kwargs):
41 if "nvidia" in _gfx_driver_packages:
42 print("Plasma Wayland has known compatibility issues with the proprietary Nvidia driver")
43 print("After booting, you can choose between Wayland and Xorg using the drop-down menu")
44 return True
45 """
46
47 # Ensures that this code only gets executed if executed
48 # through importlib.util.spec_from_file_location("kde", "/somewhere/kde.py")
49 # or through conventional import kde
50 if __name__ == 'kde':
51 # Install dependency profiles
52 archinstall.storage['installation_session'].install_profile('xorg')
53
54 # Install the KDE packages
55 archinstall.storage['installation_session'].add_additional_packages(__packages__)
56
57 # Enable autostart of KDE for all users
58 archinstall.storage['installation_session'].enable_service('sddm')
59
[end of profiles/kde.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profiles/kde.py b/profiles/kde.py
--- a/profiles/kde.py
+++ b/profiles/kde.py
@@ -13,6 +13,7 @@
"sddm",
"plasma-wayland-session",
"egl-wayland",
+ "packagekit-qt5",
]
| {"golden_diff": "diff --git a/profiles/kde.py b/profiles/kde.py\n--- a/profiles/kde.py\n+++ b/profiles/kde.py\n@@ -13,6 +13,7 @@\n \t\"sddm\",\n \t\"plasma-wayland-session\",\n \t\"egl-wayland\",\n+\t\"packagekit-qt5\",\n ]\n", "issue": "Archinstall discover shop non-functional.\nHello,\r\n\r\nI have installed Arch with archinstall twice now, selected the desktop option then KDE but I noticed that by default the \"Discover\" shop does not want to function I have to download the packagekit-qt5 package then it functions. Just wanted to let you know.\r\n\r\n\nArchinstall discover shop non-functional.\nHello,\r\n\r\nI have installed Arch with archinstall twice now, selected the desktop option then KDE but I noticed that by default the \"Discover\" shop does not want to function I have to download the packagekit-qt5 package then it functions. Just wanted to let you know.\r\n\r\n\n", "before_files": [{"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kwrite\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}]} | 1,211 | 77 |
gh_patches_debug_1815 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-959 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix for checkbox accessibility no longer working
### Current Behavior
Checkboxes in django admin superuser no longer generated with an associated label.
### Expected Behavior
Expect to see accessible checkboxes in django admin, no missing columns in either superuser or staff views.
### Steps to Reproduce
1. Log in as superuser
2. Go to list view on a model
3. Run ANDI or inspect checkboxes
### Environment
_No response_
### Additional Context
Traced this to the fix for missing columns in staff view. The check {% if results.0.form %} did not work and failed silently. Have a fix for this.
Will prioritize implementation and deployment to staging since we have some accessibility testing in progress.
### Issue Links
_No response_
</issue>
<code>
[start of src/registrar/templatetags/custom_filters.py]
1 from django import template
2 import re
3
4 register = template.Library()
5
6
7 @register.filter(name="extract_value")
8 def extract_value(html_input):
9 match = re.search(r'value="([^"]*)"', html_input)
10 if match:
11 return match.group(1)
12 return ""
13
14
15 @register.filter
16 def extract_a_text(value):
17 # Use regex to extract the text within the <a> tag
18 pattern = r"<a\b[^>]*>(.*?)</a>"
19 match = re.search(pattern, value)
20 if match:
21 extracted_text = match.group(1)
22 else:
23 extracted_text = ""
24
25 return extracted_text
26
27
28 @register.filter
29 def find_index(haystack, needle):
30 try:
31 return haystack.index(needle)
32 except ValueError:
33 return -1
34
35
36 @register.filter
37 def slice_after(value, substring):
38 index = value.find(substring)
39 if index != -1:
40 result = value[index + len(substring) :]
41 return result
42 return value
43
[end of src/registrar/templatetags/custom_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py
--- a/src/registrar/templatetags/custom_filters.py
+++ b/src/registrar/templatetags/custom_filters.py
@@ -40,3 +40,11 @@
result = value[index + len(substring) :]
return result
return value
+
+
+@register.filter
+def contains_checkbox(html_list):
+ for html_string in html_list:
+ if re.search(r'<input[^>]*type="checkbox"', html_string):
+ return True
+ return False
| {"golden_diff": "diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py\n--- a/src/registrar/templatetags/custom_filters.py\n+++ b/src/registrar/templatetags/custom_filters.py\n@@ -40,3 +40,11 @@\n result = value[index + len(substring) :]\n return result\n return value\n+\n+\[email protected]\n+def contains_checkbox(html_list):\n+ for html_string in html_list:\n+ if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n+ return True\n+ return False\n", "issue": "Fix for checkbox accessibility no longer working\n### Current Behavior\n\nCheckboxes in django admin superuser no longer generated with an associated label.\n\n### Expected Behavior\n\nExpect to see accessible checkboxes in django admin, no missing columns in either superuser or staff views.\n\n### Steps to Reproduce\n\n1. Log in as superuser\r\n2. Go to list view on a model\r\n3. Run ANDI or inspect checkboxes\r\n\n\n### Environment\n\n_No response_\n\n### Additional Context\n\nTraced this to the fix for missing columns in staff view. The check {% if results.0.form %} did not work and failed silently. Have a fix for this.\r\n\r\nWill prioritize implementation and deployment to staging since we have some accessibility testing in progress.\n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "from django import template\nimport re\n\nregister = template.Library()\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n", "path": "src/registrar/templatetags/custom_filters.py"}]} | 998 | 140 |
gh_patches_debug_2449 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-10168 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PubSub: declaratively drop Python 3.4 support
The README and the language classifiers in `setup.py` both only claim support for Python 3.5+ (and 2.7), but not Python 3.4. However, the `python_requires` in `setup.py` does not reflect that, and does not prevent installing the library in Python 3.4.
</issue>
<code>
[start of pubsub/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-pubsub"
24 description = "Google Cloud Pub/Sub API client library"
25 version = "1.1.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
33 "grpc-google-iam-v1 >= 0.12.3, < 0.13dev",
34 'enum34; python_version < "3.4"',
35 ]
36 extras = {}
37
38
39 # Setup boilerplate below this line.
40
41 package_root = os.path.abspath(os.path.dirname(__file__))
42
43 readme_filename = os.path.join(package_root, "README.rst")
44 with io.open(readme_filename, encoding="utf-8") as readme_file:
45 readme = readme_file.read()
46
47 # Only include packages under the 'google' namespace. Do not include tests,
48 # benchmarks, etc.
49 packages = [
50 package for package in setuptools.find_packages() if package.startswith("google")
51 ]
52
53 # Determine which namespaces are needed.
54 namespaces = ["google"]
55 if "google.cloud" in packages:
56 namespaces.append("google.cloud")
57
58
59 setuptools.setup(
60 name=name,
61 version=version,
62 description=description,
63 long_description=readme,
64 author="Google LLC",
65 author_email="[email protected]",
66 license="Apache 2.0",
67 url="https://github.com/GoogleCloudPlatform/google-cloud-python",
68 classifiers=[
69 release_status,
70 "Intended Audience :: Developers",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.5",
77 "Programming Language :: Python :: 3.6",
78 "Programming Language :: Python :: 3.7",
79 "Operating System :: OS Independent",
80 "Topic :: Internet",
81 ],
82 platforms="Posix; MacOS X; Windows",
83 packages=packages,
84 namespace_packages=namespaces,
85 install_requires=dependencies,
86 extras_require=extras,
87 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
88 include_package_data=True,
89 zip_safe=False,
90 )
91
[end of pubsub/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pubsub/setup.py b/pubsub/setup.py
--- a/pubsub/setup.py
+++ b/pubsub/setup.py
@@ -84,7 +84,7 @@
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
- python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
+ python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
include_package_data=True,
zip_safe=False,
)
| {"golden_diff": "diff --git a/pubsub/setup.py b/pubsub/setup.py\n--- a/pubsub/setup.py\n+++ b/pubsub/setup.py\n@@ -84,7 +84,7 @@\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n- python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n+ python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n include_package_data=True,\n zip_safe=False,\n )\n", "issue": "PubSub: declaratively drop Python 3.4 support\nThe README and the language classifiers in `setup.py` both only claim support for Python 3.5+ (and 2.7), but not Python 3.4. However, the `python_requires` in `setup.py` does not reflect that, and does not prevent installing the library in Python 3.4.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-pubsub\"\ndescription = \"Google Cloud Pub/Sub API client library\"\nversion = \"1.1.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.14.0, < 2.0.0dev\",\n \"grpc-google-iam-v1 >= 0.12.3, < 0.13dev\",\n 'enum34; python_version < \"3.4\"',\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py"}]} | 1,481 | 139 |
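One way to sanity-check a `python_requires` string is to evaluate it with the `packaging` library, which implements the same PEP 440 specifier logic pip uses (this sketch assumes `packaging` is installed):
```
from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*")
new = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*")

for version in ("2.7.18", "3.4.10", "3.7.4"):
    print(version, "old:", version in old, "new:", version in new)
# 3.4.10 satisfies the old specifier but is rejected by the new one,
# which is exactly the declarative drop the issue asks for.
```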
gh_patches_debug_2406 | rasdani/github-patches | git_diff | buildbot__buildbot-3490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnboundLocalError in mq/base.py on master shutdown
Hello,
We're using buildbot in multi-master mode and got this stack trace on one of the masters when shutting it down:
```
2017-07-17 12:33:29+0000 [-] Waiting for 1 build(s) to finish
2017-07-17 12:33:29+0000 [-] Builder <Builder 'u'sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0'' at 140555339856784> has 1 builds running
2017-07-17 12:33:29+0000 [-] Not shutting down, there are 1 builds running
2017-07-17 12:33:29+0000 [-] Trying shutdown sequence again
2017-07-17 12:33:30+0000 [-] <Build sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0 number:32108L results:exception>: stopping build: Master Shutdown 5
2017-07-17 12:33:30+0000 [-] Unhandled error in Deferred:
2017-07-17 12:33:30+0000 [-] Unhandled Error
Traceback (most recent call last):
  File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
    result = g.send(result)
  File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/botmaster.py", line 105, in cleanShutdown
    l.append(build.waitUntilFinished())
  File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/build.py", line 687, in waitUntilFinished
    lambda: self.finished)
  File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
    return _inlineCallbacks(None, gen, Deferred())
  --- <exception caught here> ---
  File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
    result = g.send(result)
  File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/mq/base.py", line 40, in waitUntilEvent
    defer.returnValue(res)
exceptions.UnboundLocalError: local variable 'res' referenced before assignment
```
Looking at the code at the end of `waitUntilEvent()`:
```
if not check:
    res = yield d
yield buildCompleteConsumer.stopConsuming
defer.returnValue(res)
```
If the check returned true, we skip the `if not check:` branch (the only place `res` is assigned) and try to return a value (`res`) that was never defined.
</issue>
<code>
[start of master/buildbot/mq/base.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18
19 from twisted.internet import defer
20 from twisted.python import failure
21 from twisted.python import log
22
23 from buildbot.util import service
24
25
26 class MQBase(service.AsyncService):
27 name = 'mq-implementation'
28
29 @defer.inlineCallbacks
30 def waitUntilEvent(self, filter, check_callback):
31 d = defer.Deferred()
32 buildCompleteConsumer = yield self.startConsuming(
33 lambda key, value: d.callback((key, value)),
34 filter)
35 check = yield check_callback()
36 # we only wait if the check callback return true
37 if not check:
38 res = yield d
39 yield buildCompleteConsumer.stopConsuming
40 defer.returnValue(res)
41
42
43 class QueueRef(object):
44
45 __slots__ = ['callback']
46
47 def __init__(self, callback):
48 self.callback = callback
49
50 def invoke(self, routing_key, data):
51 if not self.callback:
52 return
53
54 try:
55 x = self.callback(routing_key, data)
56 except Exception:
57 log.err(failure.Failure(), 'while invoking %r' % (self.callback,))
58 return
59 if isinstance(x, defer.Deferred):
60 x.addErrback(log.err, 'while invoking %r' % (self.callback,))
61
62 def stopConsuming(self):
63 # subclasses should set self.callback to None in this method
64 raise NotImplementedError
65
[end of master/buildbot/mq/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/mq/base.py b/master/buildbot/mq/base.py
--- a/master/buildbot/mq/base.py
+++ b/master/buildbot/mq/base.py
@@ -36,7 +36,9 @@
# we only wait if the check callback return true
if not check:
res = yield d
- yield buildCompleteConsumer.stopConsuming
+ else:
+ res = None
+ yield buildCompleteConsumer.stopConsuming()
defer.returnValue(res)
| {"golden_diff": "diff --git a/master/buildbot/mq/base.py b/master/buildbot/mq/base.py\n--- a/master/buildbot/mq/base.py\n+++ b/master/buildbot/mq/base.py\n@@ -36,7 +36,9 @@\n # we only wait if the check callback return true\n if not check:\n res = yield d\n- yield buildCompleteConsumer.stopConsuming\n+ else:\n+ res = None\n+ yield buildCompleteConsumer.stopConsuming()\n defer.returnValue(res)\n", "issue": "UnboundLocalError in mq/base.py on master shutdown\nHello,\r\n\r\nWe're using buildbot in multi-master mode and got this stacktrace on one of the master when shutting it down:\r\n```\r\n2017-07-17 12:33:29+0000 [-] Waiting for 1 build(s) to finish\r\n2017-07-17 12:33:29+0000 [-] Builder <Builder 'u'sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0'' at 140555339856784> has 1 builds running\r\n2017-07-17 12:33:29+0000 [-] Not shutting down, there are 1 builds running\r\n2017-07-17 12:33:29+0000 [-] Trying shutdown sequence again\r\n2017-07-17 12:33:30+0000 [-] <Build sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0 number:32108L results:exception>: stopping build: Master Shutdown 5\r\n2017-07-17 12:33:30+0000 [-] Unhandled error in Deferred:\r\n2017-07-17 12:33:30+0000 [-] Unhandled Error\r\nTraceback (most recent call last):\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1299, in _inlineCallbacks\r\nresult = g.send(result)\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/botmaster.py\", line 105, in cleanShutdown\r\nl.append(build.waitUntilFinished())\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/build.py\", line 687, in waitUntilFinished\r\nlambda: self.finished)\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1445, in unwindGenerator\r\nreturn _inlineCallbacks(None, gen, Deferred())\r\n\u2014 <exception caught here> \u2014\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1299, in _inlineCallbacks\r\nresult = g.send(result)\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/mq/base.py\", line 40, in waitUntilEvent\r\ndefer.returnValue(res)\r\nexceptions.UnboundLocalError: local variable 'res' referenced before assignment\r\n```\r\nLooking at the code at the end of `waitUntilEvent()`:\r\n```\r\n if not check:\r\n res = yield d\r\n yield buildCompleteConsumer.stopConsuming\r\n defer.returnValue(res)\r\n```\r\n\r\nIf the check returned false, we try to return a value (`res`) that was never defined.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.python import failure\nfrom twisted.python import log\n\nfrom buildbot.util import service\n\n\nclass MQBase(service.AsyncService):\n name = 'mq-implementation'\n\n @defer.inlineCallbacks\n def waitUntilEvent(self, filter, check_callback):\n d = defer.Deferred()\n buildCompleteConsumer = yield self.startConsuming(\n lambda key, value: d.callback((key, value)),\n filter)\n check = yield check_callback()\n # we only wait if the check callback return true\n if not check:\n res = yield d\n yield buildCompleteConsumer.stopConsuming\n defer.returnValue(res)\n\n\nclass QueueRef(object):\n\n __slots__ = ['callback']\n\n def __init__(self, callback):\n self.callback = callback\n\n def invoke(self, routing_key, data):\n if not self.callback:\n return\n\n try:\n x = self.callback(routing_key, data)\n except Exception:\n log.err(failure.Failure(), 'while invoking %r' % (self.callback,))\n return\n if isinstance(x, defer.Deferred):\n x.addErrback(log.err, 'while invoking %r' % (self.callback,))\n\n def stopConsuming(self):\n # subclasses should set self.callback to None in this method\n raise NotImplementedError\n", "path": "master/buildbot/mq/base.py"}]} | 1,845 | 111 |
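The golden diff above actually repairs two defects at once: `res` now gets a value on the `else` branch, and `stopConsuming` is finally called rather than merely referenced (the original line yielded the bound method object itself). The `UnboundLocalError` part reduces to a plain Python pattern that can be reproduced without Twisted; a minimal sketch:
```
def broken(check):
    if not check:
        res = "event payload"
    return res  # UnboundLocalError when check is truthy


def fixed(check):
    if not check:
        res = "event payload"
    else:
        res = None  # mirrors the else branch added by the patch
    return res


print(fixed(True))  # None
try:
    broken(True)
except UnboundLocalError as exc:
    print("reproduced:", exc)
```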