problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-9.01k | golden_diff stringlengths 151-4.94k | verification_info stringlengths 465-11.3k | num_tokens_prompt int64 557-2.05k | num_tokens_diff int64 48-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_15901 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
build local docker image using docker python api
for the edl client to build docker image locally, we need a wrapper for docker python sdk: https://pypi.org/project/docker firstly.
</issue>
<code>
[start of elasticdl/client/client.py]
1 import os
2 import inspect
3 import shutil
4 import time
5 import getpass
6 from string import Template
7
8 def run(model_class, train_data_dir=None,
9 num_epoch=1, minibatch_size=10,
10 record_per_task=100, num_worker=1, grads_to_wait=2):
11 m_path, m_file = _getModelFile()
12 m_file_in_docker = "/model/" + m_file
13 timestamp = int(round(time.time() * 1000))
14 _build_docker_image(m_path, m_file, m_file_in_docker, timestamp)
15 yaml_file = _generate_yaml(m_file_in_docker, model_class.__name__, train_data_dir=train_data_dir,
16 num_epoch=num_epoch, minibatch_size=minibatch_size,
17 record_per_task=record_per_task, num_worker=num_worker,
18 grads_to_wait=grads_to_wait, timestamp=timestamp)
19 _submit(yaml_file)
20
21 def _getModelFile():
22 m_file = inspect.currentframe().f_back.f_back.f_code.co_filename
23 m_path = os.path.abspath(os.path.dirname(m_file))
24 return m_path, m_file
25
26 def _build_docker_image(m_path, m_file, m_file_in_docker, timestamp):
27 d_path = os.path.abspath(os.path.dirname(
28 inspect.currentframe().f_back.f_code.co_filename))
29 new_dfile = m_path + "/Dockerfile"
30 shutil.copyfile(d_path + "/../Dockerfile.dev", new_dfile)
31
32 with open(new_dfile, 'a') as df:
33 df.write("COPY " + m_file + " " + m_file_in_docker)
34 val = os.system('docker build -t elasticdl:dev_' + str(timestamp) + ' -f Dockerfile .')
35
36 # TODO: upload docker image to docker hub.
37
38 def _generate_yaml(m_file, m_class,
39 train_data_dir=None, num_epoch=1,
40 minibatch_size=10, record_per_task=100,
41 num_worker=1, grads_to_wait=2, timestamp=1):
42 YAML_TEMPLATE = """
43 apiVersion: v1
44 kind: Pod
45 metadata:
46 name: elasticdl-master-$timestamp
47 labels:
48 purpose: test-command
49 spec:
50 containers:
51 - name: elasticdl-master-$timestamp
52 image: elasticdl:dev_$timestamp
53 command: ["python"]
54 args: ["-m", "elasticdl.master.main",
55 "--model-file", "$m_file",
56 "--num_worker", "$num_worker",
57 "--worker_image", "elasticdl:dev_$timestamp",
58 "--job_name", "elasticdl-$timestamp",
59 "--model-class", "$m_class",
60 "--train_data_dir", "$train_data_dir",
61 "--num_epoch", "$num_epoch",
62 "--grads_to_wait", "$grads_to_wait",
63 "--minibatch_size", "$minibatch_size",
64 "--record_per_task", "$record_per_task"]
65 imagePullPolicy: Never
66 env:
67 - name: MY_POD_IP
68 valueFrom:
69 fieldRef:
70 fieldPath: status.podIP
71 restartPolicy: Never
72 """
73 t = Template(YAML_TEMPLATE)
74 yaml_file = 'job_desc.yaml'
75 with open(yaml_file, "w") as yaml:
76 yaml.write(t.substitute(m_file=m_file, m_class=m_class,
77 train_data_dir=train_data_dir,
78 timestamp=timestamp, num_worker=num_worker, num_epoch=num_epoch,
79 minibatch_size=minibatch_size, record_per_task=record_per_task,
80 user=getpass.getuser(), grads_to_wait=grads_to_wait))
81 return yaml_file
82
83 def _submit(yaml_file):
84 os.system('kubectl create -f ' + yaml_file)
85
[end of elasticdl/client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py
--- a/elasticdl/client/client.py
+++ b/elasticdl/client/client.py
@@ -4,6 +4,8 @@
import time
import getpass
from string import Template
+import docker
+
def run(model_class, train_data_dir=None,
num_epoch=1, minibatch_size=10,
@@ -31,7 +33,9 @@
with open(new_dfile, 'a') as df:
df.write("COPY " + m_file + " " + m_file_in_docker)
- val = os.system('docker build -t elasticdl:dev_' + str(timestamp) + ' -f Dockerfile .')
+ client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ for line in client.build(dockerfile='Dockerfile', path='.', tag='elasticdl:dev_' + str(timestamp)):
+ print(str(line, encoding = "utf-8"))
# TODO: upload docker image to docker hub.
| {"golden_diff": "diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py\n--- a/elasticdl/client/client.py\n+++ b/elasticdl/client/client.py\n@@ -4,6 +4,8 @@\n import time\n import getpass\n from string import Template\n+import docker\n+\n \n def run(model_class, train_data_dir=None, \n num_epoch=1, minibatch_size=10, \n@@ -31,7 +33,9 @@\n \n with open(new_dfile, 'a') as df:\n df.write(\"COPY \" + m_file + \" \" + m_file_in_docker)\n- val = os.system('docker build -t elasticdl:dev_' + str(timestamp) + ' -f Dockerfile .')\n+ client = docker.APIClient(base_url='unix://var/run/docker.sock') \n+ for line in client.build(dockerfile='Dockerfile', path='.', tag='elasticdl:dev_' + str(timestamp)):\n+ print(str(line, encoding = \"utf-8\"))\n \n # TODO: upload docker image to docker hub.\n", "issue": "build local docker image using docker python api\nfor the edl client to build docker image locally, we need a wrapper for docker python sdk: https://pypi.org/project/docker firstly.\n", "before_files": [{"content": "import os\nimport inspect\nimport shutil\nimport time\nimport getpass\nfrom string import Template\n\ndef run(model_class, train_data_dir=None, \n num_epoch=1, minibatch_size=10, \n record_per_task=100, num_worker=1, grads_to_wait=2):\n m_path, m_file = _getModelFile()\n m_file_in_docker = \"/model/\" + m_file \n timestamp = int(round(time.time() * 1000))\n _build_docker_image(m_path, m_file, m_file_in_docker, timestamp)\n yaml_file = _generate_yaml(m_file_in_docker, model_class.__name__, train_data_dir=train_data_dir, \n num_epoch=num_epoch, minibatch_size=minibatch_size, \n record_per_task=record_per_task, num_worker=num_worker, \n grads_to_wait=grads_to_wait, timestamp=timestamp)\n _submit(yaml_file)\n\ndef _getModelFile():\n m_file = inspect.currentframe().f_back.f_back.f_code.co_filename\n m_path = os.path.abspath(os.path.dirname(m_file))\n return m_path, m_file\n\ndef _build_docker_image(m_path, m_file, m_file_in_docker, timestamp):\n d_path = os.path.abspath(os.path.dirname(\n inspect.currentframe().f_back.f_code.co_filename))\n new_dfile = m_path + \"/Dockerfile\"\n shutil.copyfile(d_path + \"/../Dockerfile.dev\", new_dfile)\n\n with open(new_dfile, 'a') as df:\n df.write(\"COPY \" + m_file + \" \" + m_file_in_docker)\n val = os.system('docker build -t elasticdl:dev_' + str(timestamp) + ' -f Dockerfile .')\n\n # TODO: upload docker image to docker hub.\n\ndef _generate_yaml(m_file, m_class,\n train_data_dir=None, num_epoch=1,\n minibatch_size=10, record_per_task=100, \n num_worker=1, grads_to_wait=2, timestamp=1):\n YAML_TEMPLATE = \"\"\"\n apiVersion: v1\n kind: Pod\n metadata:\n name: elasticdl-master-$timestamp\n labels:\n purpose: test-command\n spec:\n containers:\n - name: elasticdl-master-$timestamp\n image: elasticdl:dev_$timestamp\n command: [\"python\"]\n args: [\"-m\", \"elasticdl.master.main\",\n \"--model-file\", \"$m_file\",\n \"--num_worker\", \"$num_worker\",\n \"--worker_image\", \"elasticdl:dev_$timestamp\",\n \"--job_name\", \"elasticdl-$timestamp\",\n \"--model-class\", \"$m_class\",\n \"--train_data_dir\", \"$train_data_dir\",\n \"--num_epoch\", \"$num_epoch\",\n \"--grads_to_wait\", \"$grads_to_wait\",\n \"--minibatch_size\", \"$minibatch_size\",\n \"--record_per_task\", \"$record_per_task\"]\n imagePullPolicy: Never\n env:\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n restartPolicy: Never\n \"\"\"\n t = Template(YAML_TEMPLATE)\n yaml_file = 'job_desc.yaml'\n with open(yaml_file, \"w\") as yaml:\n 
yaml.write(t.substitute(m_file=m_file, m_class=m_class, \n train_data_dir=train_data_dir, \n timestamp=timestamp, num_worker=num_worker, num_epoch=num_epoch,\n minibatch_size=minibatch_size, record_per_task=record_per_task,\n user=getpass.getuser(), grads_to_wait=grads_to_wait))\n return yaml_file\n\ndef _submit(yaml_file):\n os.system('kubectl create -f ' + yaml_file)\n", "path": "elasticdl/client/client.py"}]} | 1,551 | 235 |
gh_patches_debug_34012 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-545 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Why Levenshtein Corrector make strange inserts inplace of punctuation marks?
```
from deeppavlov.deep import find_config, deep_download
from deeppavlov.core.commands.infer import build_model_from_config
config = find_config('levenshtein_corrector_ru')
deep_download(config)
model = build_model_from_config(config)
print(model(['Сегодня.']))
print(model(['в 3 . Сегодня.']))
```
Gives me
> ['сегодня в']
> ['в 3 и сегодня и']
There are strange "." --> "в" and "." --> "и" inserts.
</issue>
<code>
[start of deeppavlov/models/spelling_correction/levenshtein/searcher_component.py]
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from math import log10
16 from typing import Iterable, List, Tuple
17
18 from deeppavlov.core.common.registry import register
19 from deeppavlov.core.models.component import Component
20 from deeppavlov.core.common.log import get_logger
21
22 from .levenshtein_searcher import LevenshteinSearcher
23
24
25 logger = get_logger(__name__)
26
27
28 @register('spelling_levenshtein')
29 class LevenshteinSearcherComponent(Component):
30 """Component that finds replacement candidates for tokens at a set Damerau-Levenshtein distance
31
32 Args:
33 words: list of every correct word
34 max_distance: maximum allowed Damerau-Levenshtein distance between source words and candidates
35 error_probability: assigned probability for every edit
36
37 Attributes:
38 max_distance: maximum allowed Damerau-Levenshtein distance between source words and candidates
39 error_probability: assigned logarithmic probability for every edit
40 vocab_penalty: assigned logarithmic probability of an out of vocabulary token being the correct one without
41 changes
42 """
43
44 def __init__(self, words: Iterable[str], max_distance: int=1, error_probability: float=1e-4, *args, **kwargs):
45 words = list({word.strip().lower().replace('ё', 'е') for word in words})
46 alphabet = sorted({letter for word in words for letter in word})
47 self.max_distance = max_distance
48 self.error_probability = log10(error_probability)
49 self.vocab_penalty = self.error_probability * 2
50 self.searcher = LevenshteinSearcher(alphabet, words, allow_spaces=True, euristics=2)
51
52 def _infer_instance(self, tokens: Iterable[str]) -> List[List[Tuple[float, str]]]:
53 candidates = []
54 for word in tokens:
55 c = {candidate: self.error_probability * distance
56 for candidate, distance in self.searcher.search(word, d=self.max_distance)}
57 c[word] = c.get(word, self.vocab_penalty)
58 candidates.append([(score, candidate) for candidate, score in c.items()])
59 return candidates
60
61 def __call__(self, batch: Iterable[Iterable[str]], *args, **kwargs) -> List[List[List[Tuple[float, str]]]]:
62 """Propose candidates for tokens in sentences
63
64 Args:
65 batch: batch of tokenized sentences
66
67 Returns:
68 batch of lists of probabilities and candidates for every token
69 """
70 return [self._infer_instance(tokens) for tokens in batch]
71
[end of deeppavlov/models/spelling_correction/levenshtein/searcher_component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py b/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py
--- a/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py
+++ b/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import string
from math import log10
from typing import Iterable, List, Tuple
@@ -41,6 +41,8 @@
changes
"""
+ _punctuation = frozenset(string.punctuation)
+
def __init__(self, words: Iterable[str], max_distance: int=1, error_probability: float=1e-4, *args, **kwargs):
words = list({word.strip().lower().replace('ё', 'е') for word in words})
alphabet = sorted({letter for word in words for letter in word})
@@ -52,10 +54,13 @@
def _infer_instance(self, tokens: Iterable[str]) -> List[List[Tuple[float, str]]]:
candidates = []
for word in tokens:
- c = {candidate: self.error_probability * distance
- for candidate, distance in self.searcher.search(word, d=self.max_distance)}
- c[word] = c.get(word, self.vocab_penalty)
- candidates.append([(score, candidate) for candidate, score in c.items()])
+ if word in self._punctuation:
+ candidates.append([(0, word)])
+ else:
+ c = {candidate: self.error_probability * distance
+ for candidate, distance in self.searcher.search(word, d=self.max_distance)}
+ c[word] = c.get(word, self.vocab_penalty)
+ candidates.append([(score, candidate) for candidate, score in c.items()])
return candidates
def __call__(self, batch: Iterable[Iterable[str]], *args, **kwargs) -> List[List[List[Tuple[float, str]]]]:
| {"golden_diff": "diff --git a/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py b/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py\n--- a/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py\n+++ b/deeppavlov/models/spelling_correction/levenshtein/searcher_component.py\n@@ -11,7 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-\n+import string\n from math import log10\n from typing import Iterable, List, Tuple\n \n@@ -41,6 +41,8 @@\n changes\n \"\"\"\n \n+ _punctuation = frozenset(string.punctuation)\n+\n def __init__(self, words: Iterable[str], max_distance: int=1, error_probability: float=1e-4, *args, **kwargs):\n words = list({word.strip().lower().replace('\u0451', '\u0435') for word in words})\n alphabet = sorted({letter for word in words for letter in word})\n@@ -52,10 +54,13 @@\n def _infer_instance(self, tokens: Iterable[str]) -> List[List[Tuple[float, str]]]:\n candidates = []\n for word in tokens:\n- c = {candidate: self.error_probability * distance\n- for candidate, distance in self.searcher.search(word, d=self.max_distance)}\n- c[word] = c.get(word, self.vocab_penalty)\n- candidates.append([(score, candidate) for candidate, score in c.items()])\n+ if word in self._punctuation:\n+ candidates.append([(0, word)])\n+ else:\n+ c = {candidate: self.error_probability * distance\n+ for candidate, distance in self.searcher.search(word, d=self.max_distance)}\n+ c[word] = c.get(word, self.vocab_penalty)\n+ candidates.append([(score, candidate) for candidate, score in c.items()])\n return candidates\n \n def __call__(self, batch: Iterable[Iterable[str]], *args, **kwargs) -> List[List[List[Tuple[float, str]]]]:\n", "issue": "Why Levenshtein Corrector make strange inserts inplace of punctuation marks?\n```\r\nfrom deeppavlov.deep import find_config, deep_download\r\nfrom deeppavlov.core.commands.infer import build_model_from_config\r\nconfig = find_config('levenshtein_corrector_ru')\r\ndeep_download(config)\r\nmodel = build_model_from_config(config)\r\nprint(model(['\u0421\u0435\u0433\u043e\u0434\u043d\u044f.']))\r\nprint(model(['\u0432 3 . 
\u0421\u0435\u0433\u043e\u0434\u043d\u044f.']))\r\n```\r\nGives me\r\n> ['\u0441\u0435\u0433\u043e\u0434\u043d\u044f \u0432']\r\n> ['\u0432 3 \u0438 \u0441\u0435\u0433\u043e\u0434\u043d\u044f \u0438']\r\n\r\nThere are strange \".\" --> \"\u0432\" and \".\" --> \"\u0438\" inserts.\n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom math import log10\nfrom typing import Iterable, List, Tuple\n\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.models.component import Component\nfrom deeppavlov.core.common.log import get_logger\n\nfrom .levenshtein_searcher import LevenshteinSearcher\n\n\nlogger = get_logger(__name__)\n\n\n@register('spelling_levenshtein')\nclass LevenshteinSearcherComponent(Component):\n \"\"\"Component that finds replacement candidates for tokens at a set Damerau-Levenshtein distance\n\n Args:\n words: list of every correct word\n max_distance: maximum allowed Damerau-Levenshtein distance between source words and candidates\n error_probability: assigned probability for every edit\n\n Attributes:\n max_distance: maximum allowed Damerau-Levenshtein distance between source words and candidates\n error_probability: assigned logarithmic probability for every edit\n vocab_penalty: assigned logarithmic probability of an out of vocabulary token being the correct one without\n changes\n \"\"\"\n\n def __init__(self, words: Iterable[str], max_distance: int=1, error_probability: float=1e-4, *args, **kwargs):\n words = list({word.strip().lower().replace('\u0451', '\u0435') for word in words})\n alphabet = sorted({letter for word in words for letter in word})\n self.max_distance = max_distance\n self.error_probability = log10(error_probability)\n self.vocab_penalty = self.error_probability * 2\n self.searcher = LevenshteinSearcher(alphabet, words, allow_spaces=True, euristics=2)\n\n def _infer_instance(self, tokens: Iterable[str]) -> List[List[Tuple[float, str]]]:\n candidates = []\n for word in tokens:\n c = {candidate: self.error_probability * distance\n for candidate, distance in self.searcher.search(word, d=self.max_distance)}\n c[word] = c.get(word, self.vocab_penalty)\n candidates.append([(score, candidate) for candidate, score in c.items()])\n return candidates\n\n def __call__(self, batch: Iterable[Iterable[str]], *args, **kwargs) -> List[List[List[Tuple[float, str]]]]:\n \"\"\"Propose candidates for tokens in sentences\n\n Args:\n batch: batch of tokenized sentences\n\n Returns:\n batch of lists of probabilities and candidates for every token\n \"\"\"\n return [self._infer_instance(tokens) for tokens in batch]\n", "path": "deeppavlov/models/spelling_correction/levenshtein/searcher_component.py"}]} | 1,492 | 482 |
gh_patches_debug_42258 | rasdani/github-patches | git_diff | getsentry__sentry-61362 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Streamline issue platfrom message processing for non-Kafka envs
From https://github.com/getsentry/sentry/pull/59330#pullrequestreview-1713484895,
We can simplify the logic and make our tests more meaningful by not duplicating the message processing logic in dev environments. Instead, we can massage the message format to match a Kafka payloads and directly call `process_message`.
</issue>
<code>
[start of src/sentry/issues/producer.py]
1 from __future__ import annotations
2
3 import logging
4 from typing import Any, Dict, MutableMapping, Optional, cast
5
6 from arroyo import Topic
7 from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration
8 from django.conf import settings
9
10 from sentry import features
11 from sentry.issues.issue_occurrence import IssueOccurrence
12 from sentry.issues.status_change_consumer import bulk_get_groups_from_fingerprints, update_status
13 from sentry.issues.status_change_message import StatusChangeMessage
14 from sentry.models.project import Project
15 from sentry.services.hybrid_cloud import ValueEqualityEnum
16 from sentry.utils import json
17 from sentry.utils.arroyo_producer import SingletonProducer
18 from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition
19
20 logger = logging.getLogger(__name__)
21
22
23 class PayloadType(ValueEqualityEnum):
24 OCCURRENCE = "occurrence"
25 STATUS_CHANGE = "status_change"
26
27
28 def _get_occurrence_producer() -> KafkaProducer:
29 cluster_name = get_topic_definition(settings.KAFKA_INGEST_OCCURRENCES)["cluster"]
30 producer_config = get_kafka_producer_cluster_options(cluster_name)
31 producer_config.pop("compression.type", None)
32 producer_config.pop("message.max.bytes", None)
33 return KafkaProducer(build_kafka_configuration(default_config=producer_config))
34
35
36 _occurrence_producer = SingletonProducer(
37 _get_occurrence_producer, max_futures=settings.SENTRY_ISSUE_PLATFORM_FUTURES_MAX_LIMIT
38 )
39
40
41 def produce_occurrence_to_kafka(
42 payload_type: PayloadType | None = PayloadType.OCCURRENCE,
43 occurrence: IssueOccurrence | None = None,
44 status_change: StatusChangeMessage | None = None,
45 event_data: Optional[Dict[str, Any]] = None,
46 ) -> None:
47 payload_data = None
48 if payload_type == PayloadType.OCCURRENCE:
49 payload_data = _prepare_occurrence_message(occurrence, event_data)
50 elif payload_type == PayloadType.STATUS_CHANGE:
51 payload_data = _prepare_status_change_message(status_change)
52 else:
53 raise NotImplementedError(f"Unknown payload type: {payload_type}")
54
55 if payload_data is None:
56 return
57
58 payload = KafkaPayload(None, json.dumps(payload_data).encode("utf-8"), [])
59 _occurrence_producer.produce(Topic(settings.KAFKA_INGEST_OCCURRENCES), payload)
60
61
62 def _prepare_occurrence_message(
63 occurrence: IssueOccurrence | None, event_data: Optional[Dict[str, Any]]
64 ) -> MutableMapping[str, Any] | None:
65 if not occurrence:
66 raise ValueError("occurrence must be provided")
67 if event_data and occurrence.event_id != event_data["event_id"]:
68 raise ValueError("Event id on occurrence and event_data must be the same")
69 if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream":
70 # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just
71 # write to the issue platform directly
72 from sentry.issues.ingest import process_occurrence_data
73 from sentry.issues.occurrence_consumer import (
74 lookup_event_and_process_issue_occurrence,
75 process_event_and_issue_occurrence,
76 )
77
78 occurrence_dict = occurrence.to_dict()
79 process_occurrence_data(occurrence_dict)
80 if event_data:
81 process_event_and_issue_occurrence(occurrence_dict, event_data)
82 else:
83 lookup_event_and_process_issue_occurrence(occurrence_dict)
84 return None
85
86 payload_data = cast(MutableMapping[str, Any], occurrence.to_dict())
87 payload_data["payload_type"] = PayloadType.OCCURRENCE.value
88 if event_data:
89 payload_data["event"] = event_data
90
91 return payload_data
92
93
94 def _prepare_status_change_message(
95 status_change: StatusChangeMessage | None,
96 ) -> MutableMapping[str, Any] | None:
97 if not status_change:
98 raise ValueError("status_change must be provided")
99
100 organization = Project.objects.get(id=status_change.project_id).organization
101 if not features.has("organizations:issue-platform-api-crons-sd", organization):
102 return None
103
104 if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream":
105 # Do the change
106 # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just
107 # write to the issue platform directly
108 from sentry.issues.ingest import process_occurrence_data
109
110 process_occurrence_data(status_change.to_dict())
111 fingerprint = status_change.fingerprint
112 groups_by_fingerprints = bulk_get_groups_from_fingerprints(
113 [(status_change.project_id, fingerprint)]
114 )
115
116 key = (status_change.project_id, fingerprint[0])
117 group = groups_by_fingerprints.get(key, None)
118 if not group:
119 return None
120 update_status(group, status_change.to_dict())
121 return None
122
123 payload_data = cast(MutableMapping[str, Any], status_change.to_dict())
124 payload_data["payload_type"] = PayloadType.STATUS_CHANGE.value
125 return payload_data
126
[end of src/sentry/issues/producer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/issues/producer.py b/src/sentry/issues/producer.py
--- a/src/sentry/issues/producer.py
+++ b/src/sentry/issues/producer.py
@@ -5,11 +5,12 @@
from arroyo import Topic
from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration
+from arroyo.types import Message, Value
from django.conf import settings
from sentry import features
from sentry.issues.issue_occurrence import IssueOccurrence
-from sentry.issues.status_change_consumer import bulk_get_groups_from_fingerprints, update_status
+from sentry.issues.run import process_message
from sentry.issues.status_change_message import StatusChangeMessage
from sentry.models.project import Project
from sentry.services.hybrid_cloud import ValueEqualityEnum
@@ -56,6 +57,12 @@
return
payload = KafkaPayload(None, json.dumps(payload_data).encode("utf-8"), [])
+ if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream":
+ # If we're not running Kafka then we're just in dev.
+ # Skip producing to Kafka and just process the message directly
+ process_message(Message(Value(payload=payload, committable={})))
+ return
+
_occurrence_producer.produce(Topic(settings.KAFKA_INGEST_OCCURRENCES), payload)
@@ -66,22 +73,6 @@
raise ValueError("occurrence must be provided")
if event_data and occurrence.event_id != event_data["event_id"]:
raise ValueError("Event id on occurrence and event_data must be the same")
- if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream":
- # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just
- # write to the issue platform directly
- from sentry.issues.ingest import process_occurrence_data
- from sentry.issues.occurrence_consumer import (
- lookup_event_and_process_issue_occurrence,
- process_event_and_issue_occurrence,
- )
-
- occurrence_dict = occurrence.to_dict()
- process_occurrence_data(occurrence_dict)
- if event_data:
- process_event_and_issue_occurrence(occurrence_dict, event_data)
- else:
- lookup_event_and_process_issue_occurrence(occurrence_dict)
- return None
payload_data = cast(MutableMapping[str, Any], occurrence.to_dict())
payload_data["payload_type"] = PayloadType.OCCURRENCE.value
@@ -101,25 +92,6 @@
if not features.has("organizations:issue-platform-api-crons-sd", organization):
return None
- if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream":
- # Do the change
- # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just
- # write to the issue platform directly
- from sentry.issues.ingest import process_occurrence_data
-
- process_occurrence_data(status_change.to_dict())
- fingerprint = status_change.fingerprint
- groups_by_fingerprints = bulk_get_groups_from_fingerprints(
- [(status_change.project_id, fingerprint)]
- )
-
- key = (status_change.project_id, fingerprint[0])
- group = groups_by_fingerprints.get(key, None)
- if not group:
- return None
- update_status(group, status_change.to_dict())
- return None
-
payload_data = cast(MutableMapping[str, Any], status_change.to_dict())
payload_data["payload_type"] = PayloadType.STATUS_CHANGE.value
return payload_data
| {"golden_diff": "diff --git a/src/sentry/issues/producer.py b/src/sentry/issues/producer.py\n--- a/src/sentry/issues/producer.py\n+++ b/src/sentry/issues/producer.py\n@@ -5,11 +5,12 @@\n \n from arroyo import Topic\n from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration\n+from arroyo.types import Message, Value\n from django.conf import settings\n \n from sentry import features\n from sentry.issues.issue_occurrence import IssueOccurrence\n-from sentry.issues.status_change_consumer import bulk_get_groups_from_fingerprints, update_status\n+from sentry.issues.run import process_message\n from sentry.issues.status_change_message import StatusChangeMessage\n from sentry.models.project import Project\n from sentry.services.hybrid_cloud import ValueEqualityEnum\n@@ -56,6 +57,12 @@\n return\n \n payload = KafkaPayload(None, json.dumps(payload_data).encode(\"utf-8\"), [])\n+ if settings.SENTRY_EVENTSTREAM != \"sentry.eventstream.kafka.KafkaEventStream\":\n+ # If we're not running Kafka then we're just in dev.\n+ # Skip producing to Kafka and just process the message directly\n+ process_message(Message(Value(payload=payload, committable={})))\n+ return\n+\n _occurrence_producer.produce(Topic(settings.KAFKA_INGEST_OCCURRENCES), payload)\n \n \n@@ -66,22 +73,6 @@\n raise ValueError(\"occurrence must be provided\")\n if event_data and occurrence.event_id != event_data[\"event_id\"]:\n raise ValueError(\"Event id on occurrence and event_data must be the same\")\n- if settings.SENTRY_EVENTSTREAM != \"sentry.eventstream.kafka.KafkaEventStream\":\n- # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just\n- # write to the issue platform directly\n- from sentry.issues.ingest import process_occurrence_data\n- from sentry.issues.occurrence_consumer import (\n- lookup_event_and_process_issue_occurrence,\n- process_event_and_issue_occurrence,\n- )\n-\n- occurrence_dict = occurrence.to_dict()\n- process_occurrence_data(occurrence_dict)\n- if event_data:\n- process_event_and_issue_occurrence(occurrence_dict, event_data)\n- else:\n- lookup_event_and_process_issue_occurrence(occurrence_dict)\n- return None\n \n payload_data = cast(MutableMapping[str, Any], occurrence.to_dict())\n payload_data[\"payload_type\"] = PayloadType.OCCURRENCE.value\n@@ -101,25 +92,6 @@\n if not features.has(\"organizations:issue-platform-api-crons-sd\", organization):\n return None\n \n- if settings.SENTRY_EVENTSTREAM != \"sentry.eventstream.kafka.KafkaEventStream\":\n- # Do the change\n- # If we're not running Kafka then we're just in dev. 
Skip producing to Kafka and just\n- # write to the issue platform directly\n- from sentry.issues.ingest import process_occurrence_data\n-\n- process_occurrence_data(status_change.to_dict())\n- fingerprint = status_change.fingerprint\n- groups_by_fingerprints = bulk_get_groups_from_fingerprints(\n- [(status_change.project_id, fingerprint)]\n- )\n-\n- key = (status_change.project_id, fingerprint[0])\n- group = groups_by_fingerprints.get(key, None)\n- if not group:\n- return None\n- update_status(group, status_change.to_dict())\n- return None\n-\n payload_data = cast(MutableMapping[str, Any], status_change.to_dict())\n payload_data[\"payload_type\"] = PayloadType.STATUS_CHANGE.value\n return payload_data\n", "issue": "Streamline issue platfrom message processing for non-Kafka envs\nFrom https://github.com/getsentry/sentry/pull/59330#pullrequestreview-1713484895, \n\nWe can simplify the logic and make our tests more meaningful by not duplicating the message processing logic in dev environments. Instead, we can massage the message format to match a Kafka payloads and directly call `process_message`. \n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom typing import Any, Dict, MutableMapping, Optional, cast\n\nfrom arroyo import Topic\nfrom arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration\nfrom django.conf import settings\n\nfrom sentry import features\nfrom sentry.issues.issue_occurrence import IssueOccurrence\nfrom sentry.issues.status_change_consumer import bulk_get_groups_from_fingerprints, update_status\nfrom sentry.issues.status_change_message import StatusChangeMessage\nfrom sentry.models.project import Project\nfrom sentry.services.hybrid_cloud import ValueEqualityEnum\nfrom sentry.utils import json\nfrom sentry.utils.arroyo_producer import SingletonProducer\nfrom sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition\n\nlogger = logging.getLogger(__name__)\n\n\nclass PayloadType(ValueEqualityEnum):\n OCCURRENCE = \"occurrence\"\n STATUS_CHANGE = \"status_change\"\n\n\ndef _get_occurrence_producer() -> KafkaProducer:\n cluster_name = get_topic_definition(settings.KAFKA_INGEST_OCCURRENCES)[\"cluster\"]\n producer_config = get_kafka_producer_cluster_options(cluster_name)\n producer_config.pop(\"compression.type\", None)\n producer_config.pop(\"message.max.bytes\", None)\n return KafkaProducer(build_kafka_configuration(default_config=producer_config))\n\n\n_occurrence_producer = SingletonProducer(\n _get_occurrence_producer, max_futures=settings.SENTRY_ISSUE_PLATFORM_FUTURES_MAX_LIMIT\n)\n\n\ndef produce_occurrence_to_kafka(\n payload_type: PayloadType | None = PayloadType.OCCURRENCE,\n occurrence: IssueOccurrence | None = None,\n status_change: StatusChangeMessage | None = None,\n event_data: Optional[Dict[str, Any]] = None,\n) -> None:\n payload_data = None\n if payload_type == PayloadType.OCCURRENCE:\n payload_data = _prepare_occurrence_message(occurrence, event_data)\n elif payload_type == PayloadType.STATUS_CHANGE:\n payload_data = _prepare_status_change_message(status_change)\n else:\n raise NotImplementedError(f\"Unknown payload type: {payload_type}\")\n\n if payload_data is None:\n return\n\n payload = KafkaPayload(None, json.dumps(payload_data).encode(\"utf-8\"), [])\n _occurrence_producer.produce(Topic(settings.KAFKA_INGEST_OCCURRENCES), payload)\n\n\ndef _prepare_occurrence_message(\n occurrence: IssueOccurrence | None, event_data: Optional[Dict[str, Any]]\n) -> 
MutableMapping[str, Any] | None:\n if not occurrence:\n raise ValueError(\"occurrence must be provided\")\n if event_data and occurrence.event_id != event_data[\"event_id\"]:\n raise ValueError(\"Event id on occurrence and event_data must be the same\")\n if settings.SENTRY_EVENTSTREAM != \"sentry.eventstream.kafka.KafkaEventStream\":\n # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just\n # write to the issue platform directly\n from sentry.issues.ingest import process_occurrence_data\n from sentry.issues.occurrence_consumer import (\n lookup_event_and_process_issue_occurrence,\n process_event_and_issue_occurrence,\n )\n\n occurrence_dict = occurrence.to_dict()\n process_occurrence_data(occurrence_dict)\n if event_data:\n process_event_and_issue_occurrence(occurrence_dict, event_data)\n else:\n lookup_event_and_process_issue_occurrence(occurrence_dict)\n return None\n\n payload_data = cast(MutableMapping[str, Any], occurrence.to_dict())\n payload_data[\"payload_type\"] = PayloadType.OCCURRENCE.value\n if event_data:\n payload_data[\"event\"] = event_data\n\n return payload_data\n\n\ndef _prepare_status_change_message(\n status_change: StatusChangeMessage | None,\n) -> MutableMapping[str, Any] | None:\n if not status_change:\n raise ValueError(\"status_change must be provided\")\n\n organization = Project.objects.get(id=status_change.project_id).organization\n if not features.has(\"organizations:issue-platform-api-crons-sd\", organization):\n return None\n\n if settings.SENTRY_EVENTSTREAM != \"sentry.eventstream.kafka.KafkaEventStream\":\n # Do the change\n # If we're not running Kafka then we're just in dev. Skip producing to Kafka and just\n # write to the issue platform directly\n from sentry.issues.ingest import process_occurrence_data\n\n process_occurrence_data(status_change.to_dict())\n fingerprint = status_change.fingerprint\n groups_by_fingerprints = bulk_get_groups_from_fingerprints(\n [(status_change.project_id, fingerprint)]\n )\n\n key = (status_change.project_id, fingerprint[0])\n group = groups_by_fingerprints.get(key, None)\n if not group:\n return None\n update_status(group, status_change.to_dict())\n return None\n\n payload_data = cast(MutableMapping[str, Any], status_change.to_dict())\n payload_data[\"payload_type\"] = PayloadType.STATUS_CHANGE.value\n return payload_data\n", "path": "src/sentry/issues/producer.py"}]} | 2,002 | 820 |
gh_patches_debug_2711 | rasdani/github-patches | git_diff | getmoto__moto-1462 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add opsworks app mocks
Add the mocks of OpsWork create_app and describe_apps calls. This is part of #1477
</issue>
<code>
[start of moto/__init__.py]
1 from __future__ import unicode_literals
2 import logging
3 # logging.getLogger('boto').setLevel(logging.CRITICAL)
4
5 __title__ = 'moto'
6 __version__ = '1.2.0',
7
8 from .acm import mock_acm # flake8: noqa
9 from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa
10 from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa
11 from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa
12 from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa
13 from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa
14 from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa
15 from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa
16 from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa
17 from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa
18 from .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa
19 from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa
20 from .elb import mock_elb, mock_elb_deprecated # flake8: noqa
21 from .elbv2 import mock_elbv2 # flake8: noqa
22 from .emr import mock_emr, mock_emr_deprecated # flake8: noqa
23 from .events import mock_events # flake8: noqa
24 from .glacier import mock_glacier, mock_glacier_deprecated # flake8: noqa
25 from .iam import mock_iam, mock_iam_deprecated # flake8: noqa
26 from .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa
27 from .kms import mock_kms, mock_kms_deprecated # flake8: noqa
28 from .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa
29 from .polly import mock_polly # flake8: noqa
30 from .rds import mock_rds, mock_rds_deprecated # flake8: noqa
31 from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa
32 from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa
33 from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa
34 from .ses import mock_ses, mock_ses_deprecated # flake8: noqa
35 from .sns import mock_sns, mock_sns_deprecated # flake8: noqa
36 from .sqs import mock_sqs, mock_sqs_deprecated # flake8: noqa
37 from .sts import mock_sts, mock_sts_deprecated # flake8: noqa
38 from .ssm import mock_ssm # flake8: noqa
39 from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa
40 from .swf import mock_swf, mock_swf_deprecated # flake8: noqa
41 from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa
42 from .logs import mock_logs, mock_logs_deprecated # flake8: noqa
43 from .batch import mock_batch # flake8: noqa
44 from .resourcegroupstaggingapi import mock_resourcegroupstaggingapi # flake8: noqa
45 from .iot import mock_iot # flake8: noqa
46 from .iotdata import mock_iotdata # flake8: noqa
47
48
49 try:
50 # Need to monkey-patch botocore requests back to underlying urllib3 classes
51 from botocore.awsrequest import HTTPSConnectionPool, HTTPConnectionPool, HTTPConnection, VerifiedHTTPSConnection
52 except ImportError:
53 pass
54 else:
55 HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
56 HTTPConnectionPool.ConnectionCls = HTTPConnection
57
[end of moto/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/__init__.py b/moto/__init__.py
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -3,7 +3,7 @@
# logging.getLogger('boto').setLevel(logging.CRITICAL)
__title__ = 'moto'
-__version__ = '1.2.0',
+__version__ = '1.2.0'
from .acm import mock_acm # flake8: noqa
from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa
| {"golden_diff": "diff --git a/moto/__init__.py b/moto/__init__.py\n--- a/moto/__init__.py\n+++ b/moto/__init__.py\n@@ -3,7 +3,7 @@\n # logging.getLogger('boto').setLevel(logging.CRITICAL)\n \n __title__ = 'moto'\n-__version__ = '1.2.0',\n+__version__ = '1.2.0'\n \n from .acm import mock_acm # flake8: noqa\n from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa\n", "issue": "Add opsworks app mocks\nAdd the mocks of OpsWork create_app and describe_apps calls. This is part of #1477 \n", "before_files": [{"content": "from __future__ import unicode_literals\nimport logging\n# logging.getLogger('boto').setLevel(logging.CRITICAL)\n\n__title__ = 'moto'\n__version__ = '1.2.0',\n\nfrom .acm import mock_acm # flake8: noqa\nfrom .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa\nfrom .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8: noqa\nfrom .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa\nfrom .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa\nfrom .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa\nfrom .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa\nfrom .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa\nfrom .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa\nfrom .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa\nfrom .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa\nfrom .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa\nfrom .elb import mock_elb, mock_elb_deprecated # flake8: noqa\nfrom .elbv2 import mock_elbv2 # flake8: noqa\nfrom .emr import mock_emr, mock_emr_deprecated # flake8: noqa\nfrom .events import mock_events # flake8: noqa\nfrom .glacier import mock_glacier, mock_glacier_deprecated # flake8: noqa\nfrom .iam import mock_iam, mock_iam_deprecated # flake8: noqa\nfrom .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa\nfrom .kms import mock_kms, mock_kms_deprecated # flake8: noqa\nfrom .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa\nfrom .polly import mock_polly # flake8: noqa\nfrom .rds import mock_rds, mock_rds_deprecated # flake8: noqa\nfrom .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa\nfrom .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa\nfrom .s3 import mock_s3, mock_s3_deprecated # flake8: noqa\nfrom .ses import mock_ses, mock_ses_deprecated # flake8: noqa\nfrom .sns import mock_sns, mock_sns_deprecated # flake8: noqa\nfrom .sqs import mock_sqs, mock_sqs_deprecated # flake8: noqa\nfrom .sts import mock_sts, mock_sts_deprecated # flake8: noqa\nfrom .ssm import mock_ssm # flake8: noqa\nfrom .route53 import mock_route53, mock_route53_deprecated # flake8: noqa\nfrom .swf import mock_swf, mock_swf_deprecated # flake8: noqa\nfrom .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa\nfrom .logs import mock_logs, mock_logs_deprecated # flake8: noqa\nfrom .batch import mock_batch # flake8: noqa\nfrom .resourcegroupstaggingapi import mock_resourcegroupstaggingapi # flake8: noqa\nfrom .iot import mock_iot # flake8: noqa\nfrom .iotdata import mock_iotdata # flake8: noqa\n\n\ntry:\n # Need to monkey-patch botocore requests back to underlying urllib3 classes\n from botocore.awsrequest import HTTPSConnectionPool, HTTPConnectionPool, HTTPConnection, VerifiedHTTPSConnection\nexcept ImportError:\n pass\nelse:\n 
HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection\n HTTPConnectionPool.ConnectionCls = HTTPConnection\n", "path": "moto/__init__.py"}]} | 1,606 | 135 |
gh_patches_debug_27558 | rasdani/github-patches | git_diff | fossasia__open-event-server-5311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong Mail Statistics which troubles it to work completely
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Wrong Mail Statistics which troubles it to work completely
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Stacktrace**
<!-- If applicable, add stacktrace to help explain your problem. -->
**Additional details (please complete the following information):**
- OS: [e.g. MacOS, Ubuntu, CentOS]
- Python Version [e.g. `3.5`, `3.6`]
- `HEAD` Commit hash [e.g. `4629c62`]
**Additional context**
<!-- Add any other context about the problem here. -->
**Wanna work on this issue**
</issue>
<code>
[start of app/api/admin_statistics_api/mails.py]
1 from flask_rest_jsonapi import ResourceDetail
2 from marshmallow_jsonapi.flask import Schema
3 from marshmallow_jsonapi import fields
4 from datetime import datetime, timedelta
5 import pytz
6
7 from app.api.helpers.utilities import dasherize
8 from app.api.bootstrap import api
9 from app.models import db
10 from app.models.mail import Mail
11 from app.api.data_layers.NoModelLayer import NoModelLayer
12 from app.api.helpers.db import get_count
13
14
15 class AdminStatisticsMailSchema(Schema):
16 """
17 Api schema
18 """
19 class Meta:
20 """
21 Meta class
22 """
23 type_ = 'admin-statistics-mail'
24 self_view = 'v1.admin_statistics_mail_detail'
25 inflect = dasherize
26
27 id = fields.String()
28 one_day = fields.Method("mail_last_1_day")
29 three_days = fields.Method("mail_last_3_days")
30 seven_days = fields.Method("mail_last_7_days")
31 thirty_days = fields.Method("mail_last_30_days")
32
33 def mail_last_1_day(self, obj):
34 return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=1)))
35
36 def mail_last_3_days(self, obj):
37 return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=3)))
38
39 def mail_last_7_days(self, obj):
40 return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=7)))
41
42 def mail_last_30_days(self, obj):
43 return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=30)))
44
45
46 class AdminStatisticsMailDetail(ResourceDetail):
47 """
48 Detail by id
49 """
50 methods = ['GET']
51 decorators = (api.has_permission('is_admin'),)
52 schema = AdminStatisticsMailSchema
53 data_layer = {
54 'class': NoModelLayer,
55 'session': db.session
56 }
57
[end of app/api/admin_statistics_api/mails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/admin_statistics_api/mails.py b/app/api/admin_statistics_api/mails.py
--- a/app/api/admin_statistics_api/mails.py
+++ b/app/api/admin_statistics_api/mails.py
@@ -31,16 +31,24 @@
thirty_days = fields.Method("mail_last_30_days")
def mail_last_1_day(self, obj):
- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=1)))
+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))
+ mails_till_last_1_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=1)))
+ return all_mails - mails_till_last_1_day
def mail_last_3_days(self, obj):
- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=3)))
+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))
+ mails_till_last_3_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=3)))
+ return all_mails - mails_till_last_3_day
def mail_last_7_days(self, obj):
- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=7)))
+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))
+ mails_till_last_7_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=7)))
+ return all_mails - mails_till_last_7_day
def mail_last_30_days(self, obj):
- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=30)))
+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))
+ mails_till_last_30_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=30)))
+ return all_mails - mails_till_last_30_day
class AdminStatisticsMailDetail(ResourceDetail):
| {"golden_diff": "diff --git a/app/api/admin_statistics_api/mails.py b/app/api/admin_statistics_api/mails.py\n--- a/app/api/admin_statistics_api/mails.py\n+++ b/app/api/admin_statistics_api/mails.py\n@@ -31,16 +31,24 @@\n thirty_days = fields.Method(\"mail_last_30_days\")\n \n def mail_last_1_day(self, obj):\n- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=1)))\n+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))\n+ mails_till_last_1_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=1)))\n+ return all_mails - mails_till_last_1_day\n \n def mail_last_3_days(self, obj):\n- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=3)))\n+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))\n+ mails_till_last_3_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=3)))\n+ return all_mails - mails_till_last_3_day\n \n def mail_last_7_days(self, obj):\n- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=7)))\n+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))\n+ mails_till_last_7_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=7)))\n+ return all_mails - mails_till_last_7_day\n \n def mail_last_30_days(self, obj):\n- return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=30)))\n+ all_mails = get_count(Mail.query.filter_by(time=datetime.now(pytz.utc)))\n+ mails_till_last_30_day = get_count(Mail.query.filter(Mail.time <= datetime.now(pytz.utc) - timedelta(days=30)))\n+ return all_mails - mails_till_last_30_day\n \n \n class AdminStatisticsMailDetail(ResourceDetail):\n", "issue": "Wrong Mail Statistics which troubles it to work completely\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nWrong Mail Statistics which troubles it to work completely\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Stacktrace**\r\n<!-- If applicable, add stacktrace to help explain your problem. -->\r\n\r\n**Additional details (please complete the following information):**\r\n - OS: [e.g. MacOS, Ubuntu, CentOS]\r\n - Python Version [e.g. `3.5`, `3.6`]\r\n - `HEAD` Commit hash [e.g. `4629c62`]\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\n**Wanna work on this issue**\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail\nfrom marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom datetime import datetime, timedelta\nimport pytz\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.bootstrap import api\nfrom app.models import db\nfrom app.models.mail import Mail\nfrom app.api.data_layers.NoModelLayer import NoModelLayer\nfrom app.api.helpers.db import get_count\n\n\nclass AdminStatisticsMailSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'admin-statistics-mail'\n self_view = 'v1.admin_statistics_mail_detail'\n inflect = dasherize\n\n id = fields.String()\n one_day = fields.Method(\"mail_last_1_day\")\n three_days = fields.Method(\"mail_last_3_days\")\n seven_days = fields.Method(\"mail_last_7_days\")\n thirty_days = fields.Method(\"mail_last_30_days\")\n\n def mail_last_1_day(self, obj):\n return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=1)))\n\n def mail_last_3_days(self, obj):\n return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=3)))\n\n def mail_last_7_days(self, obj):\n return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=7)))\n\n def mail_last_30_days(self, obj):\n return get_count(Mail.query.filter(datetime.now(pytz.utc) - Mail.time <= timedelta(days=30)))\n\n\nclass AdminStatisticsMailDetail(ResourceDetail):\n \"\"\"\n Detail by id\n \"\"\"\n methods = ['GET']\n decorators = (api.has_permission('is_admin'),)\n schema = AdminStatisticsMailSchema\n data_layer = {\n 'class': NoModelLayer,\n 'session': db.session\n }\n", "path": "app/api/admin_statistics_api/mails.py"}]} | 1,268 | 508 |
gh_patches_debug_14292 | rasdani/github-patches | git_diff | secdev__scapy-855 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test_pyx problem
I have a unit test that uses the scapy library like this:
```
$ cat ut.py
from scapy.all import *
def test_foo():
pass
```
The problem is that the testing framework (pytest) detects the internal scapy function test_pyx as a test:
```
ut.py::test_foo PASSED
ut.py::test_pyx <- venv/src/scapy/scapy/consts.py PASSED
```
This is because the test_pyx function from scapy/consts.py is unnecessarily imported from scapy.all,
and pytest treats all test_* functions as tests.
Scapy from current master branch.
</issue>
<code>
[start of scapy/consts.py]
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## This program is published under a GPLv2 license
5
6 import os, inspect
7 from sys import platform, maxsize
8 import platform as platform_lib
9 from scapy.error import *
10
11 import subprocess
12
13 try:
14 from matplotlib import get_backend as matplotlib_get_backend
15 import matplotlib.pyplot as plt
16 MATPLOTLIB = 1
17 if "inline" in matplotlib_get_backend():
18 MATPLOTLIB_INLINED = 1
19 else:
20 MATPLOTLIB_INLINED = 0
21 MATPLOTLIB_DEFAULT_PLOT_KARGS = {"marker": "+"}
22 # RuntimeError to catch gtk "Cannot open display" error
23 except (ImportError, RuntimeError):
24 plt = None
25 MATPLOTLIB = 0
26 MATPLOTLIB_INLINED = 0
27 MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()
28 log_loading.info("Can't import matplotlib. Won't be able to plot.")
29
30 def test_pyx():
31 """Returns if PyX is correctly installed or not"""
32 try:
33 with open(os.devnull, 'wb') as devnull:
34 r = subprocess.check_call(["pdflatex", "--version"], stdout=devnull, stderr=subprocess.STDOUT)
35 except:
36 return False
37 else:
38 return r == 0
39
40 try:
41 import pyx
42 if test_pyx():
43 PYX = 1
44 else:
45 log_loading.warning("PyX dependencies are not installed ! Please install TexLive or MikTeX.")
46 PYX = 0
47 except ImportError:
48 log_loading.info("Can't import PyX. Won't be able to use psdump() or pdfdump().")
49 PYX = 0
50
51
52 LINUX = platform.startswith("linux")
53 OPENBSD = platform.startswith("openbsd")
54 FREEBSD = "freebsd" in platform
55 NETBSD = platform.startswith("netbsd")
56 DARWIN = platform.startswith("darwin")
57 SOLARIS = platform.startswith("sunos")
58 WINDOWS = platform.startswith("win32")
59 BSD = DARWIN or FREEBSD or OPENBSD or NETBSD
60 # See https://docs.python.org/3/library/platform.html#cross-platform
61 IS_64BITS = maxsize > 2**32
62
63 if WINDOWS:
64 try:
65 if float(platform_lib.release()) >= 8.1:
66 LOOPBACK_NAME = "Microsoft KM-TEST Loopback Adapter"
67 else:
68 LOOPBACK_NAME = "Microsoft Loopback Adapter"
69 except ValueError:
70 LOOPBACK_NAME = "Microsoft Loopback Adapter"
71 # Will be different on Windows
72 LOOPBACK_INTERFACE = None
73 else:
74 uname = os.uname()
75 LOOPBACK_NAME = "lo" if LINUX else "lo0"
76 LOOPBACK_INTERFACE = LOOPBACK_NAME
77
78 def parent_function():
79 return inspect.getouterframes(inspect.currentframe())
80
[end of scapy/consts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/consts.py b/scapy/consts.py
--- a/scapy/consts.py
+++ b/scapy/consts.py
@@ -27,7 +27,7 @@
MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()
log_loading.info("Can't import matplotlib. Won't be able to plot.")
-def test_pyx():
+def _test_pyx():
"""Returns if PyX is correctly installed or not"""
try:
with open(os.devnull, 'wb') as devnull:
@@ -39,7 +39,7 @@
try:
import pyx
- if test_pyx():
+ if _test_pyx():
PYX = 1
else:
log_loading.warning("PyX dependencies are not installed ! Please install TexLive or MikTeX.")
| {"golden_diff": "diff --git a/scapy/consts.py b/scapy/consts.py\n--- a/scapy/consts.py\n+++ b/scapy/consts.py\n@@ -27,7 +27,7 @@\n MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()\n log_loading.info(\"Can't import matplotlib. Won't be able to plot.\")\n \n-def test_pyx():\n+def _test_pyx():\n \"\"\"Returns if PyX is correctly installed or not\"\"\"\n try:\n with open(os.devnull, 'wb') as devnull:\n@@ -39,7 +39,7 @@\n \n try:\n import pyx\n- if test_pyx():\n+ if _test_pyx():\n PYX = 1\n else:\n log_loading.warning(\"PyX dependencies are not installed ! Please install TexLive or MikTeX.\")\n", "issue": "test_pyx problem\nI have a unit test that uses scapy library like this:\r\n```\r\n$ cat ut.py \r\nfrom scapy.all import *\r\n\r\ndef test_foo():\r\n pass\r\n```\r\nThe problem is that testing framework (pytest) detects internal scapy function test_pyx as a test:\r\n```\r\nut.py::test_foo PASSED\r\nut.py::test_pyx <- venv/src/scapy/scapy/consts.py PASSED\r\n```\r\nThis is because test_pyx function from scapy/consts.py is unnecessarily imported from scapy.all\r\nand pytest treats all test_* functions as tests.\r\n\r\nScapy from current master branch.\r\n\r\n\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\nimport os, inspect\nfrom sys import platform, maxsize\nimport platform as platform_lib\nfrom scapy.error import *\n\nimport subprocess\n\ntry:\n from matplotlib import get_backend as matplotlib_get_backend\n import matplotlib.pyplot as plt\n MATPLOTLIB = 1\n if \"inline\" in matplotlib_get_backend():\n MATPLOTLIB_INLINED = 1\n else:\n MATPLOTLIB_INLINED = 0\n MATPLOTLIB_DEFAULT_PLOT_KARGS = {\"marker\": \"+\"}\n# RuntimeError to catch gtk \"Cannot open display\" error\nexcept (ImportError, RuntimeError):\n plt = None\n MATPLOTLIB = 0\n MATPLOTLIB_INLINED = 0\n MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()\n log_loading.info(\"Can't import matplotlib. Won't be able to plot.\")\n\ndef test_pyx():\n \"\"\"Returns if PyX is correctly installed or not\"\"\"\n try:\n with open(os.devnull, 'wb') as devnull:\n r = subprocess.check_call([\"pdflatex\", \"--version\"], stdout=devnull, stderr=subprocess.STDOUT)\n except:\n return False\n else:\n return r == 0\n\ntry:\n import pyx\n if test_pyx():\n PYX = 1\n else:\n log_loading.warning(\"PyX dependencies are not installed ! Please install TexLive or MikTeX.\")\n PYX = 0\nexcept ImportError:\n log_loading.info(\"Can't import PyX. 
Won't be able to use psdump() or pdfdump().\")\n PYX = 0\n\n\nLINUX = platform.startswith(\"linux\")\nOPENBSD = platform.startswith(\"openbsd\")\nFREEBSD = \"freebsd\" in platform\nNETBSD = platform.startswith(\"netbsd\")\nDARWIN = platform.startswith(\"darwin\")\nSOLARIS = platform.startswith(\"sunos\")\nWINDOWS = platform.startswith(\"win32\")\nBSD = DARWIN or FREEBSD or OPENBSD or NETBSD\n# See https://docs.python.org/3/library/platform.html#cross-platform\nIS_64BITS = maxsize > 2**32\n\nif WINDOWS:\n try:\n if float(platform_lib.release()) >= 8.1:\n LOOPBACK_NAME = \"Microsoft KM-TEST Loopback Adapter\"\n else:\n LOOPBACK_NAME = \"Microsoft Loopback Adapter\"\n except ValueError:\n LOOPBACK_NAME = \"Microsoft Loopback Adapter\"\n # Will be different on Windows\n LOOPBACK_INTERFACE = None\nelse:\n uname = os.uname()\n LOOPBACK_NAME = \"lo\" if LINUX else \"lo0\"\n LOOPBACK_INTERFACE = LOOPBACK_NAME\n\ndef parent_function():\n return inspect.getouterframes(inspect.currentframe())\n", "path": "scapy/consts.py"}]} | 1,468 | 183 |
gh_patches_debug_19624 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search submissions by challenge name in submissions table
Currently, we support searching the submissions by the `participant team name, challenge phase name, created by name and submission status`. We would like to add searching the submissions by `challenge name` and also add the same to default list filtering options.
</issue>
<code>
[start of apps/jobs/admin.py]
1 import logging
2
3 from django.contrib import admin
4
5 from base.admin import ImportExportTimeStampedAdmin
6
7 from .models import Submission
8 from .sender import publish_submission_message
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @admin.register(Submission)
15 class SubmissionAdmin(ImportExportTimeStampedAdmin):
16 actions = ['submit_job_to_worker']
17 list_display = ('participant_team', 'get_challenge_name_and_id', 'challenge_phase',
18 'created_by', 'status', 'is_public', 'submission_number', 'submitted_at',
19 'execution_time', 'input_file', 'stdout_file', 'stderr_file',
20 'submission_result_file', 'submission_metadata_file',)
21 list_filter = ('participant_team', 'challenge_phase',
22 'status', 'is_public',)
23 search_fields = ('participant_team__team_name', 'challenge_phase__name',
24 'created_by__username', 'status',)
25
26 def get_challenge_name_and_id(self, obj):
27 """Return challenge name corresponding to phase"""
28 return "%s - %s" % (obj.challenge_phase.challenge.title, obj.challenge_phase.challenge.id)
29 get_challenge_name_and_id.short_description = 'Challenge'
30 get_challenge_name_and_id.admin_order_field = 'challenge_phase__challenge'
31
32 def submit_job_to_worker(self, request, queryset):
33 for submission in queryset:
34 challenge_id = submission.challenge_phase.challenge.id
35 challenge_phase_id = submission.challenge_phase.id
36 submission_id = submission.id
37 logger.info("[x] Received submission message with challenge id {}, challenge phase id {}, submission id {}"
38 .format(challenge_id, challenge_phase_id, submission_id))
39 publish_submission_message(challenge_id, challenge_phase_id, submission.id)
40 queryset.update(status=Submission.SUBMITTED)
41
42 submit_job_to_worker.short_description = "Run selected submissions"
43
[end of apps/jobs/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/jobs/admin.py b/apps/jobs/admin.py
--- a/apps/jobs/admin.py
+++ b/apps/jobs/admin.py
@@ -18,10 +18,9 @@
'created_by', 'status', 'is_public', 'submission_number', 'submitted_at',
'execution_time', 'input_file', 'stdout_file', 'stderr_file',
'submission_result_file', 'submission_metadata_file',)
- list_filter = ('participant_team', 'challenge_phase',
- 'status', 'is_public',)
+ list_filter = ('challenge_phase__challenge', 'challenge_phase', 'status', 'is_public',)
search_fields = ('participant_team__team_name', 'challenge_phase__name',
- 'created_by__username', 'status',)
+ 'challenge_phase__challenge__title', 'created_by__username', 'status',)
def get_challenge_name_and_id(self, obj):
"""Return challenge name corresponding to phase"""
| {"golden_diff": "diff --git a/apps/jobs/admin.py b/apps/jobs/admin.py\n--- a/apps/jobs/admin.py\n+++ b/apps/jobs/admin.py\n@@ -18,10 +18,9 @@\n 'created_by', 'status', 'is_public', 'submission_number', 'submitted_at',\n 'execution_time', 'input_file', 'stdout_file', 'stderr_file',\n 'submission_result_file', 'submission_metadata_file',)\n- list_filter = ('participant_team', 'challenge_phase',\n- 'status', 'is_public',)\n+ list_filter = ('challenge_phase__challenge', 'challenge_phase', 'status', 'is_public',)\n search_fields = ('participant_team__team_name', 'challenge_phase__name',\n- 'created_by__username', 'status',)\n+ 'challenge_phase__challenge__title', 'created_by__username', 'status',)\n \n def get_challenge_name_and_id(self, obj):\n \"\"\"Return challenge name corresponding to phase\"\"\"\n", "issue": "Search submissions by challenge name in submissions table\nCurrently, we support searching the submissions by the `participant team name, challenge phase name, created by name and submission status`. We would like to add searching the submissions by `challenge name` and also add the same to default list filtering options.\nSearch submissions by challenge name in submissions table\nCurrently, we support searching the submissions by the `participant team name, challenge phase name, created by name and submission status`. We would like to add searching the submissions by `challenge name` and also add the same to default list filtering options.\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import admin\n\nfrom base.admin import ImportExportTimeStampedAdmin\n\nfrom .models import Submission\nfrom .sender import publish_submission_message\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](Submission)\nclass SubmissionAdmin(ImportExportTimeStampedAdmin):\n actions = ['submit_job_to_worker']\n list_display = ('participant_team', 'get_challenge_name_and_id', 'challenge_phase',\n 'created_by', 'status', 'is_public', 'submission_number', 'submitted_at',\n 'execution_time', 'input_file', 'stdout_file', 'stderr_file',\n 'submission_result_file', 'submission_metadata_file',)\n list_filter = ('participant_team', 'challenge_phase',\n 'status', 'is_public',)\n search_fields = ('participant_team__team_name', 'challenge_phase__name',\n 'created_by__username', 'status',)\n\n def get_challenge_name_and_id(self, obj):\n \"\"\"Return challenge name corresponding to phase\"\"\"\n return \"%s - %s\" % (obj.challenge_phase.challenge.title, obj.challenge_phase.challenge.id)\n get_challenge_name_and_id.short_description = 'Challenge'\n get_challenge_name_and_id.admin_order_field = 'challenge_phase__challenge'\n\n def submit_job_to_worker(self, request, queryset):\n for submission in queryset:\n challenge_id = submission.challenge_phase.challenge.id\n challenge_phase_id = submission.challenge_phase.id\n submission_id = submission.id\n logger.info(\"[x] Received submission message with challenge id {}, challenge phase id {}, submission id {}\"\n .format(challenge_id, challenge_phase_id, submission_id))\n publish_submission_message(challenge_id, challenge_phase_id, submission.id)\n queryset.update(status=Submission.SUBMITTED)\n\n submit_job_to_worker.short_description = \"Run selected submissions\"\n", "path": "apps/jobs/admin.py"}]} | 1,112 | 206 |
gh_patches_debug_25771 | rasdani/github-patches | git_diff | mne-tools__mne-python-5796 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: Allow retrieval of GFP
#2538 added the ability to plot GFPs. Currently, the GFP is [only temporarily calculated for plotting](https://github.com/Eric89GXL/mne-python/blob/7f8c69bae49041bb4f0507539ccda1bda7f0b394/mne/viz/evoked.py#L397), and the user has no easy way to access the data.
In our EEG workflow, we typically calculate GFPs for every single participant and condition, and average conditions across participants for plotting; or we compute statistics based on the GFP differences. It is therefore highly important for us to have easy access to the GFPs. We resorted to doing the calculations manually based on `Evoked.data`, but this is cumbersome as one has to "leave" the MNE sphere and implement the operations by hand via NumPy and/or Pandas -- which is not easy for beginners and error-prone, as Pandas by default [uses the unbiased estimator](http://stackoverflow.com/questions/24984178/different-std-in-pandas-vs-numpy) for standard deviation and NumPy doesn't.
I can try to implement a GFP function, but I would need assistance in doing so. I don't really know where to start or where to put that code: should it be a method of the `Evoked` class? Potentially exposed as a property, so it could be accessed via `Evoked.gfp`? Or should it be an entirely new class? Would it have to have its own plotting method? etc. pp. Any help and suggestions would be greatly appreciated.
</issue>
<code>
[start of tutorials/plot_object_evoked.py]
1 """
2 .. _tut_evoked_objects:
3
4 The :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data
5 =====================================================================
6
7 The :class:`Evoked <mne.Evoked>` data structure is mainly used for storing
8 averaged data over trials. In MNE the evoked objects are usually created by
9 averaging epochs data with :func:`mne.Epochs.average`.
10 """
11
12 import os.path as op
13
14 import mne
15
16 ###############################################################################
17 # Here for convenience we read the evoked dataset from a file.
18 data_path = mne.datasets.sample.data_path()
19 fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
20 evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
21 print(evokeds)
22
23 ###############################################################################
24 # Notice that the reader function returned a list of evoked instances. This is
25 # because you can store multiple categories into a single file. Here we have
26 # categories of
27 # ``['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']``.
28 # We can also use ``condition`` parameter to read in only one category.
29 evoked = mne.read_evokeds(fname, condition='Left Auditory')
30 evoked.apply_baseline((None, 0)).apply_proj()
31 print(evoked)
32
33 ###############################################################################
34 # If you're gone through the tutorials of raw and epochs datasets, you're
35 # probably already familiar with the :class:`Info <mne.Info>` attribute.
36 # There is nothing new or special with the ``evoked.info``. All the relevant
37 # info is still there.
38 print(evoked.info)
39 print(evoked.times)
40
41 ###############################################################################
42 # The evoked data structure also contains some new attributes easily
43 # accessible:
44 print(evoked.nave) # Number of averaged epochs.
45 print(evoked.first) # First time sample.
46 print(evoked.last) # Last time sample.
47 print(evoked.comment) # Comment on dataset. Usually the condition.
48 print(evoked.kind) # Type of data, either average or standard_error.
49
50 ###############################################################################
51 # The data is also easily accessible. Since the evoked data arrays are usually
52 # much smaller than raw or epochs datasets, they are preloaded into the memory
53 # when the evoked object is constructed. You can access the data as a numpy
54 # array.
55 data = evoked.data
56 print(data.shape)
57
58 ###############################################################################
59 # The data is arranged in an array of shape `(n_channels, n_times)`. Notice
60 # that unlike epochs, evoked object does not support indexing. This means that
61 # to access the data of a specific channel you must use the data array
62 # directly.
63 print('Data from channel {0}:'.format(evoked.ch_names[10]))
64 print(data[10])
65
66 ###############################################################################
67 # If you want to import evoked data from some other system and you have it in a
68 # numpy array you can use :class:`mne.EvokedArray` for that. All you need is
69 # the data and some info about the evoked data. For more information, see
70 # :ref:`tut_creating_data_structures`.
71 evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0])
72 evoked.plot(time_unit='s')
73
74 ###############################################################################
75 # To write an evoked dataset to a file, use the :meth:`mne.Evoked.save` method.
76 # To save multiple categories to a single file, see :func:`mne.write_evokeds`.
77
[end of tutorials/plot_object_evoked.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tutorials/plot_object_evoked.py b/tutorials/plot_object_evoked.py
--- a/tutorials/plot_object_evoked.py
+++ b/tutorials/plot_object_evoked.py
@@ -8,9 +8,11 @@
averaged data over trials. In MNE the evoked objects are usually created by
averaging epochs data with :func:`mne.Epochs.average`.
"""
+# sphinx_gallery_thumbnail_number = 2
import os.path as op
+import matplotlib.pyplot as plt
import mne
###############################################################################
@@ -63,6 +65,16 @@
print('Data from channel {0}:'.format(evoked.ch_names[10]))
print(data[10])
+###############################################################################
+# In the same vein, we can quickly extract (and, e.g., plot) the GFP as the
+# standard deviation across channels, here shown just for EEG.
+
+gfp = evoked.copy().pick_types(eeg=True, meg=False).data.std(axis=0)
+fig, ax = plt.subplots(1)
+ax.plot(evoked.times, gfp / 1e6) # scale to uV
+ax.set(xlabel='Time (sec)', ylabel='GFP (uV)')
+fig.tight_layout()
+
###############################################################################
# If you want to import evoked data from some other system and you have it in a
# numpy array you can use :class:`mne.EvokedArray` for that. All you need is
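One detail from the issue is worth pinning down with numbers: NumPy's `std` defaults to the population estimate (`ddof=0`), while pandas' `Series.std` defaults to the sample estimate (`ddof=1`), which is exactly the kind of silent mismatch hand-rolled GFP code can run into. The snippet below is self-contained and uses made-up data rather than any MNE object; the final lines mirror the channel-wise reduction added to the tutorial above:

```python
import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])

print(np.std(values))            # 1.1180...  population estimate (ddof=0)
print(np.std(values, ddof=1))    # 1.2910...  sample estimate, the pandas .std() default

# GFP over a fake (n_channels, n_times) array: one spread value per time point.
fake_evoked = np.random.randn(60, 500) * 1e-6
gfp = fake_evoked.std(axis=0)
print(gfp.shape)                 # (500,)
```

Which `ddof` convention is appropriate matters little for plotting, but it changes the numbers slightly, so settling on one convention is worthwhile once GFPs feed into statistics.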
| {"golden_diff": "diff --git a/tutorials/plot_object_evoked.py b/tutorials/plot_object_evoked.py\n--- a/tutorials/plot_object_evoked.py\n+++ b/tutorials/plot_object_evoked.py\n@@ -8,9 +8,11 @@\n averaged data over trials. In MNE the evoked objects are usually created by\n averaging epochs data with :func:`mne.Epochs.average`.\n \"\"\"\n+# sphinx_gallery_thumbnail_number = 2\n \n import os.path as op\n \n+import matplotlib.pyplot as plt\n import mne\n \n ###############################################################################\n@@ -63,6 +65,16 @@\n print('Data from channel {0}:'.format(evoked.ch_names[10]))\n print(data[10])\n \n+###############################################################################\n+# In the same vein, we can quickly extract (and, e.g., plot) the GFP as the\n+# standard deviation across channels, here shown just for EEG.\n+\n+gfp = evoked.copy().pick_types(eeg=True, meg=False).data.std(axis=0)\n+fig, ax = plt.subplots(1)\n+ax.plot(evoked.times, gfp / 1e6) # scale to uV\n+ax.set(xlabel='Time (sec)', ylabel='GFP (uV)')\n+fig.tight_layout()\n+\n ###############################################################################\n # If you want to import evoked data from some other system and you have it in a\n # numpy array you can use :class:`mne.EvokedArray` for that. All you need is\n", "issue": "ENH: Allow retrieval of GFP\n#2538 added the ability to plot GFPs. Currently, the GFP is [only temporarily calculated for plotting](https://github.com/Eric89GXL/mne-python/blob/7f8c69bae49041bb4f0507539ccda1bda7f0b394/mne/viz/evoked.py#L397), and the user has no easy way to access the data. \r\n\r\n In our EEG workflow, we typically calculate GFPs for every single participant and condition, and average conditions across participants for plotting; or we compute statistics based on the GFP differences. It is therefore highly important for us to have easy access to the GFPs. We resorted to doing the calculations manually based on `Evoked.data`, but this is cumbersome as one has to \"leave\" the MNE sphere and implement the operations by hand via NumPy and/or Pandas -- which is not easy for beginners and error-prone, as Pandas by default [uses the unbiased estimator](http://stackoverflow.com/questions/24984178/different-std-in-pandas-vs-numpy) for standard deviation and NumPy doesn't.\r\n\r\nI can try to implement a GFP function, but I would need assistance in doing so. I don't really know where to start or where to put that code: should it be a method of the `Evoked` class? Potentially exposed as a property, so it could be accessed via `Evoked.gfp`? Or should it be an entirely new class? Would it have to have its own plotting method? etc. pp. Any help and suggestions would be greatly appreciated.\n", "before_files": [{"content": "\"\"\"\n.. _tut_evoked_objects:\n\nThe :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data\n=====================================================================\n\nThe :class:`Evoked <mne.Evoked>` data structure is mainly used for storing\naveraged data over trials. 
In MNE the evoked objects are usually created by\naveraging epochs data with :func:`mne.Epochs.average`.\n\"\"\"\n\nimport os.path as op\n\nimport mne\n\n###############################################################################\n# Here for convenience we read the evoked dataset from a file.\ndata_path = mne.datasets.sample.data_path()\nfname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')\nevokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True)\nprint(evokeds)\n\n###############################################################################\n# Notice that the reader function returned a list of evoked instances. This is\n# because you can store multiple categories into a single file. Here we have\n# categories of\n# ``['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']``.\n# We can also use ``condition`` parameter to read in only one category.\nevoked = mne.read_evokeds(fname, condition='Left Auditory')\nevoked.apply_baseline((None, 0)).apply_proj()\nprint(evoked)\n\n###############################################################################\n# If you're gone through the tutorials of raw and epochs datasets, you're\n# probably already familiar with the :class:`Info <mne.Info>` attribute.\n# There is nothing new or special with the ``evoked.info``. All the relevant\n# info is still there.\nprint(evoked.info)\nprint(evoked.times)\n\n###############################################################################\n# The evoked data structure also contains some new attributes easily\n# accessible:\nprint(evoked.nave) # Number of averaged epochs.\nprint(evoked.first) # First time sample.\nprint(evoked.last) # Last time sample.\nprint(evoked.comment) # Comment on dataset. Usually the condition.\nprint(evoked.kind) # Type of data, either average or standard_error.\n\n###############################################################################\n# The data is also easily accessible. Since the evoked data arrays are usually\n# much smaller than raw or epochs datasets, they are preloaded into the memory\n# when the evoked object is constructed. You can access the data as a numpy\n# array.\ndata = evoked.data\nprint(data.shape)\n\n###############################################################################\n# The data is arranged in an array of shape `(n_channels, n_times)`. Notice\n# that unlike epochs, evoked object does not support indexing. This means that\n# to access the data of a specific channel you must use the data array\n# directly.\nprint('Data from channel {0}:'.format(evoked.ch_names[10]))\nprint(data[10])\n\n###############################################################################\n# If you want to import evoked data from some other system and you have it in a\n# numpy array you can use :class:`mne.EvokedArray` for that. All you need is\n# the data and some info about the evoked data. For more information, see\n# :ref:`tut_creating_data_structures`.\nevoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0])\nevoked.plot(time_unit='s')\n\n###############################################################################\n# To write an evoked dataset to a file, use the :meth:`mne.Evoked.save` method.\n# To save multiple categories to a single file, see :func:`mne.write_evokeds`.\n", "path": "tutorials/plot_object_evoked.py"}]} | 1,803 | 319 |
gh_patches_debug_1665 | rasdani/github-patches | git_diff | python-pillow__Pillow-1686 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Repeated looping over image stack shows last frame in place of first frame
When looping through the frames in an animation or TIFF stack with `ImageSequence.Iterator`, the frame pointer is not reset for the first frame. Consequently, if the loop is run through a second time the final frame is shown again instead of the first frame.
### Demo
Code
``` python
from PIL import Image, ImageSequence
import os
# Make a test image
os.system((
"convert -depth 8 -size 1x1 xc:'rgb(100,100,100)' xc:'rgb(121,121,121)'"
" xc:'rgb(142,142,142)' xc:'rgb(163,163,163)' image.tif"
))
# Open the image
im = Image.open('image.tif')
# Run through the image
print('First run')
for frame in ImageSequence.Iterator(im):
print(list(frame.getdata()))
# Run through the image again
print('Second run')
for frame in ImageSequence.Iterator(im):
print(list(frame.getdata()))
```
Output
```
First run
[100]
[121]
[142]
[163]
Second run
[163]
[121]
[142]
[163]
```
</issue>
<code>
[start of PIL/ImageSequence.py]
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # sequence support classes
6 #
7 # history:
8 # 1997-02-20 fl Created
9 #
10 # Copyright (c) 1997 by Secret Labs AB.
11 # Copyright (c) 1997 by Fredrik Lundh.
12 #
13 # See the README file for information on usage and redistribution.
14 #
15
16 ##
17
18
19 class Iterator(object):
20 """
21 This class implements an iterator object that can be used to loop
22 over an image sequence.
23
24 You can use the ``[]`` operator to access elements by index. This operator
25 will raise an :py:exc:`IndexError` if you try to access a nonexistent
26 frame.
27
28 :param im: An image object.
29 """
30
31 def __init__(self, im):
32 if not hasattr(im, "seek"):
33 raise AttributeError("im must have seek method")
34 self.im = im
35
36 def __getitem__(self, ix):
37 try:
38 if ix:
39 self.im.seek(ix)
40 return self.im
41 except EOFError:
42 raise IndexError # end of sequence
43
[end of PIL/ImageSequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PIL/ImageSequence.py b/PIL/ImageSequence.py
--- a/PIL/ImageSequence.py
+++ b/PIL/ImageSequence.py
@@ -35,8 +35,7 @@
def __getitem__(self, ix):
try:
- if ix:
- self.im.seek(ix)
+ self.im.seek(ix)
return self.im
except EOFError:
raise IndexError # end of sequence
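To see why the now unconditional `seek` matters, here is a small self-contained model of the iterator; `FrameSource` is a made-up stand-in for a multi-frame image and is not Pillow code. Python's sequence-iteration fallback calls `__getitem__` with 0, 1, 2, ... until `IndexError` is raised, so when index 0 was skipped the underlying image simply stayed on whatever frame the previous loop left it at:

```python
class FrameSource:
    """Stand-in for a multi-frame image that remembers its current frame."""

    def __init__(self, n_frames):
        self.n_frames = n_frames
        self.pos = 0

    def seek(self, ix):
        if ix >= self.n_frames:
            raise EOFError
        self.pos = ix

    def value(self):
        return 100 + 21 * self.pos   # mimics the grey levels of the demo image


class Iterator:
    def __init__(self, im):
        self.im = im

    def __getitem__(self, ix):
        try:
            self.im.seek(ix)          # patched behaviour: seek even when ix == 0
            return self.im
        except EOFError:
            raise IndexError          # end of sequence


im = FrameSource(4)
print([frame.value() for frame in Iterator(im)])  # [100, 121, 142, 163]
print([frame.value() for frame in Iterator(im)])  # same list on a second pass
```

With the old `if ix:` guard, the second list would have started with 163 instead of 100, reproducing the report above.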
| {"golden_diff": "diff --git a/PIL/ImageSequence.py b/PIL/ImageSequence.py\n--- a/PIL/ImageSequence.py\n+++ b/PIL/ImageSequence.py\n@@ -35,8 +35,7 @@\n \n def __getitem__(self, ix):\n try:\n- if ix:\n- self.im.seek(ix)\n+ self.im.seek(ix)\n return self.im\n except EOFError:\n raise IndexError # end of sequence\n", "issue": "Repeated looping over image stack shows last frame in place of first frame\nWhen looping through the frames in an animation or TIFF stack with `ImageSequence.Iterator`, the frame pointer is not reset for the first frame. Consequently, if the loop is run through a second time the final frame is shown again instead of the first frame.\n### Demo\n\nCode\n\n``` python\nfrom PIL import Image, ImageSequence\nimport os\n# Make a test image\nos.system((\n \"convert -depth 8 -size 1x1 xc:'rgb(100,100,100)' xc:'rgb(121,121,121)'\"\n \" xc:'rgb(142,142,142)' xc:'rgb(163,163,163)' image.tif\"\n))\n# Open the image\nim = Image.open('image.tif')\n# Run through the image\nprint('First run')\nfor frame in ImageSequence.Iterator(im):\n print(list(frame.getdata()))\n# Run through the image again\nprint('Second run')\nfor frame in ImageSequence.Iterator(im):\n print(list(frame.getdata()))\n```\n\nOutput\n\n```\nFirst run\n[100]\n[121]\n[142]\n[163]\nSecond run\n[163]\n[121]\n[142]\n[163]\n```\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# sequence support classes\n#\n# history:\n# 1997-02-20 fl Created\n#\n# Copyright (c) 1997 by Secret Labs AB.\n# Copyright (c) 1997 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\n\n##\n\n\nclass Iterator(object):\n \"\"\"\n This class implements an iterator object that can be used to loop\n over an image sequence.\n\n You can use the ``[]`` operator to access elements by index. This operator\n will raise an :py:exc:`IndexError` if you try to access a nonexistent\n frame.\n\n :param im: An image object.\n \"\"\"\n\n def __init__(self, im):\n if not hasattr(im, \"seek\"):\n raise AttributeError(\"im must have seek method\")\n self.im = im\n\n def __getitem__(self, ix):\n try:\n if ix:\n self.im.seek(ix)\n return self.im\n except EOFError:\n raise IndexError # end of sequence\n", "path": "PIL/ImageSequence.py"}]} | 1,152 | 96 |
gh_patches_debug_16844 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warning Check on Unused Parameter hides Error Check about Missing Parameter Type
*cfn-lint version: cfn-lint 0.25.3*
Parameters defined in a template, but not directly used, are not validated for missing attributes like `Type`.
For various reasons, we want to include parameters in our templates that are not used by any resources in those templates, so we disable `W2001`. When this happens, the following template will not fail cfn-lint. If I uncomment the `Metadata` section, I will finally see the `E1012` failure. I should not have to resolve a Warning in order to unmask an Error.
```yaml
Parameters:
Foo:
Description: "Foo?"
Conditions:
AlwaysFalse: !Equals [ true, false ]
Resources:
# Metadata:
# Foo: !Ref Foo
NullResource:
Type: Custom::NullResource
Condition: AlwaysFalse
```
</issue>
<code>
[start of src/cfnlint/rules/parameters/Configuration.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule
6 from cfnlint.rules import RuleMatch
7
8
9 class Configuration(CloudFormationLintRule):
10 """Check if Parameters are configured correctly"""
11 id = 'E2001'
12 shortdesc = 'Parameters have appropriate properties'
13 description = 'Making sure the parameters are properly configured'
14 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
15 tags = ['parameters']
16
17 valid_keys = [
18 'AllowedPattern',
19 'AllowedValues',
20 'ConstraintDescription',
21 'Default',
22 'Description',
23 'MaxLength',
24 'MaxValue',
25 'MinLength',
26 'MinValue',
27 'NoEcho',
28 'Type',
29 ]
30
31 def match(self, cfn):
32 """Check CloudFormation Parameters"""
33
34 matches = []
35
36 for paramname, paramvalue in cfn.get_parameters().items():
37 for propname, _ in paramvalue.items():
38 if propname not in self.valid_keys:
39 message = 'Parameter {0} has invalid property {1}'
40 matches.append(RuleMatch(
41 ['Parameters', paramname, propname],
42 message.format(paramname, propname)
43 ))
44
45 return matches
46
[end of src/cfnlint/rules/parameters/Configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/parameters/Configuration.py b/src/cfnlint/rules/parameters/Configuration.py
--- a/src/cfnlint/rules/parameters/Configuration.py
+++ b/src/cfnlint/rules/parameters/Configuration.py
@@ -28,6 +28,10 @@
'Type',
]
+ required_keys = [
+ 'Type'
+ ]
+
def match(self, cfn):
"""Check CloudFormation Parameters"""
@@ -41,5 +45,12 @@
['Parameters', paramname, propname],
message.format(paramname, propname)
))
+ for reqname in self.required_keys:
+ if reqname not in paramvalue.keys():
+ message = 'Parameter {0} is missing required property {1}'
+ matches.append(RuleMatch(
+ ['Parameters', paramname],
+ message.format(paramname, reqname)
+ ))
return matches
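The essence of the added check does not depend on cfn-lint's rule machinery, so it can be shown with plain dictionaries. The function below is a stand-alone distillation for illustration, not the cfn-lint API: every parameter must carry a `Type`, whether or not anything in the template references it:

```python
VALID_KEYS = {
    'AllowedPattern', 'AllowedValues', 'ConstraintDescription', 'Default',
    'Description', 'MaxLength', 'MaxValue', 'MinLength', 'MinValue',
    'NoEcho', 'Type',
}
REQUIRED_KEYS = {'Type'}


def check_parameters(parameters):
    """Return (parameter name, message) pairs for unknown or missing properties."""
    problems = []
    for name, props in parameters.items():
        for prop in props:
            if prop not in VALID_KEYS:
                problems.append((name, 'invalid property {}'.format(prop)))
        for required in REQUIRED_KEYS:
            if required not in props:
                problems.append((name, 'missing required property {}'.format(required)))
    return problems


# The parameter from the issue: it has a Description but no Type and is never used.
print(check_parameters({'Foo': {'Description': 'Foo?'}}))
# -> [('Foo', 'missing required property Type')]
```

Because the check runs over the Parameters section itself, it fires regardless of whether `W2001` (unused parameter) is enabled, which is the decoupling the issue asks for.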
| {"golden_diff": "diff --git a/src/cfnlint/rules/parameters/Configuration.py b/src/cfnlint/rules/parameters/Configuration.py\n--- a/src/cfnlint/rules/parameters/Configuration.py\n+++ b/src/cfnlint/rules/parameters/Configuration.py\n@@ -28,6 +28,10 @@\n 'Type',\n ]\n \n+ required_keys = [\n+ 'Type'\n+ ]\n+\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n \n@@ -41,5 +45,12 @@\n ['Parameters', paramname, propname],\n message.format(paramname, propname)\n ))\n+ for reqname in self.required_keys:\n+ if reqname not in paramvalue.keys():\n+ message = 'Parameter {0} is missing required property {1}'\n+ matches.append(RuleMatch(\n+ ['Parameters', paramname],\n+ message.format(paramname, reqname)\n+ ))\n \n return matches\n", "issue": "Warning Check on Unused Parameter hides Error Check about Missing Parameter Type\n*cfn-lint version: cfn-lint 0.25.3*\r\n\r\nParameters defined in a template, but not directly used, are not validated for missing attributes like `Type`.\r\n\r\nFor various reasons, we want to include parameters in our templates that are not used by resources in the templates and therefore disable `W2001` When this happens, the following template will not fail cfn-lint. If I uncomment the `Metadata` section, I will finally see the `E1012` failure. I should not have to resolve a Warning in order to unmask an Error.\r\n\r\n```yaml\r\nParameters:\r\n Foo:\r\n Description: \"Foo?\"\r\nConditions:\r\n AlwaysFalse: !Equals [ true, false ]\r\nResources:\r\n # Metadata:\r\n # Foo: !Ref Foo\r\n NullResource:\r\n Type: Custom::NullResource\r\n Condition: AlwaysFalse\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Parameters are configured correctly\"\"\"\n id = 'E2001'\n shortdesc = 'Parameters have appropriate properties'\n description = 'Making sure the parameters are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'\n tags = ['parameters']\n\n valid_keys = [\n 'AllowedPattern',\n 'AllowedValues',\n 'ConstraintDescription',\n 'Default',\n 'Description',\n 'MaxLength',\n 'MaxValue',\n 'MinLength',\n 'MinValue',\n 'NoEcho',\n 'Type',\n ]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n for paramname, paramvalue in cfn.get_parameters().items():\n for propname, _ in paramvalue.items():\n if propname not in self.valid_keys:\n message = 'Parameter {0} has invalid property {1}'\n matches.append(RuleMatch(\n ['Parameters', paramname, propname],\n message.format(paramname, propname)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Configuration.py"}]} | 1,127 | 209 |
gh_patches_debug_31157 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3349 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider signet_jewelers is broken
During the global build at 2021-07-14-14-42-22, spider **signet_jewelers** failed with **2353 features** and **6 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/signet_jewelers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson))
</issue>
<code>
[start of locations/spiders/signet_jewelers.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class SignetJewelersSpider(scrapy.Spider):
12 name = "signet_jewelers"
13 allowed_domains = ['www.jared.com', 'www.kay.com', 'www.zales.com', 'www.pagoda.com', 'www.peoplesjewellers.com',
14 'www.ernestjones.co.uk', 'www.hsamuel.co.uk']
15 download_delay = 0.5 # limit the delay to avoid 403 errors
16
17 ca_prov = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'Newfoundland and Labrador',
18 'Nova Scotia', 'Ontario', 'Saskatchewan']
19
20 states = ["Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado",
21 "Connecticut", "Delaware", "Florida", "Georgia", "Hawaii", "Idaho", "Illinois",
22 "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
23 "Massachusetts", "Michigan", "Minnesota", "Mississippi", "Missouri", "Montana",
24 "Nebraska", "Nevada", "New Hampshire", "New Jersey", "New Mexico", "New York",
25 "North Carolina", "North Dakota", "Ohio", "Oklahoma", "Oregon", "Pennsylvania",
26 "Rhode Island", "South Carolina", "South Dakota", "Tennessee", "Texas", "Utah",
27 "Vermont", "Virginia", "Washington", "West Virginia", "Wisconsin", "Wyoming"
28 ]
29
30 def start_requests(self):
31 north_america_brands = ["jared", "kay", "zales", "pagoda", "peoplesjewellers"]
32
33 uk_urls = [
34 'https://www.hsamuel.co.uk/scripts/dist/store-locator/functionality/store-details.min.js?sprint-17_20190911.3',
35 'https://www.ernestjones.co.uk/scripts/store-locator/storeLocationDetails.js']
36
37 for url in uk_urls:
38 yield scrapy.Request(url=url, callback=self.parse_uk)
39
40 template = 'https://www.{brand}.com/store-finder/view-stores/{region}'
41
42 for brand in north_america_brands:
43 if brand == "peoplesjewellers":
44 for prov in SignetJewelersSpider.ca_prov:
45 url = template.format(brand=brand, region=prov)
46 yield scrapy.Request(url, callback=self.parse_cities)
47 else:
48 for state in SignetJewelersSpider.states:
49 url = template.format(brand=brand, region=state)
50 yield scrapy.Request(url, callback=self.parse_cities)
51
52 def parse_cities(self, response):
53 cities = response.xpath('//*[@class="viewstoreslist"]/a/@href').extract()
54 for i in cities:
55 yield scrapy.Request(response.urljoin(i), callback=self.parse)
56
57 def parse(self, response):
58 script = " ".join(response.xpath('//*[@id="js-store-details"]/div/script/text()').extract())
59 data = re.search(r'storeInformation\s=\s((?s).*)', script).groups()[0]
60 data = data.replace(";", '')
61 data = eval(data)
62
63 if data["region"] in SignetJewelersSpider.ca_prov:
64 country = 'CA'
65 else:
66 country = 'US'
67
68 properties = {
69 'ref': data["name"],
70 'name': data["displayName"],
71 'addr_full': data["line1"],
72 'city': data["town"],
73 'state': data["region"],
74 'postcode': data["postalCode"],
75 'country': country,
76 'lat': data["latitude"],
77 'lon': data["longitude"],
78 'phone': data["phone"],
79 'website': response.url,
80 'brand': re.search(r'www.(\w+)', response.url)[1],
81 }
82
83 yield GeojsonPointItem(**properties)
84
85 def parse_uk(self, response):
86 data = re.search(r'Signet.allStoreDetails=((?s).*)', response.text)[1]
87 data = data.replace(';', '')
88 data = json.loads(data)
89
90 for store in data:
91 properties = {
92 'ref': store["number"],
93 'name': store["name"],
94 'addr_full': store["addressLine1"],
95 'city': store["town"],
96 'postcode': store["postcode"],
97 'country': 'GB',
98 'lat': store["latitude"],
99 'lon': store["longitude"],
100 'brand': re.search(r'www.(\w+)', response.url)[1],
101 }
102
103 yield GeojsonPointItem(**properties)
104
[end of locations/spiders/signet_jewelers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/signet_jewelers.py b/locations/spiders/signet_jewelers.py
--- a/locations/spiders/signet_jewelers.py
+++ b/locations/spiders/signet_jewelers.py
@@ -56,29 +56,38 @@
def parse(self, response):
script = " ".join(response.xpath('//*[@id="js-store-details"]/div/script/text()').extract())
- data = re.search(r'storeInformation\s=\s((?s).*)', script).groups()[0]
- data = data.replace(";", '')
- data = eval(data)
-
- if data["region"] in SignetJewelersSpider.ca_prov:
- country = 'CA'
- else:
- country = 'US'
-
- properties = {
- 'ref': data["name"],
- 'name': data["displayName"],
- 'addr_full': data["line1"],
- 'city': data["town"],
- 'state': data["region"],
- 'postcode': data["postalCode"],
- 'country': country,
- 'lat': data["latitude"],
- 'lon': data["longitude"],
- 'phone': data["phone"],
- 'website': response.url,
- 'brand': re.search(r'www.(\w+)', response.url)[1],
- }
+ data = None
+
+ if re.search(r'storeInformation\s=\s((?s).*)', script) is not None:
+ data = re.search(r'storeInformation\s=\s((?s).*)', script).groups()
+
+ properties = {}
+
+ if data is not None:
+ if len(data) > 0:
+ data = data[0]
+ data = data.replace(";", '')
+ data = eval(data)
+
+ if data["region"] in SignetJewelersSpider.ca_prov:
+ country = 'CA'
+ else:
+ country = 'US'
+
+ properties = {
+ 'ref': data["name"],
+ 'name': data["displayName"],
+ 'addr_full': data["line1"],
+ 'city': data["town"],
+ 'state': data["region"],
+ 'postcode': data["postalCode"],
+ 'country': country,
+ 'lat': data["latitude"],
+ 'lon': data["longitude"],
+ 'phone': data["phone"],
+ 'website': response.url,
+ 'brand': re.search(r'www.(\w+)', response.url)[1],
+ }
yield GeojsonPointItem(**properties)
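The guard added above comes down to one pattern: `re.search` returns `None` when a store page does not embed the expected `storeInformation` blob, and the pre-patch code called `.groups()` on that result unconditionally, which is the failure mode the patch defends against. A self-contained sketch of the defensive version follows; the two `script_*` strings are invented examples, and the DOTALL flag is passed as an argument here rather than inline as the spider writes it:

```python
import re

script_with_store = 'storeInformation = {"name": "0001", "town": "Akron"};'
script_without_store = 'window.dataLayer = window.dataLayer || [];'


def extract_store(script):
    """Return the embedded store dict, or None when the page carries no blob."""
    match = re.search(r'storeInformation\s=\s(.*)', script, flags=re.DOTALL)
    if match is None:                 # some store pages simply lack the data
        return None
    raw = match.group(1).rstrip(';')
    return eval(raw)                  # the spider evaluates the JS object literal


print(extract_store(script_with_store))     # {'name': '0001', 'town': 'Akron'}
print(extract_store(script_without_store))  # None
```

The caller can then skip emitting an item whenever `None` comes back, which is what the extra `if data is not None` branches in the patch achieve.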
| {"golden_diff": "diff --git a/locations/spiders/signet_jewelers.py b/locations/spiders/signet_jewelers.py\n--- a/locations/spiders/signet_jewelers.py\n+++ b/locations/spiders/signet_jewelers.py\n@@ -56,29 +56,38 @@\n \n def parse(self, response):\n script = \" \".join(response.xpath('//*[@id=\"js-store-details\"]/div/script/text()').extract())\n- data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()[0]\n- data = data.replace(\";\", '')\n- data = eval(data)\n-\n- if data[\"region\"] in SignetJewelersSpider.ca_prov:\n- country = 'CA'\n- else:\n- country = 'US'\n-\n- properties = {\n- 'ref': data[\"name\"],\n- 'name': data[\"displayName\"],\n- 'addr_full': data[\"line1\"],\n- 'city': data[\"town\"],\n- 'state': data[\"region\"],\n- 'postcode': data[\"postalCode\"],\n- 'country': country,\n- 'lat': data[\"latitude\"],\n- 'lon': data[\"longitude\"],\n- 'phone': data[\"phone\"],\n- 'website': response.url,\n- 'brand': re.search(r'www.(\\w+)', response.url)[1],\n- }\n+ data = None\n+\n+ if re.search(r'storeInformation\\s=\\s((?s).*)', script) is not None:\n+ data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()\n+\n+ properties = {}\n+\n+ if data is not None:\n+ if len(data) > 0:\n+ data = data[0]\n+ data = data.replace(\";\", '')\n+ data = eval(data)\n+\n+ if data[\"region\"] in SignetJewelersSpider.ca_prov:\n+ country = 'CA'\n+ else:\n+ country = 'US'\n+\n+ properties = {\n+ 'ref': data[\"name\"],\n+ 'name': data[\"displayName\"],\n+ 'addr_full': data[\"line1\"],\n+ 'city': data[\"town\"],\n+ 'state': data[\"region\"],\n+ 'postcode': data[\"postalCode\"],\n+ 'country': country,\n+ 'lat': data[\"latitude\"],\n+ 'lon': data[\"longitude\"],\n+ 'phone': data[\"phone\"],\n+ 'website': response.url,\n+ 'brand': re.search(r'www.(\\w+)', response.url)[1],\n+ }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider signet_jewelers is broken\nDuring the global build at 2021-07-14-14-42-22, spider **signet_jewelers** failed with **2353 features** and **6 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/signet_jewelers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SignetJewelersSpider(scrapy.Spider):\n name = \"signet_jewelers\"\n allowed_domains = ['www.jared.com', 'www.kay.com', 'www.zales.com', 'www.pagoda.com', 'www.peoplesjewellers.com',\n 'www.ernestjones.co.uk', 'www.hsamuel.co.uk']\n download_delay = 0.5 # limit the delay to avoid 403 errors\n\n ca_prov = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'Newfoundland and Labrador',\n 'Nova Scotia', 'Ontario', 'Saskatchewan']\n\n states = [\"Alabama\", \"Alaska\", \"Arizona\", \"Arkansas\", \"California\", \"Colorado\",\n \"Connecticut\", \"Delaware\", \"Florida\", \"Georgia\", \"Hawaii\", \"Idaho\", \"Illinois\",\n \"Indiana\", \"Iowa\", \"Kansas\", \"Kentucky\", \"Louisiana\", \"Maine\", \"Maryland\",\n \"Massachusetts\", \"Michigan\", \"Minnesota\", \"Mississippi\", \"Missouri\", \"Montana\",\n \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\", \"New Mexico\", \"New York\",\n \"North Carolina\", \"North Dakota\", \"Ohio\", \"Oklahoma\", 
\"Oregon\", \"Pennsylvania\",\n \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\", \"Texas\", \"Utah\",\n \"Vermont\", \"Virginia\", \"Washington\", \"West Virginia\", \"Wisconsin\", \"Wyoming\"\n ]\n\n def start_requests(self):\n north_america_brands = [\"jared\", \"kay\", \"zales\", \"pagoda\", \"peoplesjewellers\"]\n\n uk_urls = [\n 'https://www.hsamuel.co.uk/scripts/dist/store-locator/functionality/store-details.min.js?sprint-17_20190911.3',\n 'https://www.ernestjones.co.uk/scripts/store-locator/storeLocationDetails.js']\n\n for url in uk_urls:\n yield scrapy.Request(url=url, callback=self.parse_uk)\n\n template = 'https://www.{brand}.com/store-finder/view-stores/{region}'\n\n for brand in north_america_brands:\n if brand == \"peoplesjewellers\":\n for prov in SignetJewelersSpider.ca_prov:\n url = template.format(brand=brand, region=prov)\n yield scrapy.Request(url, callback=self.parse_cities)\n else:\n for state in SignetJewelersSpider.states:\n url = template.format(brand=brand, region=state)\n yield scrapy.Request(url, callback=self.parse_cities)\n\n def parse_cities(self, response):\n cities = response.xpath('//*[@class=\"viewstoreslist\"]/a/@href').extract()\n for i in cities:\n yield scrapy.Request(response.urljoin(i), callback=self.parse)\n\n def parse(self, response):\n script = \" \".join(response.xpath('//*[@id=\"js-store-details\"]/div/script/text()').extract())\n data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()[0]\n data = data.replace(\";\", '')\n data = eval(data)\n\n if data[\"region\"] in SignetJewelersSpider.ca_prov:\n country = 'CA'\n else:\n country = 'US'\n\n properties = {\n 'ref': data[\"name\"],\n 'name': data[\"displayName\"],\n 'addr_full': data[\"line1\"],\n 'city': data[\"town\"],\n 'state': data[\"region\"],\n 'postcode': data[\"postalCode\"],\n 'country': country,\n 'lat': data[\"latitude\"],\n 'lon': data[\"longitude\"],\n 'phone': data[\"phone\"],\n 'website': response.url,\n 'brand': re.search(r'www.(\\w+)', response.url)[1],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_uk(self, response):\n data = re.search(r'Signet.allStoreDetails=((?s).*)', response.text)[1]\n data = data.replace(';', '')\n data = json.loads(data)\n\n for store in data:\n properties = {\n 'ref': store[\"number\"],\n 'name': store[\"name\"],\n 'addr_full': store[\"addressLine1\"],\n 'city': store[\"town\"],\n 'postcode': store[\"postcode\"],\n 'country': 'GB',\n 'lat': store[\"latitude\"],\n 'lon': store[\"longitude\"],\n 'brand': re.search(r'www.(\\w+)', response.url)[1],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/signet_jewelers.py"}]} | 2,013 | 588 |
gh_patches_debug_37852 | rasdani/github-patches | git_diff | akvo__akvo-rsr-5268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature Request: Document the results framework
### What are you trying to do?
Understand how the results framework functions
### Describe the solution you'd like
A technical documentation of how it works.
### Have you considered alternatives?
_No response_
### Additional context
_No response_
</issue>
<code>
[start of akvo/rsr/models/project_hierarchy.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db import models
8 from django.utils.translation import ugettext_lazy as _
9
10
11 class ProjectHierarchy(models.Model):
12 project_relation = 'projecthierarchy__in'
13 root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)
14 max_depth = models.PositiveSmallIntegerField()
15 is_master = models.BooleanField(_('is master program'), default=False)
16
17 class Meta:
18 app_label = 'rsr'
19 verbose_name = _('program')
20 verbose_name_plural = _('programs')
21 ordering = ['-id']
22
23 @property
24 def descendants(self):
25 return self.root_project.descendants(max_depth=self.max_depth)
26
27 @property
28 def project_count(self):
29 return self.descendants.count() - 1 # remove root_project from count
30
31 @property
32 def project_ids(self):
33 return self.descendants.values_list('id', flat=True)
34
35 @property
36 def organisation(self):
37 return self.root_project.reporting_org
38
39 def __str__(self):
40 return self.root_project.title
41
[end of akvo/rsr/models/project_hierarchy.py]
[start of doc/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # For the full list of built-in configuration values, see the documentation:
4 # https://www.sphinx-doc.org/en/master/usage/configuration.html
5
6 # -- Project information -----------------------------------------------------
7 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
8
9 import os
10 import sys
11 import django
12 sys.path.insert(0, os.path.abspath('..'))
13 os.environ['DJANGO_SETTINGS_MODULE'] = 'akvo.settings'
14 django.setup()
15
16 project = 'Akvo RSR'
17 copyright = '2023, Akvo Foundation'
18 author = 'Akvo Foundation'
19
20 # -- General configuration ---------------------------------------------------
21 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
22
23 extensions = [
24 'sphinx.ext.autodoc',
25 'sphinx.ext.autosummary',
26 'sphinx.ext.viewcode',
27 'myst_parser',
28 ]
29
30 templates_path = ['_templates']
31 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
32
33
34
35 # -- Options for HTML output -------------------------------------------------
36 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
37
38 html_theme = 'cloud'
39 html_static_path = ['_static']
40
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/models/project_hierarchy.py b/akvo/rsr/models/project_hierarchy.py
--- a/akvo/rsr/models/project_hierarchy.py
+++ b/akvo/rsr/models/project_hierarchy.py
@@ -9,10 +9,22 @@
class ProjectHierarchy(models.Model):
+ """
+ The actual "Program" with a project hierarchy.
+ """
project_relation = 'projecthierarchy__in'
+
root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)
+ """
+ The root of the program
+ It can be used to create subprograms / a program tree
+ """
+
max_depth = models.PositiveSmallIntegerField()
+ """TODO: It is unclear why this field currently exists"""
+
is_master = models.BooleanField(_('is master program'), default=False)
+ """Used when an organisation has one program under which they would like to create subprograms"""
class Meta:
app_label = 'rsr'
@@ -22,10 +34,15 @@
@property
def descendants(self):
+ """
+ The entire tree in a list.
+ No order is guaranteed
+ """
return self.root_project.descendants(max_depth=self.max_depth)
@property
def project_count(self):
+ """The number of children without counting the root project"""
return self.descendants.count() - 1 # remove root_project from count
@property
@@ -34,6 +51,7 @@
@property
def organisation(self):
+ """The reporting organisation of the tree"""
return self.root_project.reporting_org
def __str__(self):
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -21,6 +21,7 @@
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
+ 'sphinxcontrib.plantuml',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
@@ -30,7 +31,9 @@
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-
+myst_enable_extensions = [
+ "colon_fence", # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#syntax-colon-fence
+]
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
| {"golden_diff": "diff --git a/akvo/rsr/models/project_hierarchy.py b/akvo/rsr/models/project_hierarchy.py\n--- a/akvo/rsr/models/project_hierarchy.py\n+++ b/akvo/rsr/models/project_hierarchy.py\n@@ -9,10 +9,22 @@\n \n \n class ProjectHierarchy(models.Model):\n+ \"\"\"\n+ The actual \"Program\" with a project hierarchy.\n+ \"\"\"\n project_relation = 'projecthierarchy__in'\n+\n root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)\n+ \"\"\"\n+ The root of the program\n+ It can be used to create subprograms / a program tree\n+ \"\"\"\n+\n max_depth = models.PositiveSmallIntegerField()\n+ \"\"\"TODO: It is unclear why this field currently exists\"\"\"\n+\n is_master = models.BooleanField(_('is master program'), default=False)\n+ \"\"\"Used when an organisation has one program under which they would like to create subprograms\"\"\"\n \n class Meta:\n app_label = 'rsr'\n@@ -22,10 +34,15 @@\n \n @property\n def descendants(self):\n+ \"\"\"\n+ The entire tree in a list.\n+ No order is guaranteed\n+ \"\"\"\n return self.root_project.descendants(max_depth=self.max_depth)\n \n @property\n def project_count(self):\n+ \"\"\"The number of children without counting the root project\"\"\"\n return self.descendants.count() - 1 # remove root_project from count\n \n @property\n@@ -34,6 +51,7 @@\n \n @property\n def organisation(self):\n+ \"\"\"The reporting organisation of the tree\"\"\"\n return self.root_project.reporting_org\n \n def __str__(self):\ndiff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -21,6 +21,7 @@\n # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n \n extensions = [\n+ 'sphinxcontrib.plantuml',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode',\n@@ -30,7 +31,9 @@\n templates_path = ['_templates']\n exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n \n-\n+myst_enable_extensions = [\n+ \"colon_fence\", # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#syntax-colon-fence\n+]\n \n # -- Options for HTML output -------------------------------------------------\n # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n", "issue": "Feature Request: Document the results framework\n### What are you trying to do?\n\nUnderstand how the results framework functions\n\n### Describe the solution you'd like\n\nA technical documentation of how it works.\n\n### Have you consider alternatives?\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ProjectHierarchy(models.Model):\n project_relation = 'projecthierarchy__in'\n root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)\n max_depth = models.PositiveSmallIntegerField()\n is_master = models.BooleanField(_('is master program'), default=False)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('program')\n verbose_name_plural = _('programs')\n ordering = ['-id']\n\n @property\n def descendants(self):\n return self.root_project.descendants(max_depth=self.max_depth)\n\n @property\n def project_count(self):\n return 
self.descendants.count() - 1 # remove root_project from count\n\n @property\n def project_ids(self):\n return self.descendants.values_list('id', flat=True)\n\n @property\n def organisation(self):\n return self.root_project.reporting_org\n\n def __str__(self):\n return self.root_project.title\n", "path": "akvo/rsr/models/project_hierarchy.py"}, {"content": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport sys\nimport django\nsys.path.insert(0, os.path.abspath('..'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'akvo.settings'\ndjango.setup()\n\nproject = 'Akvo RSR'\ncopyright = '2023, Akvo Foundation'\nauthor = 'Akvo Foundation'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode',\n 'myst_parser',\n]\n\ntemplates_path = ['_templates']\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'cloud'\nhtml_static_path = ['_static']\n", "path": "doc/conf.py"}]} | 1,310 | 570 |
gh_patches_debug_5813 | rasdani/github-patches | git_diff | saleor__saleor-2087 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing variable in "fulfillment" email
Two small issues in the "fulfillment" email:
- logo is missing
- in the footer there is a missing template variable
I've just tested it and this is what the email looks like:

</issue>
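Editorial note (not part of the quoted report): the accepted patch further down in this record changes `email_data.update({'context': ...})` to `email_data['context'].update(...)`, which points at the underlying mechanism — replacing the nested `context` dict wholesale discards keys such as `site_name` and `url` that were collected earlier. A minimal, self-contained illustration with made-up values:

```python
# Context as built before the fulfillment data is added (values are illustrative).
email_data = {"context": {"site_name": "Example shop", "url": "https://example.com/order/123"}}

# Replacing the whole 'context' value loses site_name and url:
email_data.update({"context": {"fulfillment": "shipment #1"}})
assert "site_name" not in email_data["context"]

# Merging into the existing 'context' keeps the earlier keys:
email_data = {"context": {"site_name": "Example shop", "url": "https://example.com/order/123"}}
email_data["context"].update({"fulfillment": "shipment #1"})
assert email_data["context"]["site_name"] == "Example shop"
```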
<code>
[start of saleor/order/emails.py]
1 from celery import shared_task
2 from django.conf import settings
3 from django.contrib.sites.models import Site
4 from django.urls import reverse
5 from templated_email import send_templated_mail
6
7 from ..core.utils import build_absolute_uri
8 from ..seo.schema.email import get_order_confirmation_markup
9 from .models import Fulfillment, Order
10
11 CONFIRM_ORDER_TEMPLATE = 'source/order/confirm_order'
12 CONFIRM_FULFILLMENT_TEMPLATE = 'source/order/confirm_fulfillment'
13 UPDATE_FULFILLMENT_TEMPLATE = 'source/order/update_fulfillment'
14 CONFIRM_PAYMENT_TEMPLATE = 'source/order/payment/confirm_payment'
15 CONFIRM_NOTE_TEMPLATE = 'source/order/note/confirm_note'
16
17
18 def get_email_context(order_token):
19 """Prepares context required for email template rendering."""
20 site = Site.objects.get_current()
21 order_url = build_absolute_uri(
22 reverse('order:details', kwargs={'token': order_token}))
23 ctx = {
24 'protocol': 'https' if settings.ENABLE_SSL else 'http',
25 'site_name': site.name,
26 'domain': site.domain,
27 'url': order_url}
28 return ctx
29
30
31 def collect_data_for_email(order_pk, template):
32 """Collects data required for email sending.
33
34 Args:
35 order_pk (int): order primary key
36 template (str): email template path
37 """
38 order = Order.objects.get(pk=order_pk)
39 recipient_email = order.get_user_current_email()
40 email_context = get_email_context(order.token)
41
42 # Order confirmation template requires additional information
43 if template == CONFIRM_ORDER_TEMPLATE:
44 email_markup = get_order_confirmation_markup(order)
45 email_context.update(
46 {'order': order, 'schema_markup': email_markup})
47
48 return {
49 'recipient_list': [recipient_email], 'template_name': template,
50 'context': email_context, 'from_email': settings.ORDER_FROM_EMAIL}
51
52
53 def collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):
54 fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)
55 email_data = collect_data_for_email(order_pk, template)
56 email_data.update({'context': {'fulfillment': fulfillment}})
57 return email_data
58
59
60 @shared_task
61 def send_order_confirmation(order_pk):
62 """Sends order confirmation email."""
63 email_data = collect_data_for_email(order_pk, CONFIRM_ORDER_TEMPLATE)
64 send_templated_mail(**email_data)
65
66
67 @shared_task
68 def send_fulfillment_confirmation(order_pk, fulfillment_pk):
69 email_data = collect_data_for_fullfillment_email(
70 order_pk, CONFIRM_FULFILLMENT_TEMPLATE, fulfillment_pk)
71 send_templated_mail(**email_data)
72
73
74 @shared_task
75 def send_fulfillment_update(order_pk, fulfillment_pk):
76 email_data = collect_data_for_fullfillment_email(
77 order_pk, UPDATE_FULFILLMENT_TEMPLATE, fulfillment_pk)
78 send_templated_mail(**email_data)
79
80
81 @shared_task
82 def send_payment_confirmation(order_pk):
83 """Sends payment confirmation email."""
84 email_data = collect_data_for_email(order_pk, CONFIRM_PAYMENT_TEMPLATE)
85 send_templated_mail(**email_data)
86
87
88 @shared_task
89 def send_note_confirmation(order_pk):
90 """Notifies customer, when new note was added to an order."""
91 email_data = collect_data_for_email(order_pk, CONFIRM_NOTE_TEMPLATE)
92 send_templated_mail(**email_data)
93
[end of saleor/order/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/order/emails.py b/saleor/order/emails.py
--- a/saleor/order/emails.py
+++ b/saleor/order/emails.py
@@ -53,7 +53,7 @@
def collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):
fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)
email_data = collect_data_for_email(order_pk, template)
- email_data.update({'context': {'fulfillment': fulfillment}})
+ email_data['context'].update({'fulfillment': fulfillment})
return email_data
| {"golden_diff": "diff --git a/saleor/order/emails.py b/saleor/order/emails.py\n--- a/saleor/order/emails.py\n+++ b/saleor/order/emails.py\n@@ -53,7 +53,7 @@\n def collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):\n fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)\n email_data = collect_data_for_email(order_pk, template)\n- email_data.update({'context': {'fulfillment': fulfillment}})\n+ email_data['context'].update({'fulfillment': fulfillment})\n return email_data\n", "issue": "Missing variable in \"fulfillment\" email\nTwo small issues in the \"fulfillment\" email:\r\n- logo is missing\r\n- in footer there is missing template variable\r\n\r\nI've just tested it and this is how the email looks like:\r\n\r\n\r\n\n", "before_files": [{"content": "from celery import shared_task\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.urls import reverse\nfrom templated_email import send_templated_mail\n\nfrom ..core.utils import build_absolute_uri\nfrom ..seo.schema.email import get_order_confirmation_markup\nfrom .models import Fulfillment, Order\n\nCONFIRM_ORDER_TEMPLATE = 'source/order/confirm_order'\nCONFIRM_FULFILLMENT_TEMPLATE = 'source/order/confirm_fulfillment'\nUPDATE_FULFILLMENT_TEMPLATE = 'source/order/update_fulfillment'\nCONFIRM_PAYMENT_TEMPLATE = 'source/order/payment/confirm_payment'\nCONFIRM_NOTE_TEMPLATE = 'source/order/note/confirm_note'\n\n\ndef get_email_context(order_token):\n \"\"\"Prepares context required for email template rendering.\"\"\"\n site = Site.objects.get_current()\n order_url = build_absolute_uri(\n reverse('order:details', kwargs={'token': order_token}))\n ctx = {\n 'protocol': 'https' if settings.ENABLE_SSL else 'http',\n 'site_name': site.name,\n 'domain': site.domain,\n 'url': order_url}\n return ctx\n\n\ndef collect_data_for_email(order_pk, template):\n \"\"\"Collects data required for email sending.\n\n Args:\n order_pk (int): order primary key\n template (str): email template path\n \"\"\"\n order = Order.objects.get(pk=order_pk)\n recipient_email = order.get_user_current_email()\n email_context = get_email_context(order.token)\n\n # Order confirmation template requires additional information\n if template == CONFIRM_ORDER_TEMPLATE:\n email_markup = get_order_confirmation_markup(order)\n email_context.update(\n {'order': order, 'schema_markup': email_markup})\n\n return {\n 'recipient_list': [recipient_email], 'template_name': template,\n 'context': email_context, 'from_email': settings.ORDER_FROM_EMAIL}\n\n\ndef collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):\n fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)\n email_data = collect_data_for_email(order_pk, template)\n email_data.update({'context': {'fulfillment': fulfillment}})\n return email_data\n\n\n@shared_task\ndef send_order_confirmation(order_pk):\n \"\"\"Sends order confirmation email.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_ORDER_TEMPLATE)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_fulfillment_confirmation(order_pk, fulfillment_pk):\n email_data = collect_data_for_fullfillment_email(\n order_pk, CONFIRM_FULFILLMENT_TEMPLATE, fulfillment_pk)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_fulfillment_update(order_pk, fulfillment_pk):\n email_data = collect_data_for_fullfillment_email(\n order_pk, UPDATE_FULFILLMENT_TEMPLATE, fulfillment_pk)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef 
send_payment_confirmation(order_pk):\n \"\"\"Sends payment confirmation email.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_PAYMENT_TEMPLATE)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_note_confirmation(order_pk):\n \"\"\"Notifies customer, when new note was added to an order.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_NOTE_TEMPLATE)\n send_templated_mail(**email_data)\n", "path": "saleor/order/emails.py"}]} | 1,555 | 132 |
gh_patches_debug_728 | rasdani/github-patches | git_diff | speechbrain__speechbrain-1504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Torch 1.12 not compatible?
Working to install speechbrain 0.5.12, I get the error that "speechbrain 0.5.12 requires torch<=1.11,>=1.7, but you have torch 1.12.0 which is incompatible." I read elsewhere that it should work with >=1.7.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 import os
3 import sys
4 import site
5 import setuptools
6 from distutils.core import setup
7
8
9 # Editable install in user site directory can be allowed with this hack:
10 # https://github.com/pypa/pip/issues/7953.
11 site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
12
13 with open("README.md") as f:
14 long_description = f.read()
15
16 with open(os.path.join("speechbrain", "version.txt")) as f:
17 version = f.read().strip()
18
19 setup(
20 name="speechbrain",
21 version=version,
22 description="All-in-one speech toolkit in pure Python and Pytorch",
23 long_description=long_description,
24 long_description_content_type="text/markdown",
25 author="Mirco Ravanelli & Others",
26 author_email="[email protected]",
27 classifiers=[
28 "Programming Language :: Python :: 3",
29 "License :: OSI Approved :: Apache Software License",
30 ],
31 packages=setuptools.find_packages(),
32 package_data={"speechbrain": ["version.txt", "log-config.yaml"]},
33 install_requires=[
34 "hyperpyyaml",
35 "joblib",
36 "numpy",
37 "packaging",
38 "scipy",
39 "sentencepiece",
40 "torch>=1.7,<=1.11",
41 "torchaudio",
42 "tqdm",
43 "huggingface_hub",
44 ],
45 python_requires=">=3.7",
46 url="https://speechbrain.github.io/",
47 )
48
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
"packaging",
"scipy",
"sentencepiece",
- "torch>=1.7,<=1.11",
+ "torch>=1.9",
"torchaudio",
"tqdm",
"huggingface_hub",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n- \"torch>=1.7,<=1.11\",\n+ \"torch>=1.9\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n", "issue": "Torch 1.12 not compatible?\nworking to install speechbrain 0.5.12, and getting the error that \"speechbrain 0.5.12 requires torch<=1.11,>=1.7, but you have torch 1.12.0 which is incompatible.\" read elsewhere that it should work with >=1.7. \n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport site\nimport setuptools\nfrom distutils.core import setup\n\n\n# Editable install in user site directory can be allowed with this hack:\n# https://github.com/pypa/pip/issues/7953.\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nwith open(os.path.join(\"speechbrain\", \"version.txt\")) as f:\n version = f.read().strip()\n\nsetup(\n name=\"speechbrain\",\n version=version,\n description=\"All-in-one speech toolkit in pure Python and Pytorch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Mirco Ravanelli & Others\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n packages=setuptools.find_packages(),\n package_data={\"speechbrain\": [\"version.txt\", \"log-config.yaml\"]},\n install_requires=[\n \"hyperpyyaml\",\n \"joblib\",\n \"numpy\",\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n \"torch>=1.7,<=1.11\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n ],\n python_requires=\">=3.7\",\n url=\"https://speechbrain.github.io/\",\n)\n", "path": "setup.py"}]} | 1,019 | 91 |
gh_patches_debug_18013 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-446 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
romanized decorator mutates ROMANIZATION_DICT
After `@romanized` is used, ROMANIZATION_DICT gets updated and every module importing it will get this mutated ROMANIZATION_DICT.
The snippet below should reproduce the problem.
```
from mimesis import decorators, data
if __name__ == '__main__':
print('ROMANIZATION_DICT: before')
print(data.ROMANIZATION_DICT)
@decorators.romanized('ru')
def russian_name(): return 'Петр Петрович'
# next line is where ROMANIZATION_DICT mutates
russian_name()
print('ROMANIZATION_DICT: after')
print(data.ROMANIZATION_DICT)
```
Problem is here:
https://github.com/lk-geimfari/mimesis/blob/master/mimesis/decorators.py#L29
</issue>
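Editorial note: the fix direction shown in the accepted diff below is to build a private mapping first and copy the shared data into it, so `ROMANIZATION_DICT` is never written to. A hedged sketch using the same names as the module above:

```python
from string import ascii_letters as letters, digits, punctuation

from mimesis import data

locale = "ru"
# Start from a fresh local dict; update() copies entries *out of* the shared
# ROMANIZATION_DICT instead of writing ascii/digit/punctuation keys into it.
alphabet = {s: s for s in letters + digits + punctuation}
alphabet.update(data.ROMANIZATION_DICT[locale])
alphabet.update(data.COMMON_LETTERS)
```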
<code>
[start of mimesis/decorators.py]
1 """Decorators for the public API and for internal purpose."""
2
3 import functools
4 from string import ascii_letters as letters
5 from string import digits, punctuation
6 from typing import Callable
7
8 from mimesis import data
9 from mimesis.exceptions import UnsupportedLocale
10
11
12 def romanized(locale: str = '') -> Callable:
13 """Romanize the Cyrillic text.
14
15 Transliterate the Cyrillic language from the Cyrillic
16 script into the Latin alphabet.
17
18 .. note:: At this moment it works only for `ru`, `uk`, `kk`.
19
20 :param locale: Locale code.
21 :return: Latinized text.
22 """
23 def romanized_deco(func):
24 @functools.wraps(func)
25 def wrapper(*args, **kwargs):
26 try:
27 alphabet = data.ROMANIZATION_DICT[locale]
28 # Add common cyrillic common letters
29 alphabet.update(data.COMMON_LETTERS)
30 # String can contain ascii symbols, digits and
31 # punctuation symbols.
32 alphabet.update({s: s for s in
33 letters + digits + punctuation})
34 except KeyError:
35 raise UnsupportedLocale(locale)
36 result = func(*args, **kwargs)
37 txt = ''.join([alphabet[i] for i in result if i in alphabet])
38 return txt
39
40 return wrapper
41
42 return romanized_deco
43
[end of mimesis/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mimesis/decorators.py b/mimesis/decorators.py
--- a/mimesis/decorators.py
+++ b/mimesis/decorators.py
@@ -24,13 +24,13 @@
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
- alphabet = data.ROMANIZATION_DICT[locale]
- # Add common cyrillic common letters
- alphabet.update(data.COMMON_LETTERS)
# String can contain ascii symbols, digits and
# punctuation symbols.
- alphabet.update({s: s for s in
- letters + digits + punctuation})
+ alphabet = {s: s for s in
+ letters + digits + punctuation}
+ alphabet.update(data.ROMANIZATION_DICT[locale])
+ # Add common cyrillic letters
+ alphabet.update(data.COMMON_LETTERS)
except KeyError:
raise UnsupportedLocale(locale)
result = func(*args, **kwargs)
| {"golden_diff": "diff --git a/mimesis/decorators.py b/mimesis/decorators.py\n--- a/mimesis/decorators.py\n+++ b/mimesis/decorators.py\n@@ -24,13 +24,13 @@\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n- alphabet = data.ROMANIZATION_DICT[locale]\n- # Add common cyrillic common letters\n- alphabet.update(data.COMMON_LETTERS)\n # String can contain ascii symbols, digits and\n # punctuation symbols.\n- alphabet.update({s: s for s in\n- letters + digits + punctuation})\n+ alphabet = {s: s for s in\n+ letters + digits + punctuation}\n+ alphabet.update(data.ROMANIZATION_DICT[locale])\n+ # Add common cyrillic letters\n+ alphabet.update(data.COMMON_LETTERS)\n except KeyError:\n raise UnsupportedLocale(locale)\n result = func(*args, **kwargs)\n", "issue": "romanized decorator mutates ROMANIZATION_DICT\nAfter `@romanized` is used, ROMANIZATION_DICT gets updated and every module importing it will get this mutated ROMANIZATION_DICT.\r\nSnippet below should reproduce problem.\r\n```\r\nfrom mimesis import decorators, data\r\n\r\n\r\nif __name__ == '__main__':\r\n print('ROMANIZATION_DICT: before')\r\n print(data.ROMANIZATION_DICT)\r\n\r\n @decorators.romanized('ru')\r\n def russian_name(): return '\u041f\u0435\u0442\u0440 \u041f\u0435\u0442\u0440\u043e\u0432\u0438\u0447'\r\n # next line is where ROMANIZATION_DICT mutates\r\n russian_name()\r\n\r\n print('ROMANIZATION_DICT: after')\r\n print(data.ROMANIZATION_DICT)\r\n```\r\nProblem is here:\r\nhttps://github.com/lk-geimfari/mimesis/blob/master/mimesis/decorators.py#L29\r\n\n", "before_files": [{"content": "\"\"\"Decorators for the public API and for internal purpose.\"\"\"\n\nimport functools\nfrom string import ascii_letters as letters\nfrom string import digits, punctuation\nfrom typing import Callable\n\nfrom mimesis import data\nfrom mimesis.exceptions import UnsupportedLocale\n\n\ndef romanized(locale: str = '') -> Callable:\n \"\"\"Romanize the Cyrillic text.\n\n Transliterate the Cyrillic language from the Cyrillic\n script into the Latin alphabet.\n\n .. note:: At this moment it works only for `ru`, `uk`, `kk`.\n\n :param locale: Locale code.\n :return: Latinized text.\n \"\"\"\n def romanized_deco(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n alphabet = data.ROMANIZATION_DICT[locale]\n # Add common cyrillic common letters\n alphabet.update(data.COMMON_LETTERS)\n # String can contain ascii symbols, digits and\n # punctuation symbols.\n alphabet.update({s: s for s in\n letters + digits + punctuation})\n except KeyError:\n raise UnsupportedLocale(locale)\n result = func(*args, **kwargs)\n txt = ''.join([alphabet[i] for i in result if i in alphabet])\n return txt\n\n return wrapper\n\n return romanized_deco\n", "path": "mimesis/decorators.py"}]} | 1,079 | 221 |
gh_patches_debug_26944 | rasdani/github-patches | git_diff | Qiskit__qiskit-12321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add insert_barrier argument to UnitaryOverlap
### What should we add?
This argument would insert a barrier between the two unitaries. This is useful if you want to prevent circuit optimization between the two parts.
</issue>
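Editorial note: a rough sketch of the requested behaviour (the keyword name `insert_barrier` follows the wording of the request; the composition order mirrors the existing `UnitaryOverlap` code below):

```python
from qiskit.circuit import QuantumCircuit

def overlap_with_barrier(unitary1: QuantumCircuit, unitary2: QuantumCircuit) -> QuantumCircuit:
    """Sketch only: U1, then a barrier, then U2^dagger on the same qubits."""
    circuit = QuantumCircuit(unitary1.num_qubits)
    circuit.compose(unitary1, inplace=True)
    circuit.barrier()  # keeps transpiler passes from optimizing across the two halves
    circuit.compose(unitary2.inverse(), inplace=True)
    return circuit
```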
<code>
[start of qiskit/circuit/library/overlap.py]
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2023.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """Unitary overlap circuit."""
14
15 from qiskit.circuit import QuantumCircuit, Gate
16 from qiskit.circuit.parametervector import ParameterVector
17 from qiskit.circuit.exceptions import CircuitError
18 from qiskit.circuit import Barrier
19
20
21 class UnitaryOverlap(QuantumCircuit):
22 r"""Circuit that returns the overlap between two unitaries :math:`U_2^{\dag} U_1`.
23
24 The input quantum circuits must represent unitary operations, since they must be invertible.
25 If the inputs will have parameters, they are replaced by :class:`.ParameterVector`\s with
26 names `"p1"` (for circuit ``unitary1``) and `"p2"` (for circuit ``unitary_2``) in the output
27 circuit.
28
29 This circuit is usually employed in computing the fidelity:
30
31 .. math::
32
33 \left|\langle 0| U_2^{\dag} U_1|0\rangle\right|^{2}
34
35 by computing the probability of being in the all-zeros bit-string, or equivalently,
36 the expectation value of projector :math:`|0\rangle\langle 0|`.
37
38 Example::
39
40 import numpy as np
41 from qiskit.circuit.library import EfficientSU2, UnitaryOverlap
42 from qiskit.primitives import Sampler
43
44 # get two circuit to prepare states of which we comput the overlap
45 circuit = EfficientSU2(2, reps=1)
46 unitary1 = circuit.assign_parameters(np.random.random(circuit.num_parameters))
47 unitary2 = circuit.assign_parameters(np.random.random(circuit.num_parameters))
48
49 # create the overlap circuit
50 overlap = UnitaryOverap(unitary1, unitary2)
51
52 # sample from the overlap
53 sampler = Sampler(options={"shots": 100})
54 result = sampler.run(overlap).result()
55
56 # the fidelity is the probability to measure 0
57 fidelity = result.quasi_dists[0].get(0, 0)
58
59 """
60
61 def __init__(
62 self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1="p1", prefix2="p2"
63 ):
64 """
65 Args:
66 unitary1: Unitary acting on the ket vector.
67 unitary2: Unitary whose inverse operates on the bra vector.
68 prefix1: The name of the parameter vector associated to ``unitary1``,
69 if it is parameterized. Defaults to ``"p1"``.
70 prefix2: The name of the parameter vector associated to ``unitary2``,
71 if it is parameterized. Defaults to ``"p2"``.
72
73 Raises:
74 CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.
75 CircuitError: Inputs contain measurements and/or resets.
76 """
77 # check inputs are valid
78 if unitary1.num_qubits != unitary2.num_qubits:
79 raise CircuitError(
80 f"Number of qubits in unitaries does "
81 f"not match: {unitary1.num_qubits} != {unitary2.num_qubits}."
82 )
83
84 unitaries = [unitary1, unitary2]
85 for unitary in unitaries:
86 _check_unitary(unitary)
87
88 # Vectors of new parameters, if any. Need the unitaries in a list here to ensure
89 # we can overwrite them.
90 for i, prefix in enumerate([prefix1, prefix2]):
91 if unitaries[i].num_parameters > 0:
92 new_params = ParameterVector(prefix, unitaries[i].num_parameters)
93 unitaries[i] = unitaries[i].assign_parameters(new_params)
94
95 # Generate the actual overlap circuit
96 super().__init__(unitaries[0].num_qubits, name="UnitaryOverlap")
97 self.compose(unitaries[0], inplace=True)
98 self.compose(unitaries[1].inverse(), inplace=True)
99
100
101 def _check_unitary(circuit):
102 """Check a circuit is unitary by checking if all operations are of type ``Gate``."""
103
104 for instruction in circuit.data:
105 if not isinstance(instruction.operation, (Gate, Barrier)):
106 raise CircuitError(
107 (
108 "One or more instructions cannot be converted to"
109 ' a gate. "{}" is not a gate instruction'
110 ).format(instruction.operation.name)
111 )
112
[end of qiskit/circuit/library/overlap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/circuit/library/overlap.py b/qiskit/circuit/library/overlap.py
--- a/qiskit/circuit/library/overlap.py
+++ b/qiskit/circuit/library/overlap.py
@@ -59,7 +59,12 @@
"""
def __init__(
- self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1="p1", prefix2="p2"
+ self,
+ unitary1: QuantumCircuit,
+ unitary2: QuantumCircuit,
+ prefix1: str = "p1",
+ prefix2: str = "p2",
+ insert_barrier: bool = False,
):
"""
Args:
@@ -69,6 +74,7 @@
if it is parameterized. Defaults to ``"p1"``.
prefix2: The name of the parameter vector associated to ``unitary2``,
if it is parameterized. Defaults to ``"p2"``.
+ insert_barrier: Whether to insert a barrier between the two unitaries.
Raises:
CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.
@@ -95,6 +101,8 @@
# Generate the actual overlap circuit
super().__init__(unitaries[0].num_qubits, name="UnitaryOverlap")
self.compose(unitaries[0], inplace=True)
+ if insert_barrier:
+ self.barrier()
self.compose(unitaries[1].inverse(), inplace=True)
| {"golden_diff": "diff --git a/qiskit/circuit/library/overlap.py b/qiskit/circuit/library/overlap.py\n--- a/qiskit/circuit/library/overlap.py\n+++ b/qiskit/circuit/library/overlap.py\n@@ -59,7 +59,12 @@\n \"\"\"\n \n def __init__(\n- self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1=\"p1\", prefix2=\"p2\"\n+ self,\n+ unitary1: QuantumCircuit,\n+ unitary2: QuantumCircuit,\n+ prefix1: str = \"p1\",\n+ prefix2: str = \"p2\",\n+ insert_barrier: bool = False,\n ):\n \"\"\"\n Args:\n@@ -69,6 +74,7 @@\n if it is parameterized. Defaults to ``\"p1\"``.\n prefix2: The name of the parameter vector associated to ``unitary2``,\n if it is parameterized. Defaults to ``\"p2\"``.\n+ insert_barrier: Whether to insert a barrier between the two unitaries.\n \n Raises:\n CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.\n@@ -95,6 +101,8 @@\n # Generate the actual overlap circuit\n super().__init__(unitaries[0].num_qubits, name=\"UnitaryOverlap\")\n self.compose(unitaries[0], inplace=True)\n+ if insert_barrier:\n+ self.barrier()\n self.compose(unitaries[1].inverse(), inplace=True)\n", "issue": "Add insert_barrier argument to UnitaryOverlap\n### What should we add?\n\nThis argument would insert a barrier between the two unitaries. This is useful if you want to prevent circuit optimization between the two parts.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2023.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Unitary overlap circuit.\"\"\"\n\nfrom qiskit.circuit import QuantumCircuit, Gate\nfrom qiskit.circuit.parametervector import ParameterVector\nfrom qiskit.circuit.exceptions import CircuitError\nfrom qiskit.circuit import Barrier\n\n\nclass UnitaryOverlap(QuantumCircuit):\n r\"\"\"Circuit that returns the overlap between two unitaries :math:`U_2^{\\dag} U_1`.\n\n The input quantum circuits must represent unitary operations, since they must be invertible.\n If the inputs will have parameters, they are replaced by :class:`.ParameterVector`\\s with\n names `\"p1\"` (for circuit ``unitary1``) and `\"p2\"` (for circuit ``unitary_2``) in the output\n circuit.\n\n This circuit is usually employed in computing the fidelity:\n\n .. 
math::\n\n \\left|\\langle 0| U_2^{\\dag} U_1|0\\rangle\\right|^{2}\n\n by computing the probability of being in the all-zeros bit-string, or equivalently,\n the expectation value of projector :math:`|0\\rangle\\langle 0|`.\n\n Example::\n\n import numpy as np\n from qiskit.circuit.library import EfficientSU2, UnitaryOverlap\n from qiskit.primitives import Sampler\n\n # get two circuit to prepare states of which we comput the overlap\n circuit = EfficientSU2(2, reps=1)\n unitary1 = circuit.assign_parameters(np.random.random(circuit.num_parameters))\n unitary2 = circuit.assign_parameters(np.random.random(circuit.num_parameters))\n\n # create the overlap circuit\n overlap = UnitaryOverap(unitary1, unitary2)\n\n # sample from the overlap\n sampler = Sampler(options={\"shots\": 100})\n result = sampler.run(overlap).result()\n\n # the fidelity is the probability to measure 0\n fidelity = result.quasi_dists[0].get(0, 0)\n\n \"\"\"\n\n def __init__(\n self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1=\"p1\", prefix2=\"p2\"\n ):\n \"\"\"\n Args:\n unitary1: Unitary acting on the ket vector.\n unitary2: Unitary whose inverse operates on the bra vector.\n prefix1: The name of the parameter vector associated to ``unitary1``,\n if it is parameterized. Defaults to ``\"p1\"``.\n prefix2: The name of the parameter vector associated to ``unitary2``,\n if it is parameterized. Defaults to ``\"p2\"``.\n\n Raises:\n CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.\n CircuitError: Inputs contain measurements and/or resets.\n \"\"\"\n # check inputs are valid\n if unitary1.num_qubits != unitary2.num_qubits:\n raise CircuitError(\n f\"Number of qubits in unitaries does \"\n f\"not match: {unitary1.num_qubits} != {unitary2.num_qubits}.\"\n )\n\n unitaries = [unitary1, unitary2]\n for unitary in unitaries:\n _check_unitary(unitary)\n\n # Vectors of new parameters, if any. Need the unitaries in a list here to ensure\n # we can overwrite them.\n for i, prefix in enumerate([prefix1, prefix2]):\n if unitaries[i].num_parameters > 0:\n new_params = ParameterVector(prefix, unitaries[i].num_parameters)\n unitaries[i] = unitaries[i].assign_parameters(new_params)\n\n # Generate the actual overlap circuit\n super().__init__(unitaries[0].num_qubits, name=\"UnitaryOverlap\")\n self.compose(unitaries[0], inplace=True)\n self.compose(unitaries[1].inverse(), inplace=True)\n\n\ndef _check_unitary(circuit):\n \"\"\"Check a circuit is unitary by checking if all operations are of type ``Gate``.\"\"\"\n\n for instruction in circuit.data:\n if not isinstance(instruction.operation, (Gate, Barrier)):\n raise CircuitError(\n (\n \"One or more instructions cannot be converted to\"\n ' a gate. \"{}\" is not a gate instruction'\n ).format(instruction.operation.name)\n )\n", "path": "qiskit/circuit/library/overlap.py"}]} | 1,881 | 347 |
gh_patches_debug_60680 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1798 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ebola page: loading second page of datasets reloads to top of page
Would it be easy to have it load the page at the `Datasets [41]` line?
</issue>
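Editorial note: one low-effort answer — and what the accepted patch below does — is to add a URL fragment to the pager links so the browser scrolls to the datasets block after the reload. Sketch (assumes the `h.url_for` helper and `show_crisis` route from the controller below; the fragment id `datasets-section` is the one used in the patch):

```python
def pager_url(q=None, page=None):
    # '#datasets-section' makes the reloaded page jump to the datasets list
    # instead of landing at the top of the crisis page.
    return h.url_for('show_crisis', page=page) + '#datasets-section'
```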
<code>
[start of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py]
1 '''
2 Created on Nov 3, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 import logging
8 import datetime as dt
9 import decimal
10
11 import pylons.config as config
12
13 import ckan.lib.base as base
14 import ckan.logic as logic
15 import ckan.model as model
16 import ckan.common as common
17 import ckan.lib.helpers as h
18
19 render = base.render
20 get_action = logic.get_action
21 c = common.c
22 request = common.request
23 _ = common._
24
25 Decimal = decimal.Decimal
26
27 log = logging.getLogger(__name__)
28
29
30 class CrisisController(base.BaseController):
31
32 def show(self):
33
34 context = {'model': model, 'session': model.Session,
35 'user': c.user or c.author, 'for_view': True,
36 'auth_user_obj': c.userobj}
37
38 datastore_resource_id = self._get_datastore_resource_id(
39 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))
40 if datastore_resource_id:
41 c.top_line_items = self._get_top_line_items(
42 context, datastore_resource_id)
43
44 limit = 25
45 c.q = u'ebola'
46
47 page = int(request.params.get('page', 1))
48 data_dict = {'sort': u'metadata_modified desc',
49 'fq': '+dataset_type:dataset',
50 'rows': limit,
51 'q': c.q,
52 'start': (page - 1) * limit
53 }
54 query = get_action("package_search")(context, data_dict)
55
56 def pager_url(q=None, page=None):
57 return h.url_for('show_crisis', page=page)
58
59 c.page = h.Page(
60 collection=query['results'],
61 page=page,
62 url=pager_url,
63 item_count=query['count'],
64 items_per_page=limit
65 )
66 c.items = query['results']
67 c.item_count = query['count']
68
69 c.other_links = {}
70 c.other_links['show_more'] = h.url_for(
71 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc',
72 'ext_indicator': '0'})
73
74 return render('crisis/crisis.html')
75
76 def _get_decimal_value(self, value):
77 decimal_value = Decimal(str(value)).quantize(
78 Decimal('.1'), rounding=decimal.ROUND_HALF_UP)
79 return decimal_value
80
81 def _format_results(self, result):
82 for r in result['records']:
83 d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')
84 r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')
85
86 modified_value = r[u'value']
87 if r[u'units'] == 'ratio':
88 modified_value *= 100.0
89 elif r[u'units'] == 'million':
90 modified_value /= 1000000.0
91
92 int_value = int(modified_value)
93 if int_value == modified_value:
94 r[u'formatted_value'] = '{:,}'.format(int_value)
95 else:
96 if r[u'units'] == 'ratio':
97 r[u'formatted_value'] = '{:,.1f}'.format(
98 self._get_decimal_value(modified_value))
99 elif r[u'units'] == 'million':
100 r[u'formatted_value'] = '{:,.1f}'.format(
101 self._get_decimal_value(modified_value))
102 #r[u'formatted_value'] += ' ' + _('million')
103
104 def _get_top_line_items(self, context, datastore_resource_id):
105 modified_context = dict(context)
106 modified_context['ignore_auth'] = True
107 result = get_action('datastore_search')(
108 modified_context, {'resource_id': datastore_resource_id})
109 if 'records' in result:
110 self._format_results(result)
111 return result['records']
112 return []
113
114 def _get_datastore_resource_id(self, context, dataset_id, resource_name):
115 try:
116 modified_context = dict(context)
117 modified_context['ignore_auth'] = True
118 dataset = get_action('package_show')(
119 modified_context, {'id': dataset_id})
120
121 if 'resources' in dataset:
122 for r in dataset['resources']:
123 if 'datastore_active' in r and r['datastore_active'] \
124 and r['name'] == resource_name:
125 return r['id']
126 return None
127 except:
128 log.warning('No dataset with id ' + dataset_id)
129 return None
130
[end of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -54,7 +54,8 @@
query = get_action("package_search")(context, data_dict)
def pager_url(q=None, page=None):
- return h.url_for('show_crisis', page=page)
+ url = h.url_for('show_crisis', page=page) + '#datasets-section'
+ return url
c.page = h.Page(
collection=query['results'],
| {"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -54,7 +54,8 @@\n query = get_action(\"package_search\")(context, data_dict)\n \n def pager_url(q=None, page=None):\n- return h.url_for('show_crisis', page=page)\n+ url = h.url_for('show_crisis', page=page) + '#datasets-section'\n+ return url\n \n c.page = h.Page(\n collection=query['results'],\n", "issue": "Ebola page: loading second page of datasets reloads to top of page\nWould it be easy to have it load the page at the `Datasets [41]` line?\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = 
dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}]} | 1,885 | 202 |
gh_patches_debug_2938 | rasdani/github-patches | git_diff | Parsl__parsl-613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TorqueProvider fails on NSCC
The following patch is required in order to run the `TorqueProvider` on NSCC:
```
[nscc04] ~/libsubmit >git diff
diff --git a/libsubmit/providers/torque/template.py b/libsubmit/providers/torque/template.py
index a00ce7c..056c648 100644
--- a/libsubmit/providers/torque/template.py
+++ b/libsubmit/providers/torque/template.py
@@ -8,7 +8,6 @@ template_string = '''#!/bin/bash
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-#PBS -v WORKER_LOGGING_LEVEL
${overrides}
export JOBNAME="${jobname}"
```
Otherwise, the job fails with `qsub: cannot send environment with the job`. Could we just merge the patch, or should we make this configurable somehow?
</issue>
<code>
[start of parsl/providers/torque/template.py]
1 template_string = '''#!/bin/bash
2
3 #PBS -S /bin/bash
4 #PBS -N ${jobname}
5 #PBS -m n
6 #PBS -k eo
7 #PBS -l walltime=$walltime
8 #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
9 #PBS -o ${submit_script_dir}/${jobname}.submit.stdout
10 #PBS -e ${submit_script_dir}/${jobname}.submit.stderr
11 #PBS -v WORKER_LOGGING_LEVEL
12 ${overrides}
13
14 export JOBNAME="${jobname}"
15
16 ${user_script}
17
18 '''
19
[end of parsl/providers/torque/template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py
--- a/parsl/providers/torque/template.py
+++ b/parsl/providers/torque/template.py
@@ -8,7 +8,6 @@
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-#PBS -v WORKER_LOGGING_LEVEL
${overrides}
export JOBNAME="${jobname}"
| {"golden_diff": "diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py\n--- a/parsl/providers/torque/template.py\n+++ b/parsl/providers/torque/template.py\n@@ -8,7 +8,6 @@\n #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n-#PBS -v WORKER_LOGGING_LEVEL\n ${overrides}\n \n export JOBNAME=\"${jobname}\"\n", "issue": "TorqueProvider fails on NSCC \nThe following patch is required in order to run the `TorqueProvider` on NSCC:\r\n```\r\n[nscc04] ~/libsubmit >git diff\r\ndiff --git a/libsubmit/providers/torque/template.py b/libsubmit/providers/torque/template.py\r\nindex a00ce7c..056c648 100644\r\n--- a/libsubmit/providers/torque/template.py\r\n+++ b/libsubmit/providers/torque/template.py\r\n@@ -8,7 +8,6 @@ template_string = '''#!/bin/bash\r\n #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\r\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\r\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\r\n-#PBS -v WORKER_LOGGING_LEVEL\r\n ${overrides}\r\n\r\n export JOBNAME=\"${jobname}\"\r\n```\r\n\r\nOtherwise, the job fails with `qsub: cannot send environment with the job`. Could we just merge the patch, or should we make this configurable somehow?\n", "before_files": [{"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -k eo\n#PBS -l walltime=$walltime\n#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n#PBS -v WORKER_LOGGING_LEVEL\n${overrides}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/torque/template.py"}]} | 917 | 124 |
gh_patches_debug_584 | rasdani/github-patches | git_diff | pex-tool__pex-1709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.77
On the docket:
+ [x] Fix pathologic lock creation slowness. #1707
+ [x] Support uncompressed PEXes. (#1705)
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.76"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.76"
+__version__ = "2.1.77"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.76\"\n+__version__ = \"2.1.77\"\n", "issue": "Release 2.1.77\nOn the docket:\r\n+ [x] Fix pathologic lock creation slowness. #1707 \r\n+ [x] Support uncompressed PEXes. (#1705)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.76\"\n", "path": "pex/version.py"}]} | 632 | 97 |
gh_patches_debug_60843 | rasdani/github-patches | git_diff | doccano__doccano-1670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Database table for SpanType has invalid name
How to reproduce the behaviour
---------
- Pull latest changes from master
- ./manage.py migrate
- ./api/migrations/0033_auto_20220127_0654.py will migrate the database table for `SpanType` to `label_types_spanType`
- Delete a project: `Project.objects.first().delete()`
Exception:
<img width="511" alt="image" src="https://user-images.githubusercontent.com/6747788/152384221-a6a549b8-1cca-49c0-86e4-6a20f7d0a266.png">
The issue can be resolved either by renaming the db table `label_types_spanType` to `label_types_spantype` or by explicitly setting `db_table` for the SpanType model like this: `db_table = "label_types_spanType"`
Your Environment
---------
* Operating System: macOS Monterey, doccano is locally executed
* Python Version Used: 3.9
</issue>
<code>
[start of backend/api/migrations/0033_auto_20220127_0654.py]
1 # Generated by Django 3.2.11 on 2022-01-27 06:54
2
3 from django.db import migrations
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('labels', '0003_auto_20220127_0654'),
10 ('api', '0032_auto_20220127_0654'),
11 ]
12
13 operations = [
14 migrations.SeparateDatabaseAndState(
15 state_operations=[
16 migrations.DeleteModel(
17 name='CategoryType',
18 ),
19 migrations.DeleteModel(
20 name='RelationTypes',
21 ),
22 migrations.DeleteModel(
23 name='SpanType',
24 ),
25 ],
26 database_operations=[
27 migrations.AlterModelTable(
28 name='CategoryType',
29 table='label_types_categorytype'
30 ),
31 migrations.AlterModelTable(
32 name='RelationTypes',
33 table='label_types_relationtypes'
34 ),
35 migrations.AlterModelTable(
36 name='SpanType',
37 table='label_types_spanType'
38 )
39 ]
40 )
41 ]
42
[end of backend/api/migrations/0033_auto_20220127_0654.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/api/migrations/0033_auto_20220127_0654.py b/backend/api/migrations/0033_auto_20220127_0654.py
--- a/backend/api/migrations/0033_auto_20220127_0654.py
+++ b/backend/api/migrations/0033_auto_20220127_0654.py
@@ -34,7 +34,7 @@
),
migrations.AlterModelTable(
name='SpanType',
- table='label_types_spanType'
+ table='label_types_spantype'
)
]
)
| {"golden_diff": "diff --git a/backend/api/migrations/0033_auto_20220127_0654.py b/backend/api/migrations/0033_auto_20220127_0654.py\n--- a/backend/api/migrations/0033_auto_20220127_0654.py\n+++ b/backend/api/migrations/0033_auto_20220127_0654.py\n@@ -34,7 +34,7 @@\n ),\n migrations.AlterModelTable(\n name='SpanType',\n- table='label_types_spanType'\n+ table='label_types_spantype'\n )\n ]\n )\n", "issue": "Database table for SpanType has invalid name \nHow to reproduce the behaviour\r\n---------\r\n- Pull latest changes from master\r\n- ./manage.py migrate\r\n- ./api/migrations/0033_auto_20220127_0654.py will migrate the database table for `SpanType` to `label_types_spanType`\r\n- Delete a project `Project.objects.first().delete()``\r\n\r\nException:\r\n\r\n<img width=\"511\" alt=\"image\" src=\"https://user-images.githubusercontent.com/6747788/152384221-a6a549b8-1cca-49c0-86e4-6a20f7d0a266.png\">\r\n \r\nThe issue can be resolved by either renaming db table `label_types_spanType` to `label_types_spantype` or by explicitly setting `tb_table` for SpanType model like this: `db_table = \"label_types_spanType\"`\r\n\r\nYour Environment\r\n---------\r\n* Operating System: macOS Monterey, doccano is locally executed\r\n* Python Version Used: 3.9\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.11 on 2022-01-27 06:54\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('labels', '0003_auto_20220127_0654'),\n ('api', '0032_auto_20220127_0654'),\n ]\n\n operations = [\n migrations.SeparateDatabaseAndState(\n state_operations=[\n migrations.DeleteModel(\n name='CategoryType',\n ),\n migrations.DeleteModel(\n name='RelationTypes',\n ),\n migrations.DeleteModel(\n name='SpanType',\n ),\n ],\n database_operations=[\n migrations.AlterModelTable(\n name='CategoryType',\n table='label_types_categorytype'\n ),\n migrations.AlterModelTable(\n name='RelationTypes',\n table='label_types_relationtypes'\n ),\n migrations.AlterModelTable(\n name='SpanType',\n table='label_types_spanType'\n )\n ]\n )\n ]\n", "path": "backend/api/migrations/0033_auto_20220127_0654.py"}]} | 1,132 | 164 |
gh_patches_debug_29475 | rasdani/github-patches | git_diff | litestar-org__litestar-2259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py]
1 import json
2 from pathlib import Path
3 from typing import Any
4
5 from rich import get_console
6 from sqlalchemy import create_engine
7 from sqlalchemy.orm import Mapped, Session, sessionmaker
8
9 from litestar.contrib.sqlalchemy.base import UUIDBase
10 from litestar.contrib.sqlalchemy.repository import SQLAlchemySyncRepository
11 from litestar.repository.filters import LimitOffset
12
13 here = Path(__file__).parent
14 console = get_console()
15
16
17 class USState(UUIDBase):
18 # you can optionally override the generated table name by manually setting it.
19 __tablename__ = "us_state_lookup" # type: ignore[assignment]
20 abbreviation: Mapped[str]
21 name: Mapped[str]
22
23
24 class USStateRepository(SQLAlchemySyncRepository[USState]):
25 """US State repository."""
26
27 model_type = USState
28
29
30 engine = create_engine(
31 "duckdb:///:memory:",
32 future=True,
33 )
34 session_factory: sessionmaker[Session] = sessionmaker(engine, expire_on_commit=False)
35
36
37 def open_fixture(fixtures_path: Path, fixture_name: str) -> Any:
38 """Loads JSON file with the specified fixture name
39
40 Args:
41 fixtures_path (Path): The path to look for fixtures
42 fixture_name (str): The fixture name to load.
43
44 Raises:
45 FileNotFoundError: Fixtures not found.
46
47 Returns:
48 Any: The parsed JSON data
49 """
50 fixture = Path(fixtures_path / f"{fixture_name}.json")
51 if fixture.exists():
52 with fixture.open(mode="r", encoding="utf-8") as f:
53 f_data = f.read()
54 return json.loads(f_data)
55 raise FileNotFoundError(f"Could not find the {fixture_name} fixture")
56
57
58 def run_script() -> None:
59 """Load data from a fixture."""
60
61 # Initializes the database.
62 with engine.begin() as conn:
63 USState.metadata.create_all(conn)
64
65 with session_factory() as db_session:
66 # 1) load the JSON data into the US States table
67 repo = USStateRepository(session=db_session)
68 fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore
69 objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])
70 db_session.commit()
71 console.print(f"Created {len(objs)} new objects.")
72
73 # 2) Select paginated data and total row count.
74 created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))
75 console.print(f"Selected {len(created_objs)} records out of a total of {total_objs}.")
76
77 # 2) Let's remove the batch of records selected.
78 deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])
79 console.print(f"Removed {len(deleted_objs)} records out of a total of {total_objs}.")
80
81 # 3) Le'ts count the remaining rows
82 remaining_count = repo.count()
83 console.print(f"Found {remaining_count} remaining records after delete.")
84
85
86 if __name__ == "__main__":
87 run_script()
88
[end of docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py
--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py
+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py
@@ -63,7 +63,7 @@
USState.metadata.create_all(conn)
with session_factory() as db_session:
- # 1) load the JSON data into the US States table
+ # 1) Load the JSON data into the US States table.
repo = USStateRepository(session=db_session)
fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore
objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])
@@ -74,11 +74,11 @@
created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))
console.print(f"Selected {len(created_objs)} records out of a total of {total_objs}.")
- # 2) Let's remove the batch of records selected.
+ # 3) Let's remove the batch of records selected.
deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])
console.print(f"Removed {len(deleted_objs)} records out of a total of {total_objs}.")
- # 3) Le'ts count the remaining rows
+ # 4) Let's count the remaining rows
remaining_count = repo.count()
console.print(f"Found {remaining_count} remaining records after delete.")
| {"golden_diff": "diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py\n--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py\n+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py\n@@ -63,7 +63,7 @@\n USState.metadata.create_all(conn)\n \n with session_factory() as db_session:\n- # 1) load the JSON data into the US States table\n+ # 1) Load the JSON data into the US States table.\n repo = USStateRepository(session=db_session)\n fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore\n objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])\n@@ -74,11 +74,11 @@\n created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))\n console.print(f\"Selected {len(created_objs)} records out of a total of {total_objs}.\")\n \n- # 2) Let's remove the batch of records selected.\n+ # 3) Let's remove the batch of records selected.\n deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])\n console.print(f\"Removed {len(deleted_objs)} records out of a total of {total_objs}.\")\n \n- # 3) Le'ts count the remaining rows\n+ # 4) Let's count the remaining rows\n remaining_count = repo.count()\n console.print(f\"Found {remaining_count} remaining records after delete.\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "import json\nfrom pathlib import Path\nfrom typing import Any\n\nfrom rich import get_console\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Mapped, Session, sessionmaker\n\nfrom litestar.contrib.sqlalchemy.base import UUIDBase\nfrom litestar.contrib.sqlalchemy.repository import SQLAlchemySyncRepository\nfrom litestar.repository.filters import LimitOffset\n\nhere = Path(__file__).parent\nconsole = get_console()\n\n\nclass USState(UUIDBase):\n # you can optionally override the generated table name by manually setting it.\n __tablename__ = \"us_state_lookup\" # type: ignore[assignment]\n abbreviation: Mapped[str]\n name: Mapped[str]\n\n\nclass USStateRepository(SQLAlchemySyncRepository[USState]):\n \"\"\"US State repository.\"\"\"\n\n model_type = USState\n\n\nengine = create_engine(\n \"duckdb:///:memory:\",\n future=True,\n)\nsession_factory: sessionmaker[Session] = sessionmaker(engine, expire_on_commit=False)\n\n\ndef open_fixture(fixtures_path: Path, fixture_name: str) -> Any:\n \"\"\"Loads JSON file with the specified fixture name\n\n Args:\n fixtures_path (Path): The path to look for fixtures\n fixture_name (str): The fixture name to load.\n\n Raises:\n FileNotFoundError: Fixtures not found.\n\n Returns:\n Any: The parsed JSON data\n \"\"\"\n fixture = Path(fixtures_path / f\"{fixture_name}.json\")\n if fixture.exists():\n with fixture.open(mode=\"r\", encoding=\"utf-8\") as f:\n f_data = f.read()\n return json.loads(f_data)\n raise FileNotFoundError(f\"Could not find the {fixture_name} fixture\")\n\n\ndef run_script() -> None:\n \"\"\"Load data from a fixture.\"\"\"\n\n # Initializes the database.\n with engine.begin() as conn:\n USState.metadata.create_all(conn)\n\n with session_factory() as db_session:\n # 1) load the JSON data into the US States table\n repo = USStateRepository(session=db_session)\n fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore\n objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])\n db_session.commit()\n console.print(f\"Created {len(objs)} new objects.\")\n\n # 2) Select paginated data and total row count.\n created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))\n console.print(f\"Selected {len(created_objs)} records out of a total of {total_objs}.\")\n\n # 2) Let's remove the batch of records selected.\n deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])\n console.print(f\"Removed {len(deleted_objs)} records out of a total of {total_objs}.\")\n\n # 3) Le'ts count the remaining rows\n remaining_count = repo.count()\n console.print(f\"Found {remaining_count} remaining records after delete.\")\n\n\nif __name__ == \"__main__\":\n run_script()\n", "path": "docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py"}]} | 1,547 | 358 |
gh_patches_debug_4016 | rasdani/github-patches | git_diff | ansible__awx-14626 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue on awx.awx.export/import awx cli/collection
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)
### Bug Summary
Hi team,
I found two bugs related to the awx CLI and collection import/export.
The first issue is with the export module, which does not work if the user is a system_auditor (or does not have certain admin roles on objects such as schedules).
I already found why this bug happens and proposed a PR: #14626.
The second "bug" is with the import module (I don't know, and couldn't find out, whether this behaviour was a deliberate decision) and concerns how the import is processed.
Currently the import performs a `POST` if the object does not exist, or a `PUT` if it does. When we `PUT` an object that already exists, the API replaces all of its fields, including encrypted keys, which are (correctly) removed in the export.
So I don't know whether a `PATCH` would be a better approach than a `PUT`, but I think the issue here is in the export itself, which replaces the value of the encrypted key `$encrypted$` with `''`.
The side effect of restoring `$encrypted$` in the export is that we then have to remove it for the POST, because it is a reserved keyword.
I will also propose a PR to fix the second bug.
### AWX version
23.3.1
### Select the relevant components
- [ ] UI
- [ ] UI (tech preview)
- [X] API
- [ ] Docs
- [X] Collection
- [X] CLI
- [ ] Other
### Installation method
kubernetes
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
bug 1: execute an export of schedules as a system_auditor
bug 2: import a credential already present in AWX
### Expected results
bug 1: export will go fine
bug 2: credential will be updated with only fields present in export
### Actual results
bug 1: export will fail
bug 2: the credential is replaced with the exported data. But because encrypted keys are not exported and their value `$encrypted$` is replaced with `''`, we end up replacing the current secrets with `''`
### Additional information
_No response_
</issue>
<code>
[start of awxkit/awxkit/api/utils.py]
1 import logging
2 import re
3
4
5 log = logging.getLogger(__name__)
6
7 descRE = re.compile(r'^[*] `(\w+)`: [^(]*\((\w+), ([^)]+)\)')
8
9
10 def freeze(key):
11 if key is None:
12 return None
13 return frozenset((k, freeze(v) if isinstance(v, dict) else v) for k, v in key.items())
14
15
16 def parse_description(desc):
17 options = {}
18 for line in desc[desc.index('POST') :].splitlines():
19 match = descRE.match(line)
20 if not match:
21 continue
22 options[match.group(1)] = {'type': match.group(2), 'required': match.group(3) == 'required'}
23 return options
24
25
26 def remove_encrypted(value):
27 if value == '$encrypted$':
28 return ''
29 if isinstance(value, list):
30 return [remove_encrypted(item) for item in value]
31 if isinstance(value, dict):
32 return {k: remove_encrypted(v) for k, v in value.items()}
33 return value
34
35
36 def get_post_fields(page, cache):
37 options_page = cache.get_options(page)
38 if options_page is None:
39 return None
40
41 if 'POST' not in options_page.r.headers.get('Allow', ''):
42 return None
43
44 if 'POST' in options_page.json['actions']:
45 return options_page.json['actions']['POST']
46 else:
47 log.warning("Insufficient privileges on %s, inferring POST fields from description.", options_page.endpoint)
48 return parse_description(options_page.json['description'])
49
[end of awxkit/awxkit/api/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awxkit/awxkit/api/utils.py b/awxkit/awxkit/api/utils.py
--- a/awxkit/awxkit/api/utils.py
+++ b/awxkit/awxkit/api/utils.py
@@ -15,7 +15,12 @@
def parse_description(desc):
options = {}
- for line in desc[desc.index('POST') :].splitlines():
+ desc_lines = []
+ if 'POST' in desc:
+ desc_lines = desc[desc.index('POST') :].splitlines()
+ else:
+ desc_lines = desc.splitlines()
+ for line in desc_lines:
match = descRE.match(line)
if not match:
continue
| {"golden_diff": "diff --git a/awxkit/awxkit/api/utils.py b/awxkit/awxkit/api/utils.py\n--- a/awxkit/awxkit/api/utils.py\n+++ b/awxkit/awxkit/api/utils.py\n@@ -15,7 +15,12 @@\n \n def parse_description(desc):\n options = {}\n- for line in desc[desc.index('POST') :].splitlines():\n+ desc_lines = []\n+ if 'POST' in desc:\n+ desc_lines = desc[desc.index('POST') :].splitlines()\n+ else:\n+ desc_lines = desc.splitlines()\n+ for line in desc_lines:\n match = descRE.match(line)\n if not match:\n continue\n", "issue": "Issue on awx.awx.export/import awx cli/collection\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\r\n\r\n### Bug Summary\r\n\r\nHi team,\r\nI founded tow bugs related awx cli and collection import / export.\r\n\r\nThe first issue is related export module, that not work if user is a system_auditor (or not have certain admin role in object like schedule.\r\nI already found why this bug is present and proposed a PR: #14626 .\r\n\r\nSecond \"bug\" is related import module (I don't know and don't find if someone decide it) and is related how import will be processed.\r\nActually import perform a `POST`, if object not exists, or a `PUT`, if object exists. In case of we `PUT` an object that already exist api will replace it in all fields, including encrypted key, that in export are removed (correctly).\r\n\r\nSo, i don't know if it's better approach with `PATCH` instead a `PUT`, but I think that here the issue is in the export itself, that will replace value of encrypted key `$encrypted$` with `''` .\r\nThe side effect of \"restore\" `$encrypted$ ` on the export is that we remove it for the POST, due to the fact that is a reseved keyword.\r\n \r\n I will propose a PR also to fix the second bugs\r\n\r\n### AWX version\r\n\r\n23.3.1\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [ ] UI (tech preview)\r\n- [X] API\r\n- [ ] Docs\r\n- [X] Collection\r\n- [X] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nkubernetes\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\nbug 1: execute an export of schedule with a system_auditor\r\n\r\nbug 2: import a credential already present in AWX\r\n\r\n### Expected results\r\n\r\nbug 1: export will go fine\r\n\r\nbug 2: credential will be updated with only fields present in export\r\n\r\n### Actual results\r\n\r\nbug 1: export will fail\r\n\r\nbug 2: credential will be replaced with exported data. 
But due to the fact that encrypted key are not exported and replaced the value `$encrypted$` with `''` we replace current secrets with `''`\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport re\n\n\nlog = logging.getLogger(__name__)\n\ndescRE = re.compile(r'^[*] `(\\w+)`: [^(]*\\((\\w+), ([^)]+)\\)')\n\n\ndef freeze(key):\n if key is None:\n return None\n return frozenset((k, freeze(v) if isinstance(v, dict) else v) for k, v in key.items())\n\n\ndef parse_description(desc):\n options = {}\n for line in desc[desc.index('POST') :].splitlines():\n match = descRE.match(line)\n if not match:\n continue\n options[match.group(1)] = {'type': match.group(2), 'required': match.group(3) == 'required'}\n return options\n\n\ndef remove_encrypted(value):\n if value == '$encrypted$':\n return ''\n if isinstance(value, list):\n return [remove_encrypted(item) for item in value]\n if isinstance(value, dict):\n return {k: remove_encrypted(v) for k, v in value.items()}\n return value\n\n\ndef get_post_fields(page, cache):\n options_page = cache.get_options(page)\n if options_page is None:\n return None\n\n if 'POST' not in options_page.r.headers.get('Allow', ''):\n return None\n\n if 'POST' in options_page.json['actions']:\n return options_page.json['actions']['POST']\n else:\n log.warning(\"Insufficient privileges on %s, inferring POST fields from description.\", options_page.endpoint)\n return parse_description(options_page.json['description'])\n", "path": "awxkit/awxkit/api/utils.py"}]} | 1,590 | 166 |
gh_patches_debug_59177 | rasdani/github-patches | git_diff | fossasia__open-event-server-4147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IntegrityError: (psycopg2.IntegrityError) column "field_identifier" contains null values
https://sentry.eventyay.com/eventyay/api/issues/25/
```
IntegrityError: (psycopg2.IntegrityError) column "field_identifier" contains null values
[SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']
(25 additional frame(s) were not displayed)
...
File "sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
IntegrityError: (psycopg2.IntegrityError) column "field_identifier" contains null values
[SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']
```
</issue>
<code>
[start of migrations/versions/aefa134809bf_.py]
1 """empty message
2
3 Revision ID: aefa134809bf
4 Revises: 2b39d8c05788
5 Create Date: 2017-07-21 20:37:50.193436
6
7 """
8
9 from alembic import op
10 import sqlalchemy as sa
11 import sqlalchemy_utils
12
13
14 # revision identifiers, used by Alembic.
15 revision = 'aefa134809bf'
16 down_revision = '2b39d8c05788'
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))
22 op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))
23 op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))
24 op.add_column('custom_forms', sa.Column('is_included', sa.Boolean(), nullable=True))
25 op.add_column('custom_forms', sa.Column('is_required', sa.Boolean(), nullable=True))
26 op.add_column('custom_forms', sa.Column('type', sa.String(), nullable=False))
27 op.create_unique_constraint('custom_form_identifier', 'custom_forms', ['event_id', 'field_identifier', 'form'])
28 op.drop_column('custom_forms', 'speaker_form')
29 op.drop_column('custom_forms', 'session_form')
30 # ### end Alembic commands ###
31
32
33 def downgrade():
34 # ### commands auto generated by Alembic - please adjust! ###
35 op.add_column('custom_forms', sa.Column('session_form', sa.VARCHAR(), autoincrement=False, nullable=False))
36 op.add_column('custom_forms', sa.Column('speaker_form', sa.VARCHAR(), autoincrement=False, nullable=False))
37 op.drop_constraint('custom_form_identifier', 'custom_forms', type_='unique')
38 op.drop_column('custom_forms', 'type')
39 op.drop_column('custom_forms', 'is_required')
40 op.drop_column('custom_forms', 'is_included')
41 op.drop_column('custom_forms', 'is_fixed')
42 op.drop_column('custom_forms', 'form')
43 op.drop_column('custom_forms', 'field_identifier')
44 # ### end Alembic commands ###
45
[end of migrations/versions/aefa134809bf_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/migrations/versions/aefa134809bf_.py b/migrations/versions/aefa134809bf_.py
--- a/migrations/versions/aefa134809bf_.py
+++ b/migrations/versions/aefa134809bf_.py
@@ -18,6 +18,7 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
+ op.execute("DELETE FROM custom_forms")
op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))
op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))
op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))
| {"golden_diff": "diff --git a/migrations/versions/aefa134809bf_.py b/migrations/versions/aefa134809bf_.py\n--- a/migrations/versions/aefa134809bf_.py\n+++ b/migrations/versions/aefa134809bf_.py\n@@ -18,6 +18,7 @@\n \n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n+ op.execute(\"DELETE FROM custom_forms\")\n op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))\n", "issue": "IntegrityError: (psycopg2.IntegrityError) column \"field_identifier\" contains null values\nhttps://sentry.eventyay.com/eventyay/api/issues/25/\r\n\r\n```\r\nIntegrityError: (psycopg2.IntegrityError) column \"field_identifier\" contains null values\r\n [SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']\r\n(25 additional frame(s) were not displayed)\r\n...\r\n File \"sqlalchemy/engine/base.py\", line 1189, in _execute_context\r\n context)\r\n File \"sqlalchemy/engine/base.py\", line 1402, in _handle_dbapi_exception\r\n exc_info\r\n File \"sqlalchemy/util/compat.py\", line 203, in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb, cause=cause)\r\n File \"sqlalchemy/engine/base.py\", line 1182, in _execute_context\r\n context)\r\n File \"sqlalchemy/engine/default.py\", line 470, in do_execute\r\n cursor.execute(statement, parameters)\r\n\r\nIntegrityError: (psycopg2.IntegrityError) column \"field_identifier\" contains null values\r\n [SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']\r\n```\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: aefa134809bf\nRevises: 2b39d8c05788\nCreate Date: 2017-07-21 20:37:50.193436\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = 'aefa134809bf'\ndown_revision = '2b39d8c05788'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('is_included', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('is_required', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('type', sa.String(), nullable=False))\n op.create_unique_constraint('custom_form_identifier', 'custom_forms', ['event_id', 'field_identifier', 'form'])\n op.drop_column('custom_forms', 'speaker_form')\n op.drop_column('custom_forms', 'session_form')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('custom_forms', sa.Column('session_form', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.add_column('custom_forms', sa.Column('speaker_form', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.drop_constraint('custom_form_identifier', 'custom_forms', type_='unique')\n op.drop_column('custom_forms', 'type')\n op.drop_column('custom_forms', 'is_required')\n op.drop_column('custom_forms', 'is_included')\n op.drop_column('custom_forms', 'is_fixed')\n op.drop_column('custom_forms', 'form')\n op.drop_column('custom_forms', 'field_identifier')\n # ### end Alembic commands ###\n", "path": "migrations/versions/aefa134809bf_.py"}]} | 1,396 | 173 |
gh_patches_debug_18270 | rasdani/github-patches | git_diff | dask__distributed-6904 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing from distributed shows pyright error
**What happened**:
When type-checking a program that imports from distributed with pyright, an error is reported:
```python
# foo.py
from distributed import Client
print(Client)
```
```
pyright foo.py
...
/tmp/test-area/foo.py:1:25 - error: "Client" is not exported from module "distributed"
Import from "distributed.client" instead (reportPrivateImportUsage)
1 error, 0 warnings, 0 informations
```
**What you expected to happen**:
I expect the package to be correctly typed, following PEP 484
**Anything else we need to know?**:
PEP 484 states that
> Modules and variables imported into the stub are not considered exported from the stub unless the import uses the `import ... as ...` form or the equivalent `from ... import ... as ...` form
and Pyright follows this guideline, although mypy doesn't.
**Environment**:
- Dask version: 2022.8.0
- Python version: 3.10.5
- Operating System: Arch linux
- Install method (conda, pip, source): pip inside an environment
</issue>
<code>
[start of distributed/__init__.py]
1 from __future__ import annotations
2
3 # isort: off
4 from distributed import config # load distributed configuration first
5 from distributed import widgets # load distributed widgets second
6
7 # isort: on
8
9 import atexit
10
11 import dask
12 from dask.config import config # type: ignore
13
14 from distributed._version import get_versions
15 from distributed.actor import Actor, ActorFuture, BaseActorFuture
16 from distributed.client import (
17 Client,
18 CompatibleExecutor,
19 Future,
20 as_completed,
21 default_client,
22 fire_and_forget,
23 futures_of,
24 get_task_metadata,
25 get_task_stream,
26 performance_report,
27 wait,
28 )
29 from distributed.core import Status, connect, rpc
30 from distributed.deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster
31 from distributed.diagnostics.plugin import (
32 Environ,
33 NannyPlugin,
34 PipInstall,
35 SchedulerPlugin,
36 UploadDirectory,
37 UploadFile,
38 WorkerPlugin,
39 )
40 from distributed.diagnostics.progressbar import progress
41 from distributed.event import Event
42 from distributed.lock import Lock
43 from distributed.multi_lock import MultiLock
44 from distributed.nanny import Nanny
45 from distributed.pubsub import Pub, Sub
46 from distributed.queues import Queue
47 from distributed.scheduler import KilledWorker, Scheduler
48 from distributed.security import Security
49 from distributed.semaphore import Semaphore
50 from distributed.threadpoolexecutor import rejoin
51 from distributed.utils import CancelledError, TimeoutError, sync
52 from distributed.variable import Variable
53 from distributed.worker import (
54 Reschedule,
55 Worker,
56 get_client,
57 get_worker,
58 print,
59 secede,
60 warn,
61 )
62 from distributed.worker_client import local_client, worker_client
63
64
65 def __getattr__(name):
66 global __version__, __git_revision__
67
68 if name == "__version__":
69 from importlib.metadata import version
70
71 __version__ = version("distributed")
72 return __version__
73
74 if name == "__git_revision__":
75 from distributed._version import get_versions
76
77 __git_revision__ = get_versions()["full-revisionid"]
78 return __git_revision__
79
80 raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
81
82
83 _python_shutting_down = False
84
85
86 @atexit.register
87 def _():
88 """Set a global when Python shuts down.
89
90 Note
91 ----
92 This function must be registered with atexit *after* any class that invokes
93 ``dstributed.utils.is_python_shutting_down`` has been defined. This way it
94 will be called before the ``__del__`` method of those classes.
95
96 See Also
97 --------
98 distributed.utils.is_python_shutting_down
99 """
100 global _python_shutting_down
101 _python_shutting_down = True
102
[end of distributed/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/__init__.py b/distributed/__init__.py
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -99,3 +99,65 @@
"""
global _python_shutting_down
_python_shutting_down = True
+
+
+__all__ = [
+ "Actor",
+ "ActorFuture",
+ "Adaptive",
+ "BaseActorFuture",
+ "CancelledError",
+ "Client",
+ "CompatibleExecutor",
+ "Environ",
+ "Event",
+ "Future",
+ "KilledWorker",
+ "LocalCluster",
+ "Lock",
+ "MultiLock",
+ "Nanny",
+ "NannyPlugin",
+ "PipInstall",
+ "Pub",
+ "Queue",
+ "Reschedule",
+ "SSHCluster",
+ "Scheduler",
+ "SchedulerPlugin",
+ "Security",
+ "Semaphore",
+ "SpecCluster",
+ "Status",
+ "Sub",
+ "TimeoutError",
+ "UploadDirectory",
+ "UploadFile",
+ "Variable",
+ "Worker",
+ "WorkerPlugin",
+ "as_completed",
+ "config",
+ "connect",
+ "dask",
+ "default_client",
+ "fire_and_forget",
+ "futures_of",
+ "get_client",
+ "get_task_metadata",
+ "get_task_stream",
+ "get_versions",
+ "get_worker",
+ "local_client",
+ "performance_report",
+ "print",
+ "progress",
+ "rejoin",
+ "rpc",
+ "secede",
+ "sync",
+ "wait",
+ "warn",
+ "widgets",
+ "worker_client",
+]
| {"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -99,3 +99,65 @@\n \"\"\"\n global _python_shutting_down\n _python_shutting_down = True\n+\n+\n+__all__ = [\n+ \"Actor\",\n+ \"ActorFuture\",\n+ \"Adaptive\",\n+ \"BaseActorFuture\",\n+ \"CancelledError\",\n+ \"Client\",\n+ \"CompatibleExecutor\",\n+ \"Environ\",\n+ \"Event\",\n+ \"Future\",\n+ \"KilledWorker\",\n+ \"LocalCluster\",\n+ \"Lock\",\n+ \"MultiLock\",\n+ \"Nanny\",\n+ \"NannyPlugin\",\n+ \"PipInstall\",\n+ \"Pub\",\n+ \"Queue\",\n+ \"Reschedule\",\n+ \"SSHCluster\",\n+ \"Scheduler\",\n+ \"SchedulerPlugin\",\n+ \"Security\",\n+ \"Semaphore\",\n+ \"SpecCluster\",\n+ \"Status\",\n+ \"Sub\",\n+ \"TimeoutError\",\n+ \"UploadDirectory\",\n+ \"UploadFile\",\n+ \"Variable\",\n+ \"Worker\",\n+ \"WorkerPlugin\",\n+ \"as_completed\",\n+ \"config\",\n+ \"connect\",\n+ \"dask\",\n+ \"default_client\",\n+ \"fire_and_forget\",\n+ \"futures_of\",\n+ \"get_client\",\n+ \"get_task_metadata\",\n+ \"get_task_stream\",\n+ \"get_versions\",\n+ \"get_worker\",\n+ \"local_client\",\n+ \"performance_report\",\n+ \"print\",\n+ \"progress\",\n+ \"rejoin\",\n+ \"rpc\",\n+ \"secede\",\n+ \"sync\",\n+ \"wait\",\n+ \"warn\",\n+ \"widgets\",\n+ \"worker_client\",\n+]\n", "issue": "Importing from distributed shows pyright error\n**What happened**:\r\nWhen type-checking a program that imports from distributed with pyright, an error is accused:\r\n\r\n```python\r\n# foo.py\r\nfrom distributed import Client\r\nprint(Client)\r\n```\r\n\r\n```\r\npyright foo.py\r\n...\r\n /tmp/test-area/foo.py:1:25 - error: \"Client\" is not exported from module \"distributed\"\r\n \u00a0\u00a0Import from \"distributed.client\" instead (reportPrivateImportUsage)\r\n1 error, 0 warnings, 0 informations\r\n```\r\n\r\n**What you expected to happen**:\r\nI expect the package to be correctly typed, following PEP 484\r\n\r\n\r\n**Anything else we need to know?**:\r\n\r\nPEP 484 states that\r\n\r\n> Modules and variables imported into the stub are not considered exported from the stub unless the import uses the `import ... as ... form` or the equivalent `from ... import ... as ... 
form`\r\n\r\nand Pyright follows this guideline, although mypy doesn't.\r\n\r\n**Environment**:\r\n\r\n- Dask version: 2022.8.0\r\n- Python version: 3.10.5\r\n- Operating System: Arch linux\r\n- Install method (conda, pip, source): pip inside an environment\n", "before_files": [{"content": "from __future__ import annotations\n\n# isort: off\nfrom distributed import config # load distributed configuration first\nfrom distributed import widgets # load distributed widgets second\n\n# isort: on\n\nimport atexit\n\nimport dask\nfrom dask.config import config # type: ignore\n\nfrom distributed._version import get_versions\nfrom distributed.actor import Actor, ActorFuture, BaseActorFuture\nfrom distributed.client import (\n Client,\n CompatibleExecutor,\n Future,\n as_completed,\n default_client,\n fire_and_forget,\n futures_of,\n get_task_metadata,\n get_task_stream,\n performance_report,\n wait,\n)\nfrom distributed.core import Status, connect, rpc\nfrom distributed.deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster\nfrom distributed.diagnostics.plugin import (\n Environ,\n NannyPlugin,\n PipInstall,\n SchedulerPlugin,\n UploadDirectory,\n UploadFile,\n WorkerPlugin,\n)\nfrom distributed.diagnostics.progressbar import progress\nfrom distributed.event import Event\nfrom distributed.lock import Lock\nfrom distributed.multi_lock import MultiLock\nfrom distributed.nanny import Nanny\nfrom distributed.pubsub import Pub, Sub\nfrom distributed.queues import Queue\nfrom distributed.scheduler import KilledWorker, Scheduler\nfrom distributed.security import Security\nfrom distributed.semaphore import Semaphore\nfrom distributed.threadpoolexecutor import rejoin\nfrom distributed.utils import CancelledError, TimeoutError, sync\nfrom distributed.variable import Variable\nfrom distributed.worker import (\n Reschedule,\n Worker,\n get_client,\n get_worker,\n print,\n secede,\n warn,\n)\nfrom distributed.worker_client import local_client, worker_client\n\n\ndef __getattr__(name):\n global __version__, __git_revision__\n\n if name == \"__version__\":\n from importlib.metadata import version\n\n __version__ = version(\"distributed\")\n return __version__\n\n if name == \"__git_revision__\":\n from distributed._version import get_versions\n\n __git_revision__ = get_versions()[\"full-revisionid\"]\n return __git_revision__\n\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n\n\n_python_shutting_down = False\n\n\[email protected]\ndef _():\n \"\"\"Set a global when Python shuts down.\n\n Note\n ----\n This function must be registered with atexit *after* any class that invokes\n ``dstributed.utils.is_python_shutting_down`` has been defined. This way it\n will be called before the ``__del__`` method of those classes.\n\n See Also\n --------\n distributed.utils.is_python_shutting_down\n \"\"\"\n global _python_shutting_down\n _python_shutting_down = True\n", "path": "distributed/__init__.py"}]} | 1,578 | 413 |
gh_patches_debug_8505 | rasdani/github-patches | git_diff | Textualize__textual-1552 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change Clock color
Following on from #1411, perhaps the clock colour needs a wee revisit too?

</issue>
<code>
[start of src/textual/widgets/_header.py]
1 from __future__ import annotations
2
3 from datetime import datetime
4
5 from rich.text import Text
6
7 from ..widget import Widget
8 from ..reactive import Reactive, watch
9
10
11 class HeaderIcon(Widget):
12 """Display an 'icon' on the left of the header."""
13
14 DEFAULT_CSS = """
15 HeaderIcon {
16 dock: left;
17 padding: 0 1;
18 width: 8;
19 content-align: left middle;
20 }
21 """
22 icon = Reactive("⭘")
23
24 def render(self):
25 return self.icon
26
27
28 class HeaderClockSpace(Widget):
29 """The space taken up by the clock on the right of the header."""
30
31 DEFAULT_CSS = """
32 HeaderClockSpace {
33 dock: right;
34 width: 10;
35 padding: 0 1;
36 }
37 """
38
39 def render(self) -> str:
40 return ""
41
42
43 class HeaderClock(HeaderClockSpace):
44 """Display a clock on the right of the header."""
45
46 DEFAULT_CSS = """
47 HeaderClock {
48 background: $secondary-background-lighten-1;
49 color: $text;
50 text-opacity: 85%;
51 content-align: center middle;
52 }
53 """
54
55 def on_mount(self) -> None:
56 self.set_interval(1, callback=self.refresh, name=f"update header clock")
57
58 def render(self):
59 return Text(datetime.now().time().strftime("%X"))
60
61
62 class HeaderTitle(Widget):
63 """Display the title / subtitle in the header."""
64
65 DEFAULT_CSS = """
66 HeaderTitle {
67 content-align: center middle;
68 width: 100%;
69 }
70 """
71
72 text: Reactive[str] = Reactive("")
73 sub_text = Reactive("")
74
75 def render(self) -> Text:
76 text = Text(self.text, no_wrap=True, overflow="ellipsis")
77 if self.sub_text:
78 text.append(" — ")
79 text.append(self.sub_text, "dim")
80 return text
81
82
83 class Header(Widget):
84 """A header widget with icon and clock.
85
86 Args:
87 show_clock (bool, optional): True if the clock should be shown on the right of the header.
88 """
89
90 DEFAULT_CSS = """
91 Header {
92 dock: top;
93 width: 100%;
94 background: $foreground 5%;
95 color: $text;
96 height: 1;
97 }
98 Header.-tall {
99 height: 3;
100 }
101 """
102
103 tall = Reactive(False)
104
105 DEFAULT_CLASSES = ""
106
107 def __init__(
108 self,
109 show_clock: bool = False,
110 *,
111 name: str | None = None,
112 id: str | None = None,
113 classes: str | None = None,
114 ):
115 super().__init__(name=name, id=id, classes=classes)
116 self.show_clock = show_clock
117
118 def compose(self):
119 yield HeaderIcon()
120 yield HeaderTitle()
121 yield HeaderClock() if self.show_clock else HeaderClockSpace()
122
123 def watch_tall(self, tall: bool) -> None:
124 self.set_class(tall, "-tall")
125
126 def on_click(self):
127 self.toggle_class("-tall")
128
129 def on_mount(self) -> None:
130 def set_title(title: str) -> None:
131 self.query_one(HeaderTitle).text = title
132
133 def set_sub_title(sub_title: str) -> None:
134 self.query_one(HeaderTitle).sub_text = sub_title
135
136 watch(self.app, "title", set_title)
137 watch(self.app, "sub_title", set_sub_title)
138
[end of src/textual/widgets/_header.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py
--- a/src/textual/widgets/_header.py
+++ b/src/textual/widgets/_header.py
@@ -45,7 +45,7 @@
DEFAULT_CSS = """
HeaderClock {
- background: $secondary-background-lighten-1;
+ background: $foreground-darken-1 5%;
color: $text;
text-opacity: 85%;
content-align: center middle;
@@ -97,7 +97,7 @@
}
Header.-tall {
height: 3;
- }
+ }
"""
tall = Reactive(False)
| {"golden_diff": "diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py\n--- a/src/textual/widgets/_header.py\n+++ b/src/textual/widgets/_header.py\n@@ -45,7 +45,7 @@\n \n DEFAULT_CSS = \"\"\"\n HeaderClock {\n- background: $secondary-background-lighten-1;\n+ background: $foreground-darken-1 5%;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n@@ -97,7 +97,7 @@\n }\n Header.-tall {\n height: 3;\n- } \n+ }\n \"\"\"\n \n tall = Reactive(False)\n", "issue": "Change Clock color\nFollowing on from #1411, perhaps the clock colour needs a wee revisit too?\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClockSpace(Widget):\n \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n }\n \"\"\"\n\n def render(self) -> str:\n return \"\"\n\n\nclass HeaderClock(HeaderClockSpace):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $foreground 5%;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n } \n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n yield HeaderClock() if self.show_clock else HeaderClockSpace()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}]} | 1,717 | 151 |
gh_patches_debug_642 | rasdani/github-patches | git_diff | pex-tool__pex-2062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.123
On the docket:
+ [x] Create lockfile for xmlsec fails #2063
+ [x] Internal not enough values to unpack error for pex3 lock create 'pip @ https://github.com/pypa/pip/archive/22.0.2.zip' ... #2057
+ [x] Pex lock creation does not handle wheels with non {cp,pp,py} pyver tag. #2059
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.122"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.122"
+__version__ = "2.1.123"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.122\"\n+__version__ = \"2.1.123\"\n", "issue": "Release 2.1.123\nOn the docket:\r\n+ [x] Create lockfile for xmlsec fails #2063\r\n+ [x] Internal not enough values to unpack error for pex3 lock create 'pip @ https://github.com/pypa/pip/archive/22.0.2.zip' ... #2057\r\n+ [x] Pex lock creation does not handle wheels with non {cp,pp,py} pyver tag. #2059\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.122\"\n", "path": "pex/version.py"}]} | 693 | 99 |
gh_patches_debug_14865 | rasdani/github-patches | git_diff | spacetelescope__jwql-419 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make JWQL pip installable
Currently our `jwql` package is only installable by cloning the repository and running `setup.py`. It would be easier for users (and perhaps easier for us when distributing our code (#294)) if it were also uploaded to PyPI and `pip` installable.
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.20.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '
8 AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = [
11 'astropy',
12 'astroquery>=0.3.9',
13 'authlib',
14 'bokeh>=1.0',
15 'django>=2.0',
16 'jinja2',
17 'jwedb',
18 'jwst',
19 'matplotlib',
20 'numpy',
21 'numpydoc',
22 'pandas',
23 'psycopg2',
24 'pysiaf',
25 'pytest',
26 'sphinx',
27 'sqlalchemy',
28 'stsci_rtd_theme'
29 ]
30
31 setup(
32 name='jwql',
33 version=VERSION,
34 description='The JWST Quicklook Project',
35 url='https://github.com/spacetelescope/jwql.git',
36 author=AUTHORS,
37 author_email='[email protected]',
38 license='BSD',
39 keywords=['astronomy', 'python'],
40 classifiers=['Programming Language :: Python'],
41 packages=find_packages(),
42 install_requires=REQUIRES,
43 include_package_data=True,
44 include_dirs=[np.get_include()],
45 )
46
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,8 +4,10 @@
VERSION = '0.20.0'
-AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '
-AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
+AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
+
+DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
REQUIRES = [
'astropy',
@@ -31,7 +33,7 @@
setup(
name='jwql',
version=VERSION,
- description='The JWST Quicklook Project',
+ description=DESCRIPTION,
url='https://github.com/spacetelescope/jwql.git',
author=AUTHORS,
author_email='[email protected]',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,8 +4,10 @@\n \n VERSION = '0.20.0'\n \n-AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '\n-AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n+AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n+\n+DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n REQUIRES = [\n 'astropy',\n@@ -31,7 +33,7 @@\n setup(\n name='jwql',\n version=VERSION,\n- description='The JWST Quicklook Project',\n+ description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n", "issue": "Make JWQL pip installable\nCurrently our `jwql` package is only installable by cloning the repository and running `setup.py`. It would be easier for users (and perhaps easier for us when distributing our code (#294)) if it were also uploaded to PyPI and `pip` installable. \n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.20.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '\nAUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = [\n 'astropy',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'django>=2.0',\n 'jinja2',\n 'jwedb',\n 'jwst',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 979 | 245 |
gh_patches_debug_1991 | rasdani/github-patches | git_diff | pypi__warehouse-3056 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable 'delete confirm' button until confirmation word is correct
We currently have a modal on `warehouse/templates/manage/settings.html`, that allows the user to confirm that they want to delete their project:

The user is required to enter the project name as an extra security measure. If they get it wrong, we show them this error:

## Proposal
It would be really nice if we could `disable` the delete button until the correct project name is given, e.g.


## Notes
We will have several other delete confirmation modals on other pages, sometimes with multiple modals on a single page (e.g. delete release, delete file) - so the code will need to be written to take this into account.
</issue>
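The enable/disable toggle proposed above would live in the template's JavaScript, but the comparison it relies on is the same normalised-name check the view code below performs. A minimal sketch of that check (using `packaging.utils.canonicalize_name` as the code below does; the helper name here is made up for illustration):

```python
from packaging.utils import canonicalize_name

def confirmation_matches(submitted: str, project_name: str) -> bool:
    # Normalise both sides so e.g. "My-Project" and "my_project" compare equal.
    return canonicalize_name(submitted) == canonicalize_name(project_name)

assert confirmation_matches("My-Project", "my_project")
assert not confirmation_matches("another-project", "my_project")
```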
<code>
[start of warehouse/utils/project.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from packaging.utils import canonicalize_name
14 from pyramid.httpexceptions import HTTPSeeOther
15
16 from warehouse.packaging.models import (
17 Release, Dependency, File, Role, JournalEntry, release_classifiers
18 )
19
20
21 def confirm_project(project, request, fail_route):
22 confirm = request.POST.get("confirm")
23 project_name = project.normalized_name
24 if not confirm:
25 request.session.flash(
26 "Must confirm the request.",
27 queue="error",
28 )
29 raise HTTPSeeOther(
30 request.route_path(fail_route, project_name=project_name)
31 )
32 if canonicalize_name(confirm) != project.normalized_name:
33 request.session.flash(
34 "Could not delete project - " +
35 f"{confirm!r} is not the same as {project.normalized_name!r}",
36 queue="error",
37 )
38 raise HTTPSeeOther(
39 request.route_path(fail_route, project_name=project_name)
40 )
41
42
43 def remove_project(project, request, flash=True):
44 # TODO: We don't actually delete files from the data store. We should add
45 # some kind of garbage collection at some point.
46
47 request.db.add(
48 JournalEntry(
49 name=project.name,
50 action="remove",
51 submitted_by=request.user,
52 submitted_from=request.remote_addr,
53 )
54 )
55 request.db.query(Role).filter(Role.project == project).delete()
56 request.db.query(File).filter(File.name == project.name).delete()
57 (request.db.query(Dependency).filter(Dependency.name == project.name)
58 .delete())
59 (request.db.execute(release_classifiers.delete()
60 .where(release_classifiers.c.name ==
61 project.name)))
62
63 # Load the following objects into the session and individually delete them
64 # so they are included in `session.deleted` and their cache keys are purged
65
66 # Delete releases first, otherwise they will get cascade-deleted by the
67 # project deletion and won't be purged
68 for release in (
69 request.db.query(Release)
70 .filter(Release.project == project)
71 .all()):
72 request.db.delete(release)
73
74 # Finally, delete the project
75 request.db.delete(project)
76
77 # Flush so we can repeat this multiple times if necessary
78 request.db.flush()
79
80 if flash:
81 request.session.flash(
82 f"Successfully deleted the project {project.name!r}.",
83 queue="success",
84 )
85
[end of warehouse/utils/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/utils/project.py b/warehouse/utils/project.py
--- a/warehouse/utils/project.py
+++ b/warehouse/utils/project.py
@@ -19,7 +19,7 @@
def confirm_project(project, request, fail_route):
- confirm = request.POST.get("confirm")
+ confirm = request.POST.get("confirm_project_name")
project_name = project.normalized_name
if not confirm:
request.session.flash(
| {"golden_diff": "diff --git a/warehouse/utils/project.py b/warehouse/utils/project.py\n--- a/warehouse/utils/project.py\n+++ b/warehouse/utils/project.py\n@@ -19,7 +19,7 @@\n \n \n def confirm_project(project, request, fail_route):\n- confirm = request.POST.get(\"confirm\")\n+ confirm = request.POST.get(\"confirm_project_name\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n", "issue": "Disable 'delete confirm' button until confirmation word is correct\nWe currently have a modal on `warehouse/templates/manage/settings.html`, that allows the user to confirm that they want to delete their project:\r\n\r\n\r\n\r\nThe user is required to enter the project name as an extra security measure. If they get it wrong, we show them this error:\r\n\r\n\r\n\r\n## Proposal\r\n\r\nIt would be really nice if we could `disable` the delete button until the correct project name is given, e.g.\r\n\r\n\r\n\r\n\r\n\r\n## Notes\r\n\r\nWe will have several other delete confirmation modals on other pages, sometimes with multiple modals on a single page (e.g. delete release, delete file) - so the code will need to be written to take this into account.\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom packaging.utils import canonicalize_name\nfrom pyramid.httpexceptions import HTTPSeeOther\n\nfrom warehouse.packaging.models import (\n Release, Dependency, File, Role, JournalEntry, release_classifiers\n)\n\n\ndef confirm_project(project, request, fail_route):\n confirm = request.POST.get(\"confirm\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n \"Must confirm the request.\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(fail_route, project_name=project_name)\n )\n if canonicalize_name(confirm) != project.normalized_name:\n request.session.flash(\n \"Could not delete project - \" +\n f\"{confirm!r} is not the same as {project.normalized_name!r}\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(fail_route, project_name=project_name)\n )\n\n\ndef remove_project(project, request, flash=True):\n # TODO: We don't actually delete files from the data store. 
We should add\n # some kind of garbage collection at some point.\n\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"remove\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.query(Role).filter(Role.project == project).delete()\n request.db.query(File).filter(File.name == project.name).delete()\n (request.db.query(Dependency).filter(Dependency.name == project.name)\n .delete())\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n\n # Load the following objects into the session and individually delete them\n # so they are included in `session.deleted` and their cache keys are purged\n\n # Delete releases first, otherwise they will get cascade-deleted by the\n # project deletion and won't be purged\n for release in (\n request.db.query(Release)\n .filter(Release.project == project)\n .all()):\n request.db.delete(release)\n\n # Finally, delete the project\n request.db.delete(project)\n\n # Flush so we can repeat this multiple times if necessary\n request.db.flush()\n\n if flash:\n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n queue=\"success\",\n )\n", "path": "warehouse/utils/project.py"}]} | 1,796 | 96 |
gh_patches_debug_7415 | rasdani/github-patches | git_diff | fonttools__fonttools-2439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ReemKufiInk crashes pyftsubset
```
pyftsubset --text=duck --output-file=/tmp/reem.otf ReemKufiInk-Bold.otf
Traceback (most recent call last):
File "/tmp/venv/bin/pyftsubset", line 8, in <module>
sys.exit(main())
File "/tmp/venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py", line 372, in wrapper
return func(*args, **kwds)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 3104, in main
subsetter.subset(font)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2908, in subset
self._subset_glyphs(font)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2846, in _subset_glyphs
retain = table.subset_glyphs(self)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2086, in subset_glyphs
colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py", line 6, in unbuildColrV1
unbuilder = LayerListUnbuilder(layerV1List.Paint)
AttributeError: 'NoneType' object has no attribute 'Paint'
```
TTX handles the font just fine. File from https://github.com/aliftype/reem-kufi/commits/colr-v1 at 93d6dcd693ae42bb4295701e88a07cc4d04db73c
</issue>
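A quick way to confirm the triggering condition (file name taken from the report, otherwise any COLRv1 font; this is a diagnostic sketch, not the eventual fix). A COLRv1 table only needs a `LayerList` when some paint uses `PaintColrLayers`, so `LayerList` can legitimately be `None` here:

```python
from fontTools.ttLib import TTFont

font = TTFont("ReemKufiInk-Bold.otf")  # path from the report
colr = font["COLR"]
if colr.version >= 1 and colr.table.LayerList is None:
    print("COLRv1 font without a LayerList -> unbuildColrV1() currently raises")
```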
<code>
[start of Lib/fontTools/colorLib/unbuilder.py]
1 from fontTools.ttLib.tables import otTables as ot
2 from .table_builder import TableUnbuilder
3
4
5 def unbuildColrV1(layerV1List, baseGlyphV1List):
6 unbuilder = LayerListUnbuilder(layerV1List.Paint)
7 return {
8 rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
9 for rec in baseGlyphV1List.BaseGlyphPaintRecord
10 }
11
12
13 def _flatten(lst):
14 for el in lst:
15 if isinstance(el, list):
16 yield from _flatten(el)
17 else:
18 yield el
19
20
21 class LayerListUnbuilder:
22 def __init__(self, layers):
23 self.layers = layers
24
25 callbacks = {
26 (
27 ot.Paint,
28 ot.PaintFormat.PaintColrLayers,
29 ): self._unbuildPaintColrLayers,
30 }
31 self.tableUnbuilder = TableUnbuilder(callbacks)
32
33 def unbuildPaint(self, paint):
34 assert isinstance(paint, ot.Paint)
35 return self.tableUnbuilder.unbuild(paint)
36
37 def _unbuildPaintColrLayers(self, source):
38 assert source["Format"] == ot.PaintFormat.PaintColrLayers
39
40 layers = list(
41 _flatten(
42 [
43 self.unbuildPaint(childPaint)
44 for childPaint in self.layers[
45 source["FirstLayerIndex"] : source["FirstLayerIndex"]
46 + source["NumLayers"]
47 ]
48 ]
49 )
50 )
51
52 if len(layers) == 1:
53 return layers[0]
54
55 return {"Format": source["Format"], "Layers": layers}
56
57
58 if __name__ == "__main__":
59 from pprint import pprint
60 import sys
61 from fontTools.ttLib import TTFont
62
63 try:
64 fontfile = sys.argv[1]
65 except IndexError:
66 sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
67
68 font = TTFont(fontfile)
69 colr = font["COLR"]
70 if colr.version < 1:
71 sys.exit(f"error: No COLR table version=1 found in {fontfile}")
72
73 colorGlyphs = unbuildColrV1(
74 colr.table.LayerList,
75 colr.table.BaseGlyphList,
76 ignoreVarIdx=not colr.table.VarStore,
77 )
78
79 pprint(colorGlyphs)
80
[end of Lib/fontTools/colorLib/unbuilder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py
--- a/Lib/fontTools/colorLib/unbuilder.py
+++ b/Lib/fontTools/colorLib/unbuilder.py
@@ -2,11 +2,14 @@
from .table_builder import TableUnbuilder
-def unbuildColrV1(layerV1List, baseGlyphV1List):
- unbuilder = LayerListUnbuilder(layerV1List.Paint)
+def unbuildColrV1(layerList, baseGlyphList):
+ layers = []
+ if layerList:
+ layers = layerList.Paint
+ unbuilder = LayerListUnbuilder(layers)
return {
rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
- for rec in baseGlyphV1List.BaseGlyphPaintRecord
+ for rec in baseGlyphList.BaseGlyphPaintRecord
}
| {"golden_diff": "diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py\n--- a/Lib/fontTools/colorLib/unbuilder.py\n+++ b/Lib/fontTools/colorLib/unbuilder.py\n@@ -2,11 +2,14 @@\n from .table_builder import TableUnbuilder\n \n \n-def unbuildColrV1(layerV1List, baseGlyphV1List):\n- unbuilder = LayerListUnbuilder(layerV1List.Paint)\n+def unbuildColrV1(layerList, baseGlyphList):\n+ layers = []\n+ if layerList:\n+ layers = layerList.Paint\n+ unbuilder = LayerListUnbuilder(layers)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n- for rec in baseGlyphV1List.BaseGlyphPaintRecord\n+ for rec in baseGlyphList.BaseGlyphPaintRecord\n }\n", "issue": "ReemKufiInk crashes pyftsubset\n```\r\npyftsubset --text=duck --output-file=/tmp/reem.otf ReemKufiInk-Bold.otf\r\n\r\nTraceback (most recent call last):\r\n File \"/tmp/venv/bin/pyftsubset\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py\", line 372, in wrapper\r\n return func(*args, **kwds)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 3104, in main\r\n subsetter.subset(font)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2908, in subset\r\n self._subset_glyphs(font)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2846, in _subset_glyphs\r\n retain = table.subset_glyphs(self)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2086, in subset_glyphs\r\n colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py\", line 6, in unbuildColrV1\r\n unbuilder = LayerListUnbuilder(layerV1List.Paint)\r\nAttributeError: 'NoneType' object has no attribute 'Paint'\r\n```\r\n\r\nTTX handles the font just fine. 
File from https://github.com/aliftype/reem-kufi/commits/colr-v1 at 93d6dcd693ae42bb4295701e88a07cc4d04db73c\n", "before_files": [{"content": "from fontTools.ttLib.tables import otTables as ot\nfrom .table_builder import TableUnbuilder\n\n\ndef unbuildColrV1(layerV1List, baseGlyphV1List):\n unbuilder = LayerListUnbuilder(layerV1List.Paint)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n for rec in baseGlyphV1List.BaseGlyphPaintRecord\n }\n\n\ndef _flatten(lst):\n for el in lst:\n if isinstance(el, list):\n yield from _flatten(el)\n else:\n yield el\n\n\nclass LayerListUnbuilder:\n def __init__(self, layers):\n self.layers = layers\n\n callbacks = {\n (\n ot.Paint,\n ot.PaintFormat.PaintColrLayers,\n ): self._unbuildPaintColrLayers,\n }\n self.tableUnbuilder = TableUnbuilder(callbacks)\n\n def unbuildPaint(self, paint):\n assert isinstance(paint, ot.Paint)\n return self.tableUnbuilder.unbuild(paint)\n\n def _unbuildPaintColrLayers(self, source):\n assert source[\"Format\"] == ot.PaintFormat.PaintColrLayers\n\n layers = list(\n _flatten(\n [\n self.unbuildPaint(childPaint)\n for childPaint in self.layers[\n source[\"FirstLayerIndex\"] : source[\"FirstLayerIndex\"]\n + source[\"NumLayers\"]\n ]\n ]\n )\n )\n\n if len(layers) == 1:\n return layers[0]\n\n return {\"Format\": source[\"Format\"], \"Layers\": layers}\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n import sys\n from fontTools.ttLib import TTFont\n\n try:\n fontfile = sys.argv[1]\n except IndexError:\n sys.exit(\"usage: fonttools colorLib.unbuilder FONTFILE\")\n\n font = TTFont(fontfile)\n colr = font[\"COLR\"]\n if colr.version < 1:\n sys.exit(f\"error: No COLR table version=1 found in {fontfile}\")\n\n colorGlyphs = unbuildColrV1(\n colr.table.LayerList,\n colr.table.BaseGlyphList,\n ignoreVarIdx=not colr.table.VarStore,\n )\n\n pprint(colorGlyphs)\n", "path": "Lib/fontTools/colorLib/unbuilder.py"}]} | 1,619 | 195 |
gh_patches_debug_6818 | rasdani/github-patches | git_diff | sbi-dev__sbi-11 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non-conditional density estimators are initialized with context_features=None, should support 0 as well
the CDEs have a default input context_features which defaults to None. When this is a positive integer, we get a CDE, for a DE we need to set it to None. It should support a value of zero, though.
</issue>
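A sketch of the requested semantics (the function and names are hypothetical, not the actual estimator API): both `None` and `0` should select the unconditional branch, while positive integers keep selecting the conditional one.

```python
def resolve_context_features(context_features):
    """Treat None and 0 alike: build an unconditional density estimator."""
    if context_features in (None, 0):
        return None                      # -> plain density estimator
    if isinstance(context_features, int) and context_features > 0:
        return context_features          # -> conditional density estimator
    raise ValueError("context_features must be None, 0, or a positive int")

assert resolve_context_features(None) is None
assert resolve_context_features(0) is None
assert resolve_context_features(4) == 4
```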
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 exec(open("lfi/version.py").read())
4
5 setup(
6 name="lfi",
7 version=__version__,
8 description="LFI + CDE.",
9 url="https://github.com/mackelab/lfi",
10 author="Conor Durkan",
11 packages=find_packages(exclude=["tests"]),
12 license="GPLv3",
13 test_requires=["pytest", "deepdiff", "torchtestcase"],
14 install_requires=[
15 "matplotlib",
16 "numpy",
17 "pyro-ppl",
18 "scipy",
19 "tensorboard",
20 "torch",
21 "tqdm",
22 ],
23 extras_requires={"dev": ["autoflake", "black", "flake8", "isort", "pytest"]},
24 dependency_links=[],
25 )
26
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,12 @@
from setuptools import find_packages, setup
-exec(open("lfi/version.py").read())
+exec(open("sbi/version.py").read())
setup(
- name="lfi",
+ name="sbi",
version=__version__,
- description="LFI + CDE.",
- url="https://github.com/mackelab/lfi",
+ description="Simulation-based inference",
+ url="https://github.com/mackelab/sbi",
author="Conor Durkan",
packages=find_packages(exclude=["tests"]),
license="GPLv3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,12 @@\n from setuptools import find_packages, setup\n \n-exec(open(\"lfi/version.py\").read())\n+exec(open(\"sbi/version.py\").read())\n \n setup(\n- name=\"lfi\",\n+ name=\"sbi\",\n version=__version__,\n- description=\"LFI + CDE.\",\n- url=\"https://github.com/mackelab/lfi\",\n+ description=\"Simulation-based inference\",\n+ url=\"https://github.com/mackelab/sbi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n", "issue": "Non-conditional density estimators are initialized with context_features=None, should support 0 as well\nthe CDEs have a default input context_features which defaults to None. When this is a positive integer, we get a CDE, for a DE we need to set it to None. It should support a value of zero, though.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nexec(open(\"lfi/version.py\").read())\n\nsetup(\n name=\"lfi\",\n version=__version__,\n description=\"LFI + CDE.\",\n url=\"https://github.com/mackelab/lfi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n test_requires=[\"pytest\", \"deepdiff\", \"torchtestcase\"],\n install_requires=[\n \"matplotlib\",\n \"numpy\",\n \"pyro-ppl\",\n \"scipy\",\n \"tensorboard\",\n \"torch\",\n \"tqdm\",\n ],\n extras_requires={\"dev\": [\"autoflake\", \"black\", \"flake8\", \"isort\", \"pytest\"]},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 810 | 156 |
gh_patches_debug_834 | rasdani/github-patches | git_diff | craiga__will-of-the-prophets-26 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up login form
</issue>
<code>
[start of will_of_the_prophets/settings/__init__.py]
1 """
2 Django settings for will_of_the_prophets project.
3
4 Generated by 'django-admin startproject' using Django 2.0.4.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/2.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/2.0/ref/settings/
11 """
12
13 import os
14
15 import django_heroku
16
17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19
20
21 # Quick-start development settings - unsuitable for production
22 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
23
24 # SECURITY WARNING: keep the secret key used in production secret!
25 SECRET_KEY = os.environ.get(
26 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')
27
28 # SECURITY WARNING: don't run with debug turned on in production!
29 DEBUG = os.environ.get('DEBUG', False)
30
31 ALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']
32
33
34 # Application definition
35
36 INSTALLED_APPS = [
37 'raven.contrib.django.raven_compat',
38 'django.contrib.admin',
39 'django.contrib.auth',
40 'django.contrib.contenttypes',
41 'django.contrib.sessions',
42 'django.contrib.messages',
43 'django.contrib.staticfiles',
44 'sass_processor',
45 'bootstrap',
46 'will_of_the_prophets',
47 ]
48
49 MIDDLEWARE = [
50 'django.middleware.security.SecurityMiddleware',
51 'django.contrib.sessions.middleware.SessionMiddleware',
52 'django.middleware.common.CommonMiddleware',
53 'django.middleware.csrf.CsrfViewMiddleware',
54 'django.contrib.auth.middleware.AuthenticationMiddleware',
55 'django.contrib.messages.middleware.MessageMiddleware',
56 'django.middleware.clickjacking.XFrameOptionsMiddleware',
57 ]
58
59 ROOT_URLCONF = 'will_of_the_prophets.urls'
60
61 TEMPLATES = [
62 {
63 'BACKEND': 'django.template.backends.django.DjangoTemplates',
64 'DIRS': [],
65 'APP_DIRS': True,
66 'OPTIONS': {
67 'context_processors': [
68 'django.template.context_processors.debug',
69 'django.template.context_processors.request',
70 'django.contrib.auth.context_processors.auth',
71 'django.contrib.messages.context_processors.messages',
72 ],
73 },
74 },
75 ]
76
77 WSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'
78
79
80 # Database
81 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
82
83 DATABASES = {
84 'default': {
85 'ENGINE': 'django.db.backends.sqlite3',
86 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
87 }
88 }
89
90
91 # Password validation
92 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
93
94 AUTH_PASSWORD_VALIDATORS = [
95 {
96 'NAME': ('django.contrib.auth.password_validation'
97 '.UserAttributeSimilarityValidator'),
98 },
99 {
100 'NAME': ('django.contrib.auth.password_validation'
101 '.MinimumLengthValidator'),
102 },
103 {
104 'NAME': ('django.contrib.auth.password_validation'
105 '.CommonPasswordValidator'),
106 },
107 {
108 'NAME': ('django.contrib.auth.password_validation'
109 '.NumericPasswordValidator'),
110 },
111 ]
112
113
114 # Internationalization
115 # https://docs.djangoproject.com/en/2.0/topics/i18n/
116
117 LANGUAGE_CODE = 'en-us'
118
119 TIME_ZONE = 'UTC'
120
121 USE_I18N = True
122
123 USE_L10N = True
124
125 USE_TZ = True
126
127
128 # Static files (CSS, JavaScript, Images)
129 # https://docs.djangoproject.com/en/2.0/howto/static-files/
130
131 STATIC_URL = '/static/'
132
133 STATICFILES_FINDERS = [
134 'django.contrib.staticfiles.finders.FileSystemFinder',
135 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
136 # https://github.com/jrief/django-sass-processor
137 'sass_processor.finders.CssFinder',
138 ]
139
140
141 # django-sass-processor
142 # https://github.com/jrief/django-sass-processor
143 SASS_OUTPUT_STYLE = 'compressed'
144
145
146 # Configure Django App for Heroku.
147 django_heroku.settings(locals())
148
[end of will_of_the_prophets/settings/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py
--- a/will_of_the_prophets/settings/__init__.py
+++ b/will_of_the_prophets/settings/__init__.py
@@ -42,6 +42,7 @@
'django.contrib.messages',
'django.contrib.staticfiles',
'sass_processor',
+ 'widget_tweaks',
'bootstrap',
'will_of_the_prophets',
]
| {"golden_diff": "diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py\n--- a/will_of_the_prophets/settings/__init__.py\n+++ b/will_of_the_prophets/settings/__init__.py\n@@ -42,6 +42,7 @@\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n+ 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n ]\n", "issue": "Clean up login form\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N 
= True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}]} | 1,802 | 113 |
gh_patches_debug_9798 | rasdani/github-patches | git_diff | netbox-community__netbox-15788 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New User model has a 32-bit integer `id` field
### Deployment Type
NetBox Cloud
### NetBox Version
v4.0-beta1
### Python Version
3.10
### Steps to Reproduce
1. Upgrade a v3.7 database to v4.0
2. Inspect the `users_user` table
### Expected Behavior
The `id` column of the `users_user` table should be a `bigint` (64-bit integer), like all other models in NetBox.
### Observed Behavior
The `id` column is a regular 32-bit integer. This is because we renamed the stock Django table, which uses a 32-bit integer `id` field.
</issue>
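One way to verify the observed column type on a migrated database (run from `manage.py shell`; the table name comes from the issue, the query is plain information-schema SQL):

```python
from django.db import connection

with connection.cursor() as cursor:
    cursor.execute(
        "SELECT data_type FROM information_schema.columns "
        "WHERE table_name = 'users_user' AND column_name = 'id'"
    )
    print(cursor.fetchone())  # ('integer',) shows the 32-bit id; expected: ('bigint',)
```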
<code>
[start of netbox/users/migrations/0005_alter_user_table.py]
1 from django.db import migrations
2
3
4 def update_content_types(apps, schema_editor):
5 ContentType = apps.get_model('contenttypes', 'ContentType')
6 # Delete the new ContentTypes effected by the new models in the users app
7 ContentType.objects.filter(app_label='users', model='user').delete()
8
9 # Update the app labels of the original ContentTypes for auth.User to ensure
10 # that any foreign key references are preserved
11 ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')
12
13 netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()
14 if netboxuser_ct:
15 user_ct = ContentType.objects.filter(app_label='users', model='user').first()
16 CustomField = apps.get_model('extras', 'CustomField')
17 CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)
18 netboxuser_ct.delete()
19
20
21 class Migration(migrations.Migration):
22
23 dependencies = [
24 ('users', '0002_squashed_0004'),
25 ]
26
27 operations = [
28 # The User table was originally created as 'auth_user'. Now we nullify the model's
29 # db_table option, so that it defaults to the app & model name (users_user). This
30 # causes the database table to be renamed.
31 migrations.AlterModelTable(
32 name='user',
33 table=None,
34 ),
35
36 # Rename auth_user_* sequences
37 migrations.RunSQL("ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq"),
38 migrations.RunSQL("ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq"),
39 migrations.RunSQL("ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq"),
40
41 # Rename auth_user_* indexes
42 migrations.RunSQL("ALTER INDEX auth_user_pkey RENAME TO users_user_pkey"),
43 # Hash is deterministic; generated via schema_editor._create_index_name()
44 migrations.RunSQL("ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like"),
45 migrations.RunSQL("ALTER INDEX auth_user_username_key RENAME TO users_user_username_key"),
46
47 # Update ContentTypes
48 migrations.RunPython(
49 code=update_content_types,
50 reverse_code=migrations.RunPython.noop
51 ),
52 ]
53
[end of netbox/users/migrations/0005_alter_user_table.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/users/migrations/0005_alter_user_table.py b/netbox/users/migrations/0005_alter_user_table.py
--- a/netbox/users/migrations/0005_alter_user_table.py
+++ b/netbox/users/migrations/0005_alter_user_table.py
@@ -33,6 +33,9 @@
table=None,
),
+ # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)
+ migrations.RunSQL("ALTER TABLE users_user ALTER COLUMN id TYPE bigint"),
+
# Rename auth_user_* sequences
migrations.RunSQL("ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq"),
migrations.RunSQL("ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq"),
| {"golden_diff": "diff --git a/netbox/users/migrations/0005_alter_user_table.py b/netbox/users/migrations/0005_alter_user_table.py\n--- a/netbox/users/migrations/0005_alter_user_table.py\n+++ b/netbox/users/migrations/0005_alter_user_table.py\n@@ -33,6 +33,9 @@\n table=None,\n ),\n \n+ # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)\n+ migrations.RunSQL(\"ALTER TABLE users_user ALTER COLUMN id TYPE bigint\"),\n+\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n", "issue": "New User model has a 32-bit integer `id` field\n### Deployment Type\n\nNetBox Cloud\n\n### NetBox Version\n\nv4.0-beta1\n\n### Python Version\n\n3.10\n\n### Steps to Reproduce\n\n1. Upgrade a v3.7 database to v4.0\r\n2. Inspect the `users_user` table\n\n### Expected Behavior\n\nThe `id` column of the `users_user` table should be a `bigint` (64-bit integer), like all other models in NetBox.\n\n### Observed Behavior\n\nThe `id` column is a regular 32-bit integer. This is because we renamed the stock Django table, which uses a 32-bit integer `id` field.\n", "before_files": [{"content": "from django.db import migrations\n\n\ndef update_content_types(apps, schema_editor):\n ContentType = apps.get_model('contenttypes', 'ContentType')\n # Delete the new ContentTypes effected by the new models in the users app\n ContentType.objects.filter(app_label='users', model='user').delete()\n\n # Update the app labels of the original ContentTypes for auth.User to ensure\n # that any foreign key references are preserved\n ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')\n\n netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()\n if netboxuser_ct:\n user_ct = ContentType.objects.filter(app_label='users', model='user').first()\n CustomField = apps.get_model('extras', 'CustomField')\n CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)\n netboxuser_ct.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_squashed_0004'),\n ]\n\n operations = [\n # The User table was originally created as 'auth_user'. Now we nullify the model's\n # db_table option, so that it defaults to the app & model name (users_user). This\n # causes the database table to be renamed.\n migrations.AlterModelTable(\n name='user',\n table=None,\n ),\n\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq\"),\n\n # Rename auth_user_* indexes\n migrations.RunSQL(\"ALTER INDEX auth_user_pkey RENAME TO users_user_pkey\"),\n # Hash is deterministic; generated via schema_editor._create_index_name()\n migrations.RunSQL(\"ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like\"),\n migrations.RunSQL(\"ALTER INDEX auth_user_username_key RENAME TO users_user_username_key\"),\n\n # Update ContentTypes\n migrations.RunPython(\n code=update_content_types,\n reverse_code=migrations.RunPython.noop\n ),\n ]\n", "path": "netbox/users/migrations/0005_alter_user_table.py"}]} | 1,324 | 184 |
gh_patches_debug_32866 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4451 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation for the v2 of the configuration file
At first, I was thinking to automate this given the schema, but the spec isn't very large so we can just hand-write this without too much effort.
</issue>
<code>
[start of docs/doc_extensions.py]
1 """
2 Read the Docs documentation extensions for Sphinx
3
4 Adds the following roles:
5
6 djangosetting
7 Output an inline literal of the corresponding setting value. Useful for
8 keeping documentation up to date without editing on settings changes.
9 """
10
11 from docutils import nodes, utils
12
13 from django.conf import settings
14
15 from readthedocs.projects.models import Feature
16
17
18 def django_setting_role(typ, rawtext, text, lineno, inliner, options=None,
19 content=None):
20 """Always up to date Django settings from the application"""
21 dj_setting = getattr(settings, utils.unescape(text), 'None')
22 node = nodes.literal(dj_setting, dj_setting)
23 return [node], []
24
25
26 def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,
27 content=None):
28 """Up to date feature flags from the application."""
29 all_features = Feature.FEATURES
30 requested_feature = utils.unescape(text)
31 for feature in all_features:
32 if requested_feature.lower() == feature[0].lower():
33 desc = nodes.Text(feature[1], feature[1])
34 return [desc], []
35
36
37 def setup(_):
38 from docutils.parsers.rst import roles
39 roles.register_local_role(
40 'djangosetting',
41 django_setting_role
42 )
43 roles.register_local_role(
44 'featureflags',
45 feature_flags_role
46 )
47
48 return {
49 'version': 'builtin',
50 'parallel_read_safe': True,
51 'parallel_write_safe': True,
52 }
53
[end of docs/doc_extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/doc_extensions.py b/docs/doc_extensions.py
--- a/docs/doc_extensions.py
+++ b/docs/doc_extensions.py
@@ -6,11 +6,14 @@
djangosetting
Output an inline literal of the corresponding setting value. Useful for
keeping documentation up to date without editing on settings changes.
-"""
-from docutils import nodes, utils
+buildpyversions
+ Output a comma separated list of the supported python versions for a
+ Read the Docs build image.
+"""
from django.conf import settings
+from docutils import nodes, utils
from readthedocs.projects.models import Feature
@@ -23,8 +26,23 @@
return [node], []
+def python_supported_versions_role(typ, rawtext, text, lineno, inliner,
+ options=None, content=None):
+ """Up to date supported python versions for each build image."""
+ image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)
+ image_settings = settings.DOCKER_IMAGE_SETTINGS[image]
+ python_versions = image_settings['python']['supported_versions']
+ node_list = []
+ separator = ', '
+ for i, version in enumerate(python_versions):
+ node_list.append(nodes.literal(version, version))
+ if i < len(python_versions) - 1:
+ node_list.append(nodes.Text(separator))
+ return (node_list, [])
+
+
def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,
- content=None):
+ content=None):
"""Up to date feature flags from the application."""
all_features = Feature.FEATURES
requested_feature = utils.unescape(text)
@@ -40,9 +58,13 @@
'djangosetting',
django_setting_role
)
+ roles.register_local_role(
+ 'buildpyversions',
+ python_supported_versions_role,
+ )
roles.register_local_role(
'featureflags',
- feature_flags_role
+ feature_flags_role,
)
return {
| {"golden_diff": "diff --git a/docs/doc_extensions.py b/docs/doc_extensions.py\n--- a/docs/doc_extensions.py\n+++ b/docs/doc_extensions.py\n@@ -6,11 +6,14 @@\n djangosetting\n Output an inline literal of the corresponding setting value. Useful for\n keeping documentation up to date without editing on settings changes.\n-\"\"\"\n \n-from docutils import nodes, utils\n+buildpyversions\n+ Output a comma separated list of the supported python versions for a\n+ Read the Docs build image.\n+\"\"\"\n \n from django.conf import settings\n+from docutils import nodes, utils\n \n from readthedocs.projects.models import Feature\n \n@@ -23,8 +26,23 @@\n return [node], []\n \n \n+def python_supported_versions_role(typ, rawtext, text, lineno, inliner,\n+ options=None, content=None):\n+ \"\"\"Up to date supported python versions for each build image.\"\"\"\n+ image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)\n+ image_settings = settings.DOCKER_IMAGE_SETTINGS[image]\n+ python_versions = image_settings['python']['supported_versions']\n+ node_list = []\n+ separator = ', '\n+ for i, version in enumerate(python_versions):\n+ node_list.append(nodes.literal(version, version))\n+ if i < len(python_versions) - 1:\n+ node_list.append(nodes.Text(separator))\n+ return (node_list, [])\n+\n+\n def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n- content=None):\n+ content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n@@ -40,9 +58,13 @@\n 'djangosetting',\n django_setting_role\n )\n+ roles.register_local_role(\n+ 'buildpyversions',\n+ python_supported_versions_role,\n+ )\n roles.register_local_role(\n 'featureflags',\n- feature_flags_role\n+ feature_flags_role,\n )\n \n return {\n", "issue": "Documentation for the v2 of the configuration file\nAt first, I was thinking to automate this given the schema, but the spec isn't very large so we can just hand-write this without too much effort.\n", "before_files": [{"content": "\"\"\"\nRead the Docs documentation extensions for Sphinx\n\nAdds the following roles:\n\ndjangosetting\n Output an inline literal of the corresponding setting value. Useful for\n keeping documentation up to date without editing on settings changes.\n\"\"\"\n\nfrom docutils import nodes, utils\n\nfrom django.conf import settings\n\nfrom readthedocs.projects.models import Feature\n\n\ndef django_setting_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Always up to date Django settings from the application\"\"\"\n dj_setting = getattr(settings, utils.unescape(text), 'None')\n node = nodes.literal(dj_setting, dj_setting)\n return [node], []\n\n\ndef feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n for feature in all_features:\n if requested_feature.lower() == feature[0].lower():\n desc = nodes.Text(feature[1], feature[1])\n return [desc], []\n\n\ndef setup(_):\n from docutils.parsers.rst import roles\n roles.register_local_role(\n 'djangosetting',\n django_setting_role\n )\n roles.register_local_role(\n 'featureflags',\n feature_flags_role\n )\n\n return {\n 'version': 'builtin',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/doc_extensions.py"}]} | 993 | 447 |
gh_patches_debug_2000 | rasdani/github-patches | git_diff | automl__auto-sklearn-190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add warning if dependencies are not met
There should be a warning if one of the following dependencies is not met:
- scikit-learn==0.17
- smac==0.0.1
- lockfile>=0.10
- ConfigSpace>=0.2.1
- pyrfr==0.2.1
</issue>
<code>
[start of autosklearn/util/dependencies.py]
1 from warnings import warn
2
3 import pkg_resources
4 import re
5
6 from distutils.version import LooseVersion
7
8
9 RE_PATTERN = re.compile('^(?P<name>[\w\-]+)((?P<operation>==|>=|>)(?P<version>(\d+\.)?(\d+\.)?(\d+)))?$')
10
11
12 def verify_packages(packages):
13 if not packages:
14 return
15 if isinstance(packages, str):
16 packages = packages.splitlines()
17
18 for package in packages:
19 if not package:
20 continue
21
22 match = RE_PATTERN.match(package)
23 if match:
24 name = match.group('name')
25 operation = match.group('operation')
26 version = match.group('version')
27 _verify_package(name, operation, version)
28 else:
29 raise ValueError('Unable to read requirement: %s' % package)
30
31
32 def _verify_package(name, operation, version):
33 try:
34 module = pkg_resources.get_distribution(name)
35 except pkg_resources.DistributionNotFound:
36 raise MissingPackageError(name) from None
37
38 if not operation:
39 return
40
41 required_version = LooseVersion(version)
42 installed_version = LooseVersion(module.version)
43
44 if operation == '==':
45 check = required_version == installed_version
46 elif operation == '>':
47 check = installed_version > required_version
48 elif operation == '>=':
49 check = installed_version > required_version or \
50 installed_version == required_version
51 else:
52 raise NotImplementedError('operation \'%s\' is not supported' % operation)
53 if not check:
54 raise IncorrectPackageVersionError(name, installed_version, operation, required_version)
55
56
57 class MissingPackageError(Exception):
58
59 error_message = 'mandatory package \'{name}\' not found'
60
61 def __init__(self, package_name):
62 self.package_name = package_name
63 super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))
64
65
66 class IncorrectPackageVersionError(Exception):
67
68 error_message = '\'{name} {installed_version}\' version mismatch ({operation}{required_version})'
69
70 def __init__(self, package_name, installed_version, operation, required_version):
71 self.package_name = package_name
72 self.installed_version = installed_version
73 self.operation = operation
74 self.required_version = required_version
75 message = self.error_message.format(name=package_name,
76 installed_version=installed_version,
77 operation=operation,
78 required_version=required_version)
79 super(IncorrectPackageVersionError, self).__init__(message)
80
[end of autosklearn/util/dependencies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py
--- a/autosklearn/util/dependencies.py
+++ b/autosklearn/util/dependencies.py
@@ -33,7 +33,7 @@
try:
module = pkg_resources.get_distribution(name)
except pkg_resources.DistributionNotFound:
- raise MissingPackageError(name) from None
+ raise MissingPackageError(name)
if not operation:
return
| {"golden_diff": "diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py\n--- a/autosklearn/util/dependencies.py\n+++ b/autosklearn/util/dependencies.py\n@@ -33,7 +33,7 @@\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n- raise MissingPackageError(name) from None\n+ raise MissingPackageError(name)\n \n if not operation:\n return\n", "issue": "Add warning if dependencies are not met\nThere should be a warning if one of the following dependencies is not met:\r\n- scikit-learn==0.17\r\n- smac==0.0.1\r\n- lockfile>=0.10\r\n- ConfigSpace>=0.2.1\r\n- pyrfr==0.2.1\r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport pkg_resources\nimport re\n\nfrom distutils.version import LooseVersion\n\n\nRE_PATTERN = re.compile('^(?P<name>[\\w\\-]+)((?P<operation>==|>=|>)(?P<version>(\\d+\\.)?(\\d+\\.)?(\\d+)))?$')\n\n\ndef verify_packages(packages):\n if not packages:\n return\n if isinstance(packages, str):\n packages = packages.splitlines()\n\n for package in packages:\n if not package:\n continue\n\n match = RE_PATTERN.match(package)\n if match:\n name = match.group('name')\n operation = match.group('operation')\n version = match.group('version')\n _verify_package(name, operation, version)\n else:\n raise ValueError('Unable to read requirement: %s' % package)\n\n\ndef _verify_package(name, operation, version):\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n raise MissingPackageError(name) from None\n\n if not operation:\n return\n\n required_version = LooseVersion(version)\n installed_version = LooseVersion(module.version)\n\n if operation == '==':\n check = required_version == installed_version\n elif operation == '>':\n check = installed_version > required_version\n elif operation == '>=':\n check = installed_version > required_version or \\\n installed_version == required_version\n else:\n raise NotImplementedError('operation \\'%s\\' is not supported' % operation)\n if not check:\n raise IncorrectPackageVersionError(name, installed_version, operation, required_version)\n\n\nclass MissingPackageError(Exception):\n\n error_message = 'mandatory package \\'{name}\\' not found'\n\n def __init__(self, package_name):\n self.package_name = package_name\n super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))\n\n\nclass IncorrectPackageVersionError(Exception):\n\n error_message = '\\'{name} {installed_version}\\' version mismatch ({operation}{required_version})'\n\n def __init__(self, package_name, installed_version, operation, required_version):\n self.package_name = package_name\n self.installed_version = installed_version\n self.operation = operation\n self.required_version = required_version\n message = self.error_message.format(name=package_name,\n installed_version=installed_version,\n operation=operation,\n required_version=required_version)\n super(IncorrectPackageVersionError, self).__init__(message)\n", "path": "autosklearn/util/dependencies.py"}]} | 1,305 | 105 |
gh_patches_debug_12462 | rasdani/github-patches | git_diff | dask__distributed-6306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
parse_stdout.py produces malformed Junit reports
parse_stdout.py has recently stopped working.
This causes Windows timeouts to be reported as a white box instead of a red box in https://dask.org/distributed/test_report.html.
https://github.com/dask/distributed/actions/runs/2293197167
> Publish test results: artifacts/windows-latest-3.10-notci1/pytest.xml#L976: Error processing result file: not well-formed (invalid token): line 976, column 93
</issue>
<code>
[start of continuous_integration/scripts/parse_stdout.py]
1 """On Windows, pytest-timeout kills off the whole test suite, leaving no junit report
2 behind. Parse the stdout of pytest to generate one.
3 """
4 from __future__ import annotations
5
6 import re
7 import sys
8 from collections import Counter, defaultdict
9 from collections.abc import Iterable
10 from datetime import datetime
11
12 OUTCOMES = {
13 "PASSED",
14 "FAILED",
15 # Test timeout. Marked as a variant of FAILED in the junit report
16 None,
17 # Setup failed or teardown failed.
18 # In the latter case, if the test also failed, show both a FAILED and an ERROR line.
19 "ERROR",
20 # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()
21 "SKIPPED",
22 # Reported as a variant of SKIPPED in the junit report
23 "XFAIL",
24 # These appear respectively before and after another status. Ignore.
25 "RERUN",
26 "LEAKED",
27 }
28
29
30 def parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:
31 match = re.compile(
32 r"(distributed/.*test.*)::([^ ]*)"
33 r"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$"
34 )
35
36 out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)
37
38 for row in rows:
39 m = match.match(row)
40 if not m:
41 continue
42
43 fname = m.group(1)
44 clsname = fname.replace("/", ".").replace(".py", "").replace("::", ".")
45
46 tname = m.group(2).strip()
47 if m.group(4) and "]" in m.group(4):
48 tname += " " + m.group(4).split("]")[0] + "]"
49
50 outcome = m.group(5)
51 assert outcome in OUTCOMES
52 if outcome not in {"RERUN", "LEAKED"}:
53 out[clsname, tname].add(outcome)
54
55 return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]
56
57
58 def build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:
59 cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)
60 timestamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
61
62 # We could have used ElementTree but it feels like overkill here
63 print('<?xml version="1.0" encoding="utf-8"?>')
64 print("<testsuites>")
65 print(
66 '<testsuite name="distributed" '
67 f'errors="{cnt["ERROR"]}" failures="{cnt["FAILED"] + cnt[None]}" '
68 f'skipped="{cnt["SKIPPED"] + cnt["XFAIL"]}" tests="{sum(cnt.values())}" '
69 f'time="0.0" timestamp="{timestamp}" hostname="">'
70 )
71
72 for clsname, tname, outcomes in rows:
73 print(f'<testcase classname="{clsname}" name="{tname}" time="0.0"', end="")
74 if outcomes == {"PASSED"}:
75 print(" />")
76 elif outcomes == {"FAILED"}:
77 print('><failure message=""></failure></testcase>')
78 elif outcomes == {None}:
79 print('><failure message="pytest-timeout exceeded"></failure></testcase>')
80 elif outcomes == {"ERROR"}:
81 print('><error message="failed on setup"></error></testcase>')
82 elif outcomes == {"PASSED", "ERROR"}:
83 print('><error message="failed on teardown"></error></testcase>')
84 elif outcomes == {"FAILED", "ERROR"}:
85 print(
86 '><failure message=""></failure></testcase>\n'
87 f'<testcase classname="{clsname}" name="{tname}" time="0.0">'
88 '<error message="failed on teardown"></error></testcase>'
89 )
90 elif outcomes == {"SKIPPED"}:
91 print('><skipped type="pytest.skip" message="skip"></skipped></testcase>')
92 elif outcomes == {"XFAIL"}:
93 print('><skipped type="pytest.xfail" message="xfail"></skipped></testcase>')
94 else: # pragma: nocover
95 # This should be unreachable. We would normally raise ValueError, except
96 # that a crash in this script would be pretty much invisible.
97 print(
98 f' />\n<testcase classname="parse_stdout" name="build_xml" time="0.0">'
99 f'><failure message="Unexpected {outcomes=}"></failure></testcase>'
100 )
101
102 print("</testsuite>")
103 print("</testsuites>")
104
105
106 def main() -> None: # pragma: nocover
107 build_xml(parse_rows(sys.stdin))
108
109
110 if __name__ == "__main__":
111 main() # pragma: nocover
112
[end of continuous_integration/scripts/parse_stdout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/continuous_integration/scripts/parse_stdout.py b/continuous_integration/scripts/parse_stdout.py
--- a/continuous_integration/scripts/parse_stdout.py
+++ b/continuous_integration/scripts/parse_stdout.py
@@ -3,6 +3,7 @@
"""
from __future__ import annotations
+import html
import re
import sys
from collections import Counter, defaultdict
@@ -70,6 +71,8 @@
)
for clsname, tname, outcomes in rows:
+ clsname = html.escape(clsname)
+ tname = html.escape(tname)
print(f'<testcase classname="{clsname}" name="{tname}" time="0.0"', end="")
if outcomes == {"PASSED"}:
print(" />")
| {"golden_diff": "diff --git a/continuous_integration/scripts/parse_stdout.py b/continuous_integration/scripts/parse_stdout.py\n--- a/continuous_integration/scripts/parse_stdout.py\n+++ b/continuous_integration/scripts/parse_stdout.py\n@@ -3,6 +3,7 @@\n \"\"\"\n from __future__ import annotations\n \n+import html\n import re\n import sys\n from collections import Counter, defaultdict\n@@ -70,6 +71,8 @@\n )\n \n for clsname, tname, outcomes in rows:\n+ clsname = html.escape(clsname)\n+ tname = html.escape(tname)\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n", "issue": "parse_stdout.py produces malformed Junit reports\nparse_stdout.py has recently stopped working.\r\nThis causes Windows timeouts to be reported as a white box instead of a red box in https://dask.org/distributed/test_report.html.\r\n\r\nhttps://github.com/dask/distributed/actions/runs/2293197167\r\n> Publish test results:\u00a0artifacts/windows-latest-3.10-notci1/pytest.xml#L976Error processing result file: not well-formed (invalid token): line 976, column 93\r\n\n", "before_files": [{"content": "\"\"\"On Windows, pytest-timeout kills off the whole test suite, leaving no junit report\nbehind. Parse the stdout of pytest to generate one.\n\"\"\"\nfrom __future__ import annotations\n\nimport re\nimport sys\nfrom collections import Counter, defaultdict\nfrom collections.abc import Iterable\nfrom datetime import datetime\n\nOUTCOMES = {\n \"PASSED\",\n \"FAILED\",\n # Test timeout. Marked as a variant of FAILED in the junit report\n None,\n # Setup failed or teardown failed.\n # In the latter case, if the test also failed, show both a FAILED and an ERROR line.\n \"ERROR\",\n # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()\n \"SKIPPED\",\n # Reported as a variant of SKIPPED in the junit report\n \"XFAIL\",\n # These appear respectively before and after another status. 
Ignore.\n \"RERUN\",\n \"LEAKED\",\n}\n\n\ndef parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:\n match = re.compile(\n r\"(distributed/.*test.*)::([^ ]*)\"\n r\"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$\"\n )\n\n out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)\n\n for row in rows:\n m = match.match(row)\n if not m:\n continue\n\n fname = m.group(1)\n clsname = fname.replace(\"/\", \".\").replace(\".py\", \"\").replace(\"::\", \".\")\n\n tname = m.group(2).strip()\n if m.group(4) and \"]\" in m.group(4):\n tname += \" \" + m.group(4).split(\"]\")[0] + \"]\"\n\n outcome = m.group(5)\n assert outcome in OUTCOMES\n if outcome not in {\"RERUN\", \"LEAKED\"}:\n out[clsname, tname].add(outcome)\n\n return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]\n\n\ndef build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:\n cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n # We could have used ElementTree but it feels like overkill here\n print('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n print(\"<testsuites>\")\n print(\n '<testsuite name=\"distributed\" '\n f'errors=\"{cnt[\"ERROR\"]}\" failures=\"{cnt[\"FAILED\"] + cnt[None]}\" '\n f'skipped=\"{cnt[\"SKIPPED\"] + cnt[\"XFAIL\"]}\" tests=\"{sum(cnt.values())}\" '\n f'time=\"0.0\" timestamp=\"{timestamp}\" hostname=\"\">'\n )\n\n for clsname, tname, outcomes in rows:\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n elif outcomes == {\"FAILED\"}:\n print('><failure message=\"\"></failure></testcase>')\n elif outcomes == {None}:\n print('><failure message=\"pytest-timeout exceeded\"></failure></testcase>')\n elif outcomes == {\"ERROR\"}:\n print('><error message=\"failed on setup\"></error></testcase>')\n elif outcomes == {\"PASSED\", \"ERROR\"}:\n print('><error message=\"failed on teardown\"></error></testcase>')\n elif outcomes == {\"FAILED\", \"ERROR\"}:\n print(\n '><failure message=\"\"></failure></testcase>\\n'\n f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\">'\n '<error message=\"failed on teardown\"></error></testcase>'\n )\n elif outcomes == {\"SKIPPED\"}:\n print('><skipped type=\"pytest.skip\" message=\"skip\"></skipped></testcase>')\n elif outcomes == {\"XFAIL\"}:\n print('><skipped type=\"pytest.xfail\" message=\"xfail\"></skipped></testcase>')\n else: # pragma: nocover\n # This should be unreachable. We would normally raise ValueError, except\n # that a crash in this script would be pretty much invisible.\n print(\n f' />\\n<testcase classname=\"parse_stdout\" name=\"build_xml\" time=\"0.0\">'\n f'><failure message=\"Unexpected {outcomes=}\"></failure></testcase>'\n )\n\n print(\"</testsuite>\")\n print(\"</testsuites>\")\n\n\ndef main() -> None: # pragma: nocover\n build_xml(parse_rows(sys.stdin))\n\n\nif __name__ == \"__main__\":\n main() # pragma: nocover\n", "path": "continuous_integration/scripts/parse_stdout.py"}]} | 1,959 | 166 |
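To make the effect of the `html.escape` change concrete, a standard-library-only sketch; the parametrized test id below is invented, but ids like it are what put `<` and `"` into the XML attributes of the hand-built junit report:

```python
import html

# Hypothetical parametrized test id with characters that are illegal inside
# an XML attribute value.
tname = 'test_worker[scheduler="<Nanny on tcp://127.0.0.1>"]'

# Before the fix: the raw id is interpolated and the report is not well-formed.
print(f'<testcase classname="distributed.tests" name="{tname}" time="0.0" />')

# After the fix: html.escape neutralises &, <, > and both kinds of quotes.
print(f'<testcase classname="distributed.tests" name="{html.escape(tname)}" time="0.0" />')
```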
gh_patches_debug_9502 | rasdani/github-patches | git_diff | redis__redis-py-2112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support CASESENSITIVE tag in Tag Field
link: https://oss.redis.com/redisearch/Commands/#ftcreate
</issue>
<code>
[start of redis/commands/search/field.py]
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108
109 def __init__(self, name: str, separator: str = ",", **kwargs):
110 Field.__init__(
111 self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs
112 )
113
114
115 class VectorField(Field):
116 """
117 Allows vector similarity queries against the value in this attribute.
118 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
119 """
120
121 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
122 """
123 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
124 although it's also a Field.
125
126 ``name`` is the name of the field.
127
128 ``algorithm`` can be "FLAT" or "HNSW".
129
130 ``attributes`` each algorithm can have specific attributes. Some of them
131 are mandatory and some of them are optional. See
132 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
133 for more information.
134 """
135 sort = kwargs.get("sortable", False)
136 noindex = kwargs.get("no_index", False)
137
138 if sort or noindex:
139 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
140
141 if algorithm.upper() not in ["FLAT", "HNSW"]:
142 raise DataError(
143 "Realtime vector indexing supporting 2 Indexing Methods:"
144 "'FLAT' and 'HNSW'."
145 )
146
147 attr_li = []
148
149 for key, value in attributes.items():
150 attr_li.extend([key, value])
151
152 Field.__init__(
153 self,
154 name,
155 args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],
156 **kwargs,
157 )
158
[end of redis/commands/search/field.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -105,11 +105,20 @@
"""
SEPARATOR = "SEPARATOR"
+ CASESENSITIVE = "CASESENSITIVE"
- def __init__(self, name: str, separator: str = ",", **kwargs):
- Field.__init__(
- self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs
- )
+ def __init__(
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ **kwargs,
+ ):
+ args = [Field.TAG, self.SEPARATOR, separator]
+ if case_sensitive:
+ args.append(self.CASESENSITIVE)
+
+ Field.__init__(self, name, args=args, **kwargs)
class VectorField(Field):
| {"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -105,11 +105,20 @@\n \"\"\"\n \n SEPARATOR = \"SEPARATOR\"\n+ CASESENSITIVE = \"CASESENSITIVE\"\n \n- def __init__(self, name: str, separator: str = \",\", **kwargs):\n- Field.__init__(\n- self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs\n- )\n+ def __init__(\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ **kwargs,\n+ ):\n+ args = [Field.TAG, self.SEPARATOR, separator]\n+ if case_sensitive:\n+ args.append(self.CASESENSITIVE)\n+\n+ Field.__init__(self, name, args=args, **kwargs)\n \n \n class VectorField(Field):\n", "issue": "Support CASESENSITIVE tag in Tag Field \nlink: https://oss.redis.com/redisearch/Commands/#ftcreate\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n\n def __init__(self, name: str, separator: str = \",\", **kwargs):\n Field.__init__(\n self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs\n )\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n 
Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self,\n name,\n args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],\n **kwargs,\n )\n", "path": "redis/commands/search/field.py"}]} | 1,959 | 231 |
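A short sketch of what the patched `TagField` accepts once the change above is applied; the schema is illustrative, and `redis_args()` is the helper already defined in the file, used only to show the arguments that would be sent to FT.CREATE:

```python
from redis.commands.search.field import TagField, TextField

# Illustrative schema: case_sensitive=True appends CASESENSITIVE after the patch.
schema = (
    TextField("title", weight=5.0),
    TagField("category", separator=";", case_sensitive=True),
)

for field in schema:
    print(field.redis_args())
# ['title', 'TEXT', 'WEIGHT', 5.0]
# ['category', 'TAG', 'SEPARATOR', ';', 'CASESENSITIVE']
```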
gh_patches_debug_61829 | rasdani/github-patches | git_diff | pulp__pulpcore-4010 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
</issue>
<code>
[start of pulpcore/app/viewsets/replica.py]
1 """
2 ViewSet for replicating repositories and distributions from an upstream Pulp
3 """
4 from django.conf import settings
5 from drf_spectacular.utils import extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8
9 from pulpcore.app.models import TaskGroup, UpstreamPulp
10 from pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer
11 from pulpcore.app.viewsets import NamedModelViewSet
12 from pulpcore.app.response import TaskGroupOperationResponse
13 from pulpcore.app.tasks import replicate_distributions
14 from pulpcore.tasking.tasks import dispatch
15
16
17 class UpstreamPulpViewSet(
18 NamedModelViewSet,
19 mixins.CreateModelMixin,
20 mixins.RetrieveModelMixin,
21 mixins.ListModelMixin,
22 mixins.DestroyModelMixin,
23 mixins.UpdateModelMixin,
24 ):
25 """API for configuring an upstream Pulp to replicate. This API is provided as a tech preview."""
26
27 queryset = UpstreamPulp.objects.all()
28 endpoint_name = "upstream-pulps"
29 serializer_class = UpstreamPulpSerializer
30 ordering = "-pulp_created"
31
32 @extend_schema(
33 summary="Replicate",
34 description="Trigger an asynchronous repository replication task group. This API is "
35 "provided as a tech preview.",
36 responses={202: AsyncOperationResponseSerializer},
37 )
38 @action(detail=True, methods=["post"])
39 def replicate(self, request, pk):
40 """
41 Triggers an asynchronous repository replication operation.
42 """
43 server = UpstreamPulp.objects.get(pk=pk)
44 task_group = TaskGroup.objects.create(description=f"Replication of {server.name}")
45
46 uri = "/api/v3/servers/"
47 if settings.DOMAIN_ENABLED:
48 uri = f"/{request.domain.name}{uri}"
49
50 dispatch(
51 replicate_distributions,
52 exclusive_resources=[uri],
53 kwargs={"server_pk": pk},
54 task_group=task_group,
55 )
56
57 return TaskGroupOperationResponse(task_group, request)
58
[end of pulpcore/app/viewsets/replica.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py\n--- a/pulpcore/app/viewsets/replica.py\n+++ b/pulpcore/app/viewsets/replica.py\n@@ -33,6 +33,7 @@\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n+ request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n", "issue": "RESTAPI document fix for Upstream Pulp Replication API\n**Version**\r\nPulp installed through the Python modules.\r\n\"core:3.28.0\"\r\n\"certguard:3.28.0\"\r\n\"file:3.28.0\"\r\n\"python:3.28.0\"\r\n\"rpm:3.28.0\"\r\n\r\n**Describe the bug**\r\nWhy the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate\" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request \"https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/\"?\r\n\r\n**To Reproduce**\r\nNone.\r\n\r\n**Expected behavior**\r\nA fix is required in the REST API document.\r\n\r\n**Additional context**\r\nCreate Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create\r\nUpstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}]} | 1,329 | 123 |
gh_patches_debug_10948 | rasdani/github-patches | git_diff | dmlc__dgl-1305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'dgl.nn' has no attribute 'pytorch'
## 🐛 Bug
When I try to use some of the predefined modules of dgl by the following code, I encounter an error: AttributeError: module 'dgl.nn' has no attribute 'pytorch'.
Similar problems also happen to other backends, including TensorFlow and MXNet.
## To Reproduce
Steps to reproduce the behavior:
```python
import dgl.nn
# or import dgl
c = dgl.nn.pytorch.conv.GraphConv(10,2)
```
## Expected behavior
The code should generate a GraphConv layer without any error.
## Environment
- DGL Version (e.g., 1.0): 0.4.2
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): Pytorch 1.4.0
 - OS (e.g., Linux): Irrelevant
- How you installed DGL (`conda`, `pip`, source): conda
- Build command you used (if compiling from source):
- Python version: 3.7
 - CUDA/cuDNN version (if applicable): Irrelevant
 - GPU models and configuration (e.g. V100): Irrelevant
- Any other relevant information:
## Additional context
I read the source code and **found the reason and solution** to this problem.
### Reason:
The `__init__.py` file of `dgl/nn` is empty. Therefore, if I import dgl or dgl.nn, Python cannot automatically find the files in its subdirectories.
I verified it by the following code:
```python
import dgl.nn.pytorch
c = dgl.nn.pytorch.conv.GraphConv(10,2)
```
It works fine.
### Solution:
Add 3 lines of code like `from . import pytorch` into file `dgl/nn/__init__.py` for PyTorch and the other 2 backends.
It is better to automatically detect the backend library and import the correct sub-directory.
</issue>
<code>
[start of python/dgl/__init__.py]
1 """DGL root package."""
2 # Windows compatibility
3 # This initializes Winsock and performs cleanup at termination as required
4 import socket
5
6 # Need to ensure that the backend framework is imported before load dgl libs,
7 # otherwise weird cuda problem happens
8 from .backend import load_backend
9
10 from . import function
11 from . import nn
12 from . import contrib
13 from . import container
14 from . import random
15 from . import sampling
16
17 from ._ffi.runtime_ctypes import TypeCode
18 from ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs
19 from ._ffi.base import DGLError, __version__
20
21 from .base import ALL, NTYPE, NID, ETYPE, EID
22 from .readout import *
23 from .batched_heterograph import *
24 from .convert import *
25 from .graph import DGLGraph, batch, unbatch
26 from .generators import *
27 from .heterograph import DGLHeteroGraph
28 from .nodeflow import *
29 from .traversal import *
30 from .transform import *
31 from .propagate import *
32 from .udf import NodeBatch, EdgeBatch
33
[end of python/dgl/__init__.py]
[start of python/dgl/nn/__init__.py]
1 """Package for neural network common components."""
2
[end of python/dgl/nn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/dgl/__init__.py b/python/dgl/__init__.py
--- a/python/dgl/__init__.py
+++ b/python/dgl/__init__.py
@@ -8,7 +8,6 @@
from .backend import load_backend
from . import function
-from . import nn
from . import contrib
from . import container
from . import random
diff --git a/python/dgl/nn/__init__.py b/python/dgl/nn/__init__.py
--- a/python/dgl/nn/__init__.py
+++ b/python/dgl/nn/__init__.py
@@ -1 +1,12 @@
"""Package for neural network common components."""
+import importlib
+import sys
+from ..backend import backend_name
+
+def _load_backend(mod_name):
+ mod = importlib.import_module('.%s' % mod_name, __name__)
+ thismod = sys.modules[__name__]
+ for api, obj in mod.__dict__.items():
+ setattr(thismod, api, obj)
+
+_load_backend(backend_name)
| {"golden_diff": "diff --git a/python/dgl/__init__.py b/python/dgl/__init__.py\n--- a/python/dgl/__init__.py\n+++ b/python/dgl/__init__.py\n@@ -8,7 +8,6 @@\n from .backend import load_backend\n \n from . import function\n-from . import nn\n from . import contrib\n from . import container\n from . import random\ndiff --git a/python/dgl/nn/__init__.py b/python/dgl/nn/__init__.py\n--- a/python/dgl/nn/__init__.py\n+++ b/python/dgl/nn/__init__.py\n@@ -1 +1,12 @@\n \"\"\"Package for neural network common components.\"\"\"\n+import importlib\n+import sys\n+from ..backend import backend_name\n+\n+def _load_backend(mod_name):\n+ mod = importlib.import_module('.%s' % mod_name, __name__)\n+ thismod = sys.modules[__name__]\n+ for api, obj in mod.__dict__.items():\n+ setattr(thismod, api, obj)\n+\n+_load_backend(backend_name)\n", "issue": "AttributeError: module 'dgl.nn' has no attribute 'pytorch'\n## \ud83d\udc1b Bug\r\n\r\nWhen I try to use some of the predefined modules of dgl by the following code, I encounter an error: AttributeError: module 'dgl.nn' has no attribute 'pytorch'.\r\n\r\nSimilar problems also happen to other backends, including TensorFlow and MXNet.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```python\r\nimport dgl.nn\r\n# or import dgl\r\nc = dgl.nn.pytorch.conv.GraphConv(10,2)\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe code should generate a GraphConv layer without any error.\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.4.2\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): Pytorch 1.4.0\r\n - OS (e.g., Linux): Irrelelevent \r\n - How you installed DGL (`conda`, `pip`, source): conda\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version (if applicable): Irrelelevent \r\n - GPU models and configuration (e.g. V100): Irrelelevent \r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\nI read the source code and **found the reason and solution** to this problem.\r\n\r\n### Reason:\r\n\r\nThe `__init__.py` file of `dgl/nn` is empty. Therefore, if i import dgl or dgl.nn, python cannot automatically find files of its sub-directories.\r\n\r\nI verified it by the following code:\r\n```python\r\nimport dgl.nn.python\r\nc = dgl.nn.pytorch.conv.GraphConv(10,2)\r\n```\r\nIt works fine.\r\n\r\n### Solution:\r\n\r\nAdd 3 lines of code like `from . import pytorch` into file `dgl/nn/__init__.py` for PyTorch and the other 2 backends.\r\nIt is better to automatically detect the backend library and import the correct sub-directory.\n", "before_files": [{"content": "\"\"\"DGL root package.\"\"\"\n# Windows compatibility\n# This initializes Winsock and performs cleanup at termination as required\nimport socket\n\n# Need to ensure that the backend framework is imported before load dgl libs,\n# otherwise weird cuda problem happens\nfrom .backend import load_backend\n\nfrom . import function\nfrom . import nn\nfrom . import contrib\nfrom . import container\nfrom . import random\nfrom . 
import sampling\n\nfrom ._ffi.runtime_ctypes import TypeCode\nfrom ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs\nfrom ._ffi.base import DGLError, __version__\n\nfrom .base import ALL, NTYPE, NID, ETYPE, EID\nfrom .readout import *\nfrom .batched_heterograph import *\nfrom .convert import *\nfrom .graph import DGLGraph, batch, unbatch\nfrom .generators import *\nfrom .heterograph import DGLHeteroGraph\nfrom .nodeflow import *\nfrom .traversal import *\nfrom .transform import *\nfrom .propagate import *\nfrom .udf import NodeBatch, EdgeBatch\n", "path": "python/dgl/__init__.py"}, {"content": "\"\"\"Package for neural network common components.\"\"\"\n", "path": "python/dgl/nn/__init__.py"}]} | 1,318 | 235 |
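What the loader in this diff means for user code, assuming the PyTorch backend is active: the explicit submodule path keeps working, and the names it exports should also become reachable directly on `dgl.nn` (the first construction below is the exact call from the issue):

```python
import dgl.nn
import dgl.nn.pytorch

# Explicit backend path, as in the issue.
conv_a = dgl.nn.pytorch.conv.GraphConv(10, 2)

# After the fix the backend's public names are copied onto dgl.nn itself.
conv_b = dgl.nn.GraphConv(10, 2)

print(type(conv_a), type(conv_b))
```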
gh_patches_debug_14394 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Annotation answers get parsed incorrectly in csv export
For annotation type answers, the csv export looks like this currently:

It appears the annotation JSON gets parsed as part of the export. We should probably add some escaping.
</issue>
<code>
[start of app/grandchallenge/core/renderers.py]
1 from rest_framework_csv.renderers import CSVRenderer
2
3
4 class PaginatedCSVRenderer(CSVRenderer):
5 results_field = "results"
6
7 def render(self, data, *args, **kwargs):
8 if self.results_field in data:
9 data = data[self.results_field]
10
11 return super().render(data, *args, **kwargs)
12
[end of app/grandchallenge/core/renderers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py
--- a/app/grandchallenge/core/renderers.py
+++ b/app/grandchallenge/core/renderers.py
@@ -1,3 +1,5 @@
+import json
+
from rest_framework_csv.renderers import CSVRenderer
@@ -9,3 +11,19 @@
data = data[self.results_field]
return super().render(data, *args, **kwargs)
+
+ def flatten_data(self, data):
+ """
+ Create a dictionary that is 1 level deep, with nested values serialized
+ as json. This means that the header rows are now consistent.
+ """
+ for row in data:
+ flat_row = {k: self._flatten_value(v) for k, v in row.items()}
+ yield flat_row
+
+ @staticmethod
+ def _flatten_value(value):
+ if isinstance(value, (dict, list)):
+ return json.dumps(value)
+ else:
+ return value
| {"golden_diff": "diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py\n--- a/app/grandchallenge/core/renderers.py\n+++ b/app/grandchallenge/core/renderers.py\n@@ -1,3 +1,5 @@\n+import json\n+\n from rest_framework_csv.renderers import CSVRenderer\n \n \n@@ -9,3 +11,19 @@\n data = data[self.results_field]\n \n return super().render(data, *args, **kwargs)\n+\n+ def flatten_data(self, data):\n+ \"\"\"\n+ Create a dictionary that is 1 level deep, with nested values serialized\n+ as json. This means that the header rows are now consistent.\n+ \"\"\"\n+ for row in data:\n+ flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n+ yield flat_row\n+\n+ @staticmethod\n+ def _flatten_value(value):\n+ if isinstance(value, (dict, list)):\n+ return json.dumps(value)\n+ else:\n+ return value\n", "issue": "Annotation answers get parsed incorrectly in csv export\nFor annotation type answers, the csv export looks like this currently:\r\n\r\n\r\nIt appears the annotation json gets part as part of the export. We should probably add some escaping.\n", "before_files": [{"content": "from rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n", "path": "app/grandchallenge/core/renderers.py"}]} | 754 | 229 |
gh_patches_debug_14311 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Monitoring: where is CallOptions on monitoring API example?
[OS] macOS Sierra 10.12.6
[Versions]
- Python 3.6.1
```
google-api-core==1.2.1
google-api-python-client==1.7.3
google-auth==1.5.0
google-auth-httplib2==0.0.3
google-cloud-monitoring==0.30.0
googleapis-common-protos==1.5.3
```
----
## CallOptions class was not found!
Hi. I'm new to GCP and Stackdriver. I wanted to use Google Kubernetes Engine and its autoscaling on custom metrics, which requires exporting the metrics to Stackdriver Monitoring, so that is what I am trying to do.
But after installing the above-mentioned libraries, the example code in the monitoring API README failed. The pitfall is that `CallOptions` was not found, so I searched for it in this repository and some other repositories.
And finally, I couldn't find it...
`CallOptions` is defined in gax-python, but that package is deprecated and has moved to google-api-core. So I guess that either the dependency is currently broken or some of the examples are out of date.
Please tell me how to handle this problem.
_Thank you for the great package and platform._
</issue>
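On the CallOptions question itself: in google-api-core the per-call settings that gax `CallOptions` used to carry are passed as `retry` and `timeout` keyword arguments on the client methods, which is exactly what the Vision helper in the code section below already accepts. A hedged sketch of that calling convention; the image URI comes from the helper's docstring, the `Retry` arguments are assumed from google-api-core, and running this needs real credentials. The generated Monitoring clients should take the same keyword arguments:

```python
from google.api_core.retry import Retry
from google.cloud import vision_v1

client = vision_v1.ImageAnnotatorClient()
response = client.annotate_image(
    {"image": {"source": {"image_uri": "https://foo.com/image.jpg"}}},
    retry=Retry(deadline=30),   # replaces the retry part of gax CallOptions
    timeout=10.0,               # per-call timeout in seconds
)
print(response)
```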
<code>
[start of vision/google/cloud/vision_helpers/__init__.py]
1 # Copyright 2017, Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 import io
17
18 from google.api_core import protobuf_helpers as protobuf
19
20
21 class VisionHelpers(object):
22 """A set of convenience methods to make the Vision GAPIC easier to use.
23
24 This class should be considered abstract; it is used as a superclass
25 in a multiple-inheritance construction alongside the applicable GAPIC.
26 See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.
27 """
28 def annotate_image(self, request, retry=None, timeout=None):
29 """Run image detection and annotation for an image.
30
31 Example:
32 >>> from google.cloud.vision_v1 import ImageAnnotatorClient
33 >>> client = ImageAnnotatorClient()
34 >>> request = {
35 ... 'image': {
36 ... 'source': {'image_uri': 'https://foo.com/image.jpg'},
37 ... },
38 ... }
39 >>> response = client.annotate_image(request)
40
41 Args:
42 request (:class:`~.vision_v1.types.AnnotateImageRequest`)
43 options (:class:`google.gax.CallOptions`): Overrides the default
44 settings for this call, e.g, timeout, retries, etc.
45
46 Returns:
47 :class:`~.vision_v1.types.AnnotateImageResponse` The API response.
48 """
49 # If the image is a file handler, set the content.
50 image = protobuf.get(request, 'image')
51 if hasattr(image, 'read'):
52 img_bytes = image.read()
53 protobuf.set(request, 'image', {})
54 protobuf.set(request, 'image.content', img_bytes)
55 image = protobuf.get(request, 'image')
56
57 # If a filename is provided, read the file.
58 filename = protobuf.get(image, 'source.filename', default=None)
59 if filename:
60 with io.open(filename, 'rb') as img_file:
61 protobuf.set(request, 'image.content', img_file.read())
62 protobuf.set(request, 'image.source', None)
63
64 # This method allows features not to be specified, and you get all
65 # of them.
66 protobuf.setdefault(request, 'features', self._get_all_features())
67 r = self.batch_annotate_images([request], retry=retry, timeout=timeout)
68 return r.responses[0]
69
70 def _get_all_features(self):
71 """Return a list of all features.
72
73 Returns:
74 list: A list of all available features.
75 """
76 return [
77 {'type': feature}
78 for feature in self.enums.Feature.Type if feature != 0]
79
[end of vision/google/cloud/vision_helpers/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vision/google/cloud/vision_helpers/__init__.py b/vision/google/cloud/vision_helpers/__init__.py
--- a/vision/google/cloud/vision_helpers/__init__.py
+++ b/vision/google/cloud/vision_helpers/__init__.py
@@ -40,8 +40,12 @@
Args:
request (:class:`~.vision_v1.types.AnnotateImageRequest`)
- options (:class:`google.gax.CallOptions`): Overrides the default
- settings for this call, e.g, timeout, retries, etc.
+ retry (Optional[google.api_core.retry.Retry]): A retry object used
+ to retry requests. If ``None`` is specified, requests will not
+ be retried.
+ timeout (Optional[float]): The amount of time, in seconds, to wait
+ for the request to complete. Note that if ``retry`` is
+ specified, the timeout applies to each individual attempt.
Returns:
:class:`~.vision_v1.types.AnnotateImageResponse` The API response.
| {"golden_diff": "diff --git a/vision/google/cloud/vision_helpers/__init__.py b/vision/google/cloud/vision_helpers/__init__.py\n--- a/vision/google/cloud/vision_helpers/__init__.py\n+++ b/vision/google/cloud/vision_helpers/__init__.py\n@@ -40,8 +40,12 @@\n \n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n- options (:class:`google.gax.CallOptions`): Overrides the default\n- settings for this call, e.g, timeout, retries, etc.\n+ retry (Optional[google.api_core.retry.Retry]): A retry object used\n+ to retry requests. If ``None`` is specified, requests will not\n+ be retried.\n+ timeout (Optional[float]): The amount of time, in seconds, to wait\n+ for the request to complete. Note that if ``retry`` is\n+ specified, the timeout applies to each individual attempt.\n \n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n", "issue": "Monitoring: where is CallOptions on monitoring API example?\n[OS] macOS Sierra 10.12.6\r\n[Versions]\r\n\r\n- Python 3.6.1\r\n\r\n```\r\ngoogle-api-core==1.2.1\r\ngoogle-api-python-client==1.7.3\r\ngoogle-auth==1.5.0\r\ngoogle-auth-httplib2==0.0.3\r\ngoogle-cloud-monitoring==0.30.0\r\ngoogleapis-common-protos==1.5.3\r\n```\r\n\r\n----\r\n\r\n## CallOptions class was not found!\r\n\r\nHi. I'm new to GCP and Stackdriver. I wanted to use Google Kubernetes Engine and its auto scaling by custom metrics. Then, it is required to export the metrics to Stackdriver Monitoring, so I am trying to do it.\r\n\r\nBut, After installing above-mentioned libraries, the example code on monitoring API README document failed. The pit hole is that `CallOptions` was not found, thus I've searched it in this repository and some other repositories.\r\n\r\nAnd finally, I couldn't find it...\r\n\r\n`CallOptions` is defined in gax.python, but the package is currently deprecated and moved to google-api-core. So I guess that also the dependency is currently corrupted or the some examples are out-of-date.\r\n\r\nPlease tell me how handle this problem.\r\n\r\n_Thank you for the great package and platform._\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport io\n\nfrom google.api_core import protobuf_helpers as protobuf\n\n\nclass VisionHelpers(object):\n \"\"\"A set of convenience methods to make the Vision GAPIC easier to use.\n\n This class should be considered abstract; it is used as a superclass\n in a multiple-inheritance construction alongside the applicable GAPIC.\n See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.\n \"\"\"\n def annotate_image(self, request, retry=None, timeout=None):\n \"\"\"Run image detection and annotation for an image.\n\n Example:\n >>> from google.cloud.vision_v1 import ImageAnnotatorClient\n >>> client = ImageAnnotatorClient()\n >>> request = {\n ... 'image': {\n ... 'source': {'image_uri': 'https://foo.com/image.jpg'},\n ... },\n ... 
}\n >>> response = client.annotate_image(request)\n\n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n options (:class:`google.gax.CallOptions`): Overrides the default\n settings for this call, e.g, timeout, retries, etc.\n\n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n \"\"\"\n # If the image is a file handler, set the content.\n image = protobuf.get(request, 'image')\n if hasattr(image, 'read'):\n img_bytes = image.read()\n protobuf.set(request, 'image', {})\n protobuf.set(request, 'image.content', img_bytes)\n image = protobuf.get(request, 'image')\n\n # If a filename is provided, read the file.\n filename = protobuf.get(image, 'source.filename', default=None)\n if filename:\n with io.open(filename, 'rb') as img_file:\n protobuf.set(request, 'image.content', img_file.read())\n protobuf.set(request, 'image.source', None)\n\n # This method allows features not to be specified, and you get all\n # of them.\n protobuf.setdefault(request, 'features', self._get_all_features())\n r = self.batch_annotate_images([request], retry=retry, timeout=timeout)\n return r.responses[0]\n\n def _get_all_features(self):\n \"\"\"Return a list of all features.\n\n Returns:\n list: A list of all available features.\n \"\"\"\n return [\n {'type': feature}\n for feature in self.enums.Feature.Type if feature != 0]\n", "path": "vision/google/cloud/vision_helpers/__init__.py"}]} | 1,644 | 234 |
gh_patches_debug_21937 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file
</issue>
<code>
[start of maths/miller_rabin.py]
1 import random
2
3 from .binary_exp_mod import bin_exp_mod
4
5
6 # This is a probabilistic check to test primality, useful for big numbers!
7 # if it's a prime, it will return true
8 # if it's not a prime, the chance of it returning true is at most 1/4**prec
9 def is_prime_big(n, prec=1000):
10 """
11 >>> from maths.prime_check import is_prime
12 >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
13 >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
14 True
15 """
16 if n < 2:
17 return False
18
19 if n % 2 == 0:
20 return n == 2
21
22 # this means n is odd
23 d = n - 1
24 exp = 0
25 while d % 2 == 0:
26 d /= 2
27 exp += 1
28
29 # n - 1=d*(2**exp)
30 count = 0
31 while count < prec:
32 a = random.randint(2, n - 1)
33 b = bin_exp_mod(a, d, n)
34 if b != 1:
35 flag = True
36 for _ in range(exp):
37 if b == n - 1:
38 flag = False
39 break
40 b = b * b
41 b %= n
42 if flag:
43 return False
44 count += 1
45 return True
46
47
48 if __name__ == "__main__":
49 n = abs(int(input("Enter bound : ").strip()))
50 print("Here's the list of primes:")
51 print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
52
[end of maths/miller_rabin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py
deleted file mode 100644
--- a/maths/miller_rabin.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import random
-
-from .binary_exp_mod import bin_exp_mod
-
-
-# This is a probabilistic check to test primality, useful for big numbers!
-# if it's a prime, it will return true
-# if it's not a prime, the chance of it returning true is at most 1/4**prec
-def is_prime_big(n, prec=1000):
- """
- >>> from maths.prime_check import is_prime
- >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
- >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
- True
- """
- if n < 2:
- return False
-
- if n % 2 == 0:
- return n == 2
-
- # this means n is odd
- d = n - 1
- exp = 0
- while d % 2 == 0:
- d /= 2
- exp += 1
-
- # n - 1=d*(2**exp)
- count = 0
- while count < prec:
- a = random.randint(2, n - 1)
- b = bin_exp_mod(a, d, n)
- if b != 1:
- flag = True
- for _ in range(exp):
- if b == n - 1:
- flag = False
- break
- b = b * b
- b %= n
- if flag:
- return False
- count += 1
- return True
-
-
-if __name__ == "__main__":
- n = abs(int(input("Enter bound : ").strip()))
- print("Here's the list of primes:")
- print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| {"golden_diff": "diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py\ndeleted file mode 100644\n--- a/maths/miller_rabin.py\n+++ /dev/null\n@@ -1,51 +0,0 @@\n-import random\n-\n-from .binary_exp_mod import bin_exp_mod\n-\n-\n-# This is a probabilistic check to test primality, useful for big numbers!\n-# if it's a prime, it will return true\n-# if it's not a prime, the chance of it returning true is at most 1/4**prec\n-def is_prime_big(n, prec=1000):\n- \"\"\"\n- >>> from maths.prime_check import is_prime\n- >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s\n- >>> all(is_prime_big(i) == is_prime(i) for i in range(256))\n- True\n- \"\"\"\n- if n < 2:\n- return False\n-\n- if n % 2 == 0:\n- return n == 2\n-\n- # this means n is odd\n- d = n - 1\n- exp = 0\n- while d % 2 == 0:\n- d /= 2\n- exp += 1\n-\n- # n - 1=d*(2**exp)\n- count = 0\n- while count < prec:\n- a = random.randint(2, n - 1)\n- b = bin_exp_mod(a, d, n)\n- if b != 1:\n- flag = True\n- for _ in range(exp):\n- if b == n - 1:\n- flag = False\n- break\n- b = b * b\n- b %= n\n- if flag:\n- return False\n- count += 1\n- return True\n-\n-\n-if __name__ == \"__main__\":\n- n = abs(int(input(\"Enter bound : \").strip()))\n- print(\"Here's the list of primes:\")\n- print(\", \".join(str(i) for i in range(n + 1) if is_prime_big(i)))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file\n", "before_files": [{"content": "import random\n\nfrom .binary_exp_mod import bin_exp_mod\n\n\n# This is a probabilistic check to test primality, useful for big numbers!\n# if it's a prime, it will return true\n# if it's not a prime, the chance of it returning true is at most 1/4**prec\ndef is_prime_big(n, prec=1000):\n \"\"\"\n >>> from maths.prime_check import is_prime\n >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s\n >>> all(is_prime_big(i) == is_prime(i) for i in range(256))\n True\n \"\"\"\n if n < 2:\n return False\n\n if n % 2 == 0:\n return n == 2\n\n # this means n is odd\n d = n - 1\n exp = 0\n while d % 2 == 0:\n d /= 2\n exp += 1\n\n # n - 1=d*(2**exp)\n count = 0\n while count < prec:\n a = random.randint(2, n - 1)\n b = bin_exp_mod(a, d, n)\n if b != 1:\n flag = True\n for _ in range(exp):\n if b == n - 1:\n flag = False\n break\n b = b * b\n b %= n\n if flag:\n return False\n count += 1\n return True\n\n\nif __name__ == \"__main__\":\n n = abs(int(input(\"Enter bound : \").strip()))\n print(\"Here's the list of primes:\")\n print(\", \".join(str(i) for i in range(n + 1) if is_prime_big(i)))\n", "path": "maths/miller_rabin.py"}]} | 1,077 | 498 |
gh_patches_debug_9138 | rasdani/github-patches | git_diff | keras-team__autokeras-277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cannot install autokeras because of package dependency confliction
### Bug Description
following package dependency is configured at setup.py
https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6
```
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow>=1.10.0', 'tqdm==4.25.0'],
```
When execute `pip install autokeras`, following error is appeared.
```
keras 2.2.2 has requirement keras-applications==1.0.4, but you'll have keras-applications 1.0.6 which is incompatible.
keras 2.2.2 has requirement keras-preprocessing==1.0.2, but you'll have keras-preprocessing 1.0.5 which is incompatible.
```
It is because that tensorflow==1.11.0 is installed first and
keras-applications >= 1.0.5 and keras-preprocessing > = 1.0.3 can installed with tensorflow==1.11.0.
On the other hand, keras==2.2.2's dependency versions are keras-applications==1.0.4 and keras-preprocessing==1.0.2.
tensorflow version should be defined as `tensorflow==1.10.0`at [setup.py L6](https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6).
```
# before
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow>=1.10.0', 'tqdm==4.25.0'],
# after
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow==1.10.0', 'tqdm==4.25.0'],
```
### Reproducing Steps
Step1: curl https://gist.githubusercontent.com/chie8842/b3b9f3ea2d886bbb5aa5c903b9e42ee3/raw/e94cc375ca1265c66d4517a25a748f1e13a3de9d/Dockerfile -o Dockerfile
Step2: docker build -t autokeras -f Dockerfile .
Step3: docker run -it --rm autokeras /bin/bash
Step4: sudo pip install autokeras
</issue>
<code>
[start of setup.py]
1 from distutils.core import setup
2
3 setup(
4 name='autokeras',
5 packages=['autokeras'], # this must be the same as the name above
6 install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
7 'tensorflow>=1.10.0', 'tqdm==4.25.0'],
8 version='0.2.18',
9 description='AutoML for deep learning',
10 author='Haifeng Jin',
11 author_email='[email protected]',
12 url='http://autokeras.com',
13 download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',
14 keywords=['automl'], # arbitrary keywords
15 classifiers=[]
16 )
17
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
name='autokeras',
packages=['autokeras'], # this must be the same as the name above
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
- 'tensorflow>=1.10.0', 'tqdm==4.25.0'],
+ 'tensorflow==1.10.0', 'tqdm==4.25.0'],
version='0.2.18',
description='AutoML for deep learning',
author='Haifeng Jin',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n- 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\n+ 'tensorflow==1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n", "issue": "cannot install autokeras because of package dependency confliction\n### Bug Description\r\nfollowing package dependency is configured at setup.py\r\nhttps://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6\r\n\r\n```\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\r\n```\r\n\r\nWhen execute `pip install autokeras`, following error is appeared.\r\n\r\n```\r\nkeras 2.2.2 has requirement keras-applications==1.0.4, but you'll have keras-applications 1.0.6 which is incompatible.\r\nkeras 2.2.2 has requirement keras-preprocessing==1.0.2, but you'll have keras-preprocessing 1.0.5 which is incompatible.\r\n```\r\n\r\nIt is because that tensorflow==1.11.0 is installed first and\r\nkeras-applications >= 1.0.5 and keras-preprocessing > = 1.0.3 can installed with tensorflow==1.11.0.\r\nOn the other hand, keras==2.2.2's dependency versions are keras-applications==1.0.4 and keras-preprocessing==1.0.2.\r\n\r\n tensorflow version should be defined as `tensorflow==1.10.0`at [setup.py L6](https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6).\r\n\r\n```\r\n# before\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\r\n\r\n# after\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow==1.10.0', 'tqdm==4.25.0'],\r\n```\r\n\r\n### Reproducing Steps\r\n\u00a0\r\nStep1: curl https://gist.githubusercontent.com/chie8842/b3b9f3ea2d886bbb5aa5c903b9e42ee3/raw/e94cc375ca1265c66d4517a25a748f1e13a3de9d/Dockerfile -o Dockerfile\r\nStep2: docker build -t autokeras -f Dockerfile .\r\nStep3: docker run -it --rm autokeras /bin/bash\r\nStep4: sudo pip install autokeras\n", "before_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',\n keywords=['automl'], # arbitrary keywords\n classifiers=[]\n)\n", "path": "setup.py"}]} | 1,418 | 187 |
gh_patches_debug_17732 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Import from Goodreads doesn't work correctly
**Describe the bug**
Import from goodreads csv imports only first line of csv and stops with 'success' status. If user tries to reimport same csv again importer takes the same first imported line yet again.
Broken import examples https://bookwyrm.social/import/775 https://bookwyrm.social/import/776
**Expected behavior**
Importer correctly imports all lines of csv or returns error message to user
</issue>
<code>
[start of bookwyrm/views/import_data.py]
1 """ import books from another app """
2 from io import TextIOWrapper
3
4 from django.contrib.auth.decorators import login_required
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.utils.translation import gettext_lazy as _
11 from django.views import View
12
13 from bookwyrm import forms, models
14 from bookwyrm.importers import (
15 Importer,
16 LibrarythingImporter,
17 GoodreadsImporter,
18 StorygraphImporter,
19 )
20 from bookwyrm.tasks import app
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Import(View):
25 """import view"""
26
27 def get(self, request):
28 """load import page"""
29 return TemplateResponse(
30 request,
31 "import.html",
32 {
33 "import_form": forms.ImportForm(),
34 "jobs": models.ImportJob.objects.filter(user=request.user).order_by(
35 "-created_date"
36 ),
37 },
38 )
39
40 def post(self, request):
41 """ingest a goodreads csv"""
42 form = forms.ImportForm(request.POST, request.FILES)
43 if form.is_valid():
44 include_reviews = request.POST.get("include_reviews") == "on"
45 privacy = request.POST.get("privacy")
46 source = request.POST.get("source")
47
48 importer = None
49 if source == "LibraryThing":
50 importer = LibrarythingImporter()
51 elif source == "Storygraph":
52 importer = StorygraphImporter()
53 else:
54 # Default : GoodReads
55 importer = GoodreadsImporter()
56
57 try:
58 job = importer.create_job(
59 request.user,
60 TextIOWrapper(
61 request.FILES["csv_file"], encoding=importer.encoding
62 ),
63 include_reviews,
64 privacy,
65 )
66 except (UnicodeDecodeError, ValueError, KeyError):
67 return HttpResponseBadRequest(_("Not a valid csv file"))
68
69 importer.start_import(job)
70
71 return redirect("/import/%d" % job.id)
72 return HttpResponseBadRequest()
73
74
75 @method_decorator(login_required, name="dispatch")
76 class ImportStatus(View):
77 """status of an existing import"""
78
79 def get(self, request, job_id):
80 """status of an import job"""
81 job = get_object_or_404(models.ImportJob, id=job_id)
82 if job.user != request.user:
83 raise PermissionDenied
84
85 try:
86 task = app.AsyncResult(job.task_id)
87 # triggers attribute error if the task won't load
88 task.status # pylint: disable=pointless-statement
89 except (ValueError, AttributeError):
90 task = None
91
92 items = job.items.order_by("index").all()
93 failed_items = [i for i in items if i.fail_reason]
94 items = [i for i in items if not i.fail_reason]
95 return TemplateResponse(
96 request,
97 "import_status.html",
98 {"job": job, "items": items, "failed_items": failed_items, "task": task},
99 )
100
101 def post(self, request, job_id):
102 """retry lines from an import"""
103 job = get_object_or_404(models.ImportJob, id=job_id)
104 items = []
105 for item in request.POST.getlist("import_item"):
106 items.append(get_object_or_404(models.ImportItem, id=item))
107
108 importer = Importer()
109 job = importer.create_retry_job(
110 request.user,
111 job,
112 items,
113 )
114 importer.start_import(job)
115 return redirect("/import/%d" % job.id)
116
[end of bookwyrm/views/import_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py
--- a/bookwyrm/views/import_data.py
+++ b/bookwyrm/views/import_data.py
@@ -28,7 +28,7 @@
"""load import page"""
return TemplateResponse(
request,
- "import.html",
+ "import/import.html",
{
"import_form": forms.ImportForm(),
"jobs": models.ImportJob.objects.filter(user=request.user).order_by(
@@ -94,7 +94,7 @@
items = [i for i in items if not i.fail_reason]
return TemplateResponse(
request,
- "import_status.html",
+ "import/import_status.html",
{"job": job, "items": items, "failed_items": failed_items, "task": task},
)
| {"golden_diff": "diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py\n--- a/bookwyrm/views/import_data.py\n+++ b/bookwyrm/views/import_data.py\n@@ -28,7 +28,7 @@\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n- \"import.html\",\n+ \"import/import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n@@ -94,7 +94,7 @@\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n- \"import_status.html\",\n+ \"import/import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n", "issue": "Import from Goodreads doesn't work correctly\n**Describe the bug**\r\n\r\nImport from goodreads csv imports only first line of csv and stops with 'success' status. If user tries to reimport same csv again importer takes the same first imported line yet again. \r\n\r\nBroken import examples https://bookwyrm.social/import/775 https://bookwyrm.social/import/776\r\n\r\n**Expected behavior**\r\nImporter correctly imports all lines of csv or returns error message to user\n", "before_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n\n try:\n task = app.AsyncResult(job.task_id)\n # triggers 
attribute error if the task won't load\n task.status # pylint: disable=pointless-statement\n except (ValueError, AttributeError):\n task = None\n\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}]} | 1,645 | 182 |
gh_patches_debug_35754 | rasdani/github-patches | git_diff | beetbox__beets-1595 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plexupdate: Doesn't work with libaries not named "Music"
I've named my music libaries `Music (New)` and `Music (Untagged)`. The plex update plugin should update the `Music (New)` section, but instead of updating at least both music libaries it doesn't update anything. If I change the library name from `Music (New)` to `Music` it works like a charm. This is specified on line 33 of the beets plugin. A config option to add libraries other than `Music` would make sense imo.
</issue>
<code>
[start of beetsplug/plexupdate.py]
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == 'Music':
34 return child.get('key')
35
36
37 def update_plex(host, port, token):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u''})
68
69 self.register_listener('database_change', self.listen_for_db_change)
70
71 def listen_for_db_change(self, lib, model):
72 """Listens for beets db change and register the update for the end"""
73 self.register_listener('cli_exit', self.update)
74
75 def update(self, lib):
76 """When the client exists try to send refresh request to Plex server.
77 """
78 self._log.info('Updating Plex library...')
79
80 # Try to send update request.
81 try:
82 update_plex(
83 config['plex']['host'].get(),
84 config['plex']['port'].get(),
85 config['plex']['token'].get())
86 self._log.info('... started.')
87
88 except requests.exceptions.RequestException:
89 self._log.warning('Update failed.')
90
[end of beetsplug/plexupdate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -18,7 +18,7 @@
from beets.plugins import BeetsPlugin
-def get_music_section(host, port, token):
+def get_music_section(host, port, token, library_name):
"""Getting the section key for the music library in Plex.
"""
api_endpoint = append_token('library/sections', token)
@@ -30,15 +30,15 @@
# Parse xml tree and extract music section key.
tree = ET.fromstring(r.text)
for child in tree.findall('Directory'):
- if child.get('title') == 'Music':
+ if child.get('title') == library_name:
return child.get('key')
-def update_plex(host, port, token):
+def update_plex(host, port, token, library_name):
"""Sends request to the Plex api to start a library refresh.
"""
# Getting section key and build url.
- section_key = get_music_section(host, port, token)
+ section_key = get_music_section(host, port, token, library_name)
api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
api_endpoint = append_token(api_endpoint, token)
url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
@@ -64,7 +64,8 @@
config['plex'].add({
u'host': u'localhost',
u'port': 32400,
- u'token': u''})
+ u'token': u'',
+ u'library_name': u'Music'})
self.register_listener('database_change', self.listen_for_db_change)
@@ -82,7 +83,8 @@
update_plex(
config['plex']['host'].get(),
config['plex']['port'].get(),
- config['plex']['token'].get())
+ config['plex']['token'].get(),
+ config['plex']['library_name'].get())
self._log.info('... started.')
except requests.exceptions.RequestException:
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -18,7 +18,7 @@\n from beets.plugins import BeetsPlugin\n \n \n-def get_music_section(host, port, token):\n+def get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n@@ -30,15 +30,15 @@\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n- if child.get('title') == 'Music':\n+ if child.get('title') == library_name:\n return child.get('key')\n \n \n-def update_plex(host, port, token):\n+def update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n- section_key = get_music_section(host, port, token)\n+ section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n@@ -64,7 +64,8 @@\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n- u'token': u''})\n+ u'token': u'',\n+ u'library_name': u'Music'})\n \n self.register_listener('database_change', self.listen_for_db_change)\n \n@@ -82,7 +83,8 @@\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n- config['plex']['token'].get())\n+ config['plex']['token'].get(),\n+ config['plex']['library_name'].get())\n self._log.info('... started.')\n \n except requests.exceptions.RequestException:\n", "issue": "plexupdate: Doesn't work with libaries not named \"Music\"\nI've named my music libaries `Music (New)` and `Music (Untagged)`. The plex update plugin should update the `Music (New)` section, but instead of updating at least both music libaries it doesn't update anything. If I change the library name from `Music (New)` to `Music` it works like a charm. This is specified on line 33 of the beets plugin. 
A config option to add libraries other than `Music` would make sense imo.\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == 'Music':\n return child.get('key')\n\n\ndef update_plex(host, port, token):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u''})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,466 | 487 |
gh_patches_debug_21909 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strawberry cli commands fail with error: strawberry.exe\__main__.py not found
After upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.
## Describe the Bug
- Upgraded strawberry from 0.152.0 to 0.154.1
```
poetry add strawberry-graphql[debug-server]@0.154.1
```
- Executed below commands:
```
strawberry server myapp.schema
strawberry export-schema myapp.schema:schema
```
- Both these commands are failing in below error:
**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\myyuser\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\straw-k47ybk7v-py3.10\\Scripts\\strawberry.exe\\\_\_main\_\_.py'**
## System Information
- Operating system: Windows 10
- Strawberry version (if applicable): 0.154.1
- Python: 3.10.9
## Additional Context
There is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.
Strawberry cli commands fail with error: strawberry.exe\__main__.py not found
After upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.
## Describe the Bug
- Upgraded strawberry from 0.152.0 to 0.154.1
```
poetry add strawberry-graphql[debug-server]@0.154.1
```
- Executed below commands:
```
strawberry server myapp.schema
strawberry export-schema myapp.schema:schema
```
- Both these commands are failing in below error:
**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\myyuser\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\straw-k47ybk7v-py3.10\\Scripts\\strawberry.exe\\\_\_main\_\_.py'**
## System Information
- Operating system: Windows 10
- Strawberry version (if applicable): 0.154.1
- Python: 3.10.9
## Additional Context
There is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.
</issue>
<code>
[start of strawberry/lazy_type.py]
1 import importlib
2 import inspect
3 import sys
4 import warnings
5 from dataclasses import dataclass
6 from pathlib import Path
7 from typing import ForwardRef, Generic, Optional, Type, TypeVar, cast
8
9 TypeName = TypeVar("TypeName")
10 Module = TypeVar("Module")
11
12
13 @dataclass(frozen=True)
14 class LazyType(Generic[TypeName, Module]):
15 type_name: str
16 module: str
17 package: Optional[str] = None
18
19 def __class_getitem__(cls, params):
20 warnings.warn(
21 (
22 "LazyType is deprecated, use "
23 "Annotated[YourType, strawberry.lazy(path)] instead"
24 ),
25 DeprecationWarning,
26 stacklevel=2,
27 )
28
29 type_name, module = params
30
31 package = None
32
33 if module.startswith("."):
34 current_frame = inspect.currentframe()
35 assert current_frame is not None
36 assert current_frame.f_back is not None
37 package = current_frame.f_back.f_globals["__package__"]
38
39 return cls(type_name, module, package)
40
41 def resolve_type(self) -> Type:
42 module = importlib.import_module(self.module, self.package)
43 main_module = sys.modules.get("__main__", None)
44 if main_module:
45 # If lazy type points to the main module, use it instead of the imported
46 # module. Otherwise duplication checks during schema-conversion might fail.
47 # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397
48 if main_module.__spec__ and main_module.__spec__.name == self.module:
49 module = main_module
50 elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
51 if (
52 main_module.__file__
53 and module.__file__
54 and Path(main_module.__file__).samefile(module.__file__)
55 ):
56 module = main_module
57 return module.__dict__[self.type_name]
58
59 # this empty call method allows LazyTypes to be used in generic types
60 # for example: List[LazyType["A", "module"]]
61
62 def __call__(self): # pragma: no cover
63 return None
64
65
66 class StrawberryLazyReference:
67 def __init__(self, module: str) -> None:
68 self.module = module
69 self.package = None
70
71 if module.startswith("."):
72 frame = inspect.stack()[2][0]
73 # TODO: raise a nice error if frame is None
74 assert frame is not None
75 self.package = cast(str, frame.f_globals["__package__"])
76
77 def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:
78 return LazyType(forward_ref.__forward_arg__, self.module, self.package)
79
80
81 def lazy(module_path: str) -> StrawberryLazyReference:
82 return StrawberryLazyReference(module_path)
83
[end of strawberry/lazy_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/lazy_type.py b/strawberry/lazy_type.py
--- a/strawberry/lazy_type.py
+++ b/strawberry/lazy_type.py
@@ -48,12 +48,16 @@
if main_module.__spec__ and main_module.__spec__.name == self.module:
module = main_module
elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
- if (
- main_module.__file__
- and module.__file__
- and Path(main_module.__file__).samefile(module.__file__)
- ):
- module = main_module
+ main_file = main_module.__file__
+ module_file = module.__file__
+ if main_file and module_file:
+ try:
+ is_samefile = Path(main_file).samefile(module_file)
+ except FileNotFoundError:
+ # Can be raised when run through the CLI as the __main__ file
+ # path contains `strawberry.exe`
+ is_samefile = False
+ module = main_module if is_samefile else module
return module.__dict__[self.type_name]
# this empty call method allows LazyTypes to be used in generic types
| {"golden_diff": "diff --git a/strawberry/lazy_type.py b/strawberry/lazy_type.py\n--- a/strawberry/lazy_type.py\n+++ b/strawberry/lazy_type.py\n@@ -48,12 +48,16 @@\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n- if (\n- main_module.__file__\n- and module.__file__\n- and Path(main_module.__file__).samefile(module.__file__)\n- ):\n- module = main_module\n+ main_file = main_module.__file__\n+ module_file = module.__file__\n+ if main_file and module_file:\n+ try:\n+ is_samefile = Path(main_file).samefile(module_file)\n+ except FileNotFoundError:\n+ # Can be raised when run through the CLI as the __main__ file\n+ # path contains `strawberry.exe`\n+ is_samefile = False\n+ module = main_module if is_samefile else module\n return module.__dict__[self.type_name]\n \n # this empty call method allows LazyTypes to be used in generic types\n", "issue": "Strawberry cli commands fail with error: strawberry.exe\\__main__.py not found\nAfter upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.\r\n\r\n## Describe the Bug\r\n- Upgraded strawberry from 0.152.0 to 0.154.1\r\n```\r\npoetry add strawberry-graphql[debug-server]@0.154.1\r\n```\r\n- Executed below commands:\r\n```\r\nstrawberry server myapp.schema\r\nstrawberry export-schema myapp.schema:schema\r\n```\r\n- Both these commands are failing in below error:\r\n\r\n**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\myyuser\\\\AppData\\\\Local\\\\pypoetry\\\\Cache\\\\virtualenvs\\\\straw-k47ybk7v-py3.10\\\\Scripts\\\\strawberry.exe\\\\\\_\\_main\\_\\_.py'**\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.154.1\r\n - Python: 3.10.9\r\n\r\n## Additional Context\r\n\r\nThere is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.\r\n\nStrawberry cli commands fail with error: strawberry.exe\\__main__.py not found\nAfter upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.\r\n\r\n## Describe the Bug\r\n- Upgraded strawberry from 0.152.0 to 0.154.1\r\n```\r\npoetry add strawberry-graphql[debug-server]@0.154.1\r\n```\r\n- Executed below commands:\r\n```\r\nstrawberry server myapp.schema\r\nstrawberry export-schema myapp.schema:schema\r\n```\r\n- Both these commands are failing in below error:\r\n\r\n**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\myyuser\\\\AppData\\\\Local\\\\pypoetry\\\\Cache\\\\virtualenvs\\\\straw-k47ybk7v-py3.10\\\\Scripts\\\\strawberry.exe\\\\\\_\\_main\\_\\_.py'**\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.154.1\r\n - Python: 3.10.9\r\n\r\n## Additional Context\r\n\r\nThere is no issue with strawberry cli in version 0.152.0 which I am using currently. 
If we downgrade the package to this version, cli commands work just fine.\r\n\n", "before_files": [{"content": "import importlib\nimport inspect\nimport sys\nimport warnings\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import ForwardRef, Generic, Optional, Type, TypeVar, cast\n\nTypeName = TypeVar(\"TypeName\")\nModule = TypeVar(\"Module\")\n\n\n@dataclass(frozen=True)\nclass LazyType(Generic[TypeName, Module]):\n type_name: str\n module: str\n package: Optional[str] = None\n\n def __class_getitem__(cls, params):\n warnings.warn(\n (\n \"LazyType is deprecated, use \"\n \"Annotated[YourType, strawberry.lazy(path)] instead\"\n ),\n DeprecationWarning,\n stacklevel=2,\n )\n\n type_name, module = params\n\n package = None\n\n if module.startswith(\".\"):\n current_frame = inspect.currentframe()\n assert current_frame is not None\n assert current_frame.f_back is not None\n package = current_frame.f_back.f_globals[\"__package__\"]\n\n return cls(type_name, module, package)\n\n def resolve_type(self) -> Type:\n module = importlib.import_module(self.module, self.package)\n main_module = sys.modules.get(\"__main__\", None)\n if main_module:\n # If lazy type points to the main module, use it instead of the imported\n # module. Otherwise duplication checks during schema-conversion might fail.\n # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n if (\n main_module.__file__\n and module.__file__\n and Path(main_module.__file__).samefile(module.__file__)\n ):\n module = main_module\n return module.__dict__[self.type_name]\n\n # this empty call method allows LazyTypes to be used in generic types\n # for example: List[LazyType[\"A\", \"module\"]]\n\n def __call__(self): # pragma: no cover\n return None\n\n\nclass StrawberryLazyReference:\n def __init__(self, module: str) -> None:\n self.module = module\n self.package = None\n\n if module.startswith(\".\"):\n frame = inspect.stack()[2][0]\n # TODO: raise a nice error if frame is None\n assert frame is not None\n self.package = cast(str, frame.f_globals[\"__package__\"])\n\n def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:\n return LazyType(forward_ref.__forward_arg__, self.module, self.package)\n\n\ndef lazy(module_path: str) -> StrawberryLazyReference:\n return StrawberryLazyReference(module_path)\n", "path": "strawberry/lazy_type.py"}]} | 1,885 | 273 |
gh_patches_debug_9378 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New user creation results in TypeError
If one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.

</issue>
<code>
[start of src/cms/forms/users/user_profile_form.py]
1 """
2 Form for creating a user object
3 """
4 import logging
5
6 from django import forms
7
8 from ...models import UserProfile
9
10
11 logger = logging.getLogger(__name__)
12
13
14 class UserProfileForm(forms.ModelForm):
15
16 class Meta:
17 model = UserProfile
18 fields = [
19 'regions',
20 'organization'
21 ]
22
23 # pylint: disable=arguments-differ
24 def save(self, *args, **kwargs):
25
26 logger.info(
27 'UserProfileForm saved with args %s and kwargs %s',
28 args,
29 kwargs
30 )
31
32 # pop kwarg to make sure the super class does not get this param
33 user = kwargs.pop('user', None)
34
35 if not self.instance.id:
36 # don't commit saving of ModelForm, because required user field is still missing
37 kwargs['commit'] = False
38
39 # save ModelForm
40 user_profile = super(UserProfileForm, self).save(*args, **kwargs)
41
42 if not self.instance.id:
43 user_profile.user = user
44 user_profile.save()
45 # check if called from UserProfileForm or RegionUserProfileForm
46 if 'regions' in self.cleaned_data:
47 # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly
48 user_profile.regions = self.cleaned_data['regions']
49 user_profile.save()
50
51 return user_profile
52
[end of src/cms/forms/users/user_profile_form.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/forms/users/user_profile_form.py b/src/cms/forms/users/user_profile_form.py
--- a/src/cms/forms/users/user_profile_form.py
+++ b/src/cms/forms/users/user_profile_form.py
@@ -45,7 +45,6 @@
# check if called from UserProfileForm or RegionUserProfileForm
if 'regions' in self.cleaned_data:
# regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly
- user_profile.regions = self.cleaned_data['regions']
- user_profile.save()
+ user_profile.regions.set(self.cleaned_data['regions'])
return user_profile
| {"golden_diff": "diff --git a/src/cms/forms/users/user_profile_form.py b/src/cms/forms/users/user_profile_form.py\n--- a/src/cms/forms/users/user_profile_form.py\n+++ b/src/cms/forms/users/user_profile_form.py\n@@ -45,7 +45,6 @@\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n- user_profile.regions = self.cleaned_data['regions']\n- user_profile.save()\n+ user_profile.regions.set(self.cleaned_data['regions'])\n \n return user_profile\n", "issue": "New user creation results in TypeError\nIf one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.\r\n\r\n\r\n\nNew user creation results in TypeError\nIf one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nForm for creating a user object\n\"\"\"\nimport logging\n\nfrom django import forms\n\nfrom ...models import UserProfile\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserProfileForm(forms.ModelForm):\n\n class Meta:\n model = UserProfile\n fields = [\n 'regions',\n 'organization'\n ]\n\n # pylint: disable=arguments-differ\n def save(self, *args, **kwargs):\n\n logger.info(\n 'UserProfileForm saved with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n user = kwargs.pop('user', None)\n\n if not self.instance.id:\n # don't commit saving of ModelForm, because required user field is still missing\n kwargs['commit'] = False\n\n # save ModelForm\n user_profile = super(UserProfileForm, self).save(*args, **kwargs)\n\n if not self.instance.id:\n user_profile.user = user\n user_profile.save()\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n user_profile.regions = self.cleaned_data['regions']\n user_profile.save()\n\n return user_profile\n", "path": "src/cms/forms/users/user_profile_form.py"}]} | 1,141 | 141 |
gh_patches_debug_321 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove possibly unused constant
At first sight it looks like it isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383
https://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13
We should still double check and make sure tests are passing after the removal.
</issue>
<code>
[start of readthedocs/core/models.py]
1 # -*- coding: utf-8 -*-
2
3 """Models for the core app."""
4 import logging
5
6 from annoying.fields import AutoOneToOneField
7 from django.db import models
8 from django.urls import reverse
9 from django.utils.translation import ugettext
10 from django.utils.translation import ugettext_lazy as _
11
12
13 STANDARD_EMAIL = '[email protected]'
14
15 log = logging.getLogger(__name__)
16
17
18 class UserProfile(models.Model):
19
20 """Additional information about a User."""
21
22 user = AutoOneToOneField(
23 'auth.User',
24 verbose_name=_('User'),
25 related_name='profile',
26 )
27 whitelisted = models.BooleanField(_('Whitelisted'), default=False)
28 banned = models.BooleanField(_('Banned'), default=False)
29 homepage = models.CharField(_('Homepage'), max_length=100, blank=True)
30 allow_ads = models.BooleanField(
31 _('See paid advertising'),
32 help_text=_('If unchecked, you will still see community ads.'),
33 default=True,
34 )
35
36 def __str__(self):
37 return (
38 ugettext("%(username)s's profile") %
39 {'username': self.user.username}
40 )
41
42 def get_absolute_url(self):
43 return reverse(
44 'profiles_profile_detail',
45 kwargs={'username': self.user.username},
46 )
47
[end of readthedocs/core/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py
--- a/readthedocs/core/models.py
+++ b/readthedocs/core/models.py
@@ -10,8 +10,6 @@
from django.utils.translation import ugettext_lazy as _
-STANDARD_EMAIL = '[email protected]'
-
log = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py\n--- a/readthedocs/core/models.py\n+++ b/readthedocs/core/models.py\n@@ -10,8 +10,6 @@\n from django.utils.translation import ugettext_lazy as _\n \n \n-STANDARD_EMAIL = '[email protected]'\n-\n log = logging.getLogger(__name__)\n", "issue": "Remove possibel unused constant\nAt first sight looks like isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13\r\n\r\nWe should still double check and make sure tests are passing after the removal.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Models for the core app.\"\"\"\nimport logging\n\nfrom annoying.fields import AutoOneToOneField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\n\n\nSTANDARD_EMAIL = '[email protected]'\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfile(models.Model):\n\n \"\"\"Additional information about a User.\"\"\"\n\n user = AutoOneToOneField(\n 'auth.User',\n verbose_name=_('User'),\n related_name='profile',\n )\n whitelisted = models.BooleanField(_('Whitelisted'), default=False)\n banned = models.BooleanField(_('Banned'), default=False)\n homepage = models.CharField(_('Homepage'), max_length=100, blank=True)\n allow_ads = models.BooleanField(\n _('See paid advertising'),\n help_text=_('If unchecked, you will still see community ads.'),\n default=True,\n )\n\n def __str__(self):\n return (\n ugettext(\"%(username)s's profile\") %\n {'username': self.user.username}\n )\n\n def get_absolute_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.user.username},\n )\n", "path": "readthedocs/core/models.py"}]} | 1,001 | 80 |
gh_patches_debug_1133 | rasdani/github-patches | git_diff | joke2k__faker-512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using É, é (e-acute) in emails.
It looks like the É, é (e-acute) symbols are not appropriate for a valid email. I used https://pypi.python.org/pypi/robotframework-faker/, which uses this library, and the following email was returned:
andré[email protected]
But email verification failed for this email.
Could you remove É, é, and other such letters from valid email generation if they are present?
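If it helps, here is a minimal sketch of how the provider's replacement table could be extended to transliterate the characters above. The extra pairs are an assumption about which accented characters matter; the list is not exhaustive.

```python
# Sketch: transliterate accented characters before building the local
# part of an address, mirroring the provider's existing replacements.
replacements = (
    ('ä', 'ae'), ('Ä', 'Ae'),
    ('ö', 'oe'), ('Ö', 'Oe'),
    ('ü', 'ue'), ('Ü', 'Ue'),
    ('é', 'e'), ('É', 'E'),  # e-acute, as reported above
    ('à', 'a'), ('À', 'A'),
    ('ß', 'ss'),
)

def ascii_local_part(name, table=replacements):
    for src, dst in table:
        name = name.replace(src, dst)
    return name

print(ascii_local_part('andré'))  # -> 'andre'
```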
</issue>
<code>
[start of faker/providers/internet/de_DE/__init__.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4 from .. import Provider as InternetProvider
5
6 class Provider(InternetProvider):
7
8 free_email_domains = (
9 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',
10 'web.de', 'yahoo.de',
11 )
12 tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )
13
14 replacements = (
15 ('ä', 'ae'), ('Ä', 'Ae'),
16 ('ö', 'oe'), ('Ö', 'Oe'),
17 ('ü', 'ue'), ('Ü', 'Ue'),
18 ('ß', 'ss'),
19 )
20
[end of faker/providers/internet/de_DE/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py
--- a/faker/providers/internet/de_DE/__init__.py
+++ b/faker/providers/internet/de_DE/__init__.py
@@ -15,5 +15,7 @@
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
+ ('é', 'e'), ('É', 'E'),
+ ('à', 'a'), ('À', 'A'),
('ß', 'ss'),
)
| {"golden_diff": "diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py\n--- a/faker/providers/internet/de_DE/__init__.py\n+++ b/faker/providers/internet/de_DE/__init__.py\n@@ -15,5 +15,7 @@\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n+ ('\u00e9', 'e'), ('\u00c9', 'E'),\n+ ('\u00e0', 'a'), ('\u00c0', 'A'),\n ('\u00df', 'ss'),\n )\n", "issue": "Using \u00c9, \u00e9 (e-acute) in emails.\nIt looks that \u00c9, \u00e9 (e-acute) symbols are not appropriate for valid email. I used https://pypi.python.org/pypi/robotframework-faker/ which uses this library and the following email was returned: \r\nandr\[email protected]\r\n\r\nBut email verification was failed for this email. \r\nCould you remove \u00c9, \u00e9 and other such letters if they are present from valid email generation?\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',\n 'web.de', 'yahoo.de',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )\n\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss'),\n )\n", "path": "faker/providers/internet/de_DE/__init__.py"}]} | 832 | 148 |
gh_patches_debug_35965 | rasdani/github-patches | git_diff | ethereum__web3.py-914 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in websockets.py: '<=' not supported between instances of 'int' and 'NoneType'
* web3 (4.3.0)
* websockets (4.0.1)
* Python: 3.6
* OS: osx HighSierra
### What was wrong?
`web3 = Web3(Web3.WebsocketProvider("ws://10.224.12.6:8546"))`
`web3.eth.syncing //returns data`
The websocket is clearly open, but when I run a filter which is supposed to have many entries, I get the following error trace:
Upon running: `data = web3.eth.getFilterLogs(new_block_filter.filter_id)`, I get:
```
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in make_request(self, method, params)
81 WebsocketProvider._loop
82 )
---> 83 return future.result()
/anaconda3/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
/anaconda3/lib/python3.6/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in coro_make_request(self, request_data)
71 async with self.conn as conn:
72 await conn.send(request_data)
---> 73 return json.loads(await conn.recv())
74
75 def make_request(self, method, params):
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/protocol.py in recv(self)
321 next_message.cancel()
322 if not self.legacy_recv:
--> 323 raise ConnectionClosed(self.close_code, self.close_reason)
324
325 @asyncio.coroutine
~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/exceptions.py in __init__(self, code, reason)
145 self.reason = reason
146 message = "WebSocket connection is closed: "
--> 147 if 3000 <= code < 4000:
148 explanation = "registered"
149 elif 4000 <= code < 5000:
TypeError: '<=' not supported between instances of 'int' and 'NoneType'
```
The same filter runs fine (albeit a bit slow) using `Web3.HTTPProvider()`
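For reference, the HTTP fallback mentioned above looks roughly like this. The endpoint and filter parameters are placeholders for whatever the node actually exposes; the only point is that the same `getFilterLogs` call is routed through `HTTPProvider`.

```python
# Workaround sketch only: same query, HTTP transport instead of websockets.
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://10.224.12.6:8545"))  # placeholder port
log_filter = w3.eth.filter({"fromBlock": 0, "toBlock": "latest"})
entries = w3.eth.getFilterLogs(log_filter.filter_id)
print(len(entries))
```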
</issue>
<code>
[start of web3/providers/websocket.py]
1 import asyncio
2 import json
3 import logging
4 import os
5 from threading import (
6 Thread,
7 )
8
9 import websockets
10
11 from web3.providers.base import (
12 JSONBaseProvider,
13 )
14
15
16 def _start_event_loop(loop):
17 asyncio.set_event_loop(loop)
18 loop.run_forever()
19 loop.close()
20
21
22 def _get_threaded_loop():
23 new_loop = asyncio.new_event_loop()
24 thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
25 thread_loop.start()
26 return new_loop
27
28
29 def get_default_endpoint():
30 return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')
31
32
33 class PersistentWebSocket:
34
35 def __init__(self, endpoint_uri, loop):
36 self.ws = None
37 self.endpoint_uri = endpoint_uri
38 self.loop = loop
39
40 async def __aenter__(self):
41 if self.ws is None:
42 self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)
43 return self.ws
44
45 async def __aexit__(self, exc_type, exc_val, exc_tb):
46 if exc_val is not None:
47 try:
48 await self.ws.close()
49 except Exception:
50 pass
51 self.ws = None
52
53
54 class WebsocketProvider(JSONBaseProvider):
55 logger = logging.getLogger("web3.providers.WebsocketProvider")
56 _loop = None
57
58 def __init__(self, endpoint_uri=None):
59 self.endpoint_uri = endpoint_uri
60 if self.endpoint_uri is None:
61 self.endpoint_uri = get_default_endpoint()
62 if WebsocketProvider._loop is None:
63 WebsocketProvider._loop = _get_threaded_loop()
64 self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)
65 super().__init__()
66
67 def __str__(self):
68 return "WS connection {0}".format(self.endpoint_uri)
69
70 async def coro_make_request(self, request_data):
71 async with self.conn as conn:
72 await conn.send(request_data)
73 return json.loads(await conn.recv())
74
75 def make_request(self, method, params):
76 self.logger.debug("Making request WebSocket. URI: %s, "
77 "Method: %s", self.endpoint_uri, method)
78 request_data = self.encode_rpc_request(method, params)
79 future = asyncio.run_coroutine_threadsafe(
80 self.coro_make_request(request_data),
81 WebsocketProvider._loop
82 )
83 return future.result()
84
[end of web3/providers/websocket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/providers/websocket.py b/web3/providers/websocket.py
--- a/web3/providers/websocket.py
+++ b/web3/providers/websocket.py
@@ -8,10 +8,15 @@
import websockets
+from web3.exceptions import (
+ ValidationError,
+)
from web3.providers.base import (
JSONBaseProvider,
)
+RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
+
def _start_event_loop(loop):
asyncio.set_event_loop(loop)
@@ -32,14 +37,17 @@
class PersistentWebSocket:
- def __init__(self, endpoint_uri, loop):
+ def __init__(self, endpoint_uri, loop, websocket_kwargs):
self.ws = None
self.endpoint_uri = endpoint_uri
self.loop = loop
+ self.websocket_kwargs = websocket_kwargs
async def __aenter__(self):
if self.ws is None:
- self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)
+ self.ws = await websockets.connect(
+ uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
+ )
return self.ws
async def __aexit__(self, exc_type, exc_val, exc_tb):
@@ -55,13 +63,26 @@
logger = logging.getLogger("web3.providers.WebsocketProvider")
_loop = None
- def __init__(self, endpoint_uri=None):
+ def __init__(self, endpoint_uri=None, websocket_kwargs=None):
self.endpoint_uri = endpoint_uri
if self.endpoint_uri is None:
self.endpoint_uri = get_default_endpoint()
if WebsocketProvider._loop is None:
WebsocketProvider._loop = _get_threaded_loop()
- self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)
+ if websocket_kwargs is None:
+ websocket_kwargs = {}
+ else:
+ found_restricted_keys = set(websocket_kwargs.keys()).intersection(
+ RESTRICTED_WEBSOCKET_KWARGS
+ )
+ if found_restricted_keys:
+ raise ValidationError(
+ '{0} are not allowed in websocket_kwargs, '
+ 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
+ )
+ self.conn = PersistentWebSocket(
+ self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
+ )
super().__init__()
def __str__(self):
| {"golden_diff": "diff --git a/web3/providers/websocket.py b/web3/providers/websocket.py\n--- a/web3/providers/websocket.py\n+++ b/web3/providers/websocket.py\n@@ -8,10 +8,15 @@\n \n import websockets\n \n+from web3.exceptions import (\n+ ValidationError,\n+)\n from web3.providers.base import (\n JSONBaseProvider,\n )\n \n+RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}\n+\n \n def _start_event_loop(loop):\n asyncio.set_event_loop(loop)\n@@ -32,14 +37,17 @@\n \n class PersistentWebSocket:\n \n- def __init__(self, endpoint_uri, loop):\n+ def __init__(self, endpoint_uri, loop, websocket_kwargs):\n self.ws = None\n self.endpoint_uri = endpoint_uri\n self.loop = loop\n+ self.websocket_kwargs = websocket_kwargs\n \n async def __aenter__(self):\n if self.ws is None:\n- self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)\n+ self.ws = await websockets.connect(\n+ uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs\n+ )\n return self.ws\n \n async def __aexit__(self, exc_type, exc_val, exc_tb):\n@@ -55,13 +63,26 @@\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n \n- def __init__(self, endpoint_uri=None):\n+ def __init__(self, endpoint_uri=None, websocket_kwargs=None):\n self.endpoint_uri = endpoint_uri\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n WebsocketProvider._loop = _get_threaded_loop()\n- self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)\n+ if websocket_kwargs is None:\n+ websocket_kwargs = {}\n+ else:\n+ found_restricted_keys = set(websocket_kwargs.keys()).intersection(\n+ RESTRICTED_WEBSOCKET_KWARGS\n+ )\n+ if found_restricted_keys:\n+ raise ValidationError(\n+ '{0} are not allowed in websocket_kwargs, '\n+ 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)\n+ )\n+ self.conn = PersistentWebSocket(\n+ self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs\n+ )\n super().__init__()\n \n def __str__(self):\n", "issue": "Erorr in websockets.py: '<=' not supported between instances of 'int' and 'NoneType'\n* web3 (4.3.0)\r\n* websockets (4.0.1)\r\n* Python: 3.6\r\n* OS: osx HighSierra\r\n\r\n\r\n### What was wrong?\r\n\r\n`web3 = Web3(Web3.WebsocketProvider(\"ws://10.224.12.6:8546\"))`\r\n`web3.eth.syncing //returns data`\r\n\r\nThe websocket is clearly open but when I run a filter which is supposed to have many entries, I get the following error trace:\r\n\r\nUpon running: `data = web3.eth.getFilterLogs(new_block_filter.filter_id)`, I get:\r\n\r\n```\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in make_request(self, method, params)\r\n 81 WebsocketProvider._loop\r\n 82 )\r\n---> 83 return future.result()\r\n\r\n/anaconda3/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)\r\n 430 raise CancelledError()\r\n 431 elif self._state == FINISHED:\r\n--> 432 return self.__get_result()\r\n 433 else:\r\n 434 raise TimeoutError()\r\n\r\n/anaconda3/lib/python3.6/concurrent/futures/_base.py in __get_result(self)\r\n 382 def __get_result(self):\r\n 383 if self._exception:\r\n--> 384 raise self._exception\r\n 385 else:\r\n 386 return self._result\r\n\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/web3/providers/websocket.py in coro_make_request(self, request_data)\r\n 71 async with self.conn as conn:\r\n 72 await conn.send(request_data)\r\n---> 73 return json.loads(await conn.recv())\r\n 74 \r\n 75 def make_request(self, method, 
params):\r\n\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/protocol.py in recv(self)\r\n 321 next_message.cancel()\r\n 322 if not self.legacy_recv:\r\n--> 323 raise ConnectionClosed(self.close_code, self.close_reason)\r\n 324 \r\n 325 @asyncio.coroutine\r\n\r\n~/Desktop/contracts-py/contracts/lib/python3.6/site-packages/websockets/exceptions.py in __init__(self, code, reason)\r\n 145 self.reason = reason\r\n 146 message = \"WebSocket connection is closed: \"\r\n--> 147 if 3000 <= code < 4000:\r\n 148 explanation = \"registered\"\r\n 149 elif 4000 <= code < 5000:\r\n\r\nTypeError: '<=' not supported between instances of 'int' and 'NoneType'\r\n```\r\n\r\nThe same filter runs fine (albeit a bit slow) using `Web3.HTTPProvider()`\r\n\r\n\n", "before_files": [{"content": "import asyncio\nimport json\nimport logging\nimport os\nfrom threading import (\n Thread,\n)\n\nimport websockets\n\nfrom web3.providers.base import (\n JSONBaseProvider,\n)\n\n\ndef _start_event_loop(loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()\n loop.close()\n\n\ndef _get_threaded_loop():\n new_loop = asyncio.new_event_loop()\n thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)\n thread_loop.start()\n return new_loop\n\n\ndef get_default_endpoint():\n return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')\n\n\nclass PersistentWebSocket:\n\n def __init__(self, endpoint_uri, loop):\n self.ws = None\n self.endpoint_uri = endpoint_uri\n self.loop = loop\n\n async def __aenter__(self):\n if self.ws is None:\n self.ws = await websockets.connect(uri=self.endpoint_uri, loop=self.loop)\n return self.ws\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if exc_val is not None:\n try:\n await self.ws.close()\n except Exception:\n pass\n self.ws = None\n\n\nclass WebsocketProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n\n def __init__(self, endpoint_uri=None):\n self.endpoint_uri = endpoint_uri\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n WebsocketProvider._loop = _get_threaded_loop()\n self.conn = PersistentWebSocket(self.endpoint_uri, WebsocketProvider._loop)\n super().__init__()\n\n def __str__(self):\n return \"WS connection {0}\".format(self.endpoint_uri)\n\n async def coro_make_request(self, request_data):\n async with self.conn as conn:\n await conn.send(request_data)\n return json.loads(await conn.recv())\n\n def make_request(self, method, params):\n self.logger.debug(\"Making request WebSocket. URI: %s, \"\n \"Method: %s\", self.endpoint_uri, method)\n request_data = self.encode_rpc_request(method, params)\n future = asyncio.run_coroutine_threadsafe(\n self.coro_make_request(request_data),\n WebsocketProvider._loop\n )\n return future.result()\n", "path": "web3/providers/websocket.py"}]} | 1,910 | 545 |
gh_patches_debug_6782 | rasdani/github-patches | git_diff | learningequality__kolibri-1761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The mastery completion sign updates only after a page refresh and not in real time.
## Summary
A learner completed an exercise and, after exiting it, found that the green completed tick did not update in real time; only after refreshing the page did the completed tick appear.
## System information
- Version: Kolibri 0.4.0beta10
- Operating system: Ubuntu 14.04 LTS
- Browser: Chrome
## How to reproduce
1. Attempt an exercise or master it.
2. Come out of the exercise.
3. The completed or in-progress stamp is not updated in real time.
## Screenshots
Learner has mastered the topic.

He exited the exercise and the completed sign on the thumbnail is not updated:

But on refreshing the page the thumbnail has the completed sign.
</issue>
<code>
[start of kolibri/auth/backends.py]
1 """
2 Implements custom auth backends as described in the Django docs, for our custom user classes -- FacilityUser and
3 DeviceOwner. The appropriate classes should be listed in the AUTHENTICATION_BACKENDS. Note that authentication
4 backends are checked in the order they're listed.
5 """
6
7 from kolibri.auth.models import DeviceOwner, FacilityUser
8
9
10 class FacilityUserBackend(object):
11 """
12 A class that implements authentication for FacilityUsers.
13 """
14
15 def authenticate(self, username=None, password=None, facility=None):
16 """
17 Authenticates the user if the credentials correspond to a FacilityUser for the specified Facility.
18
19 :param username: a string
20 :param password: a string
21 :param facility: a Facility
22 :return: A FacilityUser instance if successful, or None if authentication failed.
23 """
24 users = FacilityUser.objects.filter(username=username)
25 if facility:
26 users = users.filter(facility=facility)
27 for user in users:
28 if user.check_password(password):
29 return user
30 # Allow login without password for learners for facilities that allow this.
31 # Must specify the facility, to prevent accidental logins
32 elif facility and user.dataset.learner_can_login_with_no_password and not user.roles.count():
33 return user
34 return None
35
36 def get_user(self, user_id):
37 """
38 Gets a user. Auth backends are required to implement this.
39
40 :param user_id: A FacilityUser pk
41 :return: A FacilityUser instance if a BaseUser with that pk is found, else None.
42 """
43 try:
44 return FacilityUser.objects.get(pk=user_id)
45 except FacilityUser.DoesNotExist:
46 return None
47
48
49 class DeviceOwnerBackend(object):
50 """
51 A class that implements authentication for DeviceOwners.
52 """
53
54 def authenticate(self, username=None, password=None, **kwargs):
55 """
56 Authenticates the user if the credentials correspond to a DeviceOwner.
57
58 :param username: a string
59 :param password: a string
60 :return: A DeviceOwner instance if successful, or None if authentication failed.
61 """
62 try:
63 user = DeviceOwner.objects.get(username=username)
64 if user.check_password(password):
65 return user
66 else:
67 return None
68 except DeviceOwner.DoesNotExist:
69 return None
70
71 def get_user(self, user_id):
72 """
73 Gets a user. Auth backends are required to implement this.
74
75 :param user_id: A BaseUser pk
76 :return: A DeviceOwner instance if a BaseUser with that pk is found, else None.
77 """
78 try:
79 return DeviceOwner.objects.get(pk=user_id)
80 except DeviceOwner.DoesNotExist:
81 return None
82
[end of kolibri/auth/backends.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/auth/backends.py b/kolibri/auth/backends.py
--- a/kolibri/auth/backends.py
+++ b/kolibri/auth/backends.py
@@ -21,7 +21,7 @@
:param facility: a Facility
:return: A FacilityUser instance if successful, or None if authentication failed.
"""
- users = FacilityUser.objects.filter(username=username)
+ users = FacilityUser.objects.filter(username__iexact=username)
if facility:
users = users.filter(facility=facility)
for user in users:
| {"golden_diff": "diff --git a/kolibri/auth/backends.py b/kolibri/auth/backends.py\n--- a/kolibri/auth/backends.py\n+++ b/kolibri/auth/backends.py\n@@ -21,7 +21,7 @@\n :param facility: a Facility\n :return: A FacilityUser instance if successful, or None if authentication failed.\n \"\"\"\n- users = FacilityUser.objects.filter(username=username)\n+ users = FacilityUser.objects.filter(username__iexact=username)\n if facility:\n users = users.filter(facility=facility)\n for user in users:\n", "issue": "The mastery completion sign updates only after a page refresh and not real time.\n## Summary\r\n\r\nA learner had completed and came out of the exercise and found the green completed tick did not get updated real time, but after refreshing the page the completed tick appeared. \r\n\r\n## System information\r\n - Version: Kolibri 0.4.0beta10\r\n - Operating system: Ubuntu 14.04 LTS\r\n - Browser: Chrome\r\n\r\n\r\n## How to reproduce\r\n1. Attempt an exercise or master it.\r\n2. Come out of the exercise.\r\n3. The completed or In progress stamp is not updated real time.\r\n\r\n## Screenshots\r\nLearner has mastered the topic.\r\n\r\n\r\nHe exited the exercise and the completed sign on the thumbnail is not update:\r\n\r\n\r\nBut on refreshing the page the thumbnail has the completed sign.\n", "before_files": [{"content": "\"\"\"\nImplements custom auth backends as described in the Django docs, for our custom user classes -- FacilityUser and\nDeviceOwner. The appropriate classes should be listed in the AUTHENTICATION_BACKENDS. Note that authentication\nbackends are checked in the order they're listed.\n\"\"\"\n\nfrom kolibri.auth.models import DeviceOwner, FacilityUser\n\n\nclass FacilityUserBackend(object):\n \"\"\"\n A class that implements authentication for FacilityUsers.\n \"\"\"\n\n def authenticate(self, username=None, password=None, facility=None):\n \"\"\"\n Authenticates the user if the credentials correspond to a FacilityUser for the specified Facility.\n\n :param username: a string\n :param password: a string\n :param facility: a Facility\n :return: A FacilityUser instance if successful, or None if authentication failed.\n \"\"\"\n users = FacilityUser.objects.filter(username=username)\n if facility:\n users = users.filter(facility=facility)\n for user in users:\n if user.check_password(password):\n return user\n # Allow login without password for learners for facilities that allow this.\n # Must specify the facility, to prevent accidental logins\n elif facility and user.dataset.learner_can_login_with_no_password and not user.roles.count():\n return user\n return None\n\n def get_user(self, user_id):\n \"\"\"\n Gets a user. 
Auth backends are required to implement this.\n\n :param user_id: A FacilityUser pk\n :return: A FacilityUser instance if a BaseUser with that pk is found, else None.\n \"\"\"\n try:\n return FacilityUser.objects.get(pk=user_id)\n except FacilityUser.DoesNotExist:\n return None\n\n\nclass DeviceOwnerBackend(object):\n \"\"\"\n A class that implements authentication for DeviceOwners.\n \"\"\"\n\n def authenticate(self, username=None, password=None, **kwargs):\n \"\"\"\n Authenticates the user if the credentials correspond to a DeviceOwner.\n\n :param username: a string\n :param password: a string\n :return: A DeviceOwner instance if successful, or None if authentication failed.\n \"\"\"\n try:\n user = DeviceOwner.objects.get(username=username)\n if user.check_password(password):\n return user\n else:\n return None\n except DeviceOwner.DoesNotExist:\n return None\n\n def get_user(self, user_id):\n \"\"\"\n Gets a user. Auth backends are required to implement this.\n\n :param user_id: A BaseUser pk\n :return: A DeviceOwner instance if a BaseUser with that pk is found, else None.\n \"\"\"\n try:\n return DeviceOwner.objects.get(pk=user_id)\n except DeviceOwner.DoesNotExist:\n return None\n", "path": "kolibri/auth/backends.py"}]} | 1,596 | 127 |
gh_patches_debug_36757 | rasdani/github-patches | git_diff | huggingface__trl-398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Llama Reward Model is incorrectly merged
As mentioned in #287, `merge_peft_adapter` saves the Llama RM as a `LlamaForCausalLM`; see [here](https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/merge_peft_adapter.py#L35)
But the reward model is trained as, and should be loaded as, a `LlamaForSequenceClassification`, and running `rl_training.py` gives the obvious warnings:
```
Some weights of the model checkpoint at ./llama-7b-se-rm were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at /home/toolkit/huggingface/llama-7b-rm and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
We should instead check whether we are merging the RM and then save it as the correct model class.
Also, the `score.weight` is not being loaded, as mentioned in #297; see more info below.
--- update --
It seems that `merge_peft_adapter` should be using `merge_and_unload()`, which correctly overrides the score. But I haven't yet managed to get good results using the adapter weights on the hub.
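A rough sketch of what that could look like for the reward model follows. The checkpoint paths are placeholders, and this is an outline of the idea rather than the final script.

```python
# Sketch: load the base weights with the sequence-classification head,
# attach the LoRA adapter, then fold it in with merge_and_unload() so
# the trained score head is kept in the merged checkpoint.
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification

base = AutoModelForSequenceClassification.from_pretrained(
    "path/to/llama-7b-base", num_labels=1, torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, "path/to/llama-7b-se-rm-adapter")
merged = model.merge_and_unload()
merged.save_pretrained("path/to/llama-7b-se-rm-merged")
```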
</issue>
<code>
[start of examples/stack_llama/scripts/merge_peft_adapter.py]
1 from dataclasses import dataclass, field
2 from typing import Optional
3
4 import peft
5 import torch
6 from peft import PeftConfig, PeftModel
7 from peft.utils import _get_submodules
8 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
9
10
11 DEFAULT_PAD_TOKEN = "[PAD]"
12 DEFAULT_EOS_TOKEN = "</s>"
13 DEFAULT_BOS_TOKEN = "</s>"
14 DEFAULT_UNK_TOKEN = "</s>"
15
16
17 @dataclass
18 class ScriptArguments:
19 """
20 The name of the Casual LM model we wish to fine with PPO
21 """
22
23 adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the model name"})
24 base_model_name: Optional[str] = field(default=None, metadata={"help": "the model name"})
25 output_name: Optional[str] = field(default=None, metadata={"help": "the model name"})
26
27
28 parser = HfArgumentParser(ScriptArguments)
29 script_args = parser.parse_args_into_dataclasses()[0]
30 assert script_args.adapter_model_name is not None, "please provide the name of the Adapter you would like to merge"
31 assert script_args.base_model_name is not None, "please provide the name of the Base model"
32 assert script_args.base_model_name is not None, "please provide the output name of the merged model"
33
34 peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)
35 model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)
36 tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)
37 config = AutoConfig.from_pretrained(script_args.base_model_name)
38 architecture = config.architectures[0]
39 if "Llama" in architecture:
40 print("Setting EOS, BOS, and UNK tokens for LLama tokenizer")
41 tokenizer.add_special_tokens(
42 {
43 "eos_token": DEFAULT_EOS_TOKEN,
44 "bos_token": DEFAULT_BOS_TOKEN,
45 "unk_token": DEFAULT_UNK_TOKEN,
46 "pad_token": DEFAULT_PAD_TOKEN,
47 }
48 )
49
50 # Load the Lora model
51 model = PeftModel.from_pretrained(model, script_args.adapter_model_name)
52 model.eval()
53
54 key_list = [key for key, _ in model.base_model.model.named_modules() if "lora" not in key]
55 for key in key_list:
56 parent, target, target_name = _get_submodules(model.base_model.model, key)
57 if isinstance(target, peft.tuners.lora.Linear):
58 bias = target.bias is not None
59 new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)
60 model.base_model._replace_module(parent, target_name, new_module, target)
61
62 model = model.base_model.model
63
64 model.save_pretrained(f"{script_args.output_name}")
65 tokenizer.save_pretrained(f"{script_args.output_name}")
66 model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False)
67
[end of examples/stack_llama/scripts/merge_peft_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/stack_llama/scripts/merge_peft_adapter.py b/examples/stack_llama/scripts/merge_peft_adapter.py
--- a/examples/stack_llama/scripts/merge_peft_adapter.py
+++ b/examples/stack_llama/scripts/merge_peft_adapter.py
@@ -1,17 +1,9 @@
from dataclasses import dataclass, field
from typing import Optional
-import peft
import torch
from peft import PeftConfig, PeftModel
-from peft.utils import _get_submodules
-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
-
-
-DEFAULT_PAD_TOKEN = "[PAD]"
-DEFAULT_EOS_TOKEN = "</s>"
-DEFAULT_BOS_TOKEN = "</s>"
-DEFAULT_UNK_TOKEN = "</s>"
+from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser
@dataclass
@@ -32,34 +24,23 @@
assert script_args.base_model_name is not None, "please provide the output name of the merged model"
peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)
-model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)
-tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)
-config = AutoConfig.from_pretrained(script_args.base_model_name)
-architecture = config.architectures[0]
-if "Llama" in architecture:
- print("Setting EOS, BOS, and UNK tokens for LLama tokenizer")
- tokenizer.add_special_tokens(
- {
- "eos_token": DEFAULT_EOS_TOKEN,
- "bos_token": DEFAULT_BOS_TOKEN,
- "unk_token": DEFAULT_UNK_TOKEN,
- "pad_token": DEFAULT_PAD_TOKEN,
- }
+if peft_config.task_type == "SEQ_CLS":
+ # peft is for reward model so load sequence classification
+ model = AutoModelForSequenceClassification.from_pretrained(
+ script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16
+ )
+else:
+ model = AutoModelForCausalLM.from_pretrained(
+ script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16
)
+tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)
+
# Load the Lora model
model = PeftModel.from_pretrained(model, script_args.adapter_model_name)
model.eval()
-key_list = [key for key, _ in model.base_model.model.named_modules() if "lora" not in key]
-for key in key_list:
- parent, target, target_name = _get_submodules(model.base_model.model, key)
- if isinstance(target, peft.tuners.lora.Linear):
- bias = target.bias is not None
- new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)
- model.base_model._replace_module(parent, target_name, new_module, target)
-
-model = model.base_model.model
+model = model.merge_and_unload()
model.save_pretrained(f"{script_args.output_name}")
tokenizer.save_pretrained(f"{script_args.output_name}")
| {"golden_diff": "diff --git a/examples/stack_llama/scripts/merge_peft_adapter.py b/examples/stack_llama/scripts/merge_peft_adapter.py\n--- a/examples/stack_llama/scripts/merge_peft_adapter.py\n+++ b/examples/stack_llama/scripts/merge_peft_adapter.py\n@@ -1,17 +1,9 @@\n from dataclasses import dataclass, field\n from typing import Optional\n \n-import peft\n import torch\n from peft import PeftConfig, PeftModel\n-from peft.utils import _get_submodules\n-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser\n-\n-\n-DEFAULT_PAD_TOKEN = \"[PAD]\"\n-DEFAULT_EOS_TOKEN = \"</s>\"\n-DEFAULT_BOS_TOKEN = \"</s>\"\n-DEFAULT_UNK_TOKEN = \"</s>\"\n+from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser\n \n \n @dataclass\n@@ -32,34 +24,23 @@\n assert script_args.base_model_name is not None, \"please provide the output name of the merged model\"\n \n peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)\n-model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)\n-tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)\n-config = AutoConfig.from_pretrained(script_args.base_model_name)\n-architecture = config.architectures[0]\n-if \"Llama\" in architecture:\n- print(\"Setting EOS, BOS, and UNK tokens for LLama tokenizer\")\n- tokenizer.add_special_tokens(\n- {\n- \"eos_token\": DEFAULT_EOS_TOKEN,\n- \"bos_token\": DEFAULT_BOS_TOKEN,\n- \"unk_token\": DEFAULT_UNK_TOKEN,\n- \"pad_token\": DEFAULT_PAD_TOKEN,\n- }\n+if peft_config.task_type == \"SEQ_CLS\":\n+ # peft is for reward model so load sequence classification\n+ model = AutoModelForSequenceClassification.from_pretrained(\n+ script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16\n+ )\n+else:\n+ model = AutoModelForCausalLM.from_pretrained(\n+ script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16\n )\n \n+tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)\n+\n # Load the Lora model\n model = PeftModel.from_pretrained(model, script_args.adapter_model_name)\n model.eval()\n \n-key_list = [key for key, _ in model.base_model.model.named_modules() if \"lora\" not in key]\n-for key in key_list:\n- parent, target, target_name = _get_submodules(model.base_model.model, key)\n- if isinstance(target, peft.tuners.lora.Linear):\n- bias = target.bias is not None\n- new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n- model.base_model._replace_module(parent, target_name, new_module, target)\n-\n-model = model.base_model.model\n+model = model.merge_and_unload()\n \n model.save_pretrained(f\"{script_args.output_name}\")\n tokenizer.save_pretrained(f\"{script_args.output_name}\")\n", "issue": "Llama Reward Model is incorrectly merged\nAs mentioned in #287, `merge_peft_adapter` saves the Llama RM as a `LlamaForCausalLM` see [here](https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/merge_peft_adapter.py#L35)\r\n\r\nBut the reward model is trained and should be a `LlamaForSequenceClassification` and running `rl_training.py` gives the obvious warnings\r\n```\r\nSome weights of the model checkpoint at ./llama-7b-se-rm were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']\r\n- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\r\n- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\r\nSome weights of LlamaForSequenceClassification were not initialized from the model checkpoint at /home/toolkit/huggingface/llama-7b-rm and are newly initialized: ['score.weight']\r\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\r\n```\r\n\r\nWe should instead check whether we are merging the rm and then save as a the correct model \r\n\r\nAlso the `score.weight` is not being loaded as mentioned in #297 , see more info below\r\n\r\n\r\n--- update --\r\n\r\nIt seems that `merge_peft_adapter` should be using `merge_and_unload()` which correctly overrides the score. But I haven't yet managed to get good results using the adapter weights on the hub\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom typing import Optional\n\nimport peft\nimport torch\nfrom peft import PeftConfig, PeftModel\nfrom peft.utils import _get_submodules\nfrom transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser\n\n\nDEFAULT_PAD_TOKEN = \"[PAD]\"\nDEFAULT_EOS_TOKEN = \"</s>\"\nDEFAULT_BOS_TOKEN = \"</s>\"\nDEFAULT_UNK_TOKEN = \"</s>\"\n\n\n@dataclass\nclass ScriptArguments:\n \"\"\"\n The name of the Casual LM model we wish to fine with PPO\n \"\"\"\n\n adapter_model_name: Optional[str] = field(default=None, metadata={\"help\": \"the model name\"})\n base_model_name: Optional[str] = field(default=None, metadata={\"help\": \"the model name\"})\n output_name: Optional[str] = field(default=None, metadata={\"help\": \"the model name\"})\n\n\nparser = HfArgumentParser(ScriptArguments)\nscript_args = parser.parse_args_into_dataclasses()[0]\nassert script_args.adapter_model_name is not None, \"please provide the name of the Adapter you would like to merge\"\nassert script_args.base_model_name is not None, \"please provide the name of the Base model\"\nassert script_args.base_model_name is not None, \"please provide the output name of the merged model\"\n\npeft_config = PeftConfig.from_pretrained(script_args.adapter_model_name)\nmodel = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16)\ntokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name)\nconfig = AutoConfig.from_pretrained(script_args.base_model_name)\narchitecture = config.architectures[0]\nif \"Llama\" in architecture:\n print(\"Setting EOS, BOS, and UNK tokens for LLama tokenizer\")\n tokenizer.add_special_tokens(\n {\n \"eos_token\": DEFAULT_EOS_TOKEN,\n \"bos_token\": DEFAULT_BOS_TOKEN,\n \"unk_token\": DEFAULT_UNK_TOKEN,\n \"pad_token\": DEFAULT_PAD_TOKEN,\n }\n )\n\n# Load the Lora model\nmodel = PeftModel.from_pretrained(model, script_args.adapter_model_name)\nmodel.eval()\n\nkey_list = [key for key, _ in model.base_model.model.named_modules() if \"lora\" not in key]\nfor key in key_list:\n parent, target, target_name = _get_submodules(model.base_model.model, key)\n if isinstance(target, peft.tuners.lora.Linear):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n model.base_model._replace_module(parent, target_name, new_module, target)\n\nmodel = 
model.base_model.model\n\nmodel.save_pretrained(f\"{script_args.output_name}\")\ntokenizer.save_pretrained(f\"{script_args.output_name}\")\nmodel.push_to_hub(f\"{script_args.output_name}\", use_temp_dir=False)\n", "path": "examples/stack_llama/scripts/merge_peft_adapter.py"}]} | 1,675 | 705 |
gh_patches_debug_29460 | rasdani/github-patches | git_diff | aimhubio__aim-2671 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend `aim.ext.tensorboard_tracker.run.Run` to also allow system stats, parameters, and stdout capture.
## 🚀 Feature
Allow capturing of system parameters and terminal logs by `aim.ext.tensorboard_tracker.run.Run`, as this great feature shouldn't be available only to the default `Run`.
### Motivation
The new feature of allowing continuous syncing from `tensorboard` files to `aim` is really nice, but because `aim.ext.tensorboard_tracker.run.Run` inherits from `BasicRun` rather than `Run`, it misses out on the ability to log standard output, system stats, and system parameters. Since `aim.ext.tensorboard_tracker.run.Run` should be a possible replacement for `Run`, I don't see a reason why this behaviour shouldn't be allowed.
It has been highlighted in Discord by @mihran113:
> The reason behind inheriting from basic run is exactly to avoid terminal log tracking and system param tracking actually, cause we don’t want to add anything else rather than what’s tracked via tensorboard. Cause there can be times when live tracking is done from a different process, and catching that process’s terminal logs and system params won’t make any sense I guess. If you’re interested you can open a PR to address those points, cause adding the possibility to enable those won’t make any harm as well.
so I believe the *default* arguments should *not* do this extra logging, but still optionally allow this behaviour.
### Pitch
Have `aim.ext.tensorboard_tracker.run.Run` inherit from `aim.sdk.run.Run` instead of `aim.sdk.run.BasicRun`, so that it can utilise its extra capabilities.
### Alternatives
Instead of inheritance, we could change the system resource tracking to be a mixin?
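To make the pitch concrete, here is a sketch of what the subclass could look like if it inherited from the full SDK `Run` and simply passed the tracking switches through, with defaults that keep today's behaviour. The argument names mirror `aim.sdk.run.Run` and are an assumption about its signature.

```python
from typing import Optional, Union

from aim.ext.tensorboard_tracker.tracker import TensorboardTracker
from aim.sdk.run import Run as SdkRun


class Run(SdkRun):
    def __init__(self, run_hash: Optional[str] = None, *,
                 sync_tensorboard_log_dir: str,
                 repo=None,
                 experiment: Optional[str] = None,
                 force_resume: Optional[bool] = False,
                 system_tracking_interval: Optional[Union[int, float]] = None,
                 log_system_params: Optional[bool] = False,
                 capture_terminal_logs: Optional[bool] = False):
        # Defaults keep the current behaviour: no system stats, params,
        # or terminal capture unless the caller opts in.
        super().__init__(
            run_hash, repo=repo, read_only=False, experiment=experiment,
            force_resume=force_resume,
            system_tracking_interval=system_tracking_interval,
            log_system_params=log_system_params,
            capture_terminal_logs=capture_terminal_logs,
        )
        self['tb_log_directory'] = sync_tensorboard_log_dir
        self._tensorboard_tracker = TensorboardTracker(
            self._tracker, sync_tensorboard_log_dir
        )
        self._tensorboard_tracker.start()
        self._resources.add_extra_resource(self._tensorboard_tracker)
```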
</issue>
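Editorial illustration, not part of the issue above: the pitch essentially asks the tensorboard-tracker `Run` to accept the same opt-in switches the default SDK `Run` already exposes, with all of them disabled by default. A minimal sketch of that constructor follows; it assumes the SDK `Run` keyword names (`system_tracking_interval`, `log_system_params`, `capture_terminal_logs`) and omits the unchanged TensorboardTracker set-up.

```python
# Hedged sketch only: inherit from the full SDK Run and forward opt-in switches.
from typing import Optional, Union

from aim.sdk.run import Run as SdkRun


class Run(SdkRun):
    def __init__(self, run_hash: Optional[str] = None, *,
                 sync_tensorboard_log_dir: str,
                 repo: Optional[Union[str, 'Repo']] = None,
                 experiment: Optional[str] = None,
                 force_resume: Optional[bool] = False,
                 # New switches, all off by default so nothing extra is logged
                 # unless the caller explicitly asks for it.
                 system_tracking_interval: Optional[Union[int, float]] = None,
                 log_system_params: Optional[bool] = False,
                 capture_terminal_logs: Optional[bool] = False):
        super().__init__(
            run_hash, repo=repo, read_only=False, experiment=experiment,
            force_resume=force_resume,
            system_tracking_interval=system_tracking_interval,
            log_system_params=log_system_params,
            capture_terminal_logs=capture_terminal_logs,
        )
        # The existing tb_log_directory / TensorboardTracker wiring would
        # follow here unchanged.
```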
<code>
[start of aim/ext/tensorboard_tracker/run.py]
1 from typing import Optional, Union
2
3 from aim.sdk.run import BasicRun
4 from aim.ext.tensorboard_tracker.tracker import TensorboardTracker
5
6 from typing import TYPE_CHECKING
7
8 if TYPE_CHECKING:
9 from aim.sdk.repo import Repo
10
11
12 class Run(BasicRun):
13 def __init__(self, run_hash: Optional[str] = None, *,
14 sync_tensorboard_log_dir: str,
15 repo: Optional[Union[str, 'Repo']] = None,
16 experiment: Optional[str] = None,
17 force_resume: Optional[bool] = False,
18 ):
19 super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)
20 self['tb_log_directory'] = sync_tensorboard_log_dir
21 self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)
22 self._tensorboard_tracker.start()
23 self._resources.add_extra_resource(self._tensorboard_tracker)
24
[end of aim/ext/tensorboard_tracker/run.py]
</code>
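On the issue's "Alternatives" question (turning system resource tracking into a mixin), a purely hypothetical sketch follows; every name in it (`SystemResourceTrackingMixin`, `init_resource_tracking`) is invented for illustration and is not part of aim's API.

```python
# Purely illustrative mixin shape; none of these names exist in aim itself.
from typing import Optional, Union


class SystemResourceTrackingMixin:
    """Owns the opt-in system-stats / terminal-log switches for a run class."""

    def init_resource_tracking(
        self,
        system_tracking_interval: Optional[Union[int, float]] = None,
        log_system_params: bool = False,
        capture_terminal_logs: bool = False,
    ) -> None:
        # A real implementation would start the corresponding trackers only
        # when a switch is enabled; here we just record the choices.
        self._system_tracking_interval = system_tracking_interval
        self._log_system_params = log_system_params
        self._capture_terminal_logs = capture_terminal_logs
```

Any run class could then opt in by listing the mixin ahead of its base class and calling `init_resource_tracking` from its constructor.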
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aim/ext/tensorboard_tracker/run.py b/aim/ext/tensorboard_tracker/run.py
--- a/aim/ext/tensorboard_tracker/run.py
+++ b/aim/ext/tensorboard_tracker/run.py
@@ -1,6 +1,6 @@
from typing import Optional, Union
-from aim.sdk.run import BasicRun
+from aim.sdk.run import Run as SdkRun
from aim.ext.tensorboard_tracker.tracker import TensorboardTracker
from typing import TYPE_CHECKING
@@ -9,14 +9,23 @@
from aim.sdk.repo import Repo
-class Run(BasicRun):
- def __init__(self, run_hash: Optional[str] = None, *,
- sync_tensorboard_log_dir: str,
- repo: Optional[Union[str, 'Repo']] = None,
- experiment: Optional[str] = None,
- force_resume: Optional[bool] = False,
- ):
- super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)
+class Run(SdkRun):
+ def __init__(
+ self, run_hash: Optional[str] = None, *,
+ sync_tensorboard_log_dir: str,
+ repo: Optional[Union[str, 'Repo']] = None,
+ experiment: Optional[str] = None,
+ force_resume: Optional[bool] = False,
+ system_tracking_interval: Optional[Union[int, float]] = None,
+ log_system_params: Optional[bool] = False,
+ capture_terminal_logs: Optional[bool] = False,
+ ):
+ super().__init__(
+ run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume,
+ system_tracking_interval=system_tracking_interval, log_system_params=log_system_params,
+ capture_terminal_logs=capture_terminal_logs
+ )
+
self['tb_log_directory'] = sync_tensorboard_log_dir
self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)
self._tensorboard_tracker.start()
| {"golden_diff": "diff --git a/aim/ext/tensorboard_tracker/run.py b/aim/ext/tensorboard_tracker/run.py\n--- a/aim/ext/tensorboard_tracker/run.py\n+++ b/aim/ext/tensorboard_tracker/run.py\n@@ -1,6 +1,6 @@\n from typing import Optional, Union\n \n-from aim.sdk.run import BasicRun\n+from aim.sdk.run import Run as SdkRun\n from aim.ext.tensorboard_tracker.tracker import TensorboardTracker\n \n from typing import TYPE_CHECKING\n@@ -9,14 +9,23 @@\n from aim.sdk.repo import Repo\n \n \n-class Run(BasicRun):\n- def __init__(self, run_hash: Optional[str] = None, *,\n- sync_tensorboard_log_dir: str,\n- repo: Optional[Union[str, 'Repo']] = None,\n- experiment: Optional[str] = None,\n- force_resume: Optional[bool] = False,\n- ):\n- super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)\n+class Run(SdkRun):\n+ def __init__(\n+ self, run_hash: Optional[str] = None, *,\n+ sync_tensorboard_log_dir: str,\n+ repo: Optional[Union[str, 'Repo']] = None,\n+ experiment: Optional[str] = None,\n+ force_resume: Optional[bool] = False,\n+ system_tracking_interval: Optional[Union[int, float]] = None,\n+ log_system_params: Optional[bool] = False,\n+ capture_terminal_logs: Optional[bool] = False,\n+ ):\n+ super().__init__(\n+ run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume,\n+ system_tracking_interval=system_tracking_interval, log_system_params=log_system_params,\n+ capture_terminal_logs=capture_terminal_logs\n+ )\n+\n self['tb_log_directory'] = sync_tensorboard_log_dir\n self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)\n self._tensorboard_tracker.start()\n", "issue": "Extend `aim.ext.tensorboard_tracker.run.Run` to also allow system stats, parameters, and stdout capture.\n## \ud83d\ude80 Feature\r\n\r\nAllow capturing of system parameters and terminal logs by the `aim.ext.tensorboard_tracker.run.Run`, as this is great feature shouldn't be only available to the default `Run`.\r\n\r\n### Motivation\r\n\r\nThe new feature of allowing continuous syncing from `tensorboard` files to `aim` is really nice, but because `aim.ext.tensorboard_tracker.run.Run` inherits from `BasicRun` rather than `Run`, it misses out on the ability to log the standard out, system stats and system parameters. Since `aim.ext.tensorboard_tracker.run.Run` should be a possible replacement for `Run`, I don't see a reason why this behaviour shouldn't be allowed.\r\n\r\nIt has been highlighted in Discord by @mihran113:\r\n\r\n> The reason behind inheriting from basic run is exactly to avoid terminal log tracking and system param tracking actually, cause we don\u2019t want to add anything else rather than what\u2019s tracked via tensorboard. Cause there can be times when live tracking is done from a different process, and catching that process\u2019s terminal logs and system params won\u2019t make any sense I guess. If you\u2019re interested you can open a PR to address those points, cause adding the possibility to enable those won\u2019t make any harm as well.\r\n\r\nso I believe the *default* arguments should *not* do this extra logging, but still optionally allow this behaviour. \r\n\r\n### Pitch\r\n\r\nHave `aim.ext.tensorboard_tracker.run.Run` inherit from `aim.sdk.run.Run` instead of `aim.sdk.run.BasicRun`, so that it can utilise it's extra capabilities.\r\n\r\n### Alternatives\r\n\r\nInstead of inheritance we could change the system resource tracking be a mixin? 
\r\n\nExtend `aim.ext.tensorboard_tracker.run.Run` to also allow system stats, parameters, and stdout capture.\n## \ud83d\ude80 Feature\r\n\r\nAllow capturing of system parameters and terminal logs by the `aim.ext.tensorboard_tracker.run.Run`, as this is great feature shouldn't be only available to the default `Run`.\r\n\r\n### Motivation\r\n\r\nThe new feature of allowing continuous syncing from `tensorboard` files to `aim` is really nice, but because `aim.ext.tensorboard_tracker.run.Run` inherits from `BasicRun` rather than `Run`, it misses out on the ability to log the standard out, system stats and system parameters. Since `aim.ext.tensorboard_tracker.run.Run` should be a possible replacement for `Run`, I don't see a reason why this behaviour shouldn't be allowed.\r\n\r\nIt has been highlighted in Discord by @mihran113:\r\n\r\n> The reason behind inheriting from basic run is exactly to avoid terminal log tracking and system param tracking actually, cause we don\u2019t want to add anything else rather than what\u2019s tracked via tensorboard. Cause there can be times when live tracking is done from a different process, and catching that process\u2019s terminal logs and system params won\u2019t make any sense I guess. If you\u2019re interested you can open a PR to address those points, cause adding the possibility to enable those won\u2019t make any harm as well.\r\n\r\nso I believe the *default* arguments should *not* do this extra logging, but still optionally allow this behaviour. \r\n\r\n### Pitch\r\n\r\nHave `aim.ext.tensorboard_tracker.run.Run` inherit from `aim.sdk.run.Run` instead of `aim.sdk.run.BasicRun`, so that it can utilise it's extra capabilities.\r\n\r\n### Alternatives\r\n\r\nInstead of inheritance we could change the system resource tracking be a mixin? \r\n\n", "before_files": [{"content": "from typing import Optional, Union\n\nfrom aim.sdk.run import BasicRun\nfrom aim.ext.tensorboard_tracker.tracker import TensorboardTracker\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from aim.sdk.repo import Repo\n\n\nclass Run(BasicRun):\n def __init__(self, run_hash: Optional[str] = None, *,\n sync_tensorboard_log_dir: str,\n repo: Optional[Union[str, 'Repo']] = None,\n experiment: Optional[str] = None,\n force_resume: Optional[bool] = False,\n ):\n super().__init__(run_hash, repo=repo, read_only=False, experiment=experiment, force_resume=force_resume)\n self['tb_log_directory'] = sync_tensorboard_log_dir\n self._tensorboard_tracker = TensorboardTracker(self._tracker, sync_tensorboard_log_dir)\n self._tensorboard_tracker.start()\n self._resources.add_extra_resource(self._tensorboard_tracker)\n", "path": "aim/ext/tensorboard_tracker/run.py"}]} | 1,512 | 449 |
gh_patches_debug_42129 | rasdani/github-patches | git_diff | conan-io__conan-center-index-1204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] cgal/all: review options applied
Coming from https://github.com/conan-io/conan-center-index/pull/965#issuecomment-590802910

It seems that the recipe might require some work regarding the options and flags.
</issue>
<code>
[start of recipes/cgal/all/conanfile.py]
1 import os
2 from conans import ConanFile, CMake, tools
3
4
5 class CgalConan(ConanFile):
6 name = "cgal"
7 license = "LGPL-3.0-or-later"
8 url = "https://github.com/conan-io/conan-center-index"
9 homepage = "https://github.com/CGAL/cgal"
10 description = "C++ library that aims to provide easy access to efficient and reliable algorithms"\
11 "in computational geometry."
12 topics = ("geometry", "algorithms")
13 settings = "os", "compiler", "build_type", "arch"
14 requires = "mpir/3.0.0", "mpfr/4.0.2", "boost/1.72.0", "eigen/3.3.7"
15 generators = "cmake"
16
17 _source_subfolder = "source_subfolder"
18 _cmake = None
19
20 options = {
21 "with_cgal_core": [True, False],
22 "with_cgal_qt5": [True, False],
23 "with_cgal_imageio": [True, False]
24 }
25
26 default_options = {
27 "with_cgal_core": True,
28 "with_cgal_qt5": False,
29 "with_cgal_imageio": True
30 }
31
32 def _configure_cmake(self):
33 if not self._cmake:
34 self._cmake = CMake(self)
35 self._cmake.definitions["WITH_CGAL_Core"] = self.options.with_cgal_core
36 self._cmake.definitions["WITH_CGAL_Qt5"] = self.options.with_cgal_qt5
37 self._cmake.definitions["WITH_CGAL_ImageIO"] = self.options.with_cgal_imageio
38 self._cmake.configure(source_folder=self._source_subfolder)
39 return self._cmake
40
41 def _patch_sources(self):
42 tools.replace_in_file(
43 os.path.join(self._source_subfolder, "CMakeLists.txt"),
44 "project(CGAL CXX C)", '''project(CGAL CXX C)
45 include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
46 conan_basic_setup()''')
47
48 def source(self):
49 tools.get(**self.conan_data["sources"][self.version])
50 extracted_dir = "CGAL-{}".format(self.version)
51 os.rename(extracted_dir, self._source_subfolder)
52
53 def build(self):
54 self._patch_sources()
55 cmake = self._configure_cmake()
56 cmake.build()
57
58 def package(self):
59 self.copy("LICENSE*", dst="licenses", src=self._source_subfolder)
60 cmake = self._configure_cmake()
61 cmake.install()
62 tools.rmdir(os.path.join(self.package_folder, "share"))
63 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
64 tools.rmdir(os.path.join(self.package_folder, "bin"))
65
66 def package_info(self):
67 self.cpp_info.names["cmake_find_package"] = "CGAL"
68 self.cpp_info.names["cmake_find_package_multi"] = "CGAL"
69
70 def package_id(self):
71 self.info.header_only()
72
[end of recipes/cgal/all/conanfile.py]
</code>
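A hedged sketch of the kind of rework hinted at in the linked PR discussion is below: adding `shared`/`header_only` options and rejecting configurations the recipe cannot build. The option names and the Qt5 guard are assumptions, not the official recipe.

```python
# Hedged sketch of an extended options block for a CGAL-style recipe.
from conans import ConanFile
from conans.errors import ConanInvalidConfiguration


class CgalConanSketch(ConanFile):
    name = "cgal-sketch"
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "with_cgal_core": [True, False],
        "with_cgal_qt5": [True, False],
        "with_cgal_imageio": [True, False],
        "shared": [True, False],       # assumed new option
        "header_only": [True, False],  # assumed new option
    }
    default_options = {
        "with_cgal_core": True,
        "with_cgal_qt5": False,
        "with_cgal_imageio": True,
        "shared": False,
        "header_only": True,
    }

    def configure(self):
        # A header-only build produces no binaries, so "shared" is meaningless.
        if self.options.header_only:
            del self.options.shared
        # Fail early instead of producing a broken package.
        if self.options.with_cgal_qt5:
            raise ConanInvalidConfiguration("Qt5 support is not wired up in this sketch.")
```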
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/cgal/all/conanfile.py b/recipes/cgal/all/conanfile.py
--- a/recipes/cgal/all/conanfile.py
+++ b/recipes/cgal/all/conanfile.py
@@ -1,5 +1,6 @@
import os
from conans import ConanFile, CMake, tools
+from conans.errors import ConanInvalidConfiguration
class CgalConan(ConanFile):
@@ -13,20 +14,26 @@
settings = "os", "compiler", "build_type", "arch"
requires = "mpir/3.0.0", "mpfr/4.0.2", "boost/1.72.0", "eigen/3.3.7"
generators = "cmake"
+ exports_sources = "CMakeLists.txt"
_source_subfolder = "source_subfolder"
+ _build_subfolder = "build_subfolder"
_cmake = None
options = {
"with_cgal_core": [True, False],
"with_cgal_qt5": [True, False],
- "with_cgal_imageio": [True, False]
+ "with_cgal_imageio": [True, False],
+ "shared": [True, False],
+ "header_only": [True, False]
}
default_options = {
"with_cgal_core": True,
"with_cgal_qt5": False,
- "with_cgal_imageio": True
+ "with_cgal_imageio": True,
+ "shared": False,
+ "header_only": True
}
def _configure_cmake(self):
@@ -35,15 +42,19 @@
self._cmake.definitions["WITH_CGAL_Core"] = self.options.with_cgal_core
self._cmake.definitions["WITH_CGAL_Qt5"] = self.options.with_cgal_qt5
self._cmake.definitions["WITH_CGAL_ImageIO"] = self.options.with_cgal_imageio
- self._cmake.configure(source_folder=self._source_subfolder)
+ self._cmake.definitions["CGAL_HEADER_ONLY"] = self.options.header_only
+ self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
- tools.replace_in_file(
- os.path.join(self._source_subfolder, "CMakeLists.txt"),
- "project(CGAL CXX C)", '''project(CGAL CXX C)
-include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
-conan_basic_setup()''')
+ tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
+ "CMAKE_SOURCE_DIR", "CMAKE_CURRENT_SOURCE_DIR")
+
+ def configure(self):
+ if self.options.with_cgal_qt5:
+ raise ConanInvalidConfiguration("Qt Conan package is not available yet.")
+ if self.options.header_only:
+ del self.options.shared
def source(self):
tools.get(**self.conan_data["sources"][self.version])
@@ -61,11 +72,20 @@
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
- tools.rmdir(os.path.join(self.package_folder, "bin"))
+ if self.options.get_safe("shared"):
+ for root, _, filenames in os.walk(os.path.join(self.package_folder, "bin")):
+ for filename in filenames:
+ if not filename.endswith(".dll"):
+ os.unlink(os.path.join(root, filename))
+ else:
+ tools.rmdir(os.path.join(self.package_folder, "bin"))
def package_info(self):
+ if not self.options.header_only:
+ self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["cmake_find_package"] = "CGAL"
self.cpp_info.names["cmake_find_package_multi"] = "CGAL"
def package_id(self):
- self.info.header_only()
+ if self.options.header_only:
+ self.info.header_only()
| {"golden_diff": "diff --git a/recipes/cgal/all/conanfile.py b/recipes/cgal/all/conanfile.py\n--- a/recipes/cgal/all/conanfile.py\n+++ b/recipes/cgal/all/conanfile.py\n@@ -1,5 +1,6 @@\n import os\n from conans import ConanFile, CMake, tools\n+from conans.errors import ConanInvalidConfiguration\n \n \n class CgalConan(ConanFile):\n@@ -13,20 +14,26 @@\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n requires = \"mpir/3.0.0\", \"mpfr/4.0.2\", \"boost/1.72.0\", \"eigen/3.3.7\"\n generators = \"cmake\"\n+ exports_sources = \"CMakeLists.txt\"\n \n _source_subfolder = \"source_subfolder\"\n+ _build_subfolder = \"build_subfolder\"\n _cmake = None\n \n options = {\n \"with_cgal_core\": [True, False],\n \"with_cgal_qt5\": [True, False],\n- \"with_cgal_imageio\": [True, False]\n+ \"with_cgal_imageio\": [True, False],\n+ \"shared\": [True, False],\n+ \"header_only\": [True, False]\n }\n \n default_options = {\n \"with_cgal_core\": True,\n \"with_cgal_qt5\": False,\n- \"with_cgal_imageio\": True\n+ \"with_cgal_imageio\": True,\n+ \"shared\": False,\n+ \"header_only\": True\n }\n \n def _configure_cmake(self):\n@@ -35,15 +42,19 @@\n self._cmake.definitions[\"WITH_CGAL_Core\"] = self.options.with_cgal_core\n self._cmake.definitions[\"WITH_CGAL_Qt5\"] = self.options.with_cgal_qt5\n self._cmake.definitions[\"WITH_CGAL_ImageIO\"] = self.options.with_cgal_imageio\n- self._cmake.configure(source_folder=self._source_subfolder)\n+ self._cmake.definitions[\"CGAL_HEADER_ONLY\"] = self.options.header_only\n+ self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n \n def _patch_sources(self):\n- tools.replace_in_file(\n- os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n- \"project(CGAL CXX C)\", '''project(CGAL CXX C)\n-include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n-conan_basic_setup()''')\n+ tools.replace_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n+ \"CMAKE_SOURCE_DIR\", \"CMAKE_CURRENT_SOURCE_DIR\")\n+\n+ def configure(self):\n+ if self.options.with_cgal_qt5:\n+ raise ConanInvalidConfiguration(\"Qt Conan package is not available yet.\")\n+ if self.options.header_only:\n+ del self.options.shared\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n@@ -61,11 +72,20 @@\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n- tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n+ if self.options.get_safe(\"shared\"):\n+ for root, _, filenames in os.walk(os.path.join(self.package_folder, \"bin\")):\n+ for filename in filenames:\n+ if not filename.endswith(\".dll\"):\n+ os.unlink(os.path.join(root, filename))\n+ else:\n+ tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n \n def package_info(self):\n+ if not self.options.header_only:\n+ self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.names[\"cmake_find_package\"] = \"CGAL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"CGAL\"\n \n def package_id(self):\n- self.info.header_only()\n+ if self.options.header_only:\n+ self.info.header_only()\n", "issue": "[package] cgal/all: review options applied\nComming from https://github.com/conan-io/conan-center-index/pull/965#issuecomment-590802910\r\n\r\nSeems that the recipe might require some work regarding the options and flags\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, CMake, tools\n\n\nclass CgalConan(ConanFile):\n name = \"cgal\"\n license = \"LGPL-3.0-or-later\"\n 
url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/CGAL/cgal\"\n description = \"C++ library that aims to provide easy access to efficient and reliable algorithms\"\\\n \"in computational geometry.\"\n topics = (\"geometry\", \"algorithms\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n requires = \"mpir/3.0.0\", \"mpfr/4.0.2\", \"boost/1.72.0\", \"eigen/3.3.7\"\n generators = \"cmake\"\n\n _source_subfolder = \"source_subfolder\"\n _cmake = None\n\n options = {\n \"with_cgal_core\": [True, False],\n \"with_cgal_qt5\": [True, False],\n \"with_cgal_imageio\": [True, False]\n }\n\n default_options = {\n \"with_cgal_core\": True,\n \"with_cgal_qt5\": False,\n \"with_cgal_imageio\": True\n }\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self)\n self._cmake.definitions[\"WITH_CGAL_Core\"] = self.options.with_cgal_core\n self._cmake.definitions[\"WITH_CGAL_Qt5\"] = self.options.with_cgal_qt5\n self._cmake.definitions[\"WITH_CGAL_ImageIO\"] = self.options.with_cgal_imageio\n self._cmake.configure(source_folder=self._source_subfolder)\n return self._cmake\n\n def _patch_sources(self):\n tools.replace_in_file(\n os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n \"project(CGAL CXX C)\", '''project(CGAL CXX C)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()''')\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"CGAL-{}\".format(self.version)\n os.rename(extracted_dir, self._source_subfolder)\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"CGAL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"CGAL\"\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/cgal/all/conanfile.py"}]} | 1,413 | 923 |
gh_patches_debug_63273 | rasdani/github-patches | git_diff | weecology__retriever-400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't download and extract Gentry dataset
If trying to download "Gentry Forest Transect Dataset" the retriever seems to download the data, but gets stuck when it comes in extracting AVALANCH.xls
Moreover force quit seems the only way to close the program.
OS: OS X El Capitan Version 10.11.3 (15D21)
Machine: Macbook Pro Early 2015 13"
</issue>
<code>
[start of app/download_manager.py]
1 """This class manages dataset downloads concurrently and processes progress
2 output."""
3
4 import wx
5 from retriever.lib.download import DownloadThread
6
7
8 class DownloadManager:
9 def __init__(self, parent):
10 self.dialog = None
11 self.worker = None
12 self.queue = []
13 self.downloaded = set()
14 self.errors = set()
15 self.warnings = set()
16 self.Parent = parent
17 self.timer = wx.Timer(parent, -1)
18 self.timer.interval = 10
19 parent.Bind(wx.EVT_TIMER, self.update, self.timer)
20
21 def Download(self, script):
22 if not script in self.queue and not (self.worker and self.worker.script == script):
23 self.queue.append(script)
24 self.downloaded.add(script)
25 if script in self.errors:
26 self.errors.remove(script)
27 self.warnings.remove(script)
28 self.Parent.script_list.RefreshMe(None)
29 if not self.timer.IsRunning() and not self.worker and len(self.queue) < 2:
30 self.timer.Start(self.timer.interval)
31 return True
32 return False
33
34 def update(self, evt):
35 self.timer.Stop()
36 terminate = False
37 if self.worker:
38 script = self.worker.script
39 if self.worker.finished() and len(self.worker.output) == 0:
40 if hasattr(script, 'warnings') and script.warnings:
41 self.warnings.add(script)
42 self.Parent.SetStatusText('\n'.join(str(w) for w in script.warnings))
43 else:
44 self.Parent.SetStatusText("")
45 self.worker = None
46 self.Parent.script_list.RefreshMe(None)
47 self.timer.Start(self.timer.interval)
48 else:
49 self.worker.output_lock.acquire()
50 while len(self.worker.output) > 0 and not terminate:
51 if "Error:" in self.worker.output[0] and script in self.downloaded:
52 self.downloaded.remove(script)
53 self.errors.add(script)
54 if self.write(self.worker) == False:
55 terminate = True
56 self.worker.output = self.worker.output[1:]
57 #self.gauge.SetValue(100 * ((self.worker.scriptnum) /
58 # (self.worker.progress_max + 1.0)))
59 self.worker.output_lock.release()
60 if terminate:
61 self.Parent.Quit(None)
62 else:
63 self.timer.Start(self.timer.interval)
64 elif self.queue:
65 script = self.queue[0]
66 self.queue = self.queue[1:]
67 self.worker = DownloadThread(self.Parent.engine, script)
68 self.worker.parent = self
69 self.worker.start()
70 self.timer.Start(10)
71
72 def flush(self):
73 pass
74
75 def write(self, worker):
76 s = worker.output[0]
77
78 if '\b' in s:
79 s = s.replace('\b', '')
80 if not self.dialog:
81 wx.GetApp().Yield()
82 self.dialog = wx.ProgressDialog("Download Progress",
83 "Downloading datasets . . .\n"
84 + " " * len(s),
85 maximum=1000,
86 parent=None,
87 style=wx.PD_SMOOTH
88 | wx.DIALOG_NO_PARENT
89 | wx.PD_CAN_ABORT
90 | wx.PD_AUTO_HIDE
91 | wx.PD_REMAINING_TIME
92 )
93 def progress(s):
94 if ' / ' in s:
95 s = s.split(' / ')
96 total = float(s[1])
97 current = float(s[0].split(': ')[1])
98 progress = int((current / total) * 1000)
99 return (progress if progress > 1 else 1)
100 else:
101 return None
102
103 current_progress = progress(s)
104 if current_progress:
105 (keepgoing, skip) = self.dialog.Update(current_progress, s)
106 else:
107 (keepgoing, skip) = self.dialog.Pulse(s)
108
109 if not keepgoing:
110 return False
111 else:
112 if self.dialog:
113 self.dialog.Update(1000, "")
114 self.dialog.Destroy()
115 self.dialog = None
116
117 if '...' in s:
118 self.Parent.SetStatusText(s)
119 else:
120 self.Parent.script_list.SetStatus(worker.script.name, s)
121
122 wx.GetApp().Yield()
123 return True
124
[end of app/download_manager.py]
</code>
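The stall described in the issue lines up with `write()` calling `wx.ProgressDialog.Update()` (remaining-time estimation included) for every output line. As a general, wx-free illustration rather than the project's actual change, one option is to throttle the expensive updates and otherwise just pulse; all names below are invented for the sketch.

```python
# Illustrative throttle: forward at most one detailed update per interval,
# otherwise fall back to a cheap pulse, so bursts of output cannot stall the UI.
import time


class ThrottledProgress:
    def __init__(self, update, pulse, interval=0.5):
        self._update = update      # expensive call, e.g. dialog.Update(value, msg)
        self._pulse = pulse        # cheap call, e.g. dialog.Pulse(msg)
        self._interval = interval
        self._last = 0.0

    def report(self, value, message):
        now = time.monotonic()
        if value is not None and now - self._last >= self._interval:
            self._last = now
            return self._update(value, message)
        return self._pulse(message)


if __name__ == "__main__":
    throttled = ThrottledProgress(
        update=lambda v, m: print(f"update {v}: {m}") or (True, False),
        pulse=lambda m: print(f"pulse: {m}") or (True, False),
    )
    for i in range(5):
        throttled.report(i * 200, f"chunk {i}")
```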
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/download_manager.py b/app/download_manager.py
--- a/app/download_manager.py
+++ b/app/download_manager.py
@@ -102,8 +102,9 @@
current_progress = progress(s)
if current_progress:
- (keepgoing, skip) = self.dialog.Update(current_progress, s)
- else:
+ # download progress remaining-time disabled. causes bottle neck on Gentry ref: #396.
+ # (keepgoing, skip) = self.dialog.Update(current_progress, s)
+ # else:
(keepgoing, skip) = self.dialog.Pulse(s)
if not keepgoing:
| {"golden_diff": "diff --git a/app/download_manager.py b/app/download_manager.py\n--- a/app/download_manager.py\n+++ b/app/download_manager.py\n@@ -102,8 +102,9 @@\n \n current_progress = progress(s)\n if current_progress:\n- (keepgoing, skip) = self.dialog.Update(current_progress, s)\n- else:\n+ # download progress remaining-time disabled. causes bottle neck on Gentry ref: #396.\n+ # (keepgoing, skip) = self.dialog.Update(current_progress, s)\n+ # else:\n (keepgoing, skip) = self.dialog.Pulse(s)\n \n if not keepgoing:\n", "issue": "Can't download and extract Gentry dataset\nIf trying to download \"Gentry Forest Transect Dataset\" the retriever seems to download the data, but gets stuck when it comes in extracting AVALANCH.xls\nMoreover force quit seems the only way to close the program. \nOS: OS X El Capitan Version 10.11.3 (15D21)\nMachine: Macbook Pro Early 2015 13\"\n\n", "before_files": [{"content": "\"\"\"This class manages dataset downloads concurrently and processes progress\noutput.\"\"\"\n\nimport wx\nfrom retriever.lib.download import DownloadThread\n\n\nclass DownloadManager:\n def __init__(self, parent):\n self.dialog = None\n self.worker = None\n self.queue = []\n self.downloaded = set()\n self.errors = set()\n self.warnings = set()\n self.Parent = parent\n self.timer = wx.Timer(parent, -1)\n self.timer.interval = 10\n parent.Bind(wx.EVT_TIMER, self.update, self.timer)\n\n def Download(self, script):\n if not script in self.queue and not (self.worker and self.worker.script == script):\n self.queue.append(script)\n self.downloaded.add(script)\n if script in self.errors:\n self.errors.remove(script)\n self.warnings.remove(script)\n self.Parent.script_list.RefreshMe(None)\n if not self.timer.IsRunning() and not self.worker and len(self.queue) < 2:\n self.timer.Start(self.timer.interval)\n return True\n return False\n\n def update(self, evt):\n self.timer.Stop()\n terminate = False\n if self.worker:\n script = self.worker.script\n if self.worker.finished() and len(self.worker.output) == 0:\n if hasattr(script, 'warnings') and script.warnings:\n self.warnings.add(script)\n self.Parent.SetStatusText('\\n'.join(str(w) for w in script.warnings))\n else:\n self.Parent.SetStatusText(\"\")\n self.worker = None\n self.Parent.script_list.RefreshMe(None)\n self.timer.Start(self.timer.interval)\n else:\n self.worker.output_lock.acquire()\n while len(self.worker.output) > 0 and not terminate:\n if \"Error:\" in self.worker.output[0] and script in self.downloaded:\n self.downloaded.remove(script)\n self.errors.add(script)\n if self.write(self.worker) == False:\n terminate = True\n self.worker.output = self.worker.output[1:]\n #self.gauge.SetValue(100 * ((self.worker.scriptnum) /\n # (self.worker.progress_max + 1.0)))\n self.worker.output_lock.release()\n if terminate:\n self.Parent.Quit(None)\n else:\n self.timer.Start(self.timer.interval)\n elif self.queue:\n script = self.queue[0]\n self.queue = self.queue[1:]\n self.worker = DownloadThread(self.Parent.engine, script)\n self.worker.parent = self\n self.worker.start()\n self.timer.Start(10)\n\n def flush(self):\n pass\n\n def write(self, worker):\n s = worker.output[0]\n\n if '\\b' in s:\n s = s.replace('\\b', '')\n if not self.dialog:\n wx.GetApp().Yield()\n self.dialog = wx.ProgressDialog(\"Download Progress\",\n \"Downloading datasets . . 
.\\n\"\n + \" \" * len(s),\n maximum=1000,\n parent=None,\n style=wx.PD_SMOOTH\n | wx.DIALOG_NO_PARENT\n | wx.PD_CAN_ABORT\n | wx.PD_AUTO_HIDE\n | wx.PD_REMAINING_TIME\n )\n def progress(s):\n if ' / ' in s:\n s = s.split(' / ')\n total = float(s[1])\n current = float(s[0].split(': ')[1])\n progress = int((current / total) * 1000)\n return (progress if progress > 1 else 1)\n else:\n return None\n\n current_progress = progress(s)\n if current_progress:\n (keepgoing, skip) = self.dialog.Update(current_progress, s)\n else:\n (keepgoing, skip) = self.dialog.Pulse(s)\n\n if not keepgoing:\n return False\n else:\n if self.dialog:\n self.dialog.Update(1000, \"\")\n self.dialog.Destroy()\n self.dialog = None\n\n if '...' in s:\n self.Parent.SetStatusText(s)\n else:\n self.Parent.script_list.SetStatus(worker.script.name, s)\n\n wx.GetApp().Yield()\n return True\n", "path": "app/download_manager.py"}]} | 1,783 | 144 |
gh_patches_debug_137 | rasdani/github-patches | git_diff | google__flax-3089 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incompatibility with Flax Official ImageNet example with jax version >= 0.4.7
Hi,
I was testing the [official flax example](https://github.com/google/flax/tree/main/examples/imagenet/) on Colab with jax and jaxlib versions >= 0.4.7, in the Colab Pro+ environment with a V100. After installing the requirements with `pip install -r requirements.txt` and running `python main.py --workdir=./imagenet --config=configs/v100_x8.py`, the error is:
```
File "/content/FlaxImageNet/main.py", line 29, in <module>
import train
File "/content/FlaxImageNet/train.py", line 30, in <module>
from flax.training import checkpoints
File "/usr/local/lib/python3.10/dist-packages/flax/training/checkpoints.py", line 34,
in <module>
from jax.experimental.global_device_array import GlobalDeviceArray
ModuleNotFoundError: No module named 'jax.experimental.global_device_array'
```
According to [this StackOverflow answer](https://stackoverflow.com/questions/76191911/no-module-named-jax-experimental-global-device-array-when-running-the-official/76192120#76192120), it seems that 'jax.experimental.global_device_array' has been removed.

Therefore, it would be great if the official example could be fixed so that it works on newer versions of jax.
Unable to import checkpoints
Provide as much information as possible. At least, this should include a description of your issue and steps to reproduce the problem. If possible also provide a summary of what steps or workarounds you have already tried.
### System information
- Flax, jax, jaxlib versions (obtained with `pip show flax jax jaxlib`): all at their latest, also orbax
Name: flax
Version: 0.6.9
Summary: Flax: A neural network library for JAX designed for flexibility
Home-page:
Author:
Author-email: Flax team <[email protected]>
License:
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: jax, msgpack, numpy, optax, orbax-checkpoint, PyYAML, rich, tensorstore, typing-extensions
Required-by:
---
Name: jax
Version: 0.4.8
Summary: Differentiate, compile, and transform Numpy code.
Home-page: https://github.com/google/jax
Author: JAX team
Author-email: [email protected]
License: Apache-2.0
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: ml-dtypes, numpy, opt-einsum, scipy
Required-by: chex, diffrax, equinox, flax, optax, orbax, orbax-checkpoint, richmol
---
Name: jaxlib
Version: 0.4.7
Summary: XLA library for JAX
Home-page: https://github.com/google/jax
Author: JAX team
Author-email: [email protected]
License: Apache-2.0
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: ml-dtypes, numpy, scipy
Required-by: chex, optax, orbax, orbax-checkpoint
---
Name: orbax
Version: 0.1.7
Summary: Orbax
Home-page:
Author:
Author-email: Orbax Authors <[email protected]>
License:
Location: /home/fernanda/.local/lib/python3.8/site-packages
Requires: absl-py, cached_property, etils, importlib_resources, jax, jaxlib, msgpack, nest_asyncio, numpy, pyyaml, tensorstore, typing_extensions
- Python version: 3.8
### Problem you have encountered:
When importing checkpoints, I get the following error:
"""
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-1-0eac7b685376> in <module>
11 config.update("jax_enable_x64", True)
12 from flax import serialization
---> 13 from flax.training import checkpoints
14 from jax import numpy as jnp
15 import jax
/gpfs/cfel/group/cmi/common/psi4/psi4conda/lib//python3.8/site-packages/flax/training/checkpoints.py in <module>
37 from jax import process_index
38 from jax import sharding
---> 39 from jax.experimental.global_device_array import GlobalDeviceArray
40 from jax.experimental.multihost_utils import sync_global_devices
41 import orbax.checkpoint as orbax
ModuleNotFoundError: No module named 'jax.experimental.global_device_array'
"""
I guess it is a compatibility problem between jax and flax.
### What you expected to happen:
Usual importing
</issue>
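Editorial note, not part of the issue: the traceback bottoms out in flax 0.6.9's `flax.training.checkpoints` importing `jax.experimental.global_device_array`, which newer jax releases no longer ship. A small sketch that surfaces the mismatch with a clearer message (upgrading flax or pinning an older jax/jaxlib are the usual ways out):

```python
# Hedged sketch: reproduce the failing import up front with a clearer error.
def check_flax_jax_compat() -> None:
    try:
        from flax.training import checkpoints  # noqa: F401
    except ModuleNotFoundError as exc:
        # The issue's traceback ends in jax.experimental.global_device_array.
        if "global_device_array" in str(exc):
            raise RuntimeError(
                "this flax release still imports jax.experimental."
                "global_device_array, which newer jax releases removed; "
                "upgrade flax or pin an older jax/jaxlib"
            ) from exc
        raise


if __name__ == "__main__":
    check_flax_jax_compat()
    print("flax.training.checkpoints imported fine")
```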
<code>
[start of flax/version.py]
1 # Copyright 2023 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Current Flax version at head on Github."""
16 __version__ = "0.6.9"
17
18
[end of flax/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/version.py b/flax/version.py
--- a/flax/version.py
+++ b/flax/version.py
@@ -13,5 +13,5 @@
# limitations under the License.
"""Current Flax version at head on Github."""
-__version__ = "0.6.9"
+__version__ = "0.6.10"
| {"golden_diff": "diff --git a/flax/version.py b/flax/version.py\n--- a/flax/version.py\n+++ b/flax/version.py\n@@ -13,5 +13,5 @@\n # limitations under the License.\n \n \"\"\"Current Flax version at head on Github.\"\"\"\n-__version__ = \"0.6.9\"\n+__version__ = \"0.6.10\"\n", "issue": "Imcompatibility with Flax Official ImageNet example with jax version >= 0.4.7\nHi, \r\n\r\nI was testing the [official flax example](https://github.com/google/flax/tree/main/examples/imagenet/) on Colab with jax and jaxlib version >= 0.4.7 on the colab pro+ environment with V100. After installing the requirements with `pip install -r requirements.txt` and with the following command `python main.py --workdir=./imagenet --config=configs/v100_x8.py`, the error is \r\n\r\n```\r\nFile \"/content/FlaxImageNet/main.py\", line 29, in <module>\r\nimport train\r\nFile \"/content/FlaxImageNet/train.py\", line 30, in <module>\r\nfrom flax.training import checkpoints\r\nFile \"/usr/local/lib/python3.10/dist-packages/flax/training/checkpoints.py\", line 34, \r\nin <module>\r\nfrom jax.experimental.global_device_array import GlobalDeviceArray\r\nModuleNotFoundError: No module named 'jax.experimental.global_device_array'\r\n```\r\n\r\nAccording to [this StackOverflow answer](https://stackoverflow.com/questions/76191911/no-module-named-jax-experimental-global-device-array-when-running-the-official/76192120#76192120), it seems that 'jax.experimental.global_device_array' is removed. \r\n\r\nTherefore, it would be great if one can fix the official example so that it works on newer version of jax. \nUnavailable to import checkpoints\nProvide as much information as possible. At least, this should include a description of your issue and steps to reproduce the problem. If possible also provide a summary of what steps or workarounds you have already tried.\r\n\r\n### System information\r\n- Flax, jax, jaxlib versions (obtain with `pip show flax jax jaxlib`: All to its latest, also orbitax\r\n\r\nName: flax\r\nVersion: 0.6.9\r\nSummary: Flax: A neural network library for JAX designed for flexibility\r\nHome-page: \r\nAuthor: \r\nAuthor-email: Flax team <[email protected]>\r\nLicense: \r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: jax, msgpack, numpy, optax, orbax-checkpoint, PyYAML, rich, tensorstore, typing-extensions\r\nRequired-by: \r\n---\r\nName: jax\r\nVersion: 0.4.8\r\nSummary: Differentiate, compile, and transform Numpy code.\r\nHome-page: https://github.com/google/jax\r\nAuthor: JAX team\r\nAuthor-email: [email protected]\r\nLicense: Apache-2.0\r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: ml-dtypes, numpy, opt-einsum, scipy\r\nRequired-by: chex, diffrax, equinox, flax, optax, orbax, orbax-checkpoint, richmol\r\n---\r\nName: jaxlib\r\nVersion: 0.4.7\r\nSummary: XLA library for JAX\r\nHome-page: https://github.com/google/jax\r\nAuthor: JAX team\r\nAuthor-email: [email protected]\r\nLicense: Apache-2.0\r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: ml-dtypes, numpy, scipy\r\nRequired-by: chex, optax, orbax, orbax-checkpoint\r\n---\r\nName: orbax\r\nVersion: 0.1.7\r\nSummary: Orbax\r\nHome-page: \r\nAuthor: \r\nAuthor-email: Orbax Authors <[email protected]>\r\nLicense: \r\nLocation: /home/fernanda/.local/lib/python3.8/site-packages\r\nRequires: absl-py, cached_property, etils, importlib_resources, jax, jaxlib, msgpack, nest_asyncio, numpy, pyyaml, tensorstore, typing_extensions\r\n\r\n- Python version: 3.8\r\n\r\n\r\n### Problem you 
have encountered:\r\nWhen importing checkpoints, get the following error:\r\n \"\"\" \r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-1-0eac7b685376> in <module>\r\n 11 config.update(\"jax_enable_x64\", True)\r\n 12 from flax import serialization\r\n---> 13 from flax.training import checkpoints\r\n 14 from jax import numpy as jnp\r\n 15 import jax\r\n\r\n/gpfs/cfel/group/cmi/common/psi4/psi4conda/lib//python3.8/site-packages/flax/training/checkpoints.py in <module>\r\n 37 from jax import process_index\r\n 38 from jax import sharding\r\n---> 39 from jax.experimental.global_device_array import GlobalDeviceArray\r\n 40 from jax.experimental.multihost_utils import sync_global_devices\r\n 41 import orbax.checkpoint as orbax\r\n\r\nModuleNotFoundError: No module named 'jax.experimental.global_device_array'\r\n\r\n\"\"\"\r\n\r\nI guess it is a compatibility problem between jax and flax.\r\n\r\n### What you expected to happen:\r\n\r\nUsual importing\r\n\r\n\n", "before_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Current Flax version at head on Github.\"\"\"\n__version__ = \"0.6.9\"\n\n", "path": "flax/version.py"}]} | 1,834 | 83 |
gh_patches_debug_21264 | rasdani/github-patches | git_diff | inventree__InvenTree-6250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
News Feed task doesn't work behind proxy, impacting performance
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find a similar issue
### Describe the bug*
The `update_news_feed` task attempts to fetch the RSS/Atom feed once daily. This, however, doesn't work behind a proxy server.
The result is that these tasks occupy workers all the time, and never complete.
Each worker is terminated roughly every 90 seconds due to this.
### Steps to Reproduce
1. Put the InvenTree backend on a network unable to reach `INVENTREE_NEWS_URL`
2. Trigger the task
3. Task will lead to continuous timeout termination of workers
### Expected behaviour
The task should finish, with no new News entries added, if the URL is unreachable.
### Deployment Method
- [ ] Docker
- [X] Bare metal
### Version Information
0.12.10
### Please verify if you can reproduce this bug on the demo site.
- [ ] I can reproduce this bug on the demo site.
### Relevant log output
_No response_
</issue>
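Editorial sketch, separate from the issue text: one way to keep a blocked feed fetch from pinning a worker is to download the feed explicitly with a bounded timeout (requests also honours `HTTP_PROXY`/`HTTPS_PROXY`) and hand the body to feedparser. The 30-second timeout below is an arbitrary assumption.

```python
# Hedged sketch: fetch the feed with an explicit timeout, then parse the body.
import feedparser
import requests


def fetch_news_entries(url: str, timeout: float = 30.0):
    """Return parsed feed entries, or an empty list if the feed is unreachable."""
    if not url:
        return []
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        # Unreachable URL: give up quietly instead of blocking the worker.
        return []
    return feedparser.parse(response.content).entries
```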
<code>
[start of InvenTree/common/tasks.py]
1 """Tasks (processes that get offloaded) for common app."""
2
3 import logging
4 import os
5 from datetime import datetime, timedelta
6
7 from django.conf import settings
8 from django.core.exceptions import AppRegistryNotReady
9 from django.db.utils import IntegrityError, OperationalError
10 from django.utils import timezone
11
12 import feedparser
13
14 from InvenTree.helpers_model import getModelsWithMixin
15 from InvenTree.models import InvenTreeNotesMixin
16 from InvenTree.tasks import ScheduledTask, scheduled_task
17
18 logger = logging.getLogger('inventree')
19
20
21 @scheduled_task(ScheduledTask.DAILY)
22 def delete_old_notifications():
23 """Remove old notifications from the database.
24
25 Anything older than ~3 months is removed
26 """
27 try:
28 from common.models import NotificationEntry
29 except AppRegistryNotReady: # pragma: no cover
30 logger.info(
31 "Could not perform 'delete_old_notifications' - App registry not ready"
32 )
33 return
34
35 before = timezone.now() - timedelta(days=90)
36
37 # Delete notification records before the specified date
38 NotificationEntry.objects.filter(updated__lte=before).delete()
39
40
41 @scheduled_task(ScheduledTask.DAILY)
42 def update_news_feed():
43 """Update the newsfeed."""
44 try:
45 from common.models import NewsFeedEntry
46 except AppRegistryNotReady: # pragma: no cover
47 logger.info("Could not perform 'update_news_feed' - App registry not ready")
48 return
49
50 # Fetch and parse feed
51 try:
52 d = feedparser.parse(settings.INVENTREE_NEWS_URL)
53 except Exception as entry: # pragma: no cover
54 logger.warning('update_news_feed: Error parsing the newsfeed', entry)
55 return
56
57 # Get a reference list
58 id_list = [a.feed_id for a in NewsFeedEntry.objects.all()]
59
60 # Iterate over entries
61 for entry in d.entries:
62 # Check if id already exists
63 if entry.id in id_list:
64 continue
65
66 # Create entry
67 try:
68 NewsFeedEntry.objects.create(
69 feed_id=entry.id,
70 title=entry.title,
71 link=entry.link,
72 published=entry.published,
73 author=entry.author,
74 summary=entry.summary,
75 )
76 except (IntegrityError, OperationalError):
77 # Sometimes errors-out on database start-up
78 pass
79
80 logger.info('update_news_feed: Sync done')
81
82
83 @scheduled_task(ScheduledTask.DAILY)
84 def delete_old_notes_images():
85 """Remove old notes images from the database.
86
87 Anything older than ~3 months is removed, unless it is linked to a note
88 """
89 try:
90 from common.models import NotesImage
91 except AppRegistryNotReady:
92 logger.info(
93 "Could not perform 'delete_old_notes_images' - App registry not ready"
94 )
95 return
96
97 # Remove any notes which point to non-existent image files
98 for note in NotesImage.objects.all():
99 if not os.path.exists(note.image.path):
100 logger.info('Deleting note %s - image file does not exist', note.image.path)
101 note.delete()
102
103 note_classes = getModelsWithMixin(InvenTreeNotesMixin)
104 before = datetime.now() - timedelta(days=90)
105
106 for note in NotesImage.objects.filter(date__lte=before):
107 # Find any images which are no longer referenced by a note
108
109 found = False
110
111 img = note.image.name
112
113 for model in note_classes:
114 if model.objects.filter(notes__icontains=img).exists():
115 found = True
116 break
117
118 if not found:
119 logger.info('Deleting note %s - image file not linked to a note', img)
120 note.delete()
121
122 # Finally, remove any images in the notes dir which are not linked to a note
123 notes_dir = os.path.join(settings.MEDIA_ROOT, 'notes')
124
125 try:
126 images = os.listdir(notes_dir)
127 except FileNotFoundError:
128 # Thrown if the directory does not exist
129 images = []
130
131 all_notes = NotesImage.objects.all()
132
133 for image in images:
134 found = False
135 for note in all_notes:
136 img_path = os.path.basename(note.image.path)
137 if img_path == image:
138 found = True
139 break
140
141 if not found:
142 logger.info('Deleting note %s - image file not linked to a note', image)
143 os.remove(os.path.join(notes_dir, image))
144
[end of InvenTree/common/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/InvenTree/common/tasks.py b/InvenTree/common/tasks.py
--- a/InvenTree/common/tasks.py
+++ b/InvenTree/common/tasks.py
@@ -10,6 +10,7 @@
from django.utils import timezone
import feedparser
+import requests
from InvenTree.helpers_model import getModelsWithMixin
from InvenTree.models import InvenTreeNotesMixin
@@ -47,11 +48,16 @@
logger.info("Could not perform 'update_news_feed' - App registry not ready")
return
+ # News feed isn't defined, no need to continue
+ if not settings.INVENTREE_NEWS_URL or type(settings.INVENTREE_NEWS_URL) != str:
+ return
+
# Fetch and parse feed
try:
- d = feedparser.parse(settings.INVENTREE_NEWS_URL)
- except Exception as entry: # pragma: no cover
- logger.warning('update_news_feed: Error parsing the newsfeed', entry)
+ feed = requests.get(settings.INVENTREE_NEWS_URL)
+ d = feedparser.parse(feed.content)
+ except Exception: # pragma: no cover
+ logger.warning('update_news_feed: Error parsing the newsfeed')
return
# Get a reference list
| {"golden_diff": "diff --git a/InvenTree/common/tasks.py b/InvenTree/common/tasks.py\n--- a/InvenTree/common/tasks.py\n+++ b/InvenTree/common/tasks.py\n@@ -10,6 +10,7 @@\n from django.utils import timezone\n \n import feedparser\n+import requests\n \n from InvenTree.helpers_model import getModelsWithMixin\n from InvenTree.models import InvenTreeNotesMixin\n@@ -47,11 +48,16 @@\n logger.info(\"Could not perform 'update_news_feed' - App registry not ready\")\n return\n \n+ # News feed isn't defined, no need to continue\n+ if not settings.INVENTREE_NEWS_URL or type(settings.INVENTREE_NEWS_URL) != str:\n+ return\n+\n # Fetch and parse feed\n try:\n- d = feedparser.parse(settings.INVENTREE_NEWS_URL)\n- except Exception as entry: # pragma: no cover\n- logger.warning('update_news_feed: Error parsing the newsfeed', entry)\n+ feed = requests.get(settings.INVENTREE_NEWS_URL)\n+ d = feedparser.parse(feed.content)\n+ except Exception: # pragma: no cover\n+ logger.warning('update_news_feed: Error parsing the newsfeed')\n return\n \n # Get a reference list\n", "issue": "News Feed task doesn't work behind proxy, impacting performance\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find a similar issue\n\n### Describe the bug*\n\nThe `update_news_feed` task attempts to fetch the RSS/Atom feed once daily. This, however, doesn't work behind a proxy server.\r\n\r\nThe result is that these tasks occupy workers all the time, and never complete.\r\nEach worker is terminated roughly every 90 seconds due to this.\n\n### Steps to Reproduce\n\n1. Put the InvenTree backend on a network unable to reach `INVENTREE_NEWS_URL`\r\n2. Trigger the task\r\n3. Task will lead to continuous timeout termination of workers\n\n### Expected behaviour\n\nTask should finish with no new News entries added if URL is unreachable.\n\n### Deployment Method\n\n- [ ] Docker\n- [X] Bare metal\n\n### Version Information\n\n0.12.10\n\n### Please verify if you can reproduce this bug on the demo site.\n\n- [ ] I can reproduce this bug on the demo site.\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "\"\"\"Tasks (processes that get offloaded) for common app.\"\"\"\n\nimport logging\nimport os\nfrom datetime import datetime, timedelta\n\nfrom django.conf import settings\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db.utils import IntegrityError, OperationalError\nfrom django.utils import timezone\n\nimport feedparser\n\nfrom InvenTree.helpers_model import getModelsWithMixin\nfrom InvenTree.models import InvenTreeNotesMixin\nfrom InvenTree.tasks import ScheduledTask, scheduled_task\n\nlogger = logging.getLogger('inventree')\n\n\n@scheduled_task(ScheduledTask.DAILY)\ndef delete_old_notifications():\n \"\"\"Remove old notifications from the database.\n\n Anything older than ~3 months is removed\n \"\"\"\n try:\n from common.models import NotificationEntry\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\n \"Could not perform 'delete_old_notifications' - App registry not ready\"\n )\n return\n\n before = timezone.now() - timedelta(days=90)\n\n # Delete notification records before the specified date\n NotificationEntry.objects.filter(updated__lte=before).delete()\n\n\n@scheduled_task(ScheduledTask.DAILY)\ndef update_news_feed():\n \"\"\"Update the newsfeed.\"\"\"\n try:\n from common.models import NewsFeedEntry\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\"Could not perform 'update_news_feed' - App registry not ready\")\n 
return\n\n # Fetch and parse feed\n try:\n d = feedparser.parse(settings.INVENTREE_NEWS_URL)\n except Exception as entry: # pragma: no cover\n logger.warning('update_news_feed: Error parsing the newsfeed', entry)\n return\n\n # Get a reference list\n id_list = [a.feed_id for a in NewsFeedEntry.objects.all()]\n\n # Iterate over entries\n for entry in d.entries:\n # Check if id already exists\n if entry.id in id_list:\n continue\n\n # Create entry\n try:\n NewsFeedEntry.objects.create(\n feed_id=entry.id,\n title=entry.title,\n link=entry.link,\n published=entry.published,\n author=entry.author,\n summary=entry.summary,\n )\n except (IntegrityError, OperationalError):\n # Sometimes errors-out on database start-up\n pass\n\n logger.info('update_news_feed: Sync done')\n\n\n@scheduled_task(ScheduledTask.DAILY)\ndef delete_old_notes_images():\n \"\"\"Remove old notes images from the database.\n\n Anything older than ~3 months is removed, unless it is linked to a note\n \"\"\"\n try:\n from common.models import NotesImage\n except AppRegistryNotReady:\n logger.info(\n \"Could not perform 'delete_old_notes_images' - App registry not ready\"\n )\n return\n\n # Remove any notes which point to non-existent image files\n for note in NotesImage.objects.all():\n if not os.path.exists(note.image.path):\n logger.info('Deleting note %s - image file does not exist', note.image.path)\n note.delete()\n\n note_classes = getModelsWithMixin(InvenTreeNotesMixin)\n before = datetime.now() - timedelta(days=90)\n\n for note in NotesImage.objects.filter(date__lte=before):\n # Find any images which are no longer referenced by a note\n\n found = False\n\n img = note.image.name\n\n for model in note_classes:\n if model.objects.filter(notes__icontains=img).exists():\n found = True\n break\n\n if not found:\n logger.info('Deleting note %s - image file not linked to a note', img)\n note.delete()\n\n # Finally, remove any images in the notes dir which are not linked to a note\n notes_dir = os.path.join(settings.MEDIA_ROOT, 'notes')\n\n try:\n images = os.listdir(notes_dir)\n except FileNotFoundError:\n # Thrown if the directory does not exist\n images = []\n\n all_notes = NotesImage.objects.all()\n\n for image in images:\n found = False\n for note in all_notes:\n img_path = os.path.basename(note.image.path)\n if img_path == image:\n found = True\n break\n\n if not found:\n logger.info('Deleting note %s - image file not linked to a note', image)\n os.remove(os.path.join(notes_dir, image))\n", "path": "InvenTree/common/tasks.py"}]} | 2,040 | 285 |
gh_patches_debug_55968 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2740 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned
**Describe the issue**
[`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied.
WAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself.
The [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`).
- [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id)
**Examples**
```terraform
resource "azurerm_frontdoor" "test" {
name = "test-front-door"
resource_group_name = var.resource_group_name
enforce_backend_pools_certificate_name_check = false
tags = var.tags
frontend_endpoint {
name = "DefaultFrontend"
host_name = "test-front-door.azurefd.net"
web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id
}
# ...
```
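For reference, a minimal sketch of what pointing the check at the nested attribute could look like; the slash/index path below mirrors the accompanying golden diff, so treat the exact syntax as an assumption about checkov's nested-key convention rather than documented API:

```python
from checkov.common.models.consts import ANY_VALUE
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck


class AzureFrontDoorEnablesWAF(BaseResourceValueCheck):
    def __init__(self):
        super().__init__(
            name="Ensure that Azure Front Door enables WAF",
            id="CKV_AZURE_121",
            categories=[CheckCategories.NETWORKING],
            supported_resources=["azurerm_frontdoor"],
        )

    def get_inspected_key(self):
        # Look inside the first frontend_endpoint block instead of the resource root
        return "frontend_endpoint/[0]/web_application_firewall_policy_link_id"

    def get_expected_value(self):
        return ANY_VALUE
```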
**Version (please complete the following information):**
- Checkov Version: 2.0.930
**Additional context**
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py]
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.common.models.enums import CheckCategories
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
4
5
6 class AzureFrontDoorEnablesWAF(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that Azure Front Door enables WAF"
9 id = "CKV_AZURE_121"
10 supported_resources = ['azurerm_frontdoor']
11 categories = [CheckCategories.NETWORKING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "web_application_firewall_policy_link_id"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = AzureFrontDoorEnablesWAF()
22
[end of checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
--- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
+++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py
@@ -12,7 +12,7 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
- return "web_application_firewall_policy_link_id"
+ return "frontend_endpoint/[0]/web_application_firewall_policy_link_id"
def get_expected_value(self):
return ANY_VALUE
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n--- a/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n+++ b/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py\n@@ -12,7 +12,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self):\n- return \"web_application_firewall_policy_link_id\"\n+ return \"frontend_endpoint/[0]/web_application_firewall_policy_link_id\"\n \n def get_expected_value(self):\n return ANY_VALUE\n", "issue": "Check Azure Front Door WAF enabled fails even when a WAF is correctly assigned\n**Describe the issue**\r\n[`CKV_AZURE_121`](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py) fails despite a Web Application Firewall policy being correctly applied. \r\n\r\nWAF policies are applied by specifying a value for `web_application_firewall_policy_link_id` inside a `frontend_endpoint` block within the `azurerm_frontdoor` resource itself.\r\n\r\nThe [documentation](https://docs.bridgecrew.io/docs/ensure-that-azure-front-door-enables-waf) seems to expect that the `web_application_firewall_policy_link_id` attribute is defined in the resource block itself, rather than in a sub-block (`frontend_endpoint`).\r\n\r\n- [`azurerm_frontdoor` resource documentation reference](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/frontdoor#web_application_firewall_policy_link_id)\r\n\r\n**Examples**\r\n```terraform\r\nresource \"azurerm_frontdoor\" \"test\" {\r\n name = \"test-front-door\"\r\n resource_group_name = var.resource_group_name\r\n enforce_backend_pools_certificate_name_check = false\r\n tags = var.tags\r\n\r\n frontend_endpoint {\r\n name = \"DefaultFrontend\"\r\n host_name = \"test-front-door.azurefd.net\"\r\n web_application_firewall_policy_link_id = azurerm_frontdoor_firewall_policy.test.id\r\n }\r\n\r\n # ... \r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version: 2.0.930\r\n\r\n**Additional context**\r\n\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AzureFrontDoorEnablesWAF(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Front Door enables WAF\"\n id = \"CKV_AZURE_121\"\n supported_resources = ['azurerm_frontdoor']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"web_application_firewall_policy_link_id\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AzureFrontDoorEnablesWAF()\n", "path": "checkov/terraform/checks/resource/azure/AzureFrontDoorEnablesWAF.py"}]} | 1,123 | 169 |
gh_patches_debug_23532 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-29 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configure flake8 & GitHub Action correctly
Our flake8 setup has a couple of issues:
- Failures on the GitHub Action don't actually block merge.
- We need to set up our style guide for flake8.
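As an illustration only (the rules below are placeholders, not a style guide this issue prescribes), a starting point could be a `[flake8]` section in `setup.cfg` or a `.flake8` file:

```ini
[flake8]
max-line-length = 88
exclude = .git,__pycache__,build,dist
# Example codes only; the team still needs to agree on the actual set.
extend-ignore = E203
max-complexity = 10
```

With a committed configuration in place, the flake8 GitHub Action can then be marked as a required status check so that failures actually block merges.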
</issue>
<code>
[start of mathesar/forms/widgets.py]
1 from django.forms.widgets import TextInput
2
3 class DataListInput(TextInput):
4 """
5 Widget that adds a <data_list> element to the standard text input widget.
6 See TextInput for further details.
7
8 Attributes:
9 data_list: List of strings, where each string is a data_list value, or
10 a callable that returns a list of the same form
11 data_list_id: ID of the data_list, generated when render() is called.
12 Of the form [widget_id | widget_name]_data_list
13 """
14 template_name = "mathesar/widgets/data_list.html"
15
16 def __init__(self, data_list, attrs=None):
17 super().__init__(attrs=attrs)
18 self.data_list = data_list
19 self.data_list_id = "_data_list"
20
21 def get_context(self, name, value, attrs):
22 context = super().get_context(name, value, attrs)
23 if callable(self.data_list):
24 context["widget"]["data_list"] = self.data_list()
25 else:
26 context["widget"]["data_list"] = self.data_list
27 context["widget"]["data_list_id"] = self.data_list_id
28 return context
29
30 def render(self, name, value, attrs=None, renderer=None):
31 # In practice, there should always be an ID attribute, but we fallback
32 # to using widget name if ID is missing
33 if attrs and "id" in attrs:
34 self.data_list_id = attrs["id"] + "_data_list"
35 else:
36 self.data_list_id = name + "_data_list"
37 attrs = {} if attrs is None else attrs
38 attrs["list"] = self.data_list_id
39 return super().render(name, value, attrs, renderer)
40
41
[end of mathesar/forms/widgets.py]
[start of mathesar/forms/forms.py]
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from mathesar.database.schemas import get_all_schemas
5 from mathesar.forms.widgets import DataListInput
6
7 def validate_csv(value):
8 if not value.name.lower().endswith(".csv"):
9 raise ValidationError(f"{value.name} is not a CSV file")
10
11 class UploadFileForm(forms.Form):
12 collection_name = forms.CharField(min_length=1, label="Collection Name")
13
14 application_name = forms.CharField(
15 min_length=1, label="Application Name",
16 widget=DataListInput(get_all_schemas)
17 )
18
19 file = forms.FileField(validators=[validate_csv], label="CSV File")
20
[end of mathesar/forms/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/forms/forms.py b/mathesar/forms/forms.py
--- a/mathesar/forms/forms.py
+++ b/mathesar/forms/forms.py
@@ -4,10 +4,12 @@
from mathesar.database.schemas import get_all_schemas
from mathesar.forms.widgets import DataListInput
+
def validate_csv(value):
if not value.name.lower().endswith(".csv"):
raise ValidationError(f"{value.name} is not a CSV file")
+
class UploadFileForm(forms.Form):
collection_name = forms.CharField(min_length=1, label="Collection Name")
diff --git a/mathesar/forms/widgets.py b/mathesar/forms/widgets.py
--- a/mathesar/forms/widgets.py
+++ b/mathesar/forms/widgets.py
@@ -1,5 +1,6 @@
from django.forms.widgets import TextInput
+
class DataListInput(TextInput):
"""
Widget that adds a <data_list> element to the standard text input widget.
@@ -37,4 +38,3 @@
attrs = {} if attrs is None else attrs
attrs["list"] = self.data_list_id
return super().render(name, value, attrs, renderer)
-
| {"golden_diff": "diff --git a/mathesar/forms/forms.py b/mathesar/forms/forms.py\n--- a/mathesar/forms/forms.py\n+++ b/mathesar/forms/forms.py\n@@ -4,10 +4,12 @@\n from mathesar.database.schemas import get_all_schemas\n from mathesar.forms.widgets import DataListInput\n \n+\n def validate_csv(value):\n if not value.name.lower().endswith(\".csv\"):\n raise ValidationError(f\"{value.name} is not a CSV file\")\n \n+\n class UploadFileForm(forms.Form):\n collection_name = forms.CharField(min_length=1, label=\"Collection Name\")\n \ndiff --git a/mathesar/forms/widgets.py b/mathesar/forms/widgets.py\n--- a/mathesar/forms/widgets.py\n+++ b/mathesar/forms/widgets.py\n@@ -1,5 +1,6 @@\n from django.forms.widgets import TextInput\n \n+\n class DataListInput(TextInput):\n \"\"\"\n Widget that adds a <data_list> element to the standard text input widget.\n@@ -37,4 +38,3 @@\n attrs = {} if attrs is None else attrs\n attrs[\"list\"] = self.data_list_id\n return super().render(name, value, attrs, renderer)\n-\n", "issue": "Configure flake8 & GitHub Action correctly\nOur flake8 setup has a couple of issues:\r\n- Failures on the GitHub Action don't actually block merge.\r\n- We need to set up our style guide for flake8.\n", "before_files": [{"content": "from django.forms.widgets import TextInput\n\nclass DataListInput(TextInput):\n \"\"\"\n Widget that adds a <data_list> element to the standard text input widget.\n See TextInput for further details.\n\n Attributes:\n data_list: List of strings, where each string is a data_list value, or\n a callable that returns a list of the same form\n data_list_id: ID of the data_list, generated when render() is called.\n Of the form [widget_id | widget_name]_data_list\n \"\"\"\n template_name = \"mathesar/widgets/data_list.html\"\n\n def __init__(self, data_list, attrs=None):\n super().__init__(attrs=attrs)\n self.data_list = data_list\n self.data_list_id = \"_data_list\"\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if callable(self.data_list):\n context[\"widget\"][\"data_list\"] = self.data_list()\n else:\n context[\"widget\"][\"data_list\"] = self.data_list\n context[\"widget\"][\"data_list_id\"] = self.data_list_id\n return context\n\n def render(self, name, value, attrs=None, renderer=None):\n # In practice, there should always be an ID attribute, but we fallback\n # to using widget name if ID is missing\n if attrs and \"id\" in attrs:\n self.data_list_id = attrs[\"id\"] + \"_data_list\"\n else:\n self.data_list_id = name + \"_data_list\"\n attrs = {} if attrs is None else attrs\n attrs[\"list\"] = self.data_list_id\n return super().render(name, value, attrs, renderer)\n\n", "path": "mathesar/forms/widgets.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom mathesar.database.schemas import get_all_schemas\nfrom mathesar.forms.widgets import DataListInput\n\ndef validate_csv(value):\n if not value.name.lower().endswith(\".csv\"):\n raise ValidationError(f\"{value.name} is not a CSV file\")\n\nclass UploadFileForm(forms.Form):\n collection_name = forms.CharField(min_length=1, label=\"Collection Name\")\n\n application_name = forms.CharField(\n min_length=1, label=\"Application Name\",\n widget=DataListInput(get_all_schemas)\n )\n\n file = forms.FileField(validators=[validate_csv], label=\"CSV File\")\n", "path": "mathesar/forms/forms.py"}]} | 1,210 | 249 |
gh_patches_debug_155 | rasdani/github-patches | git_diff | hylang__hy-1369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Official support for evaluating strings of Hy code from Python
Is it possible to embed some Hy code inside a Python file, as opposed to having the whole file be full-on Hy?
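For illustration, the sort of embedding being asked about could look roughly like this from plain Python, assuming the reader and eval helpers are exposed at the package level (the accompanying patch adds them as `read_str` and `eval`):

```python
import hy  # importing hy also sets up its import machinery

# Parse a string of Hy source into Hy model objects, then evaluate it.
expr = hy.read_str("(+ 1 2)")
print(hy.eval(expr))  # 3
```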
</issue>
<code>
[start of hy/__init__.py]
1 __appname__ = 'hy'
2 try:
3 from hy.version import __version__
4 except ImportError:
5 __version__ = 'unknown'
6
7
8 from hy.models import HyExpression, HyInteger, HyKeyword, HyComplex, HyString, HyBytes, HySymbol, HyFloat, HyDict, HyList, HySet, HyCons # NOQA
9
10
11 import hy.importer # NOQA
12 # we import for side-effects.
13
[end of hy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/__init__.py b/hy/__init__.py
--- a/hy/__init__.py
+++ b/hy/__init__.py
@@ -10,3 +10,7 @@
import hy.importer # NOQA
# we import for side-effects.
+
+
+from hy.core.language import read, read_str # NOQA
+from hy.importer import hy_eval as eval # NOQA
| {"golden_diff": "diff --git a/hy/__init__.py b/hy/__init__.py\n--- a/hy/__init__.py\n+++ b/hy/__init__.py\n@@ -10,3 +10,7 @@\n \n import hy.importer # NOQA\n # we import for side-effects.\n+\n+\n+from hy.core.language import read, read_str # NOQA\n+from hy.importer import hy_eval as eval # NOQA\n", "issue": "Official support for evaluating strings of Hy code from Python\nIs it possible to embed some hy code inside a python file? As opposed to having the whole file be full on hy?\n", "before_files": [{"content": "__appname__ = 'hy'\ntry:\n from hy.version import __version__\nexcept ImportError:\n __version__ = 'unknown'\n\n\nfrom hy.models import HyExpression, HyInteger, HyKeyword, HyComplex, HyString, HyBytes, HySymbol, HyFloat, HyDict, HyList, HySet, HyCons # NOQA\n\n\nimport hy.importer # NOQA\n# we import for side-effects.\n", "path": "hy/__init__.py"}]} | 682 | 98 |
gh_patches_debug_11274 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1021 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Python 3.5 support
Python 3.5 hit EOL September 13, 2020. Support will be removed in our next major release.
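A sketch of how such a drop is commonly enforced at import time (illustrative only; the exact comparison and error type are a maintainer decision, and the accompanying patch, for instance, raises a `DeprecationWarning`):

```python
import sys

# Refuse to run on interpreters that are no longer supported.
if sys.version_info < (3, 6):
    raise RuntimeError("The Elastic APM agent requires Python 3.6+")
```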
</issue>
<code>
[start of elasticapm/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 import sys
31
32 from elasticapm.base import Client
33 from elasticapm.conf import setup_logging # noqa: F401
34 from elasticapm.instrumentation.control import instrument, uninstrument # noqa: F401
35 from elasticapm.traces import ( # noqa: F401
36 capture_span,
37 get_span_id,
38 get_trace_id,
39 get_transaction_id,
40 get_trace_parent_header,
41 label,
42 set_context,
43 set_custom_context,
44 set_transaction_name,
45 set_transaction_outcome,
46 set_transaction_result,
47 set_user_context,
48 tag,
49 )
50 from elasticapm.utils.disttracing import trace_parent_from_headers, trace_parent_from_string # noqa: F401
51
52 __all__ = ("VERSION", "Client")
53
54 try:
55 try:
56 VERSION = __import__("importlib.metadata").metadata.version("elastic-apm")
57 except ImportError:
58 VERSION = __import__("pkg_resources").get_distribution("elastic-apm").version
59 except Exception:
60 VERSION = "unknown"
61
62
63 if sys.version_info >= (3, 5):
64 from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401
65
[end of elasticapm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/__init__.py b/elasticapm/__init__.py
--- a/elasticapm/__init__.py
+++ b/elasticapm/__init__.py
@@ -36,8 +36,8 @@
capture_span,
get_span_id,
get_trace_id,
- get_transaction_id,
get_trace_parent_header,
+ get_transaction_id,
label,
set_context,
set_custom_context,
@@ -60,5 +60,7 @@
VERSION = "unknown"
-if sys.version_info >= (3, 5):
- from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401
+if sys.version_info <= (3, 5):
+ raise DeprecationWarning("The Elastic APM agent requires Python 3.6+")
+
+from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401
| {"golden_diff": "diff --git a/elasticapm/__init__.py b/elasticapm/__init__.py\n--- a/elasticapm/__init__.py\n+++ b/elasticapm/__init__.py\n@@ -36,8 +36,8 @@\n capture_span,\n get_span_id,\n get_trace_id,\n- get_transaction_id,\n get_trace_parent_header,\n+ get_transaction_id,\n label,\n set_context,\n set_custom_context,\n@@ -60,5 +60,7 @@\n VERSION = \"unknown\"\n \n \n-if sys.version_info >= (3, 5):\n- from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401\n+if sys.version_info <= (3, 5):\n+ raise DeprecationWarning(\"The Elastic APM agent requires Python 3.6+\")\n+\n+from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401\n", "issue": "Remove Python 3.5 support\nPython 3.5 hit EOL September 13, 2020. Support will be removed in our next major release.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nimport sys\n\nfrom elasticapm.base import Client\nfrom elasticapm.conf import setup_logging # noqa: F401\nfrom elasticapm.instrumentation.control import instrument, uninstrument # noqa: F401\nfrom elasticapm.traces import ( # noqa: F401\n capture_span,\n get_span_id,\n get_trace_id,\n get_transaction_id,\n get_trace_parent_header,\n label,\n set_context,\n set_custom_context,\n set_transaction_name,\n set_transaction_outcome,\n set_transaction_result,\n set_user_context,\n tag,\n)\nfrom elasticapm.utils.disttracing import trace_parent_from_headers, trace_parent_from_string # noqa: F401\n\n__all__ = (\"VERSION\", \"Client\")\n\ntry:\n try:\n VERSION = __import__(\"importlib.metadata\").metadata.version(\"elastic-apm\")\n except ImportError:\n VERSION = __import__(\"pkg_resources\").get_distribution(\"elastic-apm\").version\nexcept Exception:\n VERSION = \"unknown\"\n\n\nif sys.version_info >= (3, 5):\n from elasticapm.contrib.asyncio.traces import async_capture_span # noqa: F401\n", "path": "elasticapm/__init__.py"}]} | 1,299 | 212 |
gh_patches_debug_36865 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-834 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify cookiecutter.hooks.find_hooks
We should rename `cookiecutter.hooks.find_hooks` to `find_hook(hook_name)` and explicitly look for the requested hook, instead of processing all the files in the hooks directory.
See https://github.com/audreyr/cookiecutter/pull/768/files/9a94484093ca23e9d55d42a53f096f67535b0b63#r68646614
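Roughly, the replacement could take the shape below; this is a sketch based on the existing module, not the final implementation, and the `hooks_dir` default and `_HOOKS` list mirror what is already in `cookiecutter/hooks.py`:

```python
import os

_HOOKS = ["pre_gen_project", "post_gen_project"]


def find_hook(hook_name, hooks_dir="hooks"):
    """Return the absolute path to the requested hook script, or None."""
    if not os.path.isdir(hooks_dir):
        return None
    for hook_file in os.listdir(hooks_dir):
        basename = os.path.splitext(os.path.basename(hook_file))[0]
        if basename == hook_name and basename in _HOOKS and not hook_file.endswith("~"):
            return os.path.abspath(os.path.join(hooks_dir, hook_file))
    return None
```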
</issue>
<code>
[start of cookiecutter/hooks.py]
1 # -*- coding: utf-8 -*-
2
3 """Functions for discovering and executing various cookiecutter hooks."""
4
5 import io
6 import logging
7 import os
8 import subprocess
9 import sys
10 import tempfile
11
12 from jinja2 import Template
13
14 from cookiecutter import utils
15 from .exceptions import FailedHookException
16
17 logger = logging.getLogger(__name__)
18
19
20 _HOOKS = [
21 'pre_gen_project',
22 'post_gen_project',
23 # TODO: other hooks should be listed here
24 ]
25 EXIT_SUCCESS = 0
26
27
28 def find_hooks():
29 """Return a dict of all hook scripts provided.
30
31 Must be called with the project template as the current working directory.
32 Dict's key will be the hook/script's name, without extension, while values
33 will be the absolute path to the script. Missing scripts will not be
34 included in the returned dict.
35 """
36 hooks_dir = 'hooks'
37 hooks = {}
38 logger.debug('hooks_dir is {}'.format(hooks_dir))
39
40 if not os.path.isdir(hooks_dir):
41 logger.debug('No hooks/ dir in template_dir')
42 return hooks
43
44 for f in os.listdir(hooks_dir):
45 filename = os.path.basename(f)
46 basename = os.path.splitext(filename)[0]
47
48 if basename in _HOOKS and not filename.endswith('~'):
49 hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))
50 return hooks
51
52
53 def run_script(script_path, cwd='.'):
54 """Execute a script from a working directory.
55
56 :param script_path: Absolute path to the script to run.
57 :param cwd: The directory to run the script from.
58 """
59 run_thru_shell = sys.platform.startswith('win')
60 if script_path.endswith('.py'):
61 script_command = [sys.executable, script_path]
62 else:
63 script_command = [script_path]
64
65 utils.make_executable(script_path)
66
67 proc = subprocess.Popen(
68 script_command,
69 shell=run_thru_shell,
70 cwd=cwd
71 )
72 exit_status = proc.wait()
73 if exit_status != EXIT_SUCCESS:
74 raise FailedHookException(
75 "Hook script failed (exit status: %d)" % exit_status)
76
77
78 def run_script_with_context(script_path, cwd, context):
79 """Execute a script after rendering it with Jinja.
80
81 :param script_path: Absolute path to the script to run.
82 :param cwd: The directory to run the script from.
83 :param context: Cookiecutter project template context.
84 """
85 _, extension = os.path.splitext(script_path)
86
87 contents = io.open(script_path, 'r', encoding='utf-8').read()
88
89 with tempfile.NamedTemporaryFile(
90 delete=False,
91 mode='wb',
92 suffix=extension
93 ) as temp:
94 output = Template(contents).render(**context)
95 temp.write(output.encode('utf-8'))
96
97 run_script(temp.name, cwd)
98
99
100 def run_hook(hook_name, project_dir, context):
101 """
102 Try to find and execute a hook from the specified project directory.
103
104 :param hook_name: The hook to execute.
105 :param project_dir: The directory to execute the script from.
106 :param context: Cookiecutter project context.
107 """
108 script = find_hooks().get(hook_name)
109 if script is None:
110 logger.debug('No hooks found')
111 return
112 logger.debug('Running hook {}'.format(hook_name))
113 run_script_with_context(script, project_dir, context)
114
[end of cookiecutter/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py
--- a/cookiecutter/hooks.py
+++ b/cookiecutter/hooks.py
@@ -16,38 +16,53 @@
logger = logging.getLogger(__name__)
-
_HOOKS = [
'pre_gen_project',
'post_gen_project',
- # TODO: other hooks should be listed here
]
EXIT_SUCCESS = 0
-def find_hooks():
+def valid_hook(hook_file, hook_name):
+ """Determine if a hook file is valid.
+
+ :param hook_file: The hook file to consider for validity
+ :param hook_name: The hook to find
+ :return: The hook file validity
+ """
+ filename = os.path.basename(hook_file)
+ basename = os.path.splitext(filename)[0]
+
+ matching_hook = basename == hook_name
+ supported_hook = basename in _HOOKS
+ backup_file = filename.endswith('~')
+
+ return matching_hook and supported_hook and not backup_file
+
+
+def find_hook(hook_name, hooks_dir='hooks'):
"""Return a dict of all hook scripts provided.
Must be called with the project template as the current working directory.
Dict's key will be the hook/script's name, without extension, while values
will be the absolute path to the script. Missing scripts will not be
included in the returned dict.
+
+ :param hook_name: The hook to find
+ :param hooks_dir: The hook directory in the template
+ :return: The absolute path to the hook script or None
"""
- hooks_dir = 'hooks'
- hooks = {}
- logger.debug('hooks_dir is {}'.format(hooks_dir))
+ logger.debug('hooks_dir is {}'.format(os.path.abspath(hooks_dir)))
if not os.path.isdir(hooks_dir):
logger.debug('No hooks/ dir in template_dir')
- return hooks
+ return None
- for f in os.listdir(hooks_dir):
- filename = os.path.basename(f)
- basename = os.path.splitext(filename)[0]
+ for hook_file in os.listdir(hooks_dir):
+ if valid_hook(hook_file, hook_name):
+ return os.path.abspath(os.path.join(hooks_dir, hook_file))
- if basename in _HOOKS and not filename.endswith('~'):
- hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))
- return hooks
+ return None
def run_script(script_path, cwd='.'):
@@ -105,7 +120,7 @@
:param project_dir: The directory to execute the script from.
:param context: Cookiecutter project context.
"""
- script = find_hooks().get(hook_name)
+ script = find_hook(hook_name)
if script is None:
logger.debug('No hooks found')
return
| {"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -16,38 +16,53 @@\n \n logger = logging.getLogger(__name__)\n \n-\n _HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n- # TODO: other hooks should be listed here\n ]\n EXIT_SUCCESS = 0\n \n \n-def find_hooks():\n+def valid_hook(hook_file, hook_name):\n+ \"\"\"Determine if a hook file is valid.\n+\n+ :param hook_file: The hook file to consider for validity\n+ :param hook_name: The hook to find\n+ :return: The hook file validity\n+ \"\"\"\n+ filename = os.path.basename(hook_file)\n+ basename = os.path.splitext(filename)[0]\n+\n+ matching_hook = basename == hook_name\n+ supported_hook = basename in _HOOKS\n+ backup_file = filename.endswith('~')\n+\n+ return matching_hook and supported_hook and not backup_file\n+\n+\n+def find_hook(hook_name, hooks_dir='hooks'):\n \"\"\"Return a dict of all hook scripts provided.\n \n Must be called with the project template as the current working directory.\n Dict's key will be the hook/script's name, without extension, while values\n will be the absolute path to the script. Missing scripts will not be\n included in the returned dict.\n+\n+ :param hook_name: The hook to find\n+ :param hooks_dir: The hook directory in the template\n+ :return: The absolute path to the hook script or None\n \"\"\"\n- hooks_dir = 'hooks'\n- hooks = {}\n- logger.debug('hooks_dir is {}'.format(hooks_dir))\n+ logger.debug('hooks_dir is {}'.format(os.path.abspath(hooks_dir)))\n \n if not os.path.isdir(hooks_dir):\n logger.debug('No hooks/ dir in template_dir')\n- return hooks\n+ return None\n \n- for f in os.listdir(hooks_dir):\n- filename = os.path.basename(f)\n- basename = os.path.splitext(filename)[0]\n+ for hook_file in os.listdir(hooks_dir):\n+ if valid_hook(hook_file, hook_name):\n+ return os.path.abspath(os.path.join(hooks_dir, hook_file))\n \n- if basename in _HOOKS and not filename.endswith('~'):\n- hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n- return hooks\n+ return None\n \n \n def run_script(script_path, cwd='.'):\n@@ -105,7 +120,7 @@\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n- script = find_hooks().get(hook_name)\n+ script = find_hook(hook_name)\n if script is None:\n logger.debug('No hooks found')\n return\n", "issue": "Simplify cookiecutter.hooks.find_hooks\nWe should rename `cookiecutter.hooks.find_hooks` to `find_hook(hook_name)` and explicitly look for the requested hook, instead of processing all the files in the hooks directory.\n\nSee https://github.com/audreyr/cookiecutter/pull/768/files/9a94484093ca23e9d55d42a53f096f67535b0b63#r68646614\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Functions for discovering and executing various cookiecutter hooks.\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\nlogger = logging.getLogger(__name__)\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"Return a dict of all hook scripts provided.\n\n Must be called with the project template as the current working directory.\n Dict's key will be the hook/script's name, without extension, while values\n will be the absolute path to the script. 
Missing scripts will not be\n included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n hooks = {}\n logger.debug('hooks_dir is {}'.format(hooks_dir))\n\n if not os.path.isdir(hooks_dir):\n logger.debug('No hooks/ dir in template_dir')\n return hooks\n\n for f in os.listdir(hooks_dir):\n filename = os.path.basename(f)\n basename = os.path.splitext(filename)[0]\n\n if basename in _HOOKS and not filename.endswith('~'):\n hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return hooks\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"Execute a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"Execute a script after rendering it with Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logger.debug('No hooks found')\n return\n logger.debug('Running hook {}'.format(hook_name))\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]} | 1,631 | 641 |
gh_patches_debug_26507 | rasdani/github-patches | git_diff | airctic__icevision-960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more logging to the PyTorch Lightning models.
The feature consists of two parts:
1. Add the validation loss to the progress bar by default
2. Create boolean parameter for extended progress bar logging (showing the different components of the loss)
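A sketch of both parts with the standard PyTorch Lightning logging API (`self.log(..., prog_bar=True)`); the flag name and the `compute_losses` helper are placeholders, not existing icevision APIs:

```python
import pytorch_lightning as pl


class ModelAdapterSketch(pl.LightningModule):
    def __init__(self, log_losses_to_prog_bar: bool = False):
        super().__init__()
        self.log_losses_to_prog_bar = log_losses_to_prog_bar  # hypothetical flag (part 2)

    def validation_step(self, batch, batch_idx):
        losses = self.compute_losses(batch)  # placeholder: dict of named loss components
        total = sum(losses.values())
        self.log("val_loss", total, prog_bar=True)  # part 1: always show validation loss
        if self.log_losses_to_prog_bar:  # part 2: optional extended logging
            for name, value in losses.items():
                self.log(f"val_{name}", value, prog_bar=True)
        return total
```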
</issue>
<code>
[start of icevision/engines/lightning/lightning_model_adapter.py]
1 __all__ = ["LightningModelAdapter"]
2
3 import pytorch_lightning as pl
4 from icevision.imports import *
5 from icevision.metrics import *
6
7
8 class LightningModelAdapter(pl.LightningModule, ABC):
9 def __init__(self, metrics: List[Metric] = None):
10 super().__init__()
11 self.metrics = metrics or []
12
13 def accumulate_metrics(self, preds):
14 for metric in self.metrics:
15 metric.accumulate(preds=preds)
16
17 def finalize_metrics(self) -> None:
18 for metric in self.metrics:
19 metric_logs = metric.finalize()
20 for k, v in metric_logs.items():
21 self.log(f"{metric.name}/{k}", v)
22
[end of icevision/engines/lightning/lightning_model_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/engines/lightning/lightning_model_adapter.py b/icevision/engines/lightning/lightning_model_adapter.py
--- a/icevision/engines/lightning/lightning_model_adapter.py
+++ b/icevision/engines/lightning/lightning_model_adapter.py
@@ -6,9 +6,21 @@
class LightningModelAdapter(pl.LightningModule, ABC):
- def __init__(self, metrics: List[Metric] = None):
+ def __init__(
+ self,
+ metrics: List[Metric] = None,
+ metrics_keys_to_log_to_prog_bar: List[tuple] = None,
+ ):
+ """
+ To show a metric in the progressbar a list of tupels can be provided for metrics_keys_to_log_to_prog_bar, the first
+ entry has to be the name of the metric to log and the second entry the display name in the progressbar. By default the
+ mAP is logged to the progressbar.
+ """
super().__init__()
self.metrics = metrics or []
+ self.metrics_keys_to_log_to_prog_bar = metrics_keys_to_log_to_prog_bar or [
+ ("AP (IoU=0.50:0.95) area=all", "COCOMetric")
+ ]
def accumulate_metrics(self, preds):
for metric in self.metrics:
@@ -18,4 +30,9 @@
for metric in self.metrics:
metric_logs = metric.finalize()
for k, v in metric_logs.items():
- self.log(f"{metric.name}/{k}", v)
+ for entry in self.metrics_keys_to_log_to_prog_bar:
+ if entry[0] == k:
+ self.log(entry[1], v, prog_bar=True)
+ self.log(f"{metric.name}/{k}", v)
+ else:
+ self.log(f"{metric.name}/{k}", v)
| {"golden_diff": "diff --git a/icevision/engines/lightning/lightning_model_adapter.py b/icevision/engines/lightning/lightning_model_adapter.py\n--- a/icevision/engines/lightning/lightning_model_adapter.py\n+++ b/icevision/engines/lightning/lightning_model_adapter.py\n@@ -6,9 +6,21 @@\n \n \n class LightningModelAdapter(pl.LightningModule, ABC):\n- def __init__(self, metrics: List[Metric] = None):\n+ def __init__(\n+ self,\n+ metrics: List[Metric] = None,\n+ metrics_keys_to_log_to_prog_bar: List[tuple] = None,\n+ ):\n+ \"\"\"\n+ To show a metric in the progressbar a list of tupels can be provided for metrics_keys_to_log_to_prog_bar, the first\n+ entry has to be the name of the metric to log and the second entry the display name in the progressbar. By default the\n+ mAP is logged to the progressbar.\n+ \"\"\"\n super().__init__()\n self.metrics = metrics or []\n+ self.metrics_keys_to_log_to_prog_bar = metrics_keys_to_log_to_prog_bar or [\n+ (\"AP (IoU=0.50:0.95) area=all\", \"COCOMetric\")\n+ ]\n \n def accumulate_metrics(self, preds):\n for metric in self.metrics:\n@@ -18,4 +30,9 @@\n for metric in self.metrics:\n metric_logs = metric.finalize()\n for k, v in metric_logs.items():\n- self.log(f\"{metric.name}/{k}\", v)\n+ for entry in self.metrics_keys_to_log_to_prog_bar:\n+ if entry[0] == k:\n+ self.log(entry[1], v, prog_bar=True)\n+ self.log(f\"{metric.name}/{k}\", v)\n+ else:\n+ self.log(f\"{metric.name}/{k}\", v)\n", "issue": "Add more logging to the pytorch lighning models.\nThe feature consists of two parts:\r\n 1. Add the validation loss to the progress bar by default\r\n 2. Create boolean parameter for extended progress bar logging (showing the different components of the loss)\n", "before_files": [{"content": "__all__ = [\"LightningModelAdapter\"]\n\nimport pytorch_lightning as pl\nfrom icevision.imports import *\nfrom icevision.metrics import *\n\n\nclass LightningModelAdapter(pl.LightningModule, ABC):\n def __init__(self, metrics: List[Metric] = None):\n super().__init__()\n self.metrics = metrics or []\n\n def accumulate_metrics(self, preds):\n for metric in self.metrics:\n metric.accumulate(preds=preds)\n\n def finalize_metrics(self) -> None:\n for metric in self.metrics:\n metric_logs = metric.finalize()\n for k, v in metric_logs.items():\n self.log(f\"{metric.name}/{k}\", v)\n", "path": "icevision/engines/lightning/lightning_model_adapter.py"}]} | 786 | 418 |
gh_patches_debug_13791 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3409 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
app_key not passed to aiohttp_jinja2
When using aiohttp_admin, the app_key value for the templating module differs from the default one.
This causes an error executing:
https://github.com/DataDog/dd-trace-py/blob/ec191a4a71ae71017b70d26111bba4489e617ae5/ddtrace/contrib/aiohttp/template.py#L21
As far as I understand this would solve the problem.
`env = aiohttp_jinja2.get_env(request.app, app_key=kwargs["app_key"])`
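A slightly more defensive variant only forwards `app_key` when the caller actually passed it, so the default behaviour is untouched; this fragment is meant to live inside `traced_render_template`, replacing the `get_env` call:

```python
# kwargs, request and aiohttp_jinja2 come from the surrounding traced_render_template
get_env_kwargs = {}
if "app_key" in kwargs:
    get_env_kwargs["app_key"] = kwargs["app_key"]
env = aiohttp_jinja2.get_env(request.app, **get_env_kwargs)
```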
</issue>
<code>
[start of ddtrace/contrib/aiohttp_jinja2/patch.py]
1 from ddtrace import Pin
2 from ddtrace import config
3
4 from ...ext import SpanTypes
5 from ...internal.utils import get_argument_value
6 from ..trace_utils import unwrap
7 from ..trace_utils import with_traced_module
8 from ..trace_utils import wrap
9
10
11 config._add(
12 "aiohttp_jinja2",
13 dict(),
14 )
15
16
17 @with_traced_module
18 def traced_render_template(aiohttp_jinja2, pin, func, instance, args, kwargs):
19 # original signature:
20 # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')
21 template_name = get_argument_value(args, kwargs, 0, "template_name")
22 request = get_argument_value(args, kwargs, 1, "request")
23 env = aiohttp_jinja2.get_env(request.app)
24
25 # the prefix is available only on PackageLoader
26 template_prefix = getattr(env.loader, "package_path", "")
27 template_meta = "%s/%s" % (template_prefix, template_name)
28
29 with pin.tracer.trace("aiohttp.template", span_type=SpanTypes.TEMPLATE) as span:
30 span.set_tag("aiohttp.template", template_meta)
31 return func(*args, **kwargs)
32
33
34 def _patch(aiohttp_jinja2):
35 Pin().onto(aiohttp_jinja2)
36 wrap("aiohttp_jinja2", "render_template", traced_render_template(aiohttp_jinja2))
37
38
39 def patch():
40 import aiohttp_jinja2
41
42 if getattr(aiohttp_jinja2, "_datadog_patch", False):
43 return
44
45 _patch(aiohttp_jinja2)
46
47 setattr(aiohttp_jinja2, "_datadog_patch", True)
48
49
50 def _unpatch(aiohttp_jinja2):
51 unwrap(aiohttp_jinja2, "render_template")
52
53
54 def unpatch():
55 import aiohttp_jinja2
56
57 if not getattr(aiohttp_jinja2, "_datadog_patch", False):
58 return
59
60 _unpatch(aiohttp_jinja2)
61
62 setattr(aiohttp_jinja2, "_datadog_patch", False)
63
[end of ddtrace/contrib/aiohttp_jinja2/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/aiohttp_jinja2/patch.py b/ddtrace/contrib/aiohttp_jinja2/patch.py
--- a/ddtrace/contrib/aiohttp_jinja2/patch.py
+++ b/ddtrace/contrib/aiohttp_jinja2/patch.py
@@ -20,7 +20,10 @@
# render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')
template_name = get_argument_value(args, kwargs, 0, "template_name")
request = get_argument_value(args, kwargs, 1, "request")
- env = aiohttp_jinja2.get_env(request.app)
+ get_env_kwargs = {}
+ if "app_key" in kwargs:
+ get_env_kwargs["app_key"] = kwargs["app_key"]
+ env = aiohttp_jinja2.get_env(request.app, **get_env_kwargs)
# the prefix is available only on PackageLoader
template_prefix = getattr(env.loader, "package_path", "")
| {"golden_diff": "diff --git a/ddtrace/contrib/aiohttp_jinja2/patch.py b/ddtrace/contrib/aiohttp_jinja2/patch.py\n--- a/ddtrace/contrib/aiohttp_jinja2/patch.py\n+++ b/ddtrace/contrib/aiohttp_jinja2/patch.py\n@@ -20,7 +20,10 @@\n # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')\n template_name = get_argument_value(args, kwargs, 0, \"template_name\")\n request = get_argument_value(args, kwargs, 1, \"request\")\n- env = aiohttp_jinja2.get_env(request.app)\n+ get_env_kwargs = {}\n+ if \"app_key\" in kwargs:\n+ get_env_kwargs[\"app_key\"] = kwargs[\"app_key\"]\n+ env = aiohttp_jinja2.get_env(request.app, **get_env_kwargs)\n \n # the prefix is available only on PackageLoader\n template_prefix = getattr(env.loader, \"package_path\", \"\")\n", "issue": "app_key not passed to aiohttp_jinja2 \nWhen using aiohttp_admin the app_key value for the templating module differs from the default one.\r\n\r\nThis causes an error executing:\r\nhttps://github.com/DataDog/dd-trace-py/blob/ec191a4a71ae71017b70d26111bba4489e617ae5/ddtrace/contrib/aiohttp/template.py#L21\r\n\r\nAs far as I understand this would solve the problem.\r\n`env = aiohttp_jinja2.get_env(request.app, app_key=kwargs[\"app_key\"])`\n", "before_files": [{"content": "from ddtrace import Pin\nfrom ddtrace import config\n\nfrom ...ext import SpanTypes\nfrom ...internal.utils import get_argument_value\nfrom ..trace_utils import unwrap\nfrom ..trace_utils import with_traced_module\nfrom ..trace_utils import wrap\n\n\nconfig._add(\n \"aiohttp_jinja2\",\n dict(),\n)\n\n\n@with_traced_module\ndef traced_render_template(aiohttp_jinja2, pin, func, instance, args, kwargs):\n # original signature:\n # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8')\n template_name = get_argument_value(args, kwargs, 0, \"template_name\")\n request = get_argument_value(args, kwargs, 1, \"request\")\n env = aiohttp_jinja2.get_env(request.app)\n\n # the prefix is available only on PackageLoader\n template_prefix = getattr(env.loader, \"package_path\", \"\")\n template_meta = \"%s/%s\" % (template_prefix, template_name)\n\n with pin.tracer.trace(\"aiohttp.template\", span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(\"aiohttp.template\", template_meta)\n return func(*args, **kwargs)\n\n\ndef _patch(aiohttp_jinja2):\n Pin().onto(aiohttp_jinja2)\n wrap(\"aiohttp_jinja2\", \"render_template\", traced_render_template(aiohttp_jinja2))\n\n\ndef patch():\n import aiohttp_jinja2\n\n if getattr(aiohttp_jinja2, \"_datadog_patch\", False):\n return\n\n _patch(aiohttp_jinja2)\n\n setattr(aiohttp_jinja2, \"_datadog_patch\", True)\n\n\ndef _unpatch(aiohttp_jinja2):\n unwrap(aiohttp_jinja2, \"render_template\")\n\n\ndef unpatch():\n import aiohttp_jinja2\n\n if not getattr(aiohttp_jinja2, \"_datadog_patch\", False):\n return\n\n _unpatch(aiohttp_jinja2)\n\n setattr(aiohttp_jinja2, \"_datadog_patch\", False)\n", "path": "ddtrace/contrib/aiohttp_jinja2/patch.py"}]} | 1,270 | 226 |
gh_patches_debug_4173 | rasdani/github-patches | git_diff | statsmodels__statsmodels-779 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OLS residuals returned as Pandas series when endog and exog are Pandas series
When I fit an OLS model with a pandas Series and try to do a Durbin-Watson test, the function returns nan. In that case the RegressionResult.resid attribute is a pandas Series rather than a numpy array; converting it to a numpy array explicitly, the durbin_watson function works like a charm.
My instinct is this is something that should probably be changed in OLS (to guarantee the type of resid), hence the title of the issue, but I leave that to the judgement of our fearless leaders.
``` python
import statsmodels.api as sm
import numpy as np
from pandas import DataFrame
x=np.arange(1,11)
y=[num+np.random.normal() for num in np.arange(0,5, .5)]
linmod=sm.OLS(y, x).fit()
dw=sm.stats.stattools.durbin_watson(linmod.resid)
data=DataFrame({'x':x, 'y':y}, index=x)
linmod_pandas=sm.OLS(data.y, data.x).fit()
dw_pandas=sm.stats.stattools.durbin_watson(linmod_pandas.resid)
dw_pandas1=sm.stats.stattools.durbin_watson(array(linmod_pandas.resid))
print type(linmod_pandas.resid)
print dw, dw_pandas, dw_pandas1
```
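The direction hinted at, guaranteeing the array type, can also be handled inside the statistic itself with a one-line coercion; a minimal sketch:

```python
import numpy as np


def durbin_watson(resids):
    resids = np.asarray(resids)  # accept pandas Series, lists, etc.
    diff_resids = np.diff(resids, 1)
    return np.dot(diff_resids, diff_resids) / np.dot(resids, resids)
```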
</issue>
<code>
[start of statsmodels/stats/stattools.py]
1 """
2 Statistical tests to be used in conjunction with the models
3
4 Notes
5 -----
6 These functions haven't been formally tested.
7 """
8
9 from scipy import stats
10 import numpy as np
11
12
13 #TODO: these are pretty straightforward but they should be tested
14 def durbin_watson(resids):
15 """
16 Calculates the Durbin-Watson statistic
17
18 Parameters
19 -----------
20 resids : array-like
21
22 Returns
23 --------
24 Durbin Watson statistic. This is defined as
25 sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)
26 """
27 diff_resids = np.diff(resids, 1)
28 dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)
29 return dw
30
31 def omni_normtest(resids, axis=0):
32 """
33 Omnibus test for normality
34
35 Parameters
36 -----------
37 resid : array-like
38 axis : int, optional
39 Default is 0
40
41 Returns
42 -------
43 Chi^2 score, two-tail probability
44 """
45 #TODO: change to exception in summary branch and catch in summary()
46 #behavior changed between scipy 0.9 and 0.10
47 resids = np.asarray(resids)
48 n = resids.shape[axis]
49 if n < 8:
50 return np.nan, np.nan
51 return_shape = list(resids.shape)
52 del return_shape[axis]
53 return np.nan * np.zeros(return_shape), np.nan * np.zeros(return_shape)
54 raise ValueError(
55 "skewtest is not valid with less than 8 observations; %i samples"
56 " were given." % int(n))
57
58 return stats.normaltest(resids, axis=axis)
59
60 def jarque_bera(resids):
61 """
62 Calculate residual skewness, kurtosis, and do the JB test for normality
63
64 Parameters
65 -----------
66 resids : array-like
67
68 Returns
69 -------
70 JB, JBpv, skew, kurtosis
71
72 JB = n/6*(S^2 + (K-3)^2/4)
73
74 JBpv is the Chi^2 two-tail probability value
75
76 skew is the measure of skewness
77
78 kurtosis is the measure of kurtosis
79
80 """
81 resids = np.asarray(resids)
82 # Calculate residual skewness and kurtosis
83 skew = stats.skew(resids)
84 kurtosis = 3 + stats.kurtosis(resids)
85
86 # Calculate the Jarque-Bera test for normality
87 JB = (resids.shape[0] / 6.) * (skew**2 + (1 / 4.) * (kurtosis-3)**2)
88 JBpv = stats.chi2.sf(JB,2)
89
90 return JB, JBpv, skew, kurtosis
91
92
[end of statsmodels/stats/stattools.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/stats/stattools.py b/statsmodels/stats/stattools.py
--- a/statsmodels/stats/stattools.py
+++ b/statsmodels/stats/stattools.py
@@ -24,6 +24,7 @@
Durbin Watson statistic. This is defined as
sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)
"""
+ resids=np.asarray(resids)
diff_resids = np.diff(resids, 1)
dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)
return dw
| {"golden_diff": "diff --git a/statsmodels/stats/stattools.py b/statsmodels/stats/stattools.py\n--- a/statsmodels/stats/stattools.py\n+++ b/statsmodels/stats/stattools.py\n@@ -24,6 +24,7 @@\n Durbin Watson statistic. This is defined as\n sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)\n \"\"\"\n+ resids=np.asarray(resids)\n diff_resids = np.diff(resids, 1)\n dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)\n return dw\n", "issue": "OLS residuals returned as Pandas series when endog and exog are Pandas series\nWhen I fit OLS model with pandas series and try to do a Durbin-Watson test, the function returns nan. In that case the RegressionResult.resid attribute is a pandas series, rather than a numpy array- converting to a numpy array explicitly, the durbin_watson function works like a charm. \n\nMy instinct is this is something that should probably be changed in OLS (to guarantee the type of resid), hence the title of the issue, but I leave that to the judgement of our fearless leaders.\n\n``` python\nimport statsmodels.api as sm\nimport numpy as np\nfrom pandas import DataFrame\nx=np.arange(1,11)\ny=[num+np.random.normal() for num in np.arange(0,5, .5)]\nlinmod=sm.OLS(y, x).fit()\ndw=sm.stats.stattools.durbin_watson(linmod.resid)\ndata=DataFrame({'x':x, 'y':y}, index=x)\nlinmod_pandas=sm.OLS(data.y, data.x).fit()\ndw_pandas=sm.stats.stattools.durbin_watson(linmod_pandas.resid)\ndw_pandas1=sm.stats.stattools.durbin_watson(array(linmod_pandas.resid))\nprint type(linmod_pandas.resid)\nprint dw, dw_pandas, dw_pandas1\n```\n\n", "before_files": [{"content": "\"\"\"\nStatistical tests to be used in conjunction with the models\n\nNotes\n-----\nThese functions haven't been formally tested.\n\"\"\"\n\nfrom scipy import stats\nimport numpy as np\n\n\n#TODO: these are pretty straightforward but they should be tested\ndef durbin_watson(resids):\n \"\"\"\n Calculates the Durbin-Watson statistic\n\n Parameters\n -----------\n resids : array-like\n\n Returns\n --------\n Durbin Watson statistic. 
This is defined as\n sum_(t=2)^(T)((e_t - e_(t-1))^(2))/sum_(t=1)^(T)e_t^(2)\n \"\"\"\n diff_resids = np.diff(resids, 1)\n dw = np.dot(diff_resids, diff_resids) / np.dot(resids, resids)\n return dw\n\ndef omni_normtest(resids, axis=0):\n \"\"\"\n Omnibus test for normality\n\n Parameters\n -----------\n resid : array-like\n axis : int, optional\n Default is 0\n\n Returns\n -------\n Chi^2 score, two-tail probability\n \"\"\"\n #TODO: change to exception in summary branch and catch in summary()\n #behavior changed between scipy 0.9 and 0.10\n resids = np.asarray(resids)\n n = resids.shape[axis]\n if n < 8:\n return np.nan, np.nan\n return_shape = list(resids.shape)\n del return_shape[axis]\n return np.nan * np.zeros(return_shape), np.nan * np.zeros(return_shape)\n raise ValueError(\n \"skewtest is not valid with less than 8 observations; %i samples\"\n \" were given.\" % int(n))\n\n return stats.normaltest(resids, axis=axis)\n\ndef jarque_bera(resids):\n \"\"\"\n Calculate residual skewness, kurtosis, and do the JB test for normality\n\n Parameters\n -----------\n resids : array-like\n\n Returns\n -------\n JB, JBpv, skew, kurtosis\n\n JB = n/6*(S^2 + (K-3)^2/4)\n\n JBpv is the Chi^2 two-tail probability value\n\n skew is the measure of skewness\n\n kurtosis is the measure of kurtosis\n\n \"\"\"\n resids = np.asarray(resids)\n # Calculate residual skewness and kurtosis\n skew = stats.skew(resids)\n kurtosis = 3 + stats.kurtosis(resids)\n\n # Calculate the Jarque-Bera test for normality\n JB = (resids.shape[0] / 6.) * (skew**2 + (1 / 4.) * (kurtosis-3)**2)\n JBpv = stats.chi2.sf(JB,2)\n\n return JB, JBpv, skew, kurtosis\n\n", "path": "statsmodels/stats/stattools.py"}]} | 1,658 | 146 |
gh_patches_debug_15028 | rasdani/github-patches | git_diff | Pyomo__pyomo-1521 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate the pyomo install-extras subcommand
The conda pyomo.extras package supports this functionality more robustly. We should not duplicate this logic in separate places.
</issue>
<code>
[start of pyomo/scripting/plugins/extras.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 import six
12 from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter
13
14 def get_packages():
15 packages = [
16 'sympy',
17 'xlrd',
18 'openpyxl',
19 #('suds-jurko', 'suds'),
20 ('PyYAML', 'yaml'),
21 'pypyodbc',
22 'pymysql',
23 #'openopt',
24 #'FuncDesigner',
25 #'DerApproximator',
26 ('ipython[notebook]', 'IPython'),
27 ('pyro4', 'Pyro4'),
28 ]
29 if six.PY2:
30 packages.append(('pyro','Pyro'))
31 return packages
32
33 def install_extras(args=[], quiet=False):
34 #
35 # Verify that pip is installed
36 #
37 try:
38 import pip
39 pip_version = pip.__version__.split('.')
40 for i,s in enumerate(pip_version):
41 try:
42 pip_version[i] = int(s)
43 except:
44 pass
45 pip_version = tuple(pip_version)
46 except ImportError:
47 print("You must have 'pip' installed to run this script.")
48 raise SystemExit
49
50 cmd = ['--disable-pip-version-check', 'install','--upgrade']
51 # Disable the PIP download cache
52 if pip_version[0] >= 6:
53 cmd.append('--no-cache-dir')
54 else:
55 cmd.append('--download-cache')
56 cmd.append('')
57
58 if not quiet:
59 print(' ')
60 print('-'*60)
61 print("Installation Output Logs")
62 print(" (A summary will be printed below)")
63 print('-'*60)
64 print(' ')
65
66 results = {}
67 for package in get_packages():
68 if type(package) is tuple:
69 package, pkg_import = package
70 else:
71 pkg_import = package
72 try:
73 # Allow the user to provide extra options
74 pip.main(cmd + args + [package])
75 __import__(pkg_import)
76 results[package] = True
77 except:
78 results[package] = False
79 try:
80 pip.logger.consumers = []
81 except AttributeError:
82 # old pip versions (prior to 6.0~104^2)
83 pip.log.consumers = []
84
85 if not quiet:
86 print(' ')
87 print(' ')
88 print('-'*60)
89 print("Installation Summary")
90 print('-'*60)
91 print(' ')
92 for package, result in sorted(six.iteritems(results)):
93 if result:
94 print("YES %s" % package)
95 else:
96 print("NO %s" % package)
97
98
99 def pyomo_subcommand(options):
100 return install_extras(options.args, quiet=options.quiet)
101
102
103 _parser = add_subparser(
104 'install-extras',
105 func=pyomo_subcommand,
106 help='Install "extra" packages that Pyomo can leverage.',
107 description="""
108 This pyomo subcommand uses PIP to install optional third-party Python
109 packages that Pyomo could leverage from PyPI. The installation of some
110 packages may fail, but this subcommand ignore these failures and
111 provides a summary describing which packages were installed.
112 """,
113 epilog="""
114 Since pip options begin with a dash, the --pip-args option can only be
115 used with the equals syntax. --pip-args may appear multiple times on
116 the command line. For example:\n\n
117 pyomo install-extras --pip-args="--upgrade"
118 """,
119 formatter_class=CustomHelpFormatter,
120 )
121
122 _parser.add_argument(
123 '-q', '--quiet',
124 action='store_true',
125 dest='quiet',
126 default=False,
127 help="Suppress some terminal output",
128 )
129 _parser.add_argument(
130 "--pip-args",
131 dest="args",
132 action="append",
133 help=("Arguments that are passed to the 'pip' command when "
134 "installing packages"),
135 )
136
137
[end of pyomo/scripting/plugins/extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/scripting/plugins/extras.py b/pyomo/scripting/plugins/extras.py
--- a/pyomo/scripting/plugins/extras.py
+++ b/pyomo/scripting/plugins/extras.py
@@ -11,6 +11,8 @@
import six
from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter
+from pyomo.common.deprecation import deprecated
+
def get_packages():
packages = [
'sympy',
@@ -30,6 +32,11 @@
packages.append(('pyro','Pyro'))
return packages
+@deprecated(
+ "Use of the pyomo install-extras is deprecated."
+ "The current recommended course of action is to manually install "
+ "optional dependencies as needed.",
+ version='TBD')
def install_extras(args=[], quiet=False):
#
# Verify that pip is installed
| {"golden_diff": "diff --git a/pyomo/scripting/plugins/extras.py b/pyomo/scripting/plugins/extras.py\n--- a/pyomo/scripting/plugins/extras.py\n+++ b/pyomo/scripting/plugins/extras.py\n@@ -11,6 +11,8 @@\n import six\n from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter\n \n+from pyomo.common.deprecation import deprecated\n+\n def get_packages():\n packages = [\n 'sympy', \n@@ -30,6 +32,11 @@\n packages.append(('pyro','Pyro'))\n return packages\n \n+@deprecated(\n+ \"Use of the pyomo install-extras is deprecated.\"\n+ \"The current recommended course of action is to manually install \"\n+ \"optional dependencies as needed.\",\n+ version='TBD')\n def install_extras(args=[], quiet=False):\n #\n # Verify that pip is installed\n", "issue": "Deprecate the pyomo install-extras subcommand\nThe conda pyomo.extras package supports this functionality more robustly. We should not duplicate this logic in separate places.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport six\nfrom pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter\n\ndef get_packages():\n packages = [\n 'sympy', \n 'xlrd', \n 'openpyxl', \n #('suds-jurko', 'suds'),\n ('PyYAML', 'yaml'),\n 'pypyodbc', \n 'pymysql', \n #'openopt', \n #'FuncDesigner', \n #'DerApproximator', \n ('ipython[notebook]', 'IPython'),\n ('pyro4', 'Pyro4'),\n ]\n if six.PY2:\n packages.append(('pyro','Pyro'))\n return packages\n\ndef install_extras(args=[], quiet=False):\n #\n # Verify that pip is installed\n #\n try:\n import pip\n pip_version = pip.__version__.split('.')\n for i,s in enumerate(pip_version):\n try:\n pip_version[i] = int(s)\n except:\n pass\n pip_version = tuple(pip_version)\n except ImportError:\n print(\"You must have 'pip' installed to run this script.\")\n raise SystemExit\n\n cmd = ['--disable-pip-version-check', 'install','--upgrade']\n # Disable the PIP download cache\n if pip_version[0] >= 6:\n cmd.append('--no-cache-dir')\n else:\n cmd.append('--download-cache')\n cmd.append('')\n\n if not quiet:\n print(' ')\n print('-'*60)\n print(\"Installation Output Logs\")\n print(\" (A summary will be printed below)\")\n print('-'*60)\n print(' ')\n\n results = {}\n for package in get_packages():\n if type(package) is tuple:\n package, pkg_import = package\n else:\n pkg_import = package\n try:\n # Allow the user to provide extra options\n pip.main(cmd + args + [package])\n __import__(pkg_import)\n results[package] = True\n except:\n results[package] = False\n try:\n pip.logger.consumers = []\n except AttributeError:\n # old pip versions (prior to 6.0~104^2)\n pip.log.consumers = []\n\n if not quiet:\n print(' ')\n print(' ')\n print('-'*60)\n print(\"Installation Summary\")\n print('-'*60)\n print(' ')\n for package, result in sorted(six.iteritems(results)):\n if result:\n print(\"YES %s\" % package)\n else:\n print(\"NO %s\" % package)\n\n\ndef pyomo_subcommand(options):\n return install_extras(options.args, quiet=options.quiet)\n\n\n_parser = add_subparser(\n 'install-extras',\n 
func=pyomo_subcommand,\n help='Install \"extra\" packages that Pyomo can leverage.',\n description=\"\"\"\nThis pyomo subcommand uses PIP to install optional third-party Python\npackages that Pyomo could leverage from PyPI. The installation of some\npackages may fail, but this subcommand ignore these failures and\nprovides a summary describing which packages were installed.\n\"\"\",\n epilog=\"\"\"\nSince pip options begin with a dash, the --pip-args option can only be\nused with the equals syntax. --pip-args may appear multiple times on\nthe command line. For example:\\n\\n\n pyomo install-extras --pip-args=\"--upgrade\"\n\"\"\",\n formatter_class=CustomHelpFormatter,\n)\n\n_parser.add_argument(\n '-q', '--quiet',\n action='store_true',\n dest='quiet',\n default=False,\n help=\"Suppress some terminal output\",\n)\n_parser.add_argument(\n \"--pip-args\",\n dest=\"args\",\n action=\"append\",\n help=(\"Arguments that are passed to the 'pip' command when \"\n \"installing packages\"),\n)\n\n", "path": "pyomo/scripting/plugins/extras.py"}]} | 1,826 | 196 |
gh_patches_debug_39322 | rasdani/github-patches | git_diff | carpentries__amy-583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add event organizer info to the API
Compute Canada would like to be able to use the API to pull all the events it is hosting and then use this information to populate website.
Might be nice to have the EventBrite IDs there too.
</issue>
<code>
[start of api/serializers.py]
1 from rest_framework import serializers
2
3 from workshops.models import Badge, Airport, Person, Event
4
5
6 class PersonUsernameSerializer(serializers.ModelSerializer):
7 name = serializers.CharField(source='get_full_name')
8 user = serializers.CharField(source='username')
9
10 class Meta:
11 model = Person
12 fields = ('name', 'user', )
13
14
15 class ExportBadgesSerializer(serializers.ModelSerializer):
16 persons = PersonUsernameSerializer(many=True, source='person_set')
17
18 class Meta:
19 model = Badge
20 fields = ('name', 'persons')
21
22
23 class ExportInstructorLocationsSerializer(serializers.ModelSerializer):
24 name = serializers.CharField(source='fullname')
25 instructors = PersonUsernameSerializer(many=True, source='person_set')
26
27 class Meta:
28 model = Airport
29 fields = ('name', 'latitude', 'longitude', 'instructors', 'country')
30
31
32 class EventSerializer(serializers.ModelSerializer):
33 humandate = serializers.SerializerMethodField()
34 country = serializers.CharField()
35 start = serializers.DateField(format=None)
36 end = serializers.DateField(format=None)
37 url = serializers.URLField(source='website_url')
38
39 def get_humandate(self, obj):
40 """Render start and end dates as human-readable short date."""
41 return EventSerializer.human_readable_date(obj.start, obj.end)
42
43 @staticmethod
44 def human_readable_date(date1, date2):
45 """Render start and end dates as human-readable short date."""
46 if date1 and not date2:
47 return '{:%b %d, %Y}-???'.format(date1)
48 elif date2 and not date1:
49 return '???-{:%b %d, %Y}'.format(date2)
50 elif not date2 and not date1:
51 return '???-???'
52
53 if date1.year == date2.year:
54 if date1.month == date2.month:
55 return '{:%b %d}-{:%d, %Y}'.format(date1, date2)
56 else:
57 return '{:%b %d}-{:%b %d, %Y}'.format(date1, date2)
58 else:
59 return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2)
60
61 class Meta:
62 model = Event
63 fields = (
64 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',
65 'venue', 'address', 'latitude', 'longitude',
66 )
67
[end of api/serializers.py]
[start of api/views.py]
1 from django.db.models import Q
2 from rest_framework.generics import ListAPIView
3 from rest_framework.permissions import IsAuthenticatedOrReadOnly
4 from rest_framework.response import Response
5 from rest_framework.reverse import reverse
6 from rest_framework.views import APIView
7
8 from workshops.models import Badge, Airport, Event
9
10 from .serializers import (
11 ExportBadgesSerializer,
12 ExportInstructorLocationsSerializer,
13 EventSerializer,
14 )
15
16
17 class ApiRoot(APIView):
18 def get(self, request, format=None):
19 return Response({
20 'export-badges': reverse('api:export-badges', request=request,
21 format=format),
22 'export-instructors': reverse('api:export-instructors',
23 request=request, format=format),
24 'events-published': reverse('api:events-published',
25 request=request, format=format),
26 })
27
28
29 class ExportBadgesView(ListAPIView):
30 """List all badges and people who have them."""
31 permission_classes = (IsAuthenticatedOrReadOnly, )
32 paginator = None # disable pagination
33
34 queryset = Badge.objects.prefetch_related('person_set')
35 serializer_class = ExportBadgesSerializer
36
37
38 class ExportInstructorLocationsView(ListAPIView):
39 """List all airports and instructors located near them."""
40 permission_classes = (IsAuthenticatedOrReadOnly, )
41 paginator = None # disable pagination
42
43 queryset = Airport.objects.exclude(person=None) \
44 .prefetch_related('person_set')
45 serializer_class = ExportInstructorLocationsSerializer
46
47
48 class PublishedEvents(ListAPIView):
49 # only events that have both a starting date and a URL
50 permission_classes = (IsAuthenticatedOrReadOnly, )
51 paginator = None # disable pagination
52
53 serializer_class = EventSerializer
54 queryset = Event.objects.published_events()
55
[end of api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/serializers.py b/api/serializers.py
--- a/api/serializers.py
+++ b/api/serializers.py
@@ -35,6 +35,7 @@
start = serializers.DateField(format=None)
end = serializers.DateField(format=None)
url = serializers.URLField(source='website_url')
+ eventbrite_id = serializers.CharField(source='reg_key')
def get_humandate(self, obj):
"""Render start and end dates as human-readable short date."""
@@ -62,5 +63,5 @@
model = Event
fields = (
'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',
- 'venue', 'address', 'latitude', 'longitude',
+ 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',
)
diff --git a/api/views.py b/api/views.py
--- a/api/views.py
+++ b/api/views.py
@@ -1,5 +1,6 @@
from django.db.models import Q
from rest_framework.generics import ListAPIView
+from rest_framework.metadata import SimpleMetadata
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.reverse import reverse
@@ -14,6 +15,21 @@
)
+class QueryMetadata(SimpleMetadata):
+ """Additionally include info about query parameters."""
+
+ def determine_metadata(self, request, view):
+ print('doing something')
+ data = super().determine_metadata(request, view)
+
+ try:
+ data['query_params'] = view.get_query_params_description()
+ except AttributeError:
+ pass
+
+ return data
+
+
class ApiRoot(APIView):
def get(self, request, format=None):
return Response({
@@ -46,9 +62,34 @@
class PublishedEvents(ListAPIView):
+ """List published events."""
+
# only events that have both a starting date and a URL
permission_classes = (IsAuthenticatedOrReadOnly, )
paginator = None # disable pagination
serializer_class = EventSerializer
- queryset = Event.objects.published_events()
+
+ metadata_class = QueryMetadata
+
+ def get_queryset(self):
+ """Optionally restrict the returned event set to events hosted by
+ specific host or administered by specific admin."""
+ queryset = Event.objects.published_events()
+
+ administrator = self.request.query_params.get('administrator', None)
+ if administrator is not None:
+ queryset = queryset.filter(administrator__pk=administrator)
+
+ host = self.request.query_params.get('host', None)
+ if host is not None:
+ queryset = queryset.filter(host__pk=host)
+
+ return queryset
+
+ def get_query_params_description(self):
+ return {
+ 'administrator': 'ID of the organization responsible for admin '
+ 'work on events.',
+ 'host': 'ID of the organization hosting the event.',
+ }
| {"golden_diff": "diff --git a/api/serializers.py b/api/serializers.py\n--- a/api/serializers.py\n+++ b/api/serializers.py\n@@ -35,6 +35,7 @@\n start = serializers.DateField(format=None)\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n+ eventbrite_id = serializers.CharField(source='reg_key')\n \n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n@@ -62,5 +63,5 @@\n model = Event\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n- 'venue', 'address', 'latitude', 'longitude',\n+ 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',\n )\ndiff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -1,5 +1,6 @@\n from django.db.models import Q\n from rest_framework.generics import ListAPIView\n+from rest_framework.metadata import SimpleMetadata\n from rest_framework.permissions import IsAuthenticatedOrReadOnly\n from rest_framework.response import Response\n from rest_framework.reverse import reverse\n@@ -14,6 +15,21 @@\n )\n \n \n+class QueryMetadata(SimpleMetadata):\n+ \"\"\"Additionally include info about query parameters.\"\"\"\n+\n+ def determine_metadata(self, request, view):\n+ print('doing something')\n+ data = super().determine_metadata(request, view)\n+\n+ try:\n+ data['query_params'] = view.get_query_params_description()\n+ except AttributeError:\n+ pass\n+\n+ return data\n+\n+\n class ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n@@ -46,9 +62,34 @@\n \n \n class PublishedEvents(ListAPIView):\n+ \"\"\"List published events.\"\"\"\n+\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n \n serializer_class = EventSerializer\n- queryset = Event.objects.published_events()\n+\n+ metadata_class = QueryMetadata\n+\n+ def get_queryset(self):\n+ \"\"\"Optionally restrict the returned event set to events hosted by\n+ specific host or administered by specific admin.\"\"\"\n+ queryset = Event.objects.published_events()\n+\n+ administrator = self.request.query_params.get('administrator', None)\n+ if administrator is not None:\n+ queryset = queryset.filter(administrator__pk=administrator)\n+\n+ host = self.request.query_params.get('host', None)\n+ if host is not None:\n+ queryset = queryset.filter(host__pk=host)\n+\n+ return queryset\n+\n+ def get_query_params_description(self):\n+ return {\n+ 'administrator': 'ID of the organization responsible for admin '\n+ 'work on events.',\n+ 'host': 'ID of the organization hosting the event.',\n+ }\n", "issue": "Add event organizer info to the API\nCompute Canada would like to be able to use the API to pull all the events it is hosting and then use this information to populate website.\n\nMight be nice to have the EventBrite IDs there too.\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom workshops.models import Badge, Airport, Person, Event\n\n\nclass PersonUsernameSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='get_full_name')\n user = serializers.CharField(source='username')\n\n class Meta:\n model = Person\n fields = ('name', 'user', )\n\n\nclass ExportBadgesSerializer(serializers.ModelSerializer):\n persons = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Badge\n fields = ('name', 'persons')\n\n\nclass 
ExportInstructorLocationsSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='fullname')\n instructors = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Airport\n fields = ('name', 'latitude', 'longitude', 'instructors', 'country')\n\n\nclass EventSerializer(serializers.ModelSerializer):\n humandate = serializers.SerializerMethodField()\n country = serializers.CharField()\n start = serializers.DateField(format=None)\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n\n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n return EventSerializer.human_readable_date(obj.start, obj.end)\n\n @staticmethod\n def human_readable_date(date1, date2):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n if date1 and not date2:\n return '{:%b %d, %Y}-???'.format(date1)\n elif date2 and not date1:\n return '???-{:%b %d, %Y}'.format(date2)\n elif not date2 and not date1:\n return '???-???'\n\n if date1.year == date2.year:\n if date1.month == date2.month:\n return '{:%b %d}-{:%d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d}-{:%b %d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2)\n\n class Meta:\n model = Event\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n 'venue', 'address', 'latitude', 'longitude',\n )\n", "path": "api/serializers.py"}, {"content": "from django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event\n\nfrom .serializers import (\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n)\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass PublishedEvents(ListAPIView):\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n queryset = Event.objects.published_events()\n", "path": "api/views.py"}]} | 1,705 | 654 |
gh_patches_debug_7093 | rasdani/github-patches | git_diff | ckan__ckan-260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Recline does not preview datastore anymore
The new plugin does not evaluate `datastore_active`.
<!---
@huboard:{"order":247.0}
-->
Recline does not preview datastore anymore
The new plugin does not evaluate `datastore_active`.
<!---
@huboard:{"order":247.0}
-->
</issue>
<code>
[start of ckanext/reclinepreview/plugin.py]
1 from logging import getLogger
2
3 import ckan.plugins as p
4 import ckan.plugins.toolkit as toolkit
5
6 log = getLogger(__name__)
7
8
9 class ReclinePreview(p.SingletonPlugin):
10 """This extension previews resources using recline
11
12 This extension implements two interfaces
13
14 - ``IConfigurer`` allows to modify the configuration
15 - ``IResourcePreview`` allows to add previews
16 """
17 p.implements(p.IConfigurer, inherit=True)
18 p.implements(p.IResourcePreview, inherit=True)
19
20 def update_config(self, config):
21 ''' Set up the resource library, public directory and
22 template directory for the preview
23 '''
24 toolkit.add_public_directory(config, 'theme/public')
25 toolkit.add_template_directory(config, 'theme/templates')
26 toolkit.add_resource('theme/public', 'ckanext-reclinepreview')
27
28 def can_preview(self, data_dict):
29 format_lower = data_dict['resource']['format'].lower()
30 return format_lower in ['csv', 'xls', 'tsv']
31
32 def preview_template(self, context, data_dict):
33 return 'recline.html'
34
[end of ckanext/reclinepreview/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext/reclinepreview/plugin.py b/ckanext/reclinepreview/plugin.py
--- a/ckanext/reclinepreview/plugin.py
+++ b/ckanext/reclinepreview/plugin.py
@@ -26,6 +26,9 @@
toolkit.add_resource('theme/public', 'ckanext-reclinepreview')
def can_preview(self, data_dict):
+ # if the resource is in the datastore then we can preview it with recline
+ if data_dict['resource'].get('datastore_active'):
+ return True
format_lower = data_dict['resource']['format'].lower()
return format_lower in ['csv', 'xls', 'tsv']
| {"golden_diff": "diff --git a/ckanext/reclinepreview/plugin.py b/ckanext/reclinepreview/plugin.py\n--- a/ckanext/reclinepreview/plugin.py\n+++ b/ckanext/reclinepreview/plugin.py\n@@ -26,6 +26,9 @@\n toolkit.add_resource('theme/public', 'ckanext-reclinepreview')\n \n def can_preview(self, data_dict):\n+ # if the resource is in the datastore then we can preview it with recline\n+ if data_dict['resource'].get('datastore_active'):\n+ return True\n format_lower = data_dict['resource']['format'].lower()\n return format_lower in ['csv', 'xls', 'tsv']\n", "issue": "Recline does not preview datastore anymore\nThe new plugin does not evaluate `datastore_active`.\n\n<!---\n@huboard:{\"order\":247.0}\n-->\n\nRecline does not preview datastore anymore\nThe new plugin does not evaluate `datastore_active`.\n\n<!---\n@huboard:{\"order\":247.0}\n-->\n\n", "before_files": [{"content": "from logging import getLogger\n\nimport ckan.plugins as p\nimport ckan.plugins.toolkit as toolkit\n\nlog = getLogger(__name__)\n\n\nclass ReclinePreview(p.SingletonPlugin):\n \"\"\"This extension previews resources using recline\n\n This extension implements two interfaces\n\n - ``IConfigurer`` allows to modify the configuration\n - ``IResourcePreview`` allows to add previews\n \"\"\"\n p.implements(p.IConfigurer, inherit=True)\n p.implements(p.IResourcePreview, inherit=True)\n\n def update_config(self, config):\n ''' Set up the resource library, public directory and\n template directory for the preview\n '''\n toolkit.add_public_directory(config, 'theme/public')\n toolkit.add_template_directory(config, 'theme/templates')\n toolkit.add_resource('theme/public', 'ckanext-reclinepreview')\n\n def can_preview(self, data_dict):\n format_lower = data_dict['resource']['format'].lower()\n return format_lower in ['csv', 'xls', 'tsv']\n\n def preview_template(self, context, data_dict):\n return 'recline.html'\n", "path": "ckanext/reclinepreview/plugin.py"}]} | 904 | 153 |
gh_patches_debug_18458 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
I miss one with C-Trace.de/WZV
Hello guys,
I just switched from ics to C-Trace.de. Since then, unfortunately, it no longer shows me all the bins. I'm missing the residual waste, everything else is displayed as usual. Can someone help me?
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py]
1 import requests
2 from waste_collection_schedule import Collection # type: ignore[attr-defined]
3 from waste_collection_schedule.service.ICS import ICS
4
5 TITLE = "C-Trace"
6 DESCRIPTION = "Source for C-Trace.de."
7 URL = "https://c-trace.de/"
8 EXTRA_INFO = [
9 {
10 "title": "Bremener Stadreinigung",
11 "url": "https://www.die-bremer-stadtreinigung.de/",
12 },
13 {
14 "title": "AWB Landkreis Augsburg",
15 "url": "https://www.awb-landkreis-augsburg.de/",
16 },
17 {
18 "title": "WZV Kreis Segeberg",
19 "url": "https://www.wzv.de/",
20 },
21 ]
22 TEST_CASES = {
23 "Bremen": {"ort": "Bremen", "strasse": "Abbentorstraße", "hausnummer": 5},
24 "AugsburgLand": {
25 "ort": "Königsbrunn",
26 "strasse": "Marktplatz",
27 "hausnummer": 7,
28 "service": "augsburglandkreis",
29 },
30 }
31
32
33 BASE_URL = "https://web.c-trace.de"
34
35
36 class Source:
37 def __init__(self, ort, strasse, hausnummer, service=None):
38 # Compatibility handling for Bremen which was the first supported
39 # district and didn't require to set a service name.
40 if service is None:
41 if ort == "Bremen":
42 service = "bremenabfallkalender"
43 else:
44 raise Exception("service is missing")
45
46 self._service = service
47 self._ort = ort
48 self._strasse = strasse
49 self._hausnummer = hausnummer
50 self._ics = ICS(regex=r"Abfuhr: (.*)")
51
52 def fetch(self):
53 session = requests.session()
54
55 # get session url
56 r = session.get(
57 f"{BASE_URL}/{self._service}/Abfallkalender",
58 allow_redirects=False,
59 )
60 session_id = r.headers["location"].split("/")[
61 2
62 ] # session_id like "(S(r3bme50igdgsp2lstgxxhvs2))"
63
64 args = {
65 "Ort": self._ort,
66 "Gemeinde": self._ort,
67 "Strasse": self._strasse,
68 "Hausnr": self._hausnummer,
69 "Abfall": "|".join(str(i) for i in range(1, 99)), # return all waste types
70 }
71 r = session.get(
72 f"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal", params=args
73 )
74 r.raise_for_status()
75
76 # parse ics file
77 r.encoding = "utf-8"
78 dates = self._ics.convert(r.text)
79
80 entries = []
81 for d in dates:
82 entries.append(Collection(d[0], d[1]))
83 return entries
84
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py
@@ -27,6 +27,12 @@
"hausnummer": 7,
"service": "augsburglandkreis",
},
+ "WZV": {
+ "ort": "Bark",
+ "strasse": "Birkenweg",
+ "hausnummer": 1,
+ "service": "segebergwzv-abfallkalender",
+ },
}
@@ -66,7 +72,7 @@
"Gemeinde": self._ort,
"Strasse": self._strasse,
"Hausnr": self._hausnummer,
- "Abfall": "|".join(str(i) for i in range(1, 99)), # return all waste types
+ "Abfall": "|".join(str(i) for i in range(0, 99)), # return all waste types
}
r = session.get(
f"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal", params=args
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py\n@@ -27,6 +27,12 @@\n \"hausnummer\": 7,\n \"service\": \"augsburglandkreis\",\n },\n+ \"WZV\": {\n+ \"ort\": \"Bark\",\n+ \"strasse\": \"Birkenweg\",\n+ \"hausnummer\": 1,\n+ \"service\": \"segebergwzv-abfallkalender\",\n+ },\n }\n \n \n@@ -66,7 +72,7 @@\n \"Gemeinde\": self._ort,\n \"Strasse\": self._strasse,\n \"Hausnr\": self._hausnummer,\n- \"Abfall\": \"|\".join(str(i) for i in range(1, 99)), # return all waste types\n+ \"Abfall\": \"|\".join(str(i) for i in range(0, 99)), # return all waste types\n }\n r = session.get(\n f\"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal\", params=args\n", "issue": "I miss one with C-Trace.de/WZV\nHello guys,\r\n\r\nI just switched from ics to C-Trace.de. Since then, unfortunately, it no longer shows me all the bins. I'm missing the residual waste, everything else is displayed as usual. Can someone help me?\r\n\r\n\n", "before_files": [{"content": "import requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"C-Trace\"\nDESCRIPTION = \"Source for C-Trace.de.\"\nURL = \"https://c-trace.de/\"\nEXTRA_INFO = [\n {\n \"title\": \"Bremener Stadreinigung\",\n \"url\": \"https://www.die-bremer-stadtreinigung.de/\",\n },\n {\n \"title\": \"AWB Landkreis Augsburg\",\n \"url\": \"https://www.awb-landkreis-augsburg.de/\",\n },\n {\n \"title\": \"WZV Kreis Segeberg\",\n \"url\": \"https://www.wzv.de/\",\n },\n]\nTEST_CASES = {\n \"Bremen\": {\"ort\": \"Bremen\", \"strasse\": \"Abbentorstra\u00dfe\", \"hausnummer\": 5},\n \"AugsburgLand\": {\n \"ort\": \"K\u00f6nigsbrunn\",\n \"strasse\": \"Marktplatz\",\n \"hausnummer\": 7,\n \"service\": \"augsburglandkreis\",\n },\n}\n\n\nBASE_URL = \"https://web.c-trace.de\"\n\n\nclass Source:\n def __init__(self, ort, strasse, hausnummer, service=None):\n # Compatibility handling for Bremen which was the first supported\n # district and didn't require to set a service name.\n if service is None:\n if ort == \"Bremen\":\n service = \"bremenabfallkalender\"\n else:\n raise Exception(\"service is missing\")\n\n self._service = service\n self._ort = ort\n self._strasse = strasse\n self._hausnummer = hausnummer\n self._ics = ICS(regex=r\"Abfuhr: (.*)\")\n\n def fetch(self):\n session = requests.session()\n\n # get session url\n r = session.get(\n f\"{BASE_URL}/{self._service}/Abfallkalender\",\n allow_redirects=False,\n )\n session_id = r.headers[\"location\"].split(\"/\")[\n 2\n ] # session_id like \"(S(r3bme50igdgsp2lstgxxhvs2))\"\n\n args = {\n \"Ort\": self._ort,\n \"Gemeinde\": self._ort,\n \"Strasse\": self._strasse,\n \"Hausnr\": self._hausnummer,\n \"Abfall\": \"|\".join(str(i) for i in range(1, 99)), # return all waste types\n }\n r = session.get(\n f\"{BASE_URL}/{self._service}/{session_id}/abfallkalender/cal\", params=args\n )\n r.raise_for_status()\n\n # parse ics file\n r.encoding = \"utf-8\"\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": 
"custom_components/waste_collection_schedule/waste_collection_schedule/source/c_trace_de.py"}]} | 1,433 | 301 |
gh_patches_debug_24900 | rasdani/github-patches | git_diff | liberapay__liberapay.com-502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add support for xmpp: uri in markdown syntax
When adding and XMPP uri in the following form:
`[[email protected]](xmpp:[email protected]?join)`
the uri syntax is shown raw instead of linking to the room as expected.
add support for xmpp: uri in markdown syntax
When adding and XMPP uri in the following form:
`[[email protected]](xmpp:[email protected]?join)`
the uri syntax is shown raw instead of linking to the room as expected.
</issue>
<code>
[start of liberapay/utils/markdown.py]
1 from markupsafe import Markup
2 import misaka as m # http://misaka.61924.nl/
3
4 def render(markdown):
5 return Markup(m.html(
6 markdown,
7 extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,
8 render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK
9 ))
10
[end of liberapay/utils/markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/utils/markdown.py b/liberapay/utils/markdown.py
--- a/liberapay/utils/markdown.py
+++ b/liberapay/utils/markdown.py
@@ -1,9 +1,41 @@
-from markupsafe import Markup
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import re
+
+from markupsafe import Markup, escape
import misaka as m # http://misaka.61924.nl/
+
+url_re = re.compile(r'^(https?|xmpp):')
+
+
+class CustomRenderer(m.HtmlRenderer):
+
+ def image(self, link, title='', alt=''):
+ if url_re.match(link):
+ maybe_alt = Markup(' alt="%s"') % alt if alt else ''
+ maybe_title = Markup(' title="%s"') % title if title else ''
+ return Markup('<img src="%s"%s%s />') % (link, maybe_alt, maybe_title)
+ else:
+ return escape("" % (alt, link))
+
+ def link(self, content, link, title=''):
+ if url_re.match(link):
+ maybe_title = Markup(' title="%s"') % title if title else ''
+ return Markup('<a href="%s"%s>%s</a>') % (link, maybe_title, content)
+ else:
+ return escape("[%s](%s)" % (content, link))
+
+ def autolink(self, link, is_email):
+ if url_re.match(link):
+ return Markup('<a href="%s">%s</a>') % (link, link)
+ else:
+ return escape('<%s>' % link)
+
+
+renderer = CustomRenderer(flags=m.HTML_SKIP_HTML)
+md = m.Markdown(renderer, extensions=('autolink', 'strikethrough', 'no-intra-emphasis'))
+
+
def render(markdown):
- return Markup(m.html(
- markdown,
- extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,
- render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK
- ))
+ return Markup(md(markdown))
| {"golden_diff": "diff --git a/liberapay/utils/markdown.py b/liberapay/utils/markdown.py\n--- a/liberapay/utils/markdown.py\n+++ b/liberapay/utils/markdown.py\n@@ -1,9 +1,41 @@\n-from markupsafe import Markup\n+from __future__ import absolute_import, division, print_function, unicode_literals\n+\n+import re\n+\n+from markupsafe import Markup, escape\n import misaka as m # http://misaka.61924.nl/\n \n+\n+url_re = re.compile(r'^(https?|xmpp):')\n+\n+\n+class CustomRenderer(m.HtmlRenderer):\n+\n+ def image(self, link, title='', alt=''):\n+ if url_re.match(link):\n+ maybe_alt = Markup(' alt=\"%s\"') % alt if alt else ''\n+ maybe_title = Markup(' title=\"%s\"') % title if title else ''\n+ return Markup('<img src=\"%s\"%s%s />') % (link, maybe_alt, maybe_title)\n+ else:\n+ return escape(\"\" % (alt, link))\n+\n+ def link(self, content, link, title=''):\n+ if url_re.match(link):\n+ maybe_title = Markup(' title=\"%s\"') % title if title else ''\n+ return Markup('<a href=\"%s\"%s>%s</a>') % (link, maybe_title, content)\n+ else:\n+ return escape(\"[%s](%s)\" % (content, link))\n+\n+ def autolink(self, link, is_email):\n+ if url_re.match(link):\n+ return Markup('<a href=\"%s\">%s</a>') % (link, link)\n+ else:\n+ return escape('<%s>' % link)\n+\n+\n+renderer = CustomRenderer(flags=m.HTML_SKIP_HTML)\n+md = m.Markdown(renderer, extensions=('autolink', 'strikethrough', 'no-intra-emphasis'))\n+\n+\n def render(markdown):\n- return Markup(m.html(\n- markdown,\n- extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,\n- render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK\n- ))\n+ return Markup(md(markdown))\n", "issue": "add support for xmpp: uri in markdown syntax\nWhen adding and XMPP uri in the following form:\r\n`[[email protected]](xmpp:[email protected]?join)`\r\nthe uri syntax is shown raw instead of linking to the room as expected.\nadd support for xmpp: uri in markdown syntax\nWhen adding and XMPP uri in the following form:\r\n`[[email protected]](xmpp:[email protected]?join)`\r\nthe uri syntax is shown raw instead of linking to the room as expected.\n", "before_files": [{"content": "from markupsafe import Markup\nimport misaka as m # http://misaka.61924.nl/\n\ndef render(markdown):\n return Markup(m.html(\n markdown,\n extensions=m.EXT_AUTOLINK | m.EXT_STRIKETHROUGH | m.EXT_NO_INTRA_EMPHASIS,\n render_flags=m.HTML_SKIP_HTML | m.HTML_TOC | m.HTML_SMARTYPANTS | m.HTML_SAFELINK\n ))\n", "path": "liberapay/utils/markdown.py"}]} | 775 | 513 |
gh_patches_debug_4431 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-2528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mycroft "devices" web UI doesn't show core version
Version/setup same as MycroftAI/mycroft-core#2523 2523
## Try to provide steps that we can use to replicate the Issue
Hit up https://account.mycroft.ai/devices

## Provide log files or other output to help us see the error
N/A TBD (can help investigate let me know how) per the ref'd ticket the "self support" method didn't work
</issue>
<code>
[start of mycroft/version/__init__.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import json
16
17 from genericpath import exists, isfile
18 from os.path import join, expanduser
19
20 from mycroft.configuration import Configuration
21 from mycroft.util.log import LOG
22
23
24 # The following lines are replaced during the release process.
25 # START_VERSION_BLOCK
26 CORE_VERSION_MAJOR = 20
27 CORE_VERSION_MINOR = 2
28 CORE_VERSION_BUILD = 1
29 # END_VERSION_BLOCK
30
31 CORE_VERSION_TUPLE = (CORE_VERSION_MAJOR,
32 CORE_VERSION_MINOR,
33 CORE_VERSION_BUILD)
34 CORE_VERSION_STR = '.'.join(map(str, CORE_VERSION_TUPLE))
35
36
37 class VersionManager:
38 @staticmethod
39 def get():
40 data_dir = expanduser(Configuration.get()['data_dir'])
41 version_file = join(data_dir, 'version.json')
42 if exists(version_file) and isfile(version_file):
43 try:
44 with open(version_file) as f:
45 return json.load(f)
46 except Exception:
47 LOG.error("Failed to load version from '%s'" % version_file)
48 return {"coreVersion": None, "enclosureVersion": None}
49
50
51 def check_version(version_string):
52 """
53 Check if current version is equal or higher than the
54 version string provided to the function
55
56 Args:
57 version_string (string): version string ('Major.Minor.Build')
58 """
59 version_tuple = tuple(map(int, version_string.split('.')))
60 return CORE_VERSION_TUPLE >= version_tuple
61
[end of mycroft/version/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/version/__init__.py b/mycroft/version/__init__.py
--- a/mycroft/version/__init__.py
+++ b/mycroft/version/__init__.py
@@ -45,7 +45,7 @@
return json.load(f)
except Exception:
LOG.error("Failed to load version from '%s'" % version_file)
- return {"coreVersion": None, "enclosureVersion": None}
+ return {"coreVersion": CORE_VERSION_STR, "enclosureVersion": None}
def check_version(version_string):
| {"golden_diff": "diff --git a/mycroft/version/__init__.py b/mycroft/version/__init__.py\n--- a/mycroft/version/__init__.py\n+++ b/mycroft/version/__init__.py\n@@ -45,7 +45,7 @@\n return json.load(f)\n except Exception:\n LOG.error(\"Failed to load version from '%s'\" % version_file)\n- return {\"coreVersion\": None, \"enclosureVersion\": None}\n+ return {\"coreVersion\": CORE_VERSION_STR, \"enclosureVersion\": None}\n \n \n def check_version(version_string):\n", "issue": "mycroft \"devices\" web UI doesn't show core version\n\r\nVersion/setup same as MycroftAI/mycroft-core#2523 2523\r\n\r\n## Try to provide steps that we can use to replicate the Issue\r\n\r\nHit up https://account.mycroft.ai/devices\r\n\r\n\r\n## Provide log files or other output to help us see the error\r\n\r\nN/A TBD (can help investigate let me know how) per the ref'd ticket the \"self support\" method didn't work\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport json\n\nfrom genericpath import exists, isfile\nfrom os.path import join, expanduser\n\nfrom mycroft.configuration import Configuration\nfrom mycroft.util.log import LOG\n\n\n# The following lines are replaced during the release process.\n# START_VERSION_BLOCK\nCORE_VERSION_MAJOR = 20\nCORE_VERSION_MINOR = 2\nCORE_VERSION_BUILD = 1\n# END_VERSION_BLOCK\n\nCORE_VERSION_TUPLE = (CORE_VERSION_MAJOR,\n CORE_VERSION_MINOR,\n CORE_VERSION_BUILD)\nCORE_VERSION_STR = '.'.join(map(str, CORE_VERSION_TUPLE))\n\n\nclass VersionManager:\n @staticmethod\n def get():\n data_dir = expanduser(Configuration.get()['data_dir'])\n version_file = join(data_dir, 'version.json')\n if exists(version_file) and isfile(version_file):\n try:\n with open(version_file) as f:\n return json.load(f)\n except Exception:\n LOG.error(\"Failed to load version from '%s'\" % version_file)\n return {\"coreVersion\": None, \"enclosureVersion\": None}\n\n\ndef check_version(version_string):\n \"\"\"\n Check if current version is equal or higher than the\n version string provided to the function\n\n Args:\n version_string (string): version string ('Major.Minor.Build')\n \"\"\"\n version_tuple = tuple(map(int, version_string.split('.')))\n return CORE_VERSION_TUPLE >= version_tuple\n", "path": "mycroft/version/__init__.py"}]} | 1,249 | 120 |
gh_patches_debug_6728 | rasdani/github-patches | git_diff | ydataai__ydata-profiling-1023 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect duplicate rows count
### Current Behaviour
The duplicated rows count is different between pandas and pandas-profiling when there are NaNs in the columns.
### Expected Behaviour
The counts should be equal.
### Data Description
I attach a simple example

### Code that reproduces the bug
```Python
import pandas as pd
import numpy as np
df = pd.DataFrame({"a": [np.nan, np.nan, 2], "b": [1, 1, 3]})
sum(df.duplicated())
from pandas_profiling import ProfileReport
profile = ProfileReport(df, title="Pandas Profiling Report")
```
### pandas-profiling version
3.2.0
### Dependencies
```Text
numpy==1.22.4
pandas==1.3.3
```
### OS
_No response_
### Checklist
- [X] There is not yet another bug report for this issue in the [issue tracker](https://github.com/ydataai/pandas-profiling/issues)
- [X] The problem is reproducible from this bug report. [This guide](http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) can help to craft a minimal bug report.
- [X] The issue has not been resolved by the entries listed under [Common Issues](https://pandas-profiling.ydata.ai/docs/master/pages/support_contrib/common_issues.html).
</issue>
<code>
[start of src/pandas_profiling/model/pandas/duplicates_pandas.py]
1 from typing import Any, Dict, Optional, Sequence, Tuple
2
3 import pandas as pd
4
5 from pandas_profiling.config import Settings
6 from pandas_profiling.model.duplicates import get_duplicates
7
8
9 @get_duplicates.register(Settings, pd.DataFrame, Sequence)
10 def pandas_get_duplicates(
11 config: Settings, df: pd.DataFrame, supported_columns: Sequence
12 ) -> Tuple[Dict[str, Any], Optional[pd.DataFrame]]:
13 """Obtain the most occurring duplicate rows in the DataFrame.
14
15 Args:
16 config: report Settings object
17 df: the Pandas DataFrame.
18 supported_columns: the columns to consider
19
20 Returns:
21 A subset of the DataFrame, ordered by occurrence.
22 """
23 n_head = config.duplicates.head
24
25 metrics: Dict[str, Any] = {}
26 if n_head > 0:
27 if supported_columns and len(df) > 0:
28 duplicates_key = config.duplicates.key
29 if duplicates_key in df.columns:
30 raise ValueError(
31 f"Duplicates key ({duplicates_key}) may not be part of the DataFrame. Either change the "
32 f" column name in the DataFrame or change the 'duplicates.key' parameter."
33 )
34
35 duplicated_rows = df.duplicated(subset=supported_columns, keep=False)
36 duplicated_rows = (
37 df[duplicated_rows]
38 .groupby(supported_columns)
39 .size()
40 .reset_index(name=duplicates_key)
41 )
42
43 metrics["n_duplicates"] = len(duplicated_rows[duplicates_key])
44 metrics["p_duplicates"] = metrics["n_duplicates"] / len(df)
45
46 return (
47 metrics,
48 duplicated_rows.nlargest(n_head, duplicates_key),
49 )
50 else:
51 metrics["n_duplicates"] = 0
52 metrics["p_duplicates"] = 0.0
53 return metrics, None
54 else:
55 return metrics, None
56
[end of src/pandas_profiling/model/pandas/duplicates_pandas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pandas_profiling/model/pandas/duplicates_pandas.py b/src/pandas_profiling/model/pandas/duplicates_pandas.py
--- a/src/pandas_profiling/model/pandas/duplicates_pandas.py
+++ b/src/pandas_profiling/model/pandas/duplicates_pandas.py
@@ -35,7 +35,7 @@
duplicated_rows = df.duplicated(subset=supported_columns, keep=False)
duplicated_rows = (
df[duplicated_rows]
- .groupby(supported_columns)
+ .groupby(supported_columns, dropna=False)
.size()
.reset_index(name=duplicates_key)
)
| {"golden_diff": "diff --git a/src/pandas_profiling/model/pandas/duplicates_pandas.py b/src/pandas_profiling/model/pandas/duplicates_pandas.py\n--- a/src/pandas_profiling/model/pandas/duplicates_pandas.py\n+++ b/src/pandas_profiling/model/pandas/duplicates_pandas.py\n@@ -35,7 +35,7 @@\n duplicated_rows = df.duplicated(subset=supported_columns, keep=False)\n duplicated_rows = (\n df[duplicated_rows]\n- .groupby(supported_columns)\n+ .groupby(supported_columns, dropna=False)\n .size()\n .reset_index(name=duplicates_key)\n )\n", "issue": "Incorrect duplicate rows count\n### Current Behaviour\n\nThe duplicated rows count is different between pandas and pandas-profiling when there are nan's in columns\n\n### Expected Behaviour\n\nThe count should be equal\n\n### Data Description\n\nI attach a simple example\r\n\r\n\r\n\n\n### Code that reproduces the bug\n\n```Python\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndf = pd.DataFrame({\"a\": [np.nan, np.nan, 2], \"b\": [1, 1, 3]})\r\nsum(df.duplicated())\r\n\r\nfrom pandas_profiling import ProfileReport\r\n\r\nprofile = ProfileReport(df, title=\"Pandas Profiling Report\")\n```\n\n\n### pandas-profiling version\n\n3.2.0\n\n### Dependencies\n\n```Text\nnumpy==1.22.4\r\npandas==1.3.3\n```\n\n\n### OS\n\n_No response_\n\n### Checklist\n\n- [X] There is not yet another bug report for this issue in the [issue tracker](https://github.com/ydataai/pandas-profiling/issues)\n- [X] The problem is reproducible from this bug report. [This guide](http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) can help to craft a minimal bug report.\n- [X] The issue has not been resolved by the entries listed under [Common Issues](https://pandas-profiling.ydata.ai/docs/master/pages/support_contrib/common_issues.html).\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Sequence, Tuple\n\nimport pandas as pd\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.duplicates import get_duplicates\n\n\n@get_duplicates.register(Settings, pd.DataFrame, Sequence)\ndef pandas_get_duplicates(\n config: Settings, df: pd.DataFrame, supported_columns: Sequence\n) -> Tuple[Dict[str, Any], Optional[pd.DataFrame]]:\n \"\"\"Obtain the most occurring duplicate rows in the DataFrame.\n\n Args:\n config: report Settings object\n df: the Pandas DataFrame.\n supported_columns: the columns to consider\n\n Returns:\n A subset of the DataFrame, ordered by occurrence.\n \"\"\"\n n_head = config.duplicates.head\n\n metrics: Dict[str, Any] = {}\n if n_head > 0:\n if supported_columns and len(df) > 0:\n duplicates_key = config.duplicates.key\n if duplicates_key in df.columns:\n raise ValueError(\n f\"Duplicates key ({duplicates_key}) may not be part of the DataFrame. Either change the \"\n f\" column name in the DataFrame or change the 'duplicates.key' parameter.\"\n )\n\n duplicated_rows = df.duplicated(subset=supported_columns, keep=False)\n duplicated_rows = (\n df[duplicated_rows]\n .groupby(supported_columns)\n .size()\n .reset_index(name=duplicates_key)\n )\n\n metrics[\"n_duplicates\"] = len(duplicated_rows[duplicates_key])\n metrics[\"p_duplicates\"] = metrics[\"n_duplicates\"] / len(df)\n\n return (\n metrics,\n duplicated_rows.nlargest(n_head, duplicates_key),\n )\n else:\n metrics[\"n_duplicates\"] = 0\n metrics[\"p_duplicates\"] = 0.0\n return metrics, None\n else:\n return metrics, None\n", "path": "src/pandas_profiling/model/pandas/duplicates_pandas.py"}]} | 1,407 | 141 |
gh_patches_debug_4588 | rasdani/github-patches | git_diff | saleor__saleor-541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop cart partitioner from cart view
Currently the cart is partitioned every time it's displayed. We really only need to do this when creating an order/payment. We do call it every time the cart is rendered but we then merge all of the partitions back into a single list.
- [ ] identify places where cart partitioner is called
- [ ] remove the unnecessary calls from places that don't absolutely need partitioning to work (checkout)
- [ ] simplify templates so they iterate over the cart instead of walking through a list of partitions that in turn contain items
- [ ] provide a brief description of the changes for the next release changelog
</issue>
<code>
[start of saleor/cart/views.py]
1 from __future__ import unicode_literals
2 from babeldjango.templatetags.babel import currencyfmt
3
4 from django.contrib import messages
5 from django.http import JsonResponse
6 from django.shortcuts import redirect
7 from django.template.response import TemplateResponse
8 from django.utils.translation import ugettext as _
9
10 from . import Cart
11 from .forms import ReplaceCartLineForm
12 from ..cart.utils import (
13 contains_unavailable_products, remove_unavailable_products)
14
15
16 def index(request, product_id=None):
17 if product_id is not None:
18 product_id = int(product_id)
19 cart = Cart.for_session_cart(request.cart, discounts=request.discounts)
20 if contains_unavailable_products(cart):
21 msg = _('Sorry. We don\'t have that many items in stock. '
22 'Quantity was set to maximum available for now.')
23 messages.warning(request, msg)
24 remove_unavailable_products(cart)
25 for line in cart:
26 data = None
27 if line.product.pk == product_id:
28 data = request.POST
29 initial = {'quantity': line.get_quantity()}
30 form = ReplaceCartLineForm(data, cart=cart, product=line.product,
31 initial=initial)
32 line.form = form
33 if form.is_valid():
34 form.save()
35 if request.is_ajax():
36 response = {
37 'productId': line.product.pk,
38 'subtotal': currencyfmt(
39 line.get_total().gross,
40 line.get_total().currency),
41 'total': 0}
42 if cart:
43 response['total'] = currencyfmt(
44 cart.get_total().gross, cart.get_total().currency)
45 return JsonResponse(response)
46 return redirect('cart:index')
47 elif data is not None:
48 if request.is_ajax():
49 response = {'error': form.errors}
50 return JsonResponse(response, status=400)
51 cart_partitioner = cart.partition()
52 return TemplateResponse(
53 request, 'cart/index.html', {
54 'cart': cart_partitioner})
55
[end of saleor/cart/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/cart/views.py b/saleor/cart/views.py
--- a/saleor/cart/views.py
+++ b/saleor/cart/views.py
@@ -48,7 +48,6 @@
if request.is_ajax():
response = {'error': form.errors}
return JsonResponse(response, status=400)
- cart_partitioner = cart.partition()
return TemplateResponse(
request, 'cart/index.html', {
- 'cart': cart_partitioner})
+ 'cart': cart})
| {"golden_diff": "diff --git a/saleor/cart/views.py b/saleor/cart/views.py\n--- a/saleor/cart/views.py\n+++ b/saleor/cart/views.py\n@@ -48,7 +48,6 @@\n if request.is_ajax():\n response = {'error': form.errors}\n return JsonResponse(response, status=400)\n- cart_partitioner = cart.partition()\n return TemplateResponse(\n request, 'cart/index.html', {\n- 'cart': cart_partitioner})\n+ 'cart': cart})\n", "issue": "Drop cart partitioner from cart view\nCurrently the cart is partitioned every time it'd displayed. We really only need to do this when creating an order/payment. We do call it every time the cart is rendered but we then merge all of the partitions back into a single list.\n- [ ] identify places where cart partitioner is called\n- [ ] remove the unnecessary calls from places that don't absolutely need partitioning to work (checkout)\n- [ ] simplify templates so they iterate over the cart instead of walking through a list of partitions that in turn contain items\n- [ ] provide a brief description of the changes for the next release changelog\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom babeldjango.templatetags.babel import currencyfmt\n\nfrom django.contrib import messages\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import ugettext as _\n\nfrom . import Cart\nfrom .forms import ReplaceCartLineForm\nfrom ..cart.utils import (\n contains_unavailable_products, remove_unavailable_products)\n\n\ndef index(request, product_id=None):\n if product_id is not None:\n product_id = int(product_id)\n cart = Cart.for_session_cart(request.cart, discounts=request.discounts)\n if contains_unavailable_products(cart):\n msg = _('Sorry. We don\\'t have that many items in stock. '\n 'Quantity was set to maximum available for now.')\n messages.warning(request, msg)\n remove_unavailable_products(cart)\n for line in cart:\n data = None\n if line.product.pk == product_id:\n data = request.POST\n initial = {'quantity': line.get_quantity()}\n form = ReplaceCartLineForm(data, cart=cart, product=line.product,\n initial=initial)\n line.form = form\n if form.is_valid():\n form.save()\n if request.is_ajax():\n response = {\n 'productId': line.product.pk,\n 'subtotal': currencyfmt(\n line.get_total().gross,\n line.get_total().currency),\n 'total': 0}\n if cart:\n response['total'] = currencyfmt(\n cart.get_total().gross, cart.get_total().currency)\n return JsonResponse(response)\n return redirect('cart:index')\n elif data is not None:\n if request.is_ajax():\n response = {'error': form.errors}\n return JsonResponse(response, status=400)\n cart_partitioner = cart.partition()\n return TemplateResponse(\n request, 'cart/index.html', {\n 'cart': cart_partitioner})\n", "path": "saleor/cart/views.py"}]} | 1,172 | 114 |
gh_patches_debug_10288 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PERF: `to_set` speeds up, especially for large data
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [x] closes #542
- [x] whatsnew entry
Apply to index accessor
</issue>
<code>
[start of dtoolkit/accessor/index/to_set.py]
1 import pandas as pd
2
3 from dtoolkit.accessor.register import register_index_method
4
5
6 @register_index_method
7 def to_set(index: pd.Index) -> set:
8 """
9 Return a :keyword:`set` of the values.
10
11 A sugary syntax wraps :keyword:`set`::
12
13 set(index)
14
15 Different to :meth:`~pandas.Index.unique`, it returns :class:`~pandas.Index`.
16
17 Returns
18 -------
19 set
20
21 See Also
22 --------
23 pandas.Index.unique
24
25 Examples
26 --------
27 >>> import dtoolkit.accessor
28 >>> import pandas as pd
29 >>> i = pd.Index([1, 2, 2])
30 >>> i
31 Int64Index([1, 2, 2], dtype='int64')
32 >>> i.to_set()
33 {1, 2}
34 """
35
36 return set(index.unique())
37
[end of dtoolkit/accessor/index/to_set.py]
[start of dtoolkit/accessor/series/to_set.py]
1 import pandas as pd
2
3 from dtoolkit.accessor.register import register_series_method
4
5
6 @register_series_method
7 def to_set(s: pd.Series) -> set:
8 """
9 Return a :keyword:`set` of the values.
10
11 A sugary syntax wraps :keyword:`set`::
12
13 set(s)
14
15 Different to :meth:`~pandas.Series.unique`, it returns :class:`~numpy.ndarray`.
16
17 Returns
18 -------
19 set
20
21 See Also
22 --------
23 pandas.Series.unique
24
25 Examples
26 --------
27 >>> import dtoolkit.accessor
28 >>> import pandas as pd
29 >>> s = pd.Series([1, 2, 2])
30 >>> s
31 0 1
32 1 2
33 2 2
34 dtype: int64
35 >>> s.to_set()
36 {1, 2}
37 """
38
39 return set(s.unique())
40
[end of dtoolkit/accessor/series/to_set.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dtoolkit/accessor/index/to_set.py b/dtoolkit/accessor/index/to_set.py
--- a/dtoolkit/accessor/index/to_set.py
+++ b/dtoolkit/accessor/index/to_set.py
@@ -21,6 +21,7 @@
See Also
--------
pandas.Index.unique
+ dtoolkit.accessor.series.to_set
Examples
--------
diff --git a/dtoolkit/accessor/series/to_set.py b/dtoolkit/accessor/series/to_set.py
--- a/dtoolkit/accessor/series/to_set.py
+++ b/dtoolkit/accessor/series/to_set.py
@@ -21,6 +21,7 @@
See Also
--------
pandas.Series.unique
+ dtoolkit.accessor.index.to_set
Examples
--------
@@ -36,4 +37,4 @@
{1, 2}
"""
- return set(s.unique())
+ return set(s.to_list())
| {"golden_diff": "diff --git a/dtoolkit/accessor/index/to_set.py b/dtoolkit/accessor/index/to_set.py\n--- a/dtoolkit/accessor/index/to_set.py\n+++ b/dtoolkit/accessor/index/to_set.py\n@@ -21,6 +21,7 @@\n See Also\n --------\n pandas.Index.unique\n+ dtoolkit.accessor.series.to_set\n \n Examples\n --------\ndiff --git a/dtoolkit/accessor/series/to_set.py b/dtoolkit/accessor/series/to_set.py\n--- a/dtoolkit/accessor/series/to_set.py\n+++ b/dtoolkit/accessor/series/to_set.py\n@@ -21,6 +21,7 @@\n See Also\n --------\n pandas.Series.unique\n+ dtoolkit.accessor.index.to_set\n \n Examples\n --------\n@@ -36,4 +37,4 @@\n {1, 2}\n \"\"\"\n \n- return set(s.unique())\n+ return set(s.to_list())\n", "issue": "PERF: `to_set` speeds up especial to large data\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #542\r\n- [x] whatsnew entry\r\n\r\nApply to index accessor\n", "before_files": [{"content": "import pandas as pd\n\nfrom dtoolkit.accessor.register import register_index_method\n\n\n@register_index_method\ndef to_set(index: pd.Index) -> set:\n \"\"\"\n Return a :keyword:`set` of the values.\n\n A sugary syntax wraps :keyword:`set`::\n\n set(index)\n\n Different to :meth:`~pandas.Index.unique`, it returns :class:`~pandas.Index`.\n\n Returns\n -------\n set\n\n See Also\n --------\n pandas.Index.unique\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> i = pd.Index([1, 2, 2])\n >>> i\n Int64Index([1, 2, 2], dtype='int64')\n >>> i.to_set()\n {1, 2}\n \"\"\"\n\n return set(index.unique())\n", "path": "dtoolkit/accessor/index/to_set.py"}, {"content": "import pandas as pd\n\nfrom dtoolkit.accessor.register import register_series_method\n\n\n@register_series_method\ndef to_set(s: pd.Series) -> set:\n \"\"\"\n Return a :keyword:`set` of the values.\n\n A sugary syntax wraps :keyword:`set`::\n\n set(s)\n\n Different to :meth:`~pandas.Series.unique`, it returns :class:`~numpy.ndarray`.\n\n Returns\n -------\n set\n\n See Also\n --------\n pandas.Series.unique\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> s = pd.Series([1, 2, 2])\n >>> s\n 0 1\n 1 2\n 2 2\n dtype: int64\n >>> s.to_set()\n {1, 2}\n \"\"\"\n\n return set(s.unique())\n", "path": "dtoolkit/accessor/series/to_set.py"}]} | 1,301 | 216 |
gh_patches_debug_33829 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-1430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken link in Python37DeprecationWarning deprecation message
```
warnings.warn(message, Python37DeprecationWarning)
E google.auth.Python37DeprecationWarning: After January 1, 2024, new releases of this library will drop support for Python 3.7. More details about Python 3.7 support can be found at https://cloud.google.com/python/docs/python37-sunset/
```
The link https://cloud.google.com/python/docs/python37-sunset/ results in 404. We should remove it from the deprecation message.
</issue>
<code>
[start of google/oauth2/__init__.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google OAuth 2.0 Library for Python."""
16
17 import sys
18 import warnings
19
20
21 class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
22 """
23 Deprecation warning raised when Python 3.7 runtime is detected.
24 Python 3.7 support will be dropped after January 1, 2024. See
25 https://cloud.google.com/python/docs/python37-sunset/ for more information.
26 """
27
28 pass
29
30
31 # Checks if the current runtime is Python 3.7.
32 if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
33 message = (
34 "After January 1, 2024, new releases of this library will drop support "
35 "for Python 3.7. More details about Python 3.7 support "
36 "can be found at https://cloud.google.com/python/docs/python37-sunset/"
37 )
38 warnings.warn(message, Python37DeprecationWarning)
39
[end of google/oauth2/__init__.py]
[start of google/auth/__init__.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google Auth Library for Python."""
16
17 import logging
18 import sys
19 import warnings
20
21 from google.auth import version as google_auth_version
22 from google.auth._default import (
23 default,
24 load_credentials_from_dict,
25 load_credentials_from_file,
26 )
27
28
29 __version__ = google_auth_version.__version__
30
31
32 __all__ = ["default", "load_credentials_from_file", "load_credentials_from_dict"]
33
34
35 class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
36 """
37 Deprecation warning raised when Python 3.7 runtime is detected.
38 Python 3.7 support will be dropped after January 1, 2024. See
39 https://cloud.google.com/python/docs/python37-sunset/ for more information.
40 """
41
42 pass
43
44
45 # Checks if the current runtime is Python 3.7.
46 if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
47 message = (
48 "After January 1, 2024, new releases of this library will drop support "
49 "for Python 3.7. More details about Python 3.7 support "
50 "can be found at https://cloud.google.com/python/docs/python37-sunset/"
51 )
52 warnings.warn(message, Python37DeprecationWarning)
53
54 # Set default logging handler to avoid "No handler found" warnings.
55 logging.getLogger(__name__).addHandler(logging.NullHandler())
56
[end of google/auth/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/auth/__init__.py b/google/auth/__init__.py
--- a/google/auth/__init__.py
+++ b/google/auth/__init__.py
@@ -35,8 +35,7 @@
class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
"""
Deprecation warning raised when Python 3.7 runtime is detected.
- Python 3.7 support will be dropped after January 1, 2024. See
- https://cloud.google.com/python/docs/python37-sunset/ for more information.
+ Python 3.7 support will be dropped after January 1, 2024.
"""
pass
@@ -46,8 +45,7 @@
if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
message = (
"After January 1, 2024, new releases of this library will drop support "
- "for Python 3.7. More details about Python 3.7 support "
- "can be found at https://cloud.google.com/python/docs/python37-sunset/"
+ "for Python 3.7."
)
warnings.warn(message, Python37DeprecationWarning)
diff --git a/google/oauth2/__init__.py b/google/oauth2/__init__.py
--- a/google/oauth2/__init__.py
+++ b/google/oauth2/__init__.py
@@ -21,8 +21,7 @@
class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
"""
Deprecation warning raised when Python 3.7 runtime is detected.
- Python 3.7 support will be dropped after January 1, 2024. See
- https://cloud.google.com/python/docs/python37-sunset/ for more information.
+ Python 3.7 support will be dropped after January 1, 2024.
"""
pass
@@ -32,7 +31,6 @@
if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
message = (
"After January 1, 2024, new releases of this library will drop support "
- "for Python 3.7. More details about Python 3.7 support "
- "can be found at https://cloud.google.com/python/docs/python37-sunset/"
+ "for Python 3.7."
)
warnings.warn(message, Python37DeprecationWarning)
| {"golden_diff": "diff --git a/google/auth/__init__.py b/google/auth/__init__.py\n--- a/google/auth/__init__.py\n+++ b/google/auth/__init__.py\n@@ -35,8 +35,7 @@\n class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n- Python 3.7 support will be dropped after January 1, 2024. See\n- https://cloud.google.com/python/docs/python37-sunset/ for more information.\n+ Python 3.7 support will be dropped after January 1, 2024.\n \"\"\"\n \n pass\n@@ -46,8 +45,7 @@\n if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n- \"for Python 3.7. More details about Python 3.7 support \"\n- \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n+ \"for Python 3.7.\"\n )\n warnings.warn(message, Python37DeprecationWarning)\n \ndiff --git a/google/oauth2/__init__.py b/google/oauth2/__init__.py\n--- a/google/oauth2/__init__.py\n+++ b/google/oauth2/__init__.py\n@@ -21,8 +21,7 @@\n class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n- Python 3.7 support will be dropped after January 1, 2024. See\n- https://cloud.google.com/python/docs/python37-sunset/ for more information.\n+ Python 3.7 support will be dropped after January 1, 2024.\n \"\"\"\n \n pass\n@@ -32,7 +31,6 @@\n if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n- \"for Python 3.7. More details about Python 3.7 support \"\n- \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n+ \"for Python 3.7.\"\n )\n warnings.warn(message, Python37DeprecationWarning)\n", "issue": "Broken link in Python37DeprecationWarning deprecation message\n```\r\n warnings.warn(message, Python37DeprecationWarning)\r\nE google.auth.Python37DeprecationWarning: After January 1, 2024, new releases of this library will drop support for Python 3.7. More details about Python 3.7 support can be found at https://cloud.google.com/python/docs/python37-sunset/\r\n```\r\nThe link https://cloud.google.com/python/docs/python37-sunset/ results in 404. We should remove it from the deprecation message.\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google OAuth 2.0 Library for Python.\"\"\"\n\nimport sys\nimport warnings\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. 
See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n warnings.warn(message, Python37DeprecationWarning)\n", "path": "google/oauth2/__init__.py"}, {"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Auth Library for Python.\"\"\"\n\nimport logging\nimport sys\nimport warnings\n\nfrom google.auth import version as google_auth_version\nfrom google.auth._default import (\n default,\n load_credentials_from_dict,\n load_credentials_from_file,\n)\n\n\n__version__ = google_auth_version.__version__\n\n\n__all__ = [\"default\", \"load_credentials_from_file\", \"load_credentials_from_dict\"]\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n warnings.warn(message, Python37DeprecationWarning)\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "google/auth/__init__.py"}]} | 1,663 | 578 |
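
For reference, a trimmed sketch of the warning block as it reads after the diff above — the dead sunset URL is simply dropped from the message. The class body is shortened here; only the message text is the point.

```python
import sys
import warnings


class Python37DeprecationWarning(DeprecationWarning):
    """Raised when a Python 3.7 runtime is detected; support ends 2024-01-01."""


if sys.version_info[:2] == (3, 7):
    warnings.warn(
        "After January 1, 2024, new releases of this library will drop support "
        "for Python 3.7.",
        Python37DeprecationWarning,
    )
```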
gh_patches_debug_14755 | rasdani/github-patches | git_diff | ansible__ansible-41206 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws_s3 is automatically decrypting ansible-vault encrypted files before put
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and devel branch are affected too.
Always add information AFTER of these html comments. -->
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
aws_s3
##### ANSIBLE VERSION
<!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below -->
```
2.5.1
```
##### SUMMARY
- I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3.
- aws_s3 in 2.4 didn't decrypt the src: parameter.
- The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted.
- The aws_s3 module doesn't accept the decrypt: argument.
##### STEPS TO REPRODUCE
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: upload vault to s3
aws_s3:
bucket: "the bucket"
object: "file.txt"
src: "file.txt"
mode: put
```
1. The file.txt is encrypted with ansible-vault.
2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted.
##### EXPECTED RESULTS
Don't auto-decrypt the src: argument, or provide a way to specify decrypt: no.
##### ACTUAL RESULTS
The src: argument to aws_s3 is automagically decrypted without documentation or a way to disable the feature like other modules (ex. copy).
</issue>
<code>
[start of lib/ansible/plugins/action/aws_s3.py]
1 # (c) 2012, Michael DeHaan <[email protected]>
2 # (c) 2018, Will Thames <[email protected]>
3 #
4 # This file is part of Ansible
5 #
6 # Ansible is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # Ansible is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
18 from __future__ import (absolute_import, division, print_function)
19 __metaclass__ = type
20
21 import os
22
23 from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
24 from ansible.module_utils._text import to_text
25 from ansible.plugins.action import ActionBase
26
27
28 class ActionModule(ActionBase):
29
30 TRANSFERS_FILES = True
31
32 def run(self, tmp=None, task_vars=None):
33 ''' handler for aws_s3 operations '''
34 if task_vars is None:
35 task_vars = dict()
36
37 result = super(ActionModule, self).run(tmp, task_vars)
38 del tmp # tmp no longer has any effect
39
40 source = self._task.args.get('src', None)
41
42 try:
43 new_module_args = self._task.args.copy()
44 if source:
45 source = os.path.expanduser(source)
46
47 # For backward compatibility check if the file exists on the remote; it should take precedence
48 if not self._remote_file_exists(source):
49 try:
50 source = self._loader.get_real_file(self._find_needle('files', source))
51 new_module_args['src'] = source
52 except AnsibleFileNotFound as e:
53 # module handles error message for nonexistent files
54 new_module_args['src'] = source
55 except AnsibleError as e:
56 raise AnsibleActionFail(to_text(e))
57
58 # execute the aws_s3 module now, with the updated args
59 result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
60 except AnsibleAction as e:
61 result.update(e.result)
62 return result
63
[end of lib/ansible/plugins/action/aws_s3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py
--- a/lib/ansible/plugins/action/aws_s3.py
+++ b/lib/ansible/plugins/action/aws_s3.py
@@ -47,7 +47,7 @@
# For backward compatibility check if the file exists on the remote; it should take precedence
if not self._remote_file_exists(source):
try:
- source = self._loader.get_real_file(self._find_needle('files', source))
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
new_module_args['src'] = source
except AnsibleFileNotFound as e:
# module handles error message for nonexistent files
| {"golden_diff": "diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py\n--- a/lib/ansible/plugins/action/aws_s3.py\n+++ b/lib/ansible/plugins/action/aws_s3.py\n@@ -47,7 +47,7 @@\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n- source = self._loader.get_real_file(self._find_needle('files', source))\n+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n", "issue": "aws_s3 is automaticly decrypting ansible-vault encrypted files before put\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments. -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naws_s3\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n2.5.1\r\n```\r\n\r\n##### SUMMARY\r\n- I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. \r\n- aws_s3 in 2.4 didn't decrypt the src: parameter.\r\n- The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted.\r\n- The aws_s3 module doesn't accept the decrypt: argument.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: upload vault to s3\r\n aws_s3:\r\n bucket: \"the bucket\"\r\n object: \"file.txt\"\r\n src: \"file.txt\"\r\n mode: put\r\n```\r\n1. The file.txt is encrypted with ansible-vault. \r\n2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted.\r\n\r\n##### EXPECTED RESULTS\r\nDon't autodecrypt the src: argument or be able to specify decrypt: no.\r\n\r\n##### ACTUAL RESULTS\r\nThe src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy).\r\n\n", "before_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2018, Will Thames <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\nfrom ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound\nfrom ansible.module_utils._text import to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = True\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for aws_s3 operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n source = self._task.args.get('src', None)\n\n try:\n new_module_args = self._task.args.copy()\n if source:\n source = os.path.expanduser(source)\n\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n source = self._loader.get_real_file(self._find_needle('files', source))\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n new_module_args['src'] = source\n except AnsibleError as e:\n raise AnsibleActionFail(to_text(e))\n\n # execute the aws_s3 module now, with the updated args\n result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))\n except AnsibleAction as e:\n result.update(e.result)\n return result\n", "path": "lib/ansible/plugins/action/aws_s3.py"}]} | 1,571 | 165 |
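
A toy illustration of why the one-argument change above matters: by default the loader hands back a decrypted temporary copy of a vaulted file, so the plaintext — not the ciphertext — would be uploaded. The function below is a simplified stand-in, not Ansible's real `DataLoader.get_real_file`.

```python
import os
import tempfile

VAULT_HEADER = "$ANSIBLE_VAULT;1.1;AES256"


def get_real_file(path: str, decrypt: bool = True) -> str:
    """Toy stand-in: return a 'decrypted copy' path unless decrypt=False."""
    with open(path) as f:
        first_line = f.readline()
    if decrypt and first_line.startswith(VAULT_HEADER):
        # Ansible would write plaintext to a temp file and return that path.
        return path + ".decrypted"
    return path


with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(VAULT_HEADER + "\n6162636465...\n")
    vaulted = tmp.name

print(get_real_file(vaulted))                 # plaintext copy (old behaviour)
print(get_real_file(vaulted, decrypt=False))  # original ciphertext path (the fix)
os.unlink(vaulted)
```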
gh_patches_debug_5738 | rasdani/github-patches | git_diff | quantumlib__Cirq-1673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Two circuit diagram tests that rest in `contrib` are failing on Windows
See: https://travis-ci.com/quantumlib/Cirq/jobs/202641395
</issue>
<code>
[start of cirq/contrib/paulistring/convert_to_pauli_string_phasors.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional, cast, TYPE_CHECKING
16
17 import numpy as np
18
19 from cirq import ops, optimizers, protocols, linalg
20 from cirq.circuits.circuit import Circuit
21 from cirq.circuits.optimization_pass import (
22 PointOptimizationSummary,
23 PointOptimizer,
24 )
25
26 if TYPE_CHECKING:
27 # pylint: disable=unused-import
28 from typing import List
29
30
31 class ConvertToPauliStringPhasors(PointOptimizer):
32 """Attempts to convert single-qubit gates into single-qubit
33 PauliStringPhasor operations.
34
35 Checks if the operation has a known unitary effect. If so, and the gate is a
36 1-qubit gate, then decomposes it into x, y, or z rotations and creates a
37 PauliStringPhasor for each.
38 """
39
40 def __init__(self,
41 ignore_failures: bool = False,
42 keep_clifford: bool = False,
43 atol: float = 0) -> None:
44 """
45 Args:
46 ignore_failures: If set, gates that fail to convert are forwarded
47 unchanged. If not set, conversion failures raise a TypeError.
48 keep_clifford: If set, single qubit rotations in the Clifford group
49 are converted to SingleQubitCliffordGates.
50 atol: Maximum absolute error tolerance. The optimization is
51 permitted to round angles with a threshold determined by this
52 tolerance.
53 """
54 super().__init__()
55 self.ignore_failures = ignore_failures
56 self.keep_clifford = keep_clifford
57 self.atol = atol
58
59 def _matrix_to_pauli_string_phasors(self,
60 mat: np.ndarray,
61 qubit: ops.Qid) -> ops.OP_TREE:
62 rotations = optimizers.single_qubit_matrix_to_pauli_rotations(
63 mat, self.atol)
64 out_ops = [] # type: List[ops.Operation]
65 for pauli, half_turns in rotations:
66 if (self.keep_clifford
67 and linalg.all_near_zero_mod(half_turns, 0.5)):
68 cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(
69 pauli, round(half_turns * 2))
70 if out_ops and not isinstance(out_ops[-1],
71 ops.PauliStringPhasor):
72 op = cast(ops.GateOperation, out_ops[-1])
73 gate = cast(ops.SingleQubitCliffordGate, op.gate)
74 out_ops[-1] = gate.merged_with(cliff_gate)(qubit)
75 else:
76 out_ops.append(
77 cliff_gate(qubit))
78 else:
79 pauli_string = ops.PauliString.from_single(qubit, pauli)
80 out_ops.append(
81 ops.PauliStringPhasor(pauli_string,
82 exponent_neg=round(half_turns, 10)))
83 return out_ops
84
85 def _convert_one(self, op: ops.Operation) -> ops.OP_TREE:
86 # Don't change if it's already a ops.PauliStringPhasor
87 if isinstance(op, ops.PauliStringPhasor):
88 return op
89
90 if (self.keep_clifford
91 and isinstance(op, ops.GateOperation)
92 and isinstance(op.gate, ops.SingleQubitCliffordGate)):
93 return op
94
95 # Single qubit gate with known matrix?
96 if len(op.qubits) == 1:
97 mat = protocols.unitary(op, None)
98 if mat is not None:
99 return self._matrix_to_pauli_string_phasors(mat, op.qubits[0])
100
101 # Just let it be?
102 if self.ignore_failures:
103 return op
104
105 raise TypeError("Don't know how to work with {!r}. "
106 "It isn't a 1-qubit operation with a known unitary "
107 "effect.".format(op))
108
109 def convert(self, op: ops.Operation) -> ops.OP_TREE:
110 converted = self._convert_one(op)
111 if converted is op:
112 return converted
113 return [self.convert(cast(ops.Operation, e))
114 for e in ops.flatten_op_tree(converted)]
115
116 def optimization_at(self, circuit: Circuit, index: int, op: ops.Operation
117 ) -> Optional[PointOptimizationSummary]:
118 converted = self.convert(op)
119 if converted is op:
120 return None
121
122 return PointOptimizationSummary(
123 clear_span=1,
124 new_operations=converted,
125 clear_qubits=op.qubits)
126
[end of cirq/contrib/paulistring/convert_to_pauli_string_phasors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py b/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py
--- a/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py
+++ b/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py
@@ -40,7 +40,7 @@
def __init__(self,
ignore_failures: bool = False,
keep_clifford: bool = False,
- atol: float = 0) -> None:
+ atol: float = 1e-14) -> None:
"""
Args:
ignore_failures: If set, gates that fail to convert are forwarded
| {"golden_diff": "diff --git a/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py b/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py\n--- a/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py\n+++ b/cirq/contrib/paulistring/convert_to_pauli_string_phasors.py\n@@ -40,7 +40,7 @@\n def __init__(self,\n ignore_failures: bool = False,\n keep_clifford: bool = False,\n- atol: float = 0) -> None:\n+ atol: float = 1e-14) -> None:\n \"\"\"\n Args:\n ignore_failures: If set, gates that fail to convert are forwarded\n", "issue": "Two circuit diagram tests that rest in `contrib` are failing on Windows\nSee: https://travis-ci.com/quantumlib/Cirq/jobs/202641395\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, cast, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import ops, optimizers, protocols, linalg\nfrom cirq.circuits.circuit import Circuit\nfrom cirq.circuits.optimization_pass import (\n PointOptimizationSummary,\n PointOptimizer,\n)\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from typing import List\n\n\nclass ConvertToPauliStringPhasors(PointOptimizer):\n \"\"\"Attempts to convert single-qubit gates into single-qubit\n PauliStringPhasor operations.\n\n Checks if the operation has a known unitary effect. If so, and the gate is a\n 1-qubit gate, then decomposes it into x, y, or z rotations and creates a\n PauliStringPhasor for each.\n \"\"\"\n\n def __init__(self,\n ignore_failures: bool = False,\n keep_clifford: bool = False,\n atol: float = 0) -> None:\n \"\"\"\n Args:\n ignore_failures: If set, gates that fail to convert are forwarded\n unchanged. If not set, conversion failures raise a TypeError.\n keep_clifford: If set, single qubit rotations in the Clifford group\n are converted to SingleQubitCliffordGates.\n atol: Maximum absolute error tolerance. 
The optimization is\n permitted to round angles with a threshold determined by this\n tolerance.\n \"\"\"\n super().__init__()\n self.ignore_failures = ignore_failures\n self.keep_clifford = keep_clifford\n self.atol = atol\n\n def _matrix_to_pauli_string_phasors(self,\n mat: np.ndarray,\n qubit: ops.Qid) -> ops.OP_TREE:\n rotations = optimizers.single_qubit_matrix_to_pauli_rotations(\n mat, self.atol)\n out_ops = [] # type: List[ops.Operation]\n for pauli, half_turns in rotations:\n if (self.keep_clifford\n and linalg.all_near_zero_mod(half_turns, 0.5)):\n cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(\n pauli, round(half_turns * 2))\n if out_ops and not isinstance(out_ops[-1],\n ops.PauliStringPhasor):\n op = cast(ops.GateOperation, out_ops[-1])\n gate = cast(ops.SingleQubitCliffordGate, op.gate)\n out_ops[-1] = gate.merged_with(cliff_gate)(qubit)\n else:\n out_ops.append(\n cliff_gate(qubit))\n else:\n pauli_string = ops.PauliString.from_single(qubit, pauli)\n out_ops.append(\n ops.PauliStringPhasor(pauli_string,\n exponent_neg=round(half_turns, 10)))\n return out_ops\n\n def _convert_one(self, op: ops.Operation) -> ops.OP_TREE:\n # Don't change if it's already a ops.PauliStringPhasor\n if isinstance(op, ops.PauliStringPhasor):\n return op\n\n if (self.keep_clifford\n and isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.SingleQubitCliffordGate)):\n return op\n\n # Single qubit gate with known matrix?\n if len(op.qubits) == 1:\n mat = protocols.unitary(op, None)\n if mat is not None:\n return self._matrix_to_pauli_string_phasors(mat, op.qubits[0])\n\n # Just let it be?\n if self.ignore_failures:\n return op\n\n raise TypeError(\"Don't know how to work with {!r}. \"\n \"It isn't a 1-qubit operation with a known unitary \"\n \"effect.\".format(op))\n\n def convert(self, op: ops.Operation) -> ops.OP_TREE:\n converted = self._convert_one(op)\n if converted is op:\n return converted\n return [self.convert(cast(ops.Operation, e))\n for e in ops.flatten_op_tree(converted)]\n\n def optimization_at(self, circuit: Circuit, index: int, op: ops.Operation\n ) -> Optional[PointOptimizationSummary]:\n converted = self.convert(op)\n if converted is op:\n return None\n\n return PointOptimizationSummary(\n clear_span=1,\n new_operations=converted,\n clear_qubits=op.qubits)\n", "path": "cirq/contrib/paulistring/convert_to_pauli_string_phasors.py"}]} | 1,988 | 172 |
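
The one-line fix above raises the default `atol` of `ConvertToPauliStringPhasors` from an exact 0 to 1e-14. Below is a generic numpy sketch of why an exact-zero tolerance is fragile for rotation angles recovered from floating-point matrices — illustrative only, not Cirq's actual decomposition code.

```python
import numpy as np

# A "quarter turn" as it typically comes back from a numerical decomposition:
# off from the exact value by a few ulps.
half_turns = 0.5 + 3e-16


def near_zero_mod(x: float, period: float, atol: float) -> bool:
    """Is x within atol of a multiple of period?"""
    r = np.remainder(x + period / 2, period) - period / 2
    return abs(r) <= atol


print(near_zero_mod(half_turns, 0.5, atol=0.0))    # False -> missed as a Clifford rotation
print(near_zero_mod(half_turns, 0.5, atol=1e-14))  # True  -> recognised with a small tolerance
```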
gh_patches_debug_28145 | rasdani/github-patches | git_diff | dynamiqs__dynamiqs-216 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Propagator solvers are cached on slightly changing `delta_t`
Both the `sesolve` and `mesolve` propagator solvers are cached on the time step `delta_t` to take, which should be constant for linearly spaced `t_save`. Thus, the propagator should be computed only once. However, due to numerical imprecisions, the `delta_t` changes slightly even when `t_save` is linearly spaced, resulting in frequent recomputations of the same quantity.
</issue>
<code>
[start of dynamiqs/solvers/propagator.py]
1 from abc import abstractmethod
2
3 from torch import Tensor
4
5 from .solver import AutogradSolver
6 from .utils.td_tensor import ConstantTDTensor
7 from .utils.utils import tqdm
8
9
10 class Propagator(AutogradSolver):
11 def __init__(self, *args, **kwargs):
12 super().__init__(*args, **kwargs)
13
14 # check that Hamiltonian is time-independent
15 if not isinstance(self.H, ConstantTDTensor):
16 raise TypeError(
17 'Solver `Propagator` requires a time-independent Hamiltonian.'
18 )
19 self.H = self.H(0.0)
20
21 def run_autograd(self):
22 y, t1 = self.y0, 0.0
23 for t2 in tqdm(self.t_stop.cpu().numpy(), disable=not self.options.verbose):
24 y = self.forward(t1, t2 - t1, y)
25 self.save(y)
26 t1 = t2
27
28 @abstractmethod
29 def forward(self, t: float, delta_t: float, y: Tensor):
30 pass
31
[end of dynamiqs/solvers/propagator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynamiqs/solvers/propagator.py b/dynamiqs/solvers/propagator.py
--- a/dynamiqs/solvers/propagator.py
+++ b/dynamiqs/solvers/propagator.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
from abc import abstractmethod
+import numpy as np
from torch import Tensor
from .solver import AutogradSolver
@@ -7,6 +10,19 @@
from .utils.utils import tqdm
+def round_truncate(x: np.float32 | np.float64) -> np.float32 | np.float64:
+ # round a strictly positive-valued float to remove numerical errors, and enable
+ # comparing floats for equality
+
+ # The mantissa of a float32 is stored using 23 bits. The following code rounds and
+ # truncates the float value to the 18 most significant bits of its mantissa. This
+ # removes any numerical error that may have accumulated in the 5 least significant
+ # bits of the mantissa.
+ leading = abs(int(np.log2(x)))
+ keep = leading + 18
+ return (x * 2**keep).round() / 2**keep
+
+
class Propagator(AutogradSolver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -21,7 +37,10 @@
def run_autograd(self):
y, t1 = self.y0, 0.0
for t2 in tqdm(self.t_stop.cpu().numpy(), disable=not self.options.verbose):
- y = self.forward(t1, t2 - t1, y)
+ if t2 != 0.0:
+ # round time difference to avoid numerical errors when comparing floats
+ delta_t = round_truncate(t2 - t1)
+ y = self.forward(t1, delta_t, y)
self.save(y)
t1 = t2
| {"golden_diff": "diff --git a/dynamiqs/solvers/propagator.py b/dynamiqs/solvers/propagator.py\n--- a/dynamiqs/solvers/propagator.py\n+++ b/dynamiqs/solvers/propagator.py\n@@ -1,5 +1,8 @@\n+from __future__ import annotations\n+\n from abc import abstractmethod\n \n+import numpy as np\n from torch import Tensor\n \n from .solver import AutogradSolver\n@@ -7,6 +10,19 @@\n from .utils.utils import tqdm\n \n \n+def round_truncate(x: np.float32 | np.float64) -> np.float32 | np.float64:\n+ # round a strictly positive-valued float to remove numerical errors, and enable\n+ # comparing floats for equality\n+\n+ # The mantissa of a float32 is stored using 23 bits. The following code rounds and\n+ # truncates the float value to the 18 most significant bits of its mantissa. This\n+ # removes any numerical error that may have accumulated in the 5 least significant\n+ # bits of the mantissa.\n+ leading = abs(int(np.log2(x)))\n+ keep = leading + 18\n+ return (x * 2**keep).round() / 2**keep\n+\n+\n class Propagator(AutogradSolver):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n@@ -21,7 +37,10 @@\n def run_autograd(self):\n y, t1 = self.y0, 0.0\n for t2 in tqdm(self.t_stop.cpu().numpy(), disable=not self.options.verbose):\n- y = self.forward(t1, t2 - t1, y)\n+ if t2 != 0.0:\n+ # round time difference to avoid numerical errors when comparing floats\n+ delta_t = round_truncate(t2 - t1)\n+ y = self.forward(t1, delta_t, y)\n self.save(y)\n t1 = t2\n", "issue": "Propagator solvers are cached on slighlty changing `delta_t`\nBoth the `sesolve` and `mesolve` propagator solvers are cached on the time step `delta_t` to take, which should be constant for linearly spaced `t_save`. Thus, the propagator should be computed only once. However, due to numerical imprecisions, the `delta_t` changes slightly even when `t_save` is linearly spaced, resulting in frequent recomputations of the same quantity.\n", "before_files": [{"content": "from abc import abstractmethod\n\nfrom torch import Tensor\n\nfrom .solver import AutogradSolver\nfrom .utils.td_tensor import ConstantTDTensor\nfrom .utils.utils import tqdm\n\n\nclass Propagator(AutogradSolver):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # check that Hamiltonian is time-independent\n if not isinstance(self.H, ConstantTDTensor):\n raise TypeError(\n 'Solver `Propagator` requires a time-independent Hamiltonian.'\n )\n self.H = self.H(0.0)\n\n def run_autograd(self):\n y, t1 = self.y0, 0.0\n for t2 in tqdm(self.t_stop.cpu().numpy(), disable=not self.options.verbose):\n y = self.forward(t1, t2 - t1, y)\n self.save(y)\n t1 = t2\n\n @abstractmethod\n def forward(self, t: float, delta_t: float, y: Tensor):\n pass\n", "path": "dynamiqs/solvers/propagator.py"}]} | 931 | 462 |
gh_patches_debug_10 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-770 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove text from home page
Please remove this text from the homepage: 'This is an early version of the HDX Repository. Initially, you will be able to find global datasets relevant to humanitarian work as well as local datasets from our three pilot locations - Colombia, Kenya and Yemen. You can also create an account and add your own data to the repository to share privately or publicly. Please have a look around and send us your feedback!' This will be covered in the about page. Not sure if Yumi will want to adjust the centering of the remaining HDX and tagline, but we can ask her.
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version='v0.2.6'
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version='v0.2.6'
\ No newline at end of file
+hdx_version='v0.3.0'
\ No newline at end of file
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version='v0.2.6'\n\\ No newline at end of file\n+hdx_version='v0.3.0'\n\\ No newline at end of file\n", "issue": "remove text from home page \nPlease remove this text from homepage 'This is an early version of the HDX Repository. Initially, you will be able to find global datasets relevant to humanitarian work as well as local datasets from our three pilot locations - Colombia, Kenya and Yemen. You can also create an account and add your own data to the repository to share privately or publicly. Please have a look around and send us your feedback!' this will be covered in the about page. Not sure if yumi will want to adjusts the centering of the remaining HDX and tagline but we can ask her\n\n", "before_files": [{"content": "hdx_version='v0.2.6'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 688 | 121 |
gh_patches_debug_6354 | rasdani/github-patches | git_diff | iterative__dvc-2627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue with dvc push to AWS s3 remote
**Please provide information about your setup**
DVC version(i.e. `dvc --version`), Platform and method of installation (pip, homebrew, pkg Mac, exe (Windows), DEB(Linux), RPM(Linux))
DVC: 0.62.1
Mac: Mojave 10.13
Install with pip
issue with `dvc push`

</issue>
<code>
[start of dvc/progress.py]
1 """Manages progress bars for dvc repo."""
2 from __future__ import print_function
3 import logging
4 from tqdm import tqdm
5 from concurrent.futures import ThreadPoolExecutor
6 from funcy import merge
7
8 logger = logging.getLogger(__name__)
9
10
11 class TqdmThreadPoolExecutor(ThreadPoolExecutor):
12 """
13 Ensure worker progressbars are cleared away properly.
14 """
15
16 def __enter__(self):
17 """
18 Creates a blank initial dummy progress bar if needed so that workers
19 are forced to create "nested" bars.
20 """
21 blank_bar = Tqdm(bar_format="Multi-Threaded:", leave=False)
22 if blank_bar.pos > 0:
23 # already nested - don't need a placeholder bar
24 blank_bar.close()
25 self.bar = blank_bar
26 return super(TqdmThreadPoolExecutor, self).__enter__()
27
28 def __exit__(self, *a, **k):
29 super(TqdmThreadPoolExecutor, self).__exit__(*a, **k)
30 self.bar.close()
31
32
33 class Tqdm(tqdm):
34 """
35 maximum-compatibility tqdm-based progressbars
36 """
37
38 BAR_FMT_DEFAULT = (
39 "{percentage:3.0f}%|{bar:10}|"
40 "{desc:{ncols_desc}.{ncols_desc}}{n}/{total}"
41 " [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]"
42 )
43 BAR_FMT_NOTOTAL = (
44 "{desc:{ncols_desc}.{ncols_desc}}{n}"
45 " [{elapsed}<??:??, {rate_fmt:>11}{postfix}]"
46 )
47
48 def __init__(
49 self,
50 iterable=None,
51 disable=None,
52 level=logging.ERROR,
53 desc=None,
54 leave=False,
55 bar_format=None,
56 bytes=False, # pylint: disable=W0622
57 **kwargs
58 ):
59 """
60 bytes : shortcut for
61 `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`
62 desc : persists after `close()`
63 level : effective logging level for determining `disable`;
64 used only if `disable` is unspecified
65 kwargs : anything accepted by `tqdm.tqdm()`
66 """
67 kwargs = kwargs.copy()
68 kwargs.setdefault("unit_scale", True)
69 if bytes:
70 bytes_defaults = dict(
71 unit="B", unit_scale=True, unit_divisor=1024, miniters=1
72 )
73 kwargs = merge(bytes_defaults, kwargs)
74 self.desc_persist = desc
75 if disable is None:
76 disable = logger.getEffectiveLevel() > level
77 super(Tqdm, self).__init__(
78 iterable=iterable,
79 disable=disable,
80 leave=leave,
81 desc=desc,
82 bar_format="!",
83 **kwargs
84 )
85 if bar_format is None:
86 if self.__len__():
87 self.bar_format = self.BAR_FMT_DEFAULT
88 else:
89 self.bar_format = self.BAR_FMT_NOTOTAL
90 else:
91 self.bar_format = bar_format
92 self.refresh()
93
94 def update_desc(self, desc, n=1):
95 """
96 Calls `set_description_str(desc)` and `update(n)`
97 """
98 self.set_description_str(desc, refresh=False)
99 self.update(n)
100
101 def update_to(self, current, total=None):
102 if total:
103 self.total = total # pylint: disable=W0613,W0201
104 self.update(current - self.n)
105
106 def close(self):
107 if self.desc_persist is not None:
108 self.set_description_str(self.desc_persist, refresh=False)
109 super(Tqdm, self).close()
110
111 @property
112 def format_dict(self):
113 """inject `ncols_desc` to fill the display width (`ncols`)"""
114 d = super(Tqdm, self).format_dict
115 ncols = d["ncols"] or 80
116 ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1
117 d["ncols_desc"] = max(ncols_desc, 0)
118 return d
119
[end of dvc/progress.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/progress.py b/dvc/progress.py
--- a/dvc/progress.py
+++ b/dvc/progress.py
@@ -114,5 +114,11 @@
d = super(Tqdm, self).format_dict
ncols = d["ncols"] or 80
ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1
- d["ncols_desc"] = max(ncols_desc, 0)
+ ncols_desc = max(ncols_desc, 0)
+ if ncols_desc:
+ d["ncols_desc"] = ncols_desc
+ else:
+ # work-around for zero-width desc
+ d["ncols_desc"] = 1
+ d["desc"] = 0
return d
| {"golden_diff": "diff --git a/dvc/progress.py b/dvc/progress.py\n--- a/dvc/progress.py\n+++ b/dvc/progress.py\n@@ -114,5 +114,11 @@\n d = super(Tqdm, self).format_dict\n ncols = d[\"ncols\"] or 80\n ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1\n- d[\"ncols_desc\"] = max(ncols_desc, 0)\n+ ncols_desc = max(ncols_desc, 0)\n+ if ncols_desc:\n+ d[\"ncols_desc\"] = ncols_desc\n+ else:\n+ # work-around for zero-width desc\n+ d[\"ncols_desc\"] = 1\n+ d[\"desc\"] = 0\n return d\n", "issue": "Issue with dvc push to AWS s3 remote\n**Please provide information about your setup**\r\nDVC version(i.e. `dvc --version`), Platform and method of installation (pip, homebrew, pkg Mac, exe (Windows), DEB(Linux), RPM(Linux))\r\n\r\nDVC: 0.62.1\r\nMac: Mojave 10.13\r\nInstall with pip\r\n\r\nissue with `dvc push`\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Manages progress bars for dvc repo.\"\"\"\nfrom __future__ import print_function\nimport logging\nfrom tqdm import tqdm\nfrom concurrent.futures import ThreadPoolExecutor\nfrom funcy import merge\n\nlogger = logging.getLogger(__name__)\n\n\nclass TqdmThreadPoolExecutor(ThreadPoolExecutor):\n \"\"\"\n Ensure worker progressbars are cleared away properly.\n \"\"\"\n\n def __enter__(self):\n \"\"\"\n Creates a blank initial dummy progress bar if needed so that workers\n are forced to create \"nested\" bars.\n \"\"\"\n blank_bar = Tqdm(bar_format=\"Multi-Threaded:\", leave=False)\n if blank_bar.pos > 0:\n # already nested - don't need a placeholder bar\n blank_bar.close()\n self.bar = blank_bar\n return super(TqdmThreadPoolExecutor, self).__enter__()\n\n def __exit__(self, *a, **k):\n super(TqdmThreadPoolExecutor, self).__exit__(*a, **k)\n self.bar.close()\n\n\nclass Tqdm(tqdm):\n \"\"\"\n maximum-compatibility tqdm-based progressbars\n \"\"\"\n\n BAR_FMT_DEFAULT = (\n \"{percentage:3.0f}%|{bar:10}|\"\n \"{desc:{ncols_desc}.{ncols_desc}}{n}/{total}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n BAR_FMT_NOTOTAL = (\n \"{desc:{ncols_desc}.{ncols_desc}}{n}\"\n \" [{elapsed}<??:??, {rate_fmt:>11}{postfix}]\"\n )\n\n def __init__(\n self,\n iterable=None,\n disable=None,\n level=logging.ERROR,\n desc=None,\n leave=False,\n bar_format=None,\n bytes=False, # pylint: disable=W0622\n **kwargs\n ):\n \"\"\"\n bytes : shortcut for\n `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`\n desc : persists after `close()`\n level : effective logging level for determining `disable`;\n used only if `disable` is unspecified\n kwargs : anything accepted by `tqdm.tqdm()`\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"unit_scale\", True)\n if bytes:\n bytes_defaults = dict(\n unit=\"B\", unit_scale=True, unit_divisor=1024, miniters=1\n )\n kwargs = merge(bytes_defaults, kwargs)\n self.desc_persist = desc\n if disable is None:\n disable = logger.getEffectiveLevel() > level\n super(Tqdm, self).__init__(\n iterable=iterable,\n disable=disable,\n leave=leave,\n desc=desc,\n bar_format=\"!\",\n **kwargs\n )\n if bar_format is None:\n if self.__len__():\n self.bar_format = self.BAR_FMT_DEFAULT\n else:\n self.bar_format = self.BAR_FMT_NOTOTAL\n else:\n self.bar_format = bar_format\n self.refresh()\n\n def update_desc(self, desc, n=1):\n \"\"\"\n Calls `set_description_str(desc)` and `update(n)`\n \"\"\"\n self.set_description_str(desc, refresh=False)\n self.update(n)\n\n def update_to(self, current, total=None):\n if total:\n self.total = total # pylint: disable=W0613,W0201\n self.update(current - self.n)\n\n def 
close(self):\n if self.desc_persist is not None:\n self.set_description_str(self.desc_persist, refresh=False)\n super(Tqdm, self).close()\n\n @property\n def format_dict(self):\n \"\"\"inject `ncols_desc` to fill the display width (`ncols`)\"\"\"\n d = super(Tqdm, self).format_dict\n ncols = d[\"ncols\"] or 80\n ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1\n d[\"ncols_desc\"] = max(ncols_desc, 0)\n return d\n", "path": "dvc/progress.py"}]} | 1,830 | 185 |
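
The traceback in the dvc screenshot is not readable here, but the patch comment ("work-around for zero-width desc") points at the progress-bar format string: when the computed description width collapses to zero, the resulting spec `{desc:0.0}` is invalid for strings. A tiny illustration of the failure mode and of the pad-to-one-character fallback (plain `str.format`, not dvc's `Tqdm` class; the field names are illustrative):

```python
fmt = "{desc:{w}.{w}}|{n}/{total}"

print(fmt.format(desc="push", w=6, n=3, total=8))    # 'push  |3/8'

try:
    fmt.format(desc="push", w=0, n=3, total=8)        # width/precision collapse to 0
except ValueError as exc:
    print("zero-width desc ->", exc)

print(fmt.format(desc="", w=1, n=3, total=8))         # ' |3/8'  (the patched fallback)
```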
gh_patches_debug_10256 | rasdani/github-patches | git_diff | PaddlePaddle__Paddle2ONNX-12 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix travis-ci problems
Travis-ci always failed
</issue>
<code>
[start of variables.py]
1 # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from onnx import helper, onnx_pb2, TensorProto
16 import paddle.fluid.core as core
17
18
19 def paddle_variable_to_onnx_tensor(paddle_var_name, block):
20 # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.
21 paddle_var = block.var(paddle_var_name)
22 return helper.make_tensor_value_info(
23 paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
24 paddle_var.shape)
25
26
27 PADDLE_TO_ONNX_DTYPE = {
28 core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,
29 core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,
30 # '': onnx_pb2.TensorProto.DOUBLE,
31 core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,
32 core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,
33 # '': onnx_pb2.TensorProto.INT8,
34 # '': onnx_pb2.TensorProto.UINT8,
35 core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,
36 core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,
37 # '': onnx_pb2.TensorProto.STRING,
38 # '': onnx_pb2.TensorProto.COMPLEX64,
39 # '': onnx_pb2.TensorProto.COMPLEX128,
40 core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL
41 }
42
[end of variables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/variables.py b/variables.py
--- a/variables.py
+++ b/variables.py
@@ -19,9 +19,9 @@
def paddle_variable_to_onnx_tensor(paddle_var_name, block):
# TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.
paddle_var = block.var(paddle_var_name)
- return helper.make_tensor_value_info(
- paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
- paddle_var.shape)
+ return helper.make_tensor_value_info(paddle_var_name,
+ PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
+ paddle_var.shape)
PADDLE_TO_ONNX_DTYPE = {
| {"golden_diff": "diff --git a/variables.py b/variables.py\n--- a/variables.py\n+++ b/variables.py\n@@ -19,9 +19,9 @@\n def paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n- return helper.make_tensor_value_info(\n- paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n- paddle_var.shape)\n+ return helper.make_tensor_value_info(paddle_var_name,\n+ PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n+ paddle_var.shape)\n \n \n PADDLE_TO_ONNX_DTYPE = {\n", "issue": "Fix travis-ci problems\nTravis-ci always failed\n", "before_files": [{"content": "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom onnx import helper, onnx_pb2, TensorProto\nimport paddle.fluid.core as core\n\n\ndef paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n return helper.make_tensor_value_info(\n paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n paddle_var.shape)\n\n\nPADDLE_TO_ONNX_DTYPE = {\n core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,\n core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,\n # '': onnx_pb2.TensorProto.DOUBLE,\n core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,\n # '': onnx_pb2.TensorProto.INT8,\n # '': onnx_pb2.TensorProto.UINT8,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,\n core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,\n # '': onnx_pb2.TensorProto.STRING,\n # '': onnx_pb2.TensorProto.COMPLEX64,\n # '': onnx_pb2.TensorProto.COMPLEX128,\n core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL\n}\n", "path": "variables.py"}]} | 1,096 | 164 |
gh_patches_debug_10950 | rasdani/github-patches | git_diff | chainer__chainer-2329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove chainer.functions.caffe.CaffeFunction
This is left for backward compatibility.
</issue>
<code>
[start of chainer/functions/caffe/__init__.py]
1 from chainer.links.caffe import caffe_function
2
3
4 # for backward compatibility
5 CaffeFunction = caffe_function.CaffeFunction
6
[end of chainer/functions/caffe/__init__.py]
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5
6 setup_requires = []
7 install_requires = [
8 'filelock',
9 'nose',
10 'numpy>=1.9.0',
11 'protobuf',
12 'six>=1.9.0',
13 ]
14
15 setup(
16 name='chainer',
17 version='2.0.0a1',
18 description='A flexible framework of neural networks',
19 author='Seiya Tokui',
20 author_email='[email protected]',
21 url='http://chainer.org/',
22 license='MIT License',
23 packages=['chainer',
24 'chainer.dataset',
25 'chainer.datasets',
26 'chainer.functions',
27 'chainer.functions.activation',
28 'chainer.functions.array',
29 'chainer.functions.caffe',
30 'chainer.functions.connection',
31 'chainer.functions.evaluation',
32 'chainer.functions.loss',
33 'chainer.functions.math',
34 'chainer.functions.noise',
35 'chainer.functions.normalization',
36 'chainer.functions.pooling',
37 'chainer.functions.theano',
38 'chainer.functions.util',
39 'chainer.function_hooks',
40 'chainer.iterators',
41 'chainer.initializers',
42 'chainer.links',
43 'chainer.links.activation',
44 'chainer.links.caffe',
45 'chainer.links.caffe.protobuf2',
46 'chainer.links.caffe.protobuf3',
47 'chainer.links.connection',
48 'chainer.links.loss',
49 'chainer.links.model',
50 'chainer.links.model.vision',
51 'chainer.links.normalization',
52 'chainer.links.theano',
53 'chainer.optimizers',
54 'chainer.serializers',
55 'chainer.testing',
56 'chainer.training',
57 'chainer.training.extensions',
58 'chainer.training.triggers',
59 'chainer.utils'],
60 zip_safe=False,
61 setup_requires=setup_requires,
62 install_requires=install_requires,
63 tests_require=['mock',
64 'nose'],
65 )
66
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/caffe/__init__.py b/chainer/functions/caffe/__init__.py
deleted file mode 100644
--- a/chainer/functions/caffe/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from chainer.links.caffe import caffe_function
-
-
-# for backward compatibility
-CaffeFunction = caffe_function.CaffeFunction
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,6 @@
'chainer.functions',
'chainer.functions.activation',
'chainer.functions.array',
- 'chainer.functions.caffe',
'chainer.functions.connection',
'chainer.functions.evaluation',
'chainer.functions.loss',
| {"golden_diff": "diff --git a/chainer/functions/caffe/__init__.py b/chainer/functions/caffe/__init__.py\ndeleted file mode 100644\n--- a/chainer/functions/caffe/__init__.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-from chainer.links.caffe import caffe_function\n-\n-\n-# for backward compatibility\n-CaffeFunction = caffe_function.CaffeFunction\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,6 @@\n 'chainer.functions',\n 'chainer.functions.activation',\n 'chainer.functions.array',\n- 'chainer.functions.caffe',\n 'chainer.functions.connection',\n 'chainer.functions.evaluation',\n 'chainer.functions.loss',\n", "issue": "Remove chainer.functions.caffe.CaffeFunction\nThis is left for backward compatibility.\n", "before_files": [{"content": "from chainer.links.caffe import caffe_function\n\n\n# for backward compatibility\nCaffeFunction = caffe_function.CaffeFunction\n", "path": "chainer/functions/caffe/__init__.py"}, {"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\n\nsetup_requires = []\ninstall_requires = [\n 'filelock',\n 'nose',\n 'numpy>=1.9.0',\n 'protobuf',\n 'six>=1.9.0',\n]\n\nsetup(\n name='chainer',\n version='2.0.0a1',\n description='A flexible framework of neural networks',\n author='Seiya Tokui',\n author_email='[email protected]',\n url='http://chainer.org/',\n license='MIT License',\n packages=['chainer',\n 'chainer.dataset',\n 'chainer.datasets',\n 'chainer.functions',\n 'chainer.functions.activation',\n 'chainer.functions.array',\n 'chainer.functions.caffe',\n 'chainer.functions.connection',\n 'chainer.functions.evaluation',\n 'chainer.functions.loss',\n 'chainer.functions.math',\n 'chainer.functions.noise',\n 'chainer.functions.normalization',\n 'chainer.functions.pooling',\n 'chainer.functions.theano',\n 'chainer.functions.util',\n 'chainer.function_hooks',\n 'chainer.iterators',\n 'chainer.initializers',\n 'chainer.links',\n 'chainer.links.activation',\n 'chainer.links.caffe',\n 'chainer.links.caffe.protobuf2',\n 'chainer.links.caffe.protobuf3',\n 'chainer.links.connection',\n 'chainer.links.loss',\n 'chainer.links.model',\n 'chainer.links.model.vision',\n 'chainer.links.normalization',\n 'chainer.links.theano',\n 'chainer.optimizers',\n 'chainer.serializers',\n 'chainer.testing',\n 'chainer.training',\n 'chainer.training.extensions',\n 'chainer.training.triggers',\n 'chainer.utils'],\n zip_safe=False,\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=['mock',\n 'nose'],\n)\n", "path": "setup.py"}]} | 1,149 | 174 |
gh_patches_debug_11020 | rasdani/github-patches | git_diff | goauthentik__authentik-6809 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ldap_sync_single ignores "ldap.task_timeout_hours" settings
**Describe the bug**
The "ldap_sync_single" task is ignoring the "ldap.task_timeout_hours" setting as set with the `AUTHENTIK_LDAP__TASK_TIMEOUT_HOURS` environment variable.
**To Reproduce**
Steps to reproduce the behavior:
1. configure AUTHENTIK_LDAP__TASK_TIMEOUT_HOURS to be too short to synchronize a target ldap source
2. configure an LDAP source
3. on the LDAP source details page, click on "Run sync now"
4. wait 10 minutes
**Expected behavior**
The task is given the specified amount of time and not cancelled after 10 minutes.
**Screenshots**

**Logs**
Output of docker-compose logs or kubectl logs respectively
**Version and Deployment (please complete the following information):**
- authentik version: [e.g. [2023.8.2](https://goauthentik.io/docs/releases/2023.8)]
- Deployment: docker-compose
</issue>
<code>
[start of authentik/sources/ldap/tasks.py]
1 """LDAP Sync tasks"""
2 from uuid import uuid4
3
4 from celery import chain, group
5 from django.core.cache import cache
6 from ldap3.core.exceptions import LDAPException
7 from structlog.stdlib import get_logger
8
9 from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus
10 from authentik.lib.config import CONFIG
11 from authentik.lib.utils.errors import exception_to_string
12 from authentik.lib.utils.reflection import class_to_path, path_to_class
13 from authentik.root.celery import CELERY_APP
14 from authentik.sources.ldap.models import LDAPSource
15 from authentik.sources.ldap.sync.base import BaseLDAPSynchronizer
16 from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer
17 from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer
18 from authentik.sources.ldap.sync.users import UserLDAPSynchronizer
19
20 LOGGER = get_logger()
21 SYNC_CLASSES = [
22 UserLDAPSynchronizer,
23 GroupLDAPSynchronizer,
24 MembershipLDAPSynchronizer,
25 ]
26 CACHE_KEY_PREFIX = "goauthentik.io/sources/ldap/page/"
27
28
29 @CELERY_APP.task()
30 def ldap_sync_all():
31 """Sync all sources"""
32 for source in LDAPSource.objects.filter(enabled=True):
33 ldap_sync_single(source.pk)
34
35
36 @CELERY_APP.task()
37 def ldap_sync_single(source_pk: str):
38 """Sync a single source"""
39 source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()
40 if not source:
41 return
42 task = chain(
43 # User and group sync can happen at once, they have no dependencies on each other
44 group(
45 ldap_sync_paginator(source, UserLDAPSynchronizer)
46 + ldap_sync_paginator(source, GroupLDAPSynchronizer),
47 ),
48 # Membership sync needs to run afterwards
49 group(
50 ldap_sync_paginator(source, MembershipLDAPSynchronizer),
51 ),
52 )
53 task()
54
55
56 def ldap_sync_paginator(source: LDAPSource, sync: type[BaseLDAPSynchronizer]) -> list:
57 """Return a list of task signatures with LDAP pagination data"""
58 sync_inst: BaseLDAPSynchronizer = sync(source)
59 signatures = []
60 for page in sync_inst.get_objects():
61 page_cache_key = CACHE_KEY_PREFIX + str(uuid4())
62 cache.set(page_cache_key, page, 60 * 60 * CONFIG.get_int("ldap.task_timeout_hours"))
63 page_sync = ldap_sync.si(source.pk, class_to_path(sync), page_cache_key)
64 signatures.append(page_sync)
65 return signatures
66
67
68 @CELERY_APP.task(
69 bind=True,
70 base=MonitoredTask,
71 soft_time_limit=60 * 60 * CONFIG.get_int("ldap.task_timeout_hours"),
72 task_time_limit=60 * 60 * CONFIG.get_int("ldap.task_timeout_hours"),
73 )
74 def ldap_sync(self: MonitoredTask, source_pk: str, sync_class: str, page_cache_key: str):
75 """Synchronization of an LDAP Source"""
76 self.result_timeout_hours = CONFIG.get_int("ldap.task_timeout_hours")
77 source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()
78 if not source:
79 # Because the source couldn't be found, we don't have a UID
80 # to set the state with
81 return
82 sync: type[BaseLDAPSynchronizer] = path_to_class(sync_class)
83 uid = page_cache_key.replace(CACHE_KEY_PREFIX, "")
84 self.set_uid(f"{source.slug}:{sync.name()}:{uid}")
85 try:
86 sync_inst: BaseLDAPSynchronizer = sync(source)
87 page = cache.get(page_cache_key)
88 if not page:
89 error_message = (
90 f"Could not find page in cache: {page_cache_key}. "
91 + "Try increasing ldap.task_timeout_hours"
92 )
93 LOGGER.warning(error_message)
94 self.set_status(TaskResult(TaskResultStatus.ERROR, [error_message]))
95 return
96 cache.touch(page_cache_key)
97 count = sync_inst.sync(page)
98 messages = sync_inst.messages
99 messages.append(f"Synced {count} objects.")
100 self.set_status(
101 TaskResult(
102 TaskResultStatus.SUCCESSFUL,
103 messages,
104 )
105 )
106 cache.delete(page_cache_key)
107 except LDAPException as exc:
108 # No explicit event is created here as .set_status with an error will do that
109 LOGGER.warning(exception_to_string(exc))
110 self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
111
[end of authentik/sources/ldap/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/sources/ldap/tasks.py b/authentik/sources/ldap/tasks.py
--- a/authentik/sources/ldap/tasks.py
+++ b/authentik/sources/ldap/tasks.py
@@ -33,7 +33,13 @@
ldap_sync_single(source.pk)
-@CELERY_APP.task()
+@CELERY_APP.task(
+ # We take the configured hours timeout time by 2.5 as we run user and
+ # group in parallel and then membership, so 2x is to cover the serial tasks,
+ # and 0.5x on top of that to give some more leeway
+ soft_time_limit=(60 * 60 * CONFIG.get_int("ldap.task_timeout_hours")) * 2.5,
+ task_time_limit=(60 * 60 * CONFIG.get_int("ldap.task_timeout_hours")) * 2.5,
+)
def ldap_sync_single(source_pk: str):
"""Sync a single source"""
source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()
| {"golden_diff": "diff --git a/authentik/sources/ldap/tasks.py b/authentik/sources/ldap/tasks.py\n--- a/authentik/sources/ldap/tasks.py\n+++ b/authentik/sources/ldap/tasks.py\n@@ -33,7 +33,13 @@\n ldap_sync_single(source.pk)\n \n \n-@CELERY_APP.task()\n+@CELERY_APP.task(\n+ # We take the configured hours timeout time by 2.5 as we run user and\n+ # group in parallel and then membership, so 2x is to cover the serial tasks,\n+ # and 0.5x on top of that to give some more leeway\n+ soft_time_limit=(60 * 60 * CONFIG.get_int(\"ldap.task_timeout_hours\")) * 2.5,\n+ task_time_limit=(60 * 60 * CONFIG.get_int(\"ldap.task_timeout_hours\")) * 2.5,\n+)\n def ldap_sync_single(source_pk: str):\n \"\"\"Sync a single source\"\"\"\n source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()\n", "issue": "ldap_sync_single ignores \"ldap.task_timeout_hours\" settings\n**Describe the bug**\r\nThe \"ldap_sync_single\" task is ignoring the \"ldap.task_timeout_hours\" setting as set with the `AUTHENTIK_LDAP__TASK_TIMEOUT_HOURS` environment variable.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. configure AUTHENTIK_LDAP__TASK_TIMEOUT_HOURS to be too short to synchronize a target ldap source\r\n2. configure an LDAP source\r\n3. on the LDAP source details page, click on \"Run sync now\"\r\n4. wait 10 minutes\r\n\r\n**Expected behavior**\r\nThe task is given the specified amount of time and not cancelled after 10 minutes.\r\n\r\n**Screenshots**\r\n\r\n\r\n**Logs**\r\nOutput of docker-compose logs or kubectl logs respectively\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: [e.g. [2023.8.2](https://goauthentik.io/docs/releases/2023.8)]\r\n- Deployment: docker-compose\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"LDAP Sync tasks\"\"\"\nfrom uuid import uuid4\n\nfrom celery import chain, group\nfrom django.core.cache import cache\nfrom ldap3.core.exceptions import LDAPException\nfrom structlog.stdlib import get_logger\n\nfrom authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.lib.utils.reflection import class_to_path, path_to_class\nfrom authentik.root.celery import CELERY_APP\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.sync.base import BaseLDAPSynchronizer\nfrom authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer\nfrom authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer\nfrom authentik.sources.ldap.sync.users import UserLDAPSynchronizer\n\nLOGGER = get_logger()\nSYNC_CLASSES = [\n UserLDAPSynchronizer,\n GroupLDAPSynchronizer,\n MembershipLDAPSynchronizer,\n]\nCACHE_KEY_PREFIX = \"goauthentik.io/sources/ldap/page/\"\n\n\n@CELERY_APP.task()\ndef ldap_sync_all():\n \"\"\"Sync all sources\"\"\"\n for source in LDAPSource.objects.filter(enabled=True):\n ldap_sync_single(source.pk)\n\n\n@CELERY_APP.task()\ndef ldap_sync_single(source_pk: str):\n \"\"\"Sync a single source\"\"\"\n source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()\n if not source:\n return\n task = chain(\n # User and group sync can happen at once, they have no dependencies on each other\n group(\n ldap_sync_paginator(source, UserLDAPSynchronizer)\n + ldap_sync_paginator(source, GroupLDAPSynchronizer),\n ),\n # Membership sync needs to run afterwards\n group(\n ldap_sync_paginator(source, MembershipLDAPSynchronizer),\n ),\n 
)\n task()\n\n\ndef ldap_sync_paginator(source: LDAPSource, sync: type[BaseLDAPSynchronizer]) -> list:\n \"\"\"Return a list of task signatures with LDAP pagination data\"\"\"\n sync_inst: BaseLDAPSynchronizer = sync(source)\n signatures = []\n for page in sync_inst.get_objects():\n page_cache_key = CACHE_KEY_PREFIX + str(uuid4())\n cache.set(page_cache_key, page, 60 * 60 * CONFIG.get_int(\"ldap.task_timeout_hours\"))\n page_sync = ldap_sync.si(source.pk, class_to_path(sync), page_cache_key)\n signatures.append(page_sync)\n return signatures\n\n\n@CELERY_APP.task(\n bind=True,\n base=MonitoredTask,\n soft_time_limit=60 * 60 * CONFIG.get_int(\"ldap.task_timeout_hours\"),\n task_time_limit=60 * 60 * CONFIG.get_int(\"ldap.task_timeout_hours\"),\n)\ndef ldap_sync(self: MonitoredTask, source_pk: str, sync_class: str, page_cache_key: str):\n \"\"\"Synchronization of an LDAP Source\"\"\"\n self.result_timeout_hours = CONFIG.get_int(\"ldap.task_timeout_hours\")\n source: LDAPSource = LDAPSource.objects.filter(pk=source_pk).first()\n if not source:\n # Because the source couldn't be found, we don't have a UID\n # to set the state with\n return\n sync: type[BaseLDAPSynchronizer] = path_to_class(sync_class)\n uid = page_cache_key.replace(CACHE_KEY_PREFIX, \"\")\n self.set_uid(f\"{source.slug}:{sync.name()}:{uid}\")\n try:\n sync_inst: BaseLDAPSynchronizer = sync(source)\n page = cache.get(page_cache_key)\n if not page:\n error_message = (\n f\"Could not find page in cache: {page_cache_key}. \"\n + \"Try increasing ldap.task_timeout_hours\"\n )\n LOGGER.warning(error_message)\n self.set_status(TaskResult(TaskResultStatus.ERROR, [error_message]))\n return\n cache.touch(page_cache_key)\n count = sync_inst.sync(page)\n messages = sync_inst.messages\n messages.append(f\"Synced {count} objects.\")\n self.set_status(\n TaskResult(\n TaskResultStatus.SUCCESSFUL,\n messages,\n )\n )\n cache.delete(page_cache_key)\n except LDAPException as exc:\n # No explicit event is created here as .set_status with an error will do that\n LOGGER.warning(exception_to_string(exc))\n self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))\n", "path": "authentik/sources/ldap/tasks.py"}]} | 1,997 | 233 |
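
The authentik patch above fixes the symptom by giving the orchestrating `ldap_sync_single` task explicit time limits instead of the project-wide default that was cancelling it after roughly 10 minutes. A compact sketch of that pattern with stock Celery kwargs (`soft_time_limit` / `time_limit`); the real patch reads the hours from authentik's `CONFIG.get_int(...)` and passes `task_time_limit=`, so the constant below is a stand-in:

```python
from celery import Celery

app = Celery("sketch")
TIMEOUT_HOURS = 2  # stand-in for CONFIG.get_int("ldap.task_timeout_hours")

@app.task(
    # user+group sync run in parallel, then membership, so budget ~2.5x the
    # per-page timeout for the task that chains them together
    soft_time_limit=60 * 60 * TIMEOUT_HOURS * 2.5,
    time_limit=60 * 60 * TIMEOUT_HOURS * 2.5,
)
def ldap_sync_single(source_pk: str):
    ...
```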
gh_patches_debug_6460 | rasdani/github-patches | git_diff | open-mmlab__mmpose-293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pylint: W0223
```bash
mmpose/models/detectors/bottom_up.py:19:0: W0223: Method 'simple_test' is abstract in class 'BasePose' but is not overridden (abstract-method)
mmpose/models/detectors/top_down.py:18:0: W0223: Method 'simple_test' is abstract in class 'BasePose' but is not overridden (abstract-method)
```
</issue>
<code>
[start of mmpose/models/detectors/base.py]
1 from abc import ABCMeta, abstractmethod
2 from collections import OrderedDict
3
4 import torch
5 import torch.distributed as dist
6 import torch.nn as nn
7
8
9 class BasePose(nn.Module):
10 """Base class for pose detectors.
11
12 All recognizers should subclass it.
13 All subclass should overwrite:
14 Methods:`forward_train`, supporting to forward when training.
15 Methods:`forward_test`, supporting to forward when testing.
16
17 Args:
18 backbone (dict): Backbone modules to extract feature.
19 head (dict): Head modules to give output.
20 train_cfg (dict): Config for training. Default: None.
21 test_cfg (dict): Config for testing. Default: None.
22 """
23
24 __metaclass__ = ABCMeta
25
26 @abstractmethod
27 def forward_train(self, img, img_metas, **kwargs):
28 """Defines the computation performed at training."""
29
30 @abstractmethod
31 def forward_test(self, img, img_metas, **kwargs):
32 """Defines the computation performed at testing."""
33
34 @abstractmethod
35 def simple_test(self, img, img_metas, **kwargs):
36 """Simple test function."""
37
38 @abstractmethod
39 def forward(self, img, img_metas, return_loss=True, **kwargs):
40 """Forward function."""
41
42 @staticmethod
43 def _parse_losses(losses):
44 """Parse the raw outputs (losses) of the network.
45
46 Args:
47 losses (dict): Raw output of the network, which usually contain
48 losses and other necessary information.
49
50 Returns:
51 tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
52 which may be a weighted sum of all losses, log_vars contains
53 all the variables to be sent to the logger.
54 """
55 log_vars = OrderedDict()
56 for loss_name, loss_value in losses.items():
57 if isinstance(loss_value, torch.Tensor):
58 log_vars[loss_name] = loss_value.mean()
59 elif isinstance(loss_value, float):
60 log_vars[loss_name] = loss_value
61 elif isinstance(loss_value, list):
62 log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
63 else:
64 raise TypeError(
65 f'{loss_name} is not a tensor or list of tensors or float')
66
67 loss = sum(_value for _key, _value in log_vars.items()
68 if 'loss' in _key)
69
70 log_vars['loss'] = loss
71 for loss_name, loss_value in log_vars.items():
72 # reduce loss when distributed training
73 if not isinstance(loss_value, float):
74 if dist.is_available() and dist.is_initialized():
75 loss_value = loss_value.data.clone()
76 dist.all_reduce(loss_value.div_(dist.get_world_size()))
77 log_vars[loss_name] = loss_value.item()
78 else:
79 log_vars[loss_name] = loss_value
80
81 return loss, log_vars
82
83 def train_step(self, data_batch, optimizer, **kwargs):
84 """The iteration step during training.
85
86 This method defines an iteration step during training, except for the
87 back propagation and optimizer updating, which are done in an optimizer
88 hook. Note that in some complicated cases or models, the whole process
89 including back propagation and optimizer updating is also defined in
90 this method, such as GAN.
91
92 Args:
93 data_batch (dict): The output of dataloader.
94 optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
95 runner is passed to ``train_step()``. This argument is unused
96 and reserved.
97
98 Returns:
99 dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
100 ``num_samples``.
101 ``loss`` is a tensor for back propagation, which can be a
102 weighted sum of multiple losses.
103 ``log_vars`` contains all the variables to be sent to the
104 logger.
105 ``num_samples`` indicates the batch size (when the model is
106 DDP, it means the batch size on each GPU), which is used for
107 averaging the logs.
108 """
109 losses = self.forward(**data_batch)
110
111 loss, log_vars = self._parse_losses(losses)
112
113 outputs = dict(
114 loss=loss,
115 log_vars=log_vars,
116 num_samples=len(next(iter(data_batch.values()))))
117
118 return outputs
119
120 def val_step(self, data_batch, optimizer, **kwargs):
121 """The iteration step during validation.
122
123 This method shares the same signature as :func:`train_step`, but used
124 during val epochs. Note that the evaluation after training epochs is
125 not implemented with this method, but an evaluation hook.
126 """
127 results = self.forward(return_loss=False, **data_batch)
128
129 outputs = dict(results=results)
130
131 return outputs
132
133 @abstractmethod
134 def show_result(self, **kwargs):
135 """Visualize the results."""
136 raise NotImplementedError
137
[end of mmpose/models/detectors/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmpose/models/detectors/base.py b/mmpose/models/detectors/base.py
--- a/mmpose/models/detectors/base.py
+++ b/mmpose/models/detectors/base.py
@@ -31,10 +31,6 @@
def forward_test(self, img, img_metas, **kwargs):
"""Defines the computation performed at testing."""
- @abstractmethod
- def simple_test(self, img, img_metas, **kwargs):
- """Simple test function."""
-
@abstractmethod
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Forward function."""
| {"golden_diff": "diff --git a/mmpose/models/detectors/base.py b/mmpose/models/detectors/base.py\n--- a/mmpose/models/detectors/base.py\n+++ b/mmpose/models/detectors/base.py\n@@ -31,10 +31,6 @@\n def forward_test(self, img, img_metas, **kwargs):\n \"\"\"Defines the computation performed at testing.\"\"\"\n \n- @abstractmethod\n- def simple_test(self, img, img_metas, **kwargs):\n- \"\"\"Simple test function.\"\"\"\n-\n @abstractmethod\n def forward(self, img, img_metas, return_loss=True, **kwargs):\n \"\"\"Forward function.\"\"\"\n", "issue": "Pylint: W0223\n```bash\r\nmmpose/models/detectors/bottom_up.py:19:0: W0223: Method 'simple_test' is abstract in class 'BasePose' but is not overridden (abstract-method)\r\nmmpose/models/detectors/top_down.py:18:0: W0223: Method 'simple_test' is abstract in class 'BasePose' but is not overridden (abstract-method)\r\n```\n", "before_files": [{"content": "from abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\n\n\nclass BasePose(nn.Module):\n \"\"\"Base class for pose detectors.\n\n All recognizers should subclass it.\n All subclass should overwrite:\n Methods:`forward_train`, supporting to forward when training.\n Methods:`forward_test`, supporting to forward when testing.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n head (dict): Head modules to give output.\n train_cfg (dict): Config for training. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def forward_train(self, img, img_metas, **kwargs):\n \"\"\"Defines the computation performed at training.\"\"\"\n\n @abstractmethod\n def forward_test(self, img, img_metas, **kwargs):\n \"\"\"Defines the computation performed at testing.\"\"\"\n\n @abstractmethod\n def simple_test(self, img, img_metas, **kwargs):\n \"\"\"Simple test function.\"\"\"\n\n @abstractmethod\n def forward(self, img, img_metas, return_loss=True, **kwargs):\n \"\"\"Forward function.\"\"\"\n\n @staticmethod\n def _parse_losses(losses):\n \"\"\"Parse the raw outputs (losses) of the network.\n\n Args:\n losses (dict): Raw output of the network, which usually contain\n losses and other necessary information.\n\n Returns:\n tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor\n which may be a weighted sum of all losses, log_vars contains\n all the variables to be sent to the logger.\n \"\"\"\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, float):\n log_vars[loss_name] = loss_value\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors or float')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if not isinstance(loss_value, float):\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n else:\n log_vars[loss_name] = loss_value\n\n return loss, log_vars\n\n def train_step(self, data_batch, optimizer, **kwargs):\n \"\"\"The iteration step during training.\n\n This method defines an iteration 
step during training, except for the\n back propagation and optimizer updating, which are done in an optimizer\n hook. Note that in some complicated cases or models, the whole process\n including back propagation and optimizer updating is also defined in\n this method, such as GAN.\n\n Args:\n data_batch (dict): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n runner is passed to ``train_step()``. This argument is unused\n and reserved.\n\n Returns:\n dict: It should contain at least 3 keys: ``loss``, ``log_vars``,\n ``num_samples``.\n ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n ``log_vars`` contains all the variables to be sent to the\n logger.\n ``num_samples`` indicates the batch size (when the model is\n DDP, it means the batch size on each GPU), which is used for\n averaging the logs.\n \"\"\"\n losses = self.forward(**data_batch)\n\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=len(next(iter(data_batch.values()))))\n\n return outputs\n\n def val_step(self, data_batch, optimizer, **kwargs):\n \"\"\"The iteration step during validation.\n\n This method shares the same signature as :func:`train_step`, but used\n during val epochs. Note that the evaluation after training epochs is\n not implemented with this method, but an evaluation hook.\n \"\"\"\n results = self.forward(return_loss=False, **data_batch)\n\n outputs = dict(results=results)\n\n return outputs\n\n @abstractmethod\n def show_result(self, **kwargs):\n \"\"\"Visualize the results.\"\"\"\n raise NotImplementedError\n", "path": "mmpose/models/detectors/base.py"}]} | 1,991 | 143 |
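
The pylint W0223 warnings in the mmpose row disappear because an `@abstractmethod` that concrete detectors never override makes those detectors abstract themselves. A minimal reproduction of the rule and of the fix, using toy classes rather than mmpose's real ones:

```python
from abc import ABCMeta, abstractmethod

class BasePose(metaclass=ABCMeta):
    @abstractmethod
    def forward(self, img):
        """Every detector must implement forward."""

    # Declaring `simple_test` with @abstractmethod here, while TopDown below
    # never defines it, is exactly what triggers W0223 (and, with a real
    # metaclass=ABCMeta, a TypeError at instantiation), so the patch simply
    # drops the unused hook.

class TopDown(BasePose):
    def forward(self, img):
        return img

print(TopDown().forward("img"))  # fine once the stale abstract method is gone
```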
gh_patches_debug_1841 | rasdani/github-patches | git_diff | kivy__python-for-android-1351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python2 Build fails with make: *** [Makefile:426: sharedmods] Error 139
# Python version: 3.6
# OS: Arch Linux
# python-for-android version: 0.6.0
The command I use to build is:
```
p4a apk --private ~/Projects/Python/Mobile_Apps/BeerApp/ --package=org.drink.recommendations --name "Drink Recommendations" --version 0.2 --bootstrap=sdl2 --requirements=python2,kivy --ndk_version r9c
```
The error is:
`
make: *** [Makefile:426: sharedmods] Error 139
`
The build logs are in the following file.
[p4a_errors.txt](https://github.com/kivy/python-for-android/files/2091833/p4a_errors.txt)
Initially I thought that this was a buildozer issue, as I attempted it that way first. So, I opened an issue on their GitHub page and multiple users pointed out that they too were experiencing this issue. I've tried with both python3 and python2; the outcome is the same. There is absolutely no unicode in any of my source files, and I've also attempted the build with pygame instead of sdl2 (for python 2). There are also multiple similar SO threads open about this.
Does anyone have any suggestions or ideas as to why this is happening and how to go about fixing it?
It's also worth noting that if I use the kivy buildozer vm, I can use buildozer to carry out a successful build. Just not on any other machine using either buildozer or p4a, using the same source and build commands.
The buildozer issue is here: https://github.com/kivy/buildozer/issues/673
The output from the dump file is:
`
Reading symbols from /home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python...done.
[New LWP 28854]
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/usr/lib/libthread_db.so.1".
Core was generated by ./python -E ./setup.py -q build.
Program terminated with signal SIGSEGV, Segmentation fault.
#0 0x000055731803eb2a in PyInstance_NewRaw (klass=klass@entry=0x7f7cbf1d1c18, dict=0x557319325210, dict@entry=0x0) at Objects/classobject.c:534
534 inst->in_dict = dict;
File "/home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python-gdb.py", line 55
Py_TPFLAGS_HEAPTYPE = (1L << 9)
^
SyntaxError: invalid syntax
`
</issue>
<code>
[start of pythonforandroid/recipes/hostpython2/__init__.py]
1
2 from pythonforandroid.toolchain import Recipe, shprint, current_directory, info, warning
3 from os.path import join, exists
4 import os
5 import sh
6
7
8 class Hostpython2Recipe(Recipe):
9 version = '2.7.2'
10 url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'
11 name = 'hostpython2'
12
13 conflicts = ['hostpython3']
14
15 def get_build_container_dir(self, arch=None):
16 choices = self.check_recipe_choices()
17 dir_name = '-'.join([self.name] + choices)
18 return join(self.ctx.build_dir, 'other_builds', dir_name, 'desktop')
19
20 def get_build_dir(self, arch=None):
21 return join(self.get_build_container_dir(), self.name)
22
23 def prebuild_arch(self, arch):
24 # Override hostpython Setup?
25 shprint(sh.cp, join(self.get_recipe_dir(), 'Setup'),
26 join(self.get_build_dir(), 'Modules', 'Setup'))
27
28 def build_arch(self, arch):
29 with current_directory(self.get_build_dir()):
30
31 if exists('hostpython'):
32 info('hostpython already exists, skipping build')
33 self.ctx.hostpython = join(self.get_build_dir(),
34 'hostpython')
35 self.ctx.hostpgen = join(self.get_build_dir(),
36 'hostpgen')
37 return
38
39 if 'LIBS' in os.environ:
40 os.environ.pop('LIBS')
41 configure = sh.Command('./configure')
42
43 shprint(configure)
44 shprint(sh.make, '-j5')
45
46 shprint(sh.mv, join('Parser', 'pgen'), 'hostpgen')
47
48 if exists('python.exe'):
49 shprint(sh.mv, 'python.exe', 'hostpython')
50 elif exists('python'):
51 shprint(sh.mv, 'python', 'hostpython')
52 else:
53 warning('Unable to find the python executable after '
54 'hostpython build! Exiting.')
55 exit(1)
56
57 self.ctx.hostpython = join(self.get_build_dir(), 'hostpython')
58 self.ctx.hostpgen = join(self.get_build_dir(), 'hostpgen')
59
60
61 recipe = Hostpython2Recipe()
62
[end of pythonforandroid/recipes/hostpython2/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/pythonforandroid/recipes/hostpython2/__init__.py b/pythonforandroid/recipes/hostpython2/__init__.py
--- a/pythonforandroid/recipes/hostpython2/__init__.py
+++ b/pythonforandroid/recipes/hostpython2/__init__.py
@@ -10,6 +10,7 @@
version = '2.7.2'
url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'
name = 'hostpython2'
+ patches = ['fix-segfault-pygchead.patch']
conflicts = ['hostpython3']
| {"golden_diff": "diff --git a/pythonforandroid/recipes/hostpython2/__init__.py b/pythonforandroid/recipes/hostpython2/__init__.py\n--- a/pythonforandroid/recipes/hostpython2/__init__.py\n+++ b/pythonforandroid/recipes/hostpython2/__init__.py\n@@ -10,6 +10,7 @@\n version = '2.7.2'\n url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'\n name = 'hostpython2'\n+ patches = ['fix-segfault-pygchead.patch']\n \n conflicts = ['hostpython3']\n", "issue": "Python2 Build fails with make: *** [Makefile:426: sharedmods] Error 139\n# Python version: 3.6\r\n# OS: Arch Linux \r\n# python-for-android version: 0.6.0 \r\n\r\nThe command I use to build is:\r\n\r\n` \r\n p4a apk --private ~/Projects/Python/Mobile_Apps/BeerApp/ --package=org.drink.recommendations --name \"Drink Recommendations\" --version 0.2 --bootstrap=sdl2 --requirements=python2,kivy --ndk_version r9c\r\n`\r\n\r\nThe error is:\r\n\r\n`\r\n make: *** [Makefile:426: sharedmods] Error 139\r\n`\r\n\r\nThe build logs are in the following file.\r\n[p4a_errors.txt](https://github.com/kivy/python-for-android/files/2091833/p4a_errors.txt)\r\n\r\nInitally I thought that this was a buildozer issue, as I attempted it that way first. So, I opened an issue on their github page and multiple users pointed out that they too were experiencing this issue. I've tried with both python3 and python2, the out come is the same. There is absolutely no unicode in any of my source files, I've also attempted the build with pygame instead of sdl2 (for python 2). There are also multiple simillar SO threads open about this. \r\n\r\nDoes anyone have any sugesstions or ideas as to why this is happening and how to go about fixing it?\r\n\r\nIt's also worth noting that if I use the kivy buildozer vm, I can use buildozer to carry out a successful build. 
Just not on any other machine using either buildozer or p4a, using the same source and build commands.\r\n\r\nThe buildozer issue is here: https://github.com/kivy/buildozer/issues/673\r\n\r\nThe output from the dump file is:\r\n\r\n`\r\n Reading symbols from /home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python...done.\r\n [New LWP 28854]\r\n [Thread debugging using libthread_db enabled]\r\n Using host libthread_db library \"/usr/lib/libthread_db.so.1\".\r\n Core was generated by ./python -E ./setup.py -q build.\r\n Program terminated with signal SIGSEGV, Segmentation fault.\r\n #0 0x000055731803eb2a in PyInstance_NewRaw (klass=klass@entry=0x7f7cbf1d1c18, dict=0x557319325210, dict@entry=0x0) at Objects/classobject.c:534\r\n 534 inst->in_dict = dict;\r\n File \"/home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python-gdb.py\", line 55\r\n Py_TPFLAGS_HEAPTYPE = (1L << 9)\r\n ^\r\n SyntaxError: invalid syntax\r\n`\n", "before_files": [{"content": "\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory, info, warning\nfrom os.path import join, exists\nimport os\nimport sh\n\n\nclass Hostpython2Recipe(Recipe):\n version = '2.7.2'\n url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'\n name = 'hostpython2'\n\n conflicts = ['hostpython3']\n\n def get_build_container_dir(self, arch=None):\n choices = self.check_recipe_choices()\n dir_name = '-'.join([self.name] + choices)\n return join(self.ctx.build_dir, 'other_builds', dir_name, 'desktop')\n\n def get_build_dir(self, arch=None):\n return join(self.get_build_container_dir(), self.name)\n\n def prebuild_arch(self, arch):\n # Override hostpython Setup?\n shprint(sh.cp, join(self.get_recipe_dir(), 'Setup'),\n join(self.get_build_dir(), 'Modules', 'Setup'))\n\n def build_arch(self, arch):\n with current_directory(self.get_build_dir()):\n\n if exists('hostpython'):\n info('hostpython already exists, skipping build')\n self.ctx.hostpython = join(self.get_build_dir(),\n 'hostpython')\n self.ctx.hostpgen = join(self.get_build_dir(),\n 'hostpgen')\n return\n\n if 'LIBS' in os.environ:\n os.environ.pop('LIBS')\n configure = sh.Command('./configure')\n\n shprint(configure)\n shprint(sh.make, '-j5')\n\n shprint(sh.mv, join('Parser', 'pgen'), 'hostpgen')\n\n if exists('python.exe'):\n shprint(sh.mv, 'python.exe', 'hostpython')\n elif exists('python'):\n shprint(sh.mv, 'python', 'hostpython')\n else:\n warning('Unable to find the python executable after '\n 'hostpython build! Exiting.')\n exit(1)\n\n self.ctx.hostpython = join(self.get_build_dir(), 'hostpython')\n self.ctx.hostpgen = join(self.get_build_dir(), 'hostpgen')\n\n\nrecipe = Hostpython2Recipe()\n", "path": "pythonforandroid/recipes/hostpython2/__init__.py"}]} | 1,800 | 137 |
gh_patches_debug_19534 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3132

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support xls and xlsx
## Problem
Mathesar does not support excel files (xls or xlsx). Please see this file:
https://github.com/centerofci/mathesar/blob/0d99ee984206a99c6743a319504a1d86621d71d5/mathesar/imports/base.py#L13
## Proposed solution
Mathesar should support both xls and xlsx files. This should be simple to do with the xlrd (for xls) and openpyxl (for xlsx) libraries and the implementation would be similar to csv.
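As a rough illustration of that approach (a sketch only, not Mathesar's actual code — the helper name and the use of pandas here are assumptions), picking the right engine for each format could look like this:

```python
from pathlib import Path

import pandas


def read_spreadsheet(path: str) -> pandas.DataFrame:
    # Hypothetical helper: choose the pandas engine from the file extension.
    # xlrd only reads legacy .xls workbooks; openpyxl reads .xlsx.
    engine = "xlrd" if Path(path).suffix.lower() == ".xls" else "openpyxl"
    return pandas.read_excel(path, engine=engine)
```

The rest of the import flow could then mirror the existing csv path, handing the resulting DataFrame to the same table-creation code.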
## Additional context
It is important to keep in mind that non-technical users can't really use csv but are comfortable with xls and xlsx. Implementing this feature would make mathesar much more friendly for these users.
I see that there's an issue about xlsx files: #2742; however, it seems to be closed? If you want, and nobody else is working on it, I can try providing a PR implementing the xls and xlsx features.
</issue>
<code>
[start of mathesar/imports/excel.py]
1 import pandas
2
3 from db.tables.operations.alter import update_pk_sequence_to_latest
4 from mathesar.database.base import create_mathesar_engine
5 from db.records.operations.insert import insert_records_from_excel
6 from db.tables.operations.create import create_string_column_table
7 from db.tables.operations.drop import drop_table
8 from mathesar.imports.utils import get_alternate_column_names, process_column_names
9 from psycopg2.errors import IntegrityError, DataError
10
11 from mathesar.state import reset_reflection
12
13
14 def insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe):
15 table = create_string_column_table(
16 name=name,
17 schema_oid=schema.oid,
18 column_names=column_names,
19 engine=engine,
20 comment=comment,
21 )
22
23 insert_records_from_excel(
24 table,
25 engine,
26 dataframe,
27 )
28 return table
29
30
31 def create_db_table_from_excel_data_file(data_file, name, schema, comment=None):
32 db_name = schema.database.name
33 engine = create_mathesar_engine(db_name)
34 dataframe = pandas.read_excel(data_file.file.path)
35 column_names = process_column_names(dataframe.columns)
36 try:
37 table = insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe)
38 update_pk_sequence_to_latest(engine, table)
39 except (IntegrityError, DataError):
40 drop_table(name=name, schema=schema.name, engine=engine)
41 column_names_alt = get_alternate_column_names(column_names)
42 table = insert_records_from_dataframe(name, schema, column_names_alt, engine, comment, dataframe)
43
44 reset_reflection(db_name=db_name)
45 return table
46
[end of mathesar/imports/excel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/mathesar/imports/excel.py b/mathesar/imports/excel.py
--- a/mathesar/imports/excel.py
+++ b/mathesar/imports/excel.py
@@ -28,10 +28,28 @@
return table
+def remove_empty_rows_and_columns_from_dataframe(df):
+ if df.iloc[0].isna().any():
+
+ # drop rows with all NaN values
+ df.dropna(how='all', inplace=True)
+
+ # drop columns with all NaN values
+ df.dropna(axis=1, how='all', inplace=True)
+
+ if all(df.columns.str.startswith('Unnamed')):
+ df.columns = df.iloc[0]
+ df = df[1:]
+
+ return df
+
+
def create_db_table_from_excel_data_file(data_file, name, schema, comment=None):
db_name = schema.database.name
engine = create_mathesar_engine(db_name)
- dataframe = pandas.read_excel(data_file.file.path)
+ dataframe = remove_empty_rows_and_columns_from_dataframe(
+ pandas.read_excel(data_file.file.path)
+ )
column_names = process_column_names(dataframe.columns)
try:
table = insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe)
| {"golden_diff": "diff --git a/mathesar/imports/excel.py b/mathesar/imports/excel.py\n--- a/mathesar/imports/excel.py\n+++ b/mathesar/imports/excel.py\n@@ -28,10 +28,28 @@\n return table\n \n \n+def remove_empty_rows_and_columns_from_dataframe(df):\n+ if df.iloc[0].isna().any():\n+\n+ # drop rows with all NaN values\n+ df.dropna(how='all', inplace=True)\n+\n+ # drop columns with all NaN values\n+ df.dropna(axis=1, how='all', inplace=True)\n+\n+ if all(df.columns.str.startswith('Unnamed')):\n+ df.columns = df.iloc[0]\n+ df = df[1:]\n+\n+ return df\n+\n+\n def create_db_table_from_excel_data_file(data_file, name, schema, comment=None):\n db_name = schema.database.name\n engine = create_mathesar_engine(db_name)\n- dataframe = pandas.read_excel(data_file.file.path)\n+ dataframe = remove_empty_rows_and_columns_from_dataframe(\n+ pandas.read_excel(data_file.file.path)\n+ )\n column_names = process_column_names(dataframe.columns)\n try:\n table = insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe)\n", "issue": "Support xls and xlsx\n## Problem\r\nMathesar does not support excel files (xls or xlsx). Please see this file: \r\n\r\nhttps://github.com/centerofci/mathesar/blob/0d99ee984206a99c6743a319504a1d86621d71d5/mathesar/imports/base.py#L13\r\n\r\n## Proposed solution\r\nMathesar should support both xls and xlsx files. This should be simple to do with the xlrd (for xls) and openpyxl (for xlsx) libraries and the implementation would be similar to csv.\r\n\r\n## Additional context\r\nIt is important to keep in mind that non-technical users can't really use csv but are comfortable with xls and xlsx. Implementing this feature would make mathesar much more friendly for these users.\r\n\r\nI see that there's an issue about xlsx files: #2742 however it seems to be closed ? 
If you want and nobody else is working on that I can try providing a PR implementing the xls and xlsx features.\r\n\n", "before_files": [{"content": "import pandas\n\nfrom db.tables.operations.alter import update_pk_sequence_to_latest\nfrom mathesar.database.base import create_mathesar_engine\nfrom db.records.operations.insert import insert_records_from_excel\nfrom db.tables.operations.create import create_string_column_table\nfrom db.tables.operations.drop import drop_table\nfrom mathesar.imports.utils import get_alternate_column_names, process_column_names\nfrom psycopg2.errors import IntegrityError, DataError\n\nfrom mathesar.state import reset_reflection\n\n\ndef insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe):\n table = create_string_column_table(\n name=name,\n schema_oid=schema.oid,\n column_names=column_names,\n engine=engine,\n comment=comment,\n )\n\n insert_records_from_excel(\n table,\n engine,\n dataframe,\n )\n return table\n\n\ndef create_db_table_from_excel_data_file(data_file, name, schema, comment=None):\n db_name = schema.database.name\n engine = create_mathesar_engine(db_name)\n dataframe = pandas.read_excel(data_file.file.path)\n column_names = process_column_names(dataframe.columns)\n try:\n table = insert_records_from_dataframe(name, schema, column_names, engine, comment, dataframe)\n update_pk_sequence_to_latest(engine, table)\n except (IntegrityError, DataError):\n drop_table(name=name, schema=schema.name, engine=engine)\n column_names_alt = get_alternate_column_names(column_names)\n table = insert_records_from_dataframe(name, schema, column_names_alt, engine, comment, dataframe)\n\n reset_reflection(db_name=db_name)\n return table\n", "path": "mathesar/imports/excel.py"}]} | 1,200 | 280 |
gh_patches_debug_57187 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-3325

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[beta][v16][rc1] Extra content (pdf, epub, etc.) is not generated on the second validation
Version 16 RC1.
Test scenario:
- I publish a tutorial taken from the validation area (I picked the one on programming basics)
- The tutorial is reserved, then published.
- I edit the tutorial's subtitle and request validation again (2 minutes after the first publication)
- I reserve it and publish the tutorial once more without ticking the "major update" box, i.e. as a minor version
- The tutorial is published this time, but after 5 minutes there is still no sign of a pdf or epub, etc.
</issue>
<code>
[start of zds/tutorialv2/management/commands/publication_watchdog.py]
1 # coding: utf-8
2 from os.path import dirname, join
3 import os
4 import time
5
6 import shutil
7 from django.core.management import BaseCommand
8 from pathtools.path import listdir
9 from watchdog.observers import Observer
10 from watchdog.events import FileCreatedEvent, FileSystemEventHandler, LoggingEventHandler
11 from zds import settings
12 from zds.tutorialv2.publication_utils import generate_exernal_content
13 from codecs import open
14
15
16 class TutorialIsPublished(FileSystemEventHandler):
17 prepare_callbacks = [] # because we can imagine we will create far more than test directory existence
18 finish_callbacks = [] # because we can imagine we will send a PM on success or failure one day
19
20 @staticmethod
21 def __create_dir(extra_contents_path):
22 if not os.path.exists(extra_contents_path):
23 os.makedirs(extra_contents_path)
24
25 @staticmethod
26 def __cleanup_build_and_watchdog(extra_contents_path, watchdog_file_path):
27 for listed in listdir(extra_contents_path, recursive=False):
28 try:
29 shutil.copy(join(extra_contents_path, listed), extra_contents_path.replace("__building", ""))
30 except Exception:
31 pass
32 shutil.rmtree(extra_contents_path)
33 os.remove()
34
35 def __init__(self):
36 self.prepare_callbacks = [TutorialIsPublished.__create_dir]
37 self.finish_callbacks = [TutorialIsPublished.__cleanup_build_and_watchdog]
38
39 def on_created(self, event):
40 super(TutorialIsPublished, self).on_created(event)
41 pandoc_debug_str = ""
42
43 if settings.PANDOC_LOG_STATE:
44 pandoc_debug_str = " 2>&1 | tee -a " + settings.PANDOC_LOG
45 if isinstance(event, FileCreatedEvent):
46 with open(event.src_path, encoding="utf-8") as f:
47 infos = f.read().strip().split(";")
48 md_file_path = infos[1]
49 base_name = infos[0]
50 extra_contents_path = dirname(md_file_path)
51 self.prepare_generation(extra_contents_path)
52 try:
53 generate_exernal_content(base_name, extra_contents_path, md_file_path,
54 pandoc_debug_str, overload_settings=True)
55 finally:
56 self.finish_generation(extra_contents_path, event.src_path)
57
58 def prepare_generation(self, extra_contents_path):
59
60 for callback in self.prepare_callbacks:
61 callback(extra_contents_path)
62
63 def finish_generation(self, extra_contents_path, watchdog_file_path):
64 for callback in self.finish_callbacks:
65 callback(extra_contents_path, watchdog_file_path)
66
67
68 class Command(BaseCommand):
69 help = 'Launch a watchdog that generate all exported formats (epub, pdf...) files without blocking request handling'
70
71 def handle(self, *args, **options):
72 path = settings.ZDS_APP['content']['extra_content_watchdog_dir']
73 event_handler = TutorialIsPublished()
74 observer = Observer()
75 observer.schedule(event_handler, path, recursive=True)
76 observer.schedule(LoggingEventHandler(), path)
77 observer.start()
78 try:
79 while True:
80 time.sleep(1)
81 except KeyboardInterrupt:
82 observer.stop()
83 observer.join()
84
[end of zds/tutorialv2/management/commands/publication_watchdog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/zds/tutorialv2/management/commands/publication_watchdog.py b/zds/tutorialv2/management/commands/publication_watchdog.py
--- a/zds/tutorialv2/management/commands/publication_watchdog.py
+++ b/zds/tutorialv2/management/commands/publication_watchdog.py
@@ -30,7 +30,7 @@
except Exception:
pass
shutil.rmtree(extra_contents_path)
- os.remove()
+ os.remove(watchdog_file_path)
def __init__(self):
self.prepare_callbacks = [TutorialIsPublished.__create_dir]
| {"golden_diff": "diff --git a/zds/tutorialv2/management/commands/publication_watchdog.py b/zds/tutorialv2/management/commands/publication_watchdog.py\n--- a/zds/tutorialv2/management/commands/publication_watchdog.py\n+++ b/zds/tutorialv2/management/commands/publication_watchdog.py\n@@ -30,7 +30,7 @@\n except Exception:\n pass\n shutil.rmtree(extra_contents_path)\n- os.remove()\n+ os.remove(watchdog_file_path)\n \n def __init__(self):\n self.prepare_callbacks = [TutorialIsPublished.__create_dir]\n", "issue": "[beta][v16][rc1] Les contenus extra (pdf, epub, etc.) ne sont pas g\u00e9n\u00e9r\u00e9s lors de la 2nde validation\nVersion 16 RC1.\n\nSc\u00e9nario de test : \n- Je publie un tutoriel pris en zone de validation (J'ai pris celui sur les bases de la prog)\n- Le tutoriel est r\u00e9serv\u00e9, publi\u00e9.\n- Je modifie le sous-titre du tutoriel et redemande sa validation (2 min apr\u00e8s la premi\u00e8re publication)\n- Je le r\u00e9server puis publie une fois de plus le tutoriel sans cocher la case maj majeur, donc en version mineure\n- Le tutoriel est publi\u00e9 cette fois, mais apr\u00e8s 5 min, toujours pas de signe d'un pdf ni epub, etc.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom os.path import dirname, join\nimport os\nimport time\n\nimport shutil\nfrom django.core.management import BaseCommand\nfrom pathtools.path import listdir\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileCreatedEvent, FileSystemEventHandler, LoggingEventHandler\nfrom zds import settings\nfrom zds.tutorialv2.publication_utils import generate_exernal_content\nfrom codecs import open\n\n\nclass TutorialIsPublished(FileSystemEventHandler):\n prepare_callbacks = [] # because we can imagine we will create far more than test directory existence\n finish_callbacks = [] # because we can imagine we will send a PM on success or failure one day\n\n @staticmethod\n def __create_dir(extra_contents_path):\n if not os.path.exists(extra_contents_path):\n os.makedirs(extra_contents_path)\n\n @staticmethod\n def __cleanup_build_and_watchdog(extra_contents_path, watchdog_file_path):\n for listed in listdir(extra_contents_path, recursive=False):\n try:\n shutil.copy(join(extra_contents_path, listed), extra_contents_path.replace(\"__building\", \"\"))\n except Exception:\n pass\n shutil.rmtree(extra_contents_path)\n os.remove()\n\n def __init__(self):\n self.prepare_callbacks = [TutorialIsPublished.__create_dir]\n self.finish_callbacks = [TutorialIsPublished.__cleanup_build_and_watchdog]\n\n def on_created(self, event):\n super(TutorialIsPublished, self).on_created(event)\n pandoc_debug_str = \"\"\n\n if settings.PANDOC_LOG_STATE:\n pandoc_debug_str = \" 2>&1 | tee -a \" + settings.PANDOC_LOG\n if isinstance(event, FileCreatedEvent):\n with open(event.src_path, encoding=\"utf-8\") as f:\n infos = f.read().strip().split(\";\")\n md_file_path = infos[1]\n base_name = infos[0]\n extra_contents_path = dirname(md_file_path)\n self.prepare_generation(extra_contents_path)\n try:\n generate_exernal_content(base_name, extra_contents_path, md_file_path,\n pandoc_debug_str, overload_settings=True)\n finally:\n self.finish_generation(extra_contents_path, event.src_path)\n\n def prepare_generation(self, extra_contents_path):\n\n for callback in self.prepare_callbacks:\n callback(extra_contents_path)\n\n def finish_generation(self, extra_contents_path, watchdog_file_path):\n for callback in self.finish_callbacks:\n callback(extra_contents_path, watchdog_file_path)\n\n\nclass Command(BaseCommand):\n 
help = 'Launch a watchdog that generate all exported formats (epub, pdf...) files without blocking request handling'\n\n def handle(self, *args, **options):\n path = settings.ZDS_APP['content']['extra_content_watchdog_dir']\n event_handler = TutorialIsPublished()\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.schedule(LoggingEventHandler(), path)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n", "path": "zds/tutorialv2/management/commands/publication_watchdog.py"}]} | 1,522 | 131 |
gh_patches_debug_18278 | rasdani/github-patches | git_diff | streamlink__streamlink-1731

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Vaughnlive RTMP port changed from 1935 to 2935
Very brief bug, very simple fix.
rtmp_server_map currently uses 192.240.105.171:1935 for every request and doesn't work (no data is returned from the stream).
Changing every rtmp_server_map entry to 192.240.105.171:2935 works for me.
</issue>
<code>
[start of src/streamlink/plugins/vaughnlive.py]
1 import itertools
2 import logging
3 import random
4 import re
5 import ssl
6
7 import websocket
8
9 from streamlink.plugin import Plugin
10 from streamlink.plugin.api import useragents
11 from streamlink.stream import RTMPStream
12
13 _url_re = re.compile(r"""
14 http(s)?://(\w+\.)?
15 (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv
16 (/embed/video)?
17 /(?P<channel>[^/&?]+)
18 """, re.VERBOSE)
19
20
21 class VLWebSocket(websocket.WebSocket):
22 def __init__(self, **_):
23 self.session = _.pop("session")
24 self.logger = logging.getLogger("streamlink.plugins.vaughnlive.websocket")
25 sslopt = _.pop("sslopt", {})
26 sslopt["cert_reqs"] = ssl.CERT_NONE
27 super(VLWebSocket, self).__init__(sslopt=sslopt, **_)
28
29 def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT):
30 self.logger.debug("Sending message: {0}", payload)
31 return super(VLWebSocket, self).send(payload + "\n\x00", opcode)
32
33 def recv(self):
34 d = super(VLWebSocket, self).recv().replace("\n", "").replace("\x00", "")
35 return d.split(" ", 1)
36
37
38 class VaughnLive(Plugin):
39 servers = ["wss://sapi-ws-{0}x{1:02}.vaughnlive.tv".format(x, y) for x, y in itertools.product(range(1, 3),
40 range(1, 6))]
41 origin = "https://vaughnlive.tv"
42 rtmp_server_map = {
43 "594140c69edad": "192.240.105.171:1935",
44 "585c4cab1bef1": "192.240.105.171:1935",
45 "5940d648b3929": "192.240.105.171:1935",
46 "5941854b39bc4": "192.240.105.171:1935"
47 }
48 name_remap = {"#vl": "live", "#btv": "btv", "#pt": "pt", "#igb": "instagib", "#vtv": "vtv"}
49 domain_map = {"vaughnlive": "#vl", "breakers": "#btv", "instagib": "#igb", "vapers": "#vtv", "pearltime": "#pt"}
50
51 @classmethod
52 def can_handle_url(cls, url):
53 return _url_re.match(url)
54
55 def api_url(self):
56 return random.choice(self.servers)
57
58 def parse_ack(self, action, message):
59 if action.endswith("3"):
60 channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(";")
61 is_live = is_live == "1"
62 viewers = int(viewers)
63 self.logger.debug("Viewers: {0}, isLive={1}", viewers, is_live)
64 domain, channel = channel.split("-", 1)
65 return is_live, server, domain, channel, token, ingest
66 else:
67 self.logger.error("Unhandled action format: {0}", action)
68
69 def _get_info(self, stream_name):
70 server = self.api_url()
71 self.logger.debug("Connecting to API: {0}", server)
72 ws = websocket.create_connection(server,
73 header=["User-Agent: {0}".format(useragents.CHROME)],
74 origin=self.origin,
75 class_=VLWebSocket,
76 session=self.session)
77 ws.send("MVN LOAD3 {0}".format(stream_name))
78 action, message = ws.recv()
79 return self.parse_ack(action, message)
80
81 def _get_rtmp_streams(self, server, domain, channel, token):
82 rtmp_server = self.rtmp_server_map.get(server, server)
83
84 url = "rtmp://{0}/live?{1}".format(rtmp_server, token)
85
86 yield "live", RTMPStream(self.session, params={
87 "rtmp": url,
88 "pageUrl": self.url,
89 "playpath": "{0}_{1}".format(self.name_remap.get(domain, "live"), channel),
90 "live": True
91 })
92
93 def _get_streams(self):
94 m = _url_re.match(self.url)
95 if m:
96 stream_name = "{0}-{1}".format(self.domain_map[(m.group("domain").lower())],
97 m.group("channel"))
98
99 is_live, server, domain, channel, token, ingest = self._get_info(stream_name)
100
101 if not is_live:
102 self.logger.info("Stream is currently off air")
103 else:
104 self.logger.info("Stream powered by VaughnSoft - remember to support them.")
105 for s in self._get_rtmp_streams(server, domain, channel, token):
106 yield s
107
108
109 __plugin__ = VaughnLive
110
[end of src/streamlink/plugins/vaughnlive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/src/streamlink/plugins/vaughnlive.py b/src/streamlink/plugins/vaughnlive.py
--- a/src/streamlink/plugins/vaughnlive.py
+++ b/src/streamlink/plugins/vaughnlive.py
@@ -40,10 +40,10 @@
range(1, 6))]
origin = "https://vaughnlive.tv"
rtmp_server_map = {
- "594140c69edad": "192.240.105.171:1935",
- "585c4cab1bef1": "192.240.105.171:1935",
- "5940d648b3929": "192.240.105.171:1935",
- "5941854b39bc4": "192.240.105.171:1935"
+ "594140c69edad": "192.240.105.171:2935",
+ "585c4cab1bef1": "192.240.105.171:2935",
+ "5940d648b3929": "192.240.105.171:2935",
+ "5941854b39bc4": "192.240.105.171:2935"
}
name_remap = {"#vl": "live", "#btv": "btv", "#pt": "pt", "#igb": "instagib", "#vtv": "vtv"}
domain_map = {"vaughnlive": "#vl", "breakers": "#btv", "instagib": "#igb", "vapers": "#vtv", "pearltime": "#pt"}
| {"golden_diff": "diff --git a/src/streamlink/plugins/vaughnlive.py b/src/streamlink/plugins/vaughnlive.py\n--- a/src/streamlink/plugins/vaughnlive.py\n+++ b/src/streamlink/plugins/vaughnlive.py\n@@ -40,10 +40,10 @@\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n- \"594140c69edad\": \"192.240.105.171:1935\",\n- \"585c4cab1bef1\": \"192.240.105.171:1935\",\n- \"5940d648b3929\": \"192.240.105.171:1935\",\n- \"5941854b39bc4\": \"192.240.105.171:1935\"\n+ \"594140c69edad\": \"192.240.105.171:2935\",\n+ \"585c4cab1bef1\": \"192.240.105.171:2935\",\n+ \"5940d648b3929\": \"192.240.105.171:2935\",\n+ \"5941854b39bc4\": \"192.240.105.171:2935\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": \"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", \"pearltime\": \"#pt\"}\n", "issue": "Vaughnlive RTMP port changed from 1935 to 2935\nVery brief bug, very simple fix.\r\n\r\nrtmp_server_map for all requests uses 192.240.105.171:1935 and doesn't work. (No data returned from stream)\r\nrtmp_server_map change all requests to 192.240.105.171:2935 works for me.\r\n\n", "before_files": [{"content": "import itertools\nimport logging\nimport random\nimport re\nimport ssl\n\nimport websocket\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import RTMPStream\n\n_url_re = re.compile(r\"\"\"\n http(s)?://(\\w+\\.)?\n (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv\n (/embed/video)?\n /(?P<channel>[^/&?]+)\n\"\"\", re.VERBOSE)\n\n\nclass VLWebSocket(websocket.WebSocket):\n def __init__(self, **_):\n self.session = _.pop(\"session\")\n self.logger = logging.getLogger(\"streamlink.plugins.vaughnlive.websocket\")\n sslopt = _.pop(\"sslopt\", {})\n sslopt[\"cert_reqs\"] = ssl.CERT_NONE\n super(VLWebSocket, self).__init__(sslopt=sslopt, **_)\n\n def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT):\n self.logger.debug(\"Sending message: {0}\", payload)\n return super(VLWebSocket, self).send(payload + \"\\n\\x00\", opcode)\n\n def recv(self):\n d = super(VLWebSocket, self).recv().replace(\"\\n\", \"\").replace(\"\\x00\", \"\")\n return d.split(\" \", 1)\n\n\nclass VaughnLive(Plugin):\n servers = [\"wss://sapi-ws-{0}x{1:02}.vaughnlive.tv\".format(x, y) for x, y in itertools.product(range(1, 3),\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n \"594140c69edad\": \"192.240.105.171:1935\",\n \"585c4cab1bef1\": \"192.240.105.171:1935\",\n \"5940d648b3929\": \"192.240.105.171:1935\",\n \"5941854b39bc4\": \"192.240.105.171:1935\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": \"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", \"pearltime\": \"#pt\"}\n\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def api_url(self):\n return random.choice(self.servers)\n\n def parse_ack(self, action, message):\n if action.endswith(\"3\"):\n channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(\";\")\n is_live = is_live == \"1\"\n viewers = int(viewers)\n self.logger.debug(\"Viewers: {0}, isLive={1}\", viewers, is_live)\n domain, channel = channel.split(\"-\", 1)\n return is_live, server, domain, channel, token, ingest\n else:\n self.logger.error(\"Unhandled action format: {0}\", action)\n\n 
def _get_info(self, stream_name):\n server = self.api_url()\n self.logger.debug(\"Connecting to API: {0}\", server)\n ws = websocket.create_connection(server,\n header=[\"User-Agent: {0}\".format(useragents.CHROME)],\n origin=self.origin,\n class_=VLWebSocket,\n session=self.session)\n ws.send(\"MVN LOAD3 {0}\".format(stream_name))\n action, message = ws.recv()\n return self.parse_ack(action, message)\n\n def _get_rtmp_streams(self, server, domain, channel, token):\n rtmp_server = self.rtmp_server_map.get(server, server)\n\n url = \"rtmp://{0}/live?{1}\".format(rtmp_server, token)\n\n yield \"live\", RTMPStream(self.session, params={\n \"rtmp\": url,\n \"pageUrl\": self.url,\n \"playpath\": \"{0}_{1}\".format(self.name_remap.get(domain, \"live\"), channel),\n \"live\": True\n })\n\n def _get_streams(self):\n m = _url_re.match(self.url)\n if m:\n stream_name = \"{0}-{1}\".format(self.domain_map[(m.group(\"domain\").lower())],\n m.group(\"channel\"))\n\n is_live, server, domain, channel, token, ingest = self._get_info(stream_name)\n\n if not is_live:\n self.logger.info(\"Stream is currently off air\")\n else:\n self.logger.info(\"Stream powered by VaughnSoft - remember to support them.\")\n for s in self._get_rtmp_streams(server, domain, channel, token):\n yield s\n\n\n__plugin__ = VaughnLive\n", "path": "src/streamlink/plugins/vaughnlive.py"}]} | 2,010 | 482 |
gh_patches_debug_33942 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10633

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
    Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
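To check your doctests locally before opening a PR, you can run the file through the `doctest` module (for example `python3 -m doctest -v path/to/your_file.py`), or add a small self-test block at the bottom of the module — a minimal sketch:

```py
if __name__ == "__main__":
    import doctest

    # Runs every doctest in this module and reports results (verbose=True also lists passes).
    doctest.testmod(verbose=True)
```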
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
</issue>
<code>
[start of backtracking/all_combinations.py]
1 """
2 In this problem, we want to determine all possible combinations of k
3 numbers out of 1 ... n. We use backtracking to solve this problem.
4 Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!)))
5 """
6 from __future__ import annotations
7
8
9 def generate_all_combinations(n: int, k: int) -> list[list[int]]:
10 """
11 >>> generate_all_combinations(n=4, k=2)
12 [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
13 """
14
15 result: list[list[int]] = []
16 create_all_state(1, n, k, [], result)
17 return result
18
19
20 def create_all_state(
21 increment: int,
22 total_number: int,
23 level: int,
24 current_list: list[int],
25 total_list: list[list[int]],
26 ) -> None:
27 if level == 0:
28 total_list.append(current_list[:])
29 return
30
31 for i in range(increment, total_number - level + 2):
32 current_list.append(i)
33 create_all_state(i + 1, total_number, level - 1, current_list, total_list)
34 current_list.pop()
35
36
37 def print_all_state(total_list: list[list[int]]) -> None:
38 for i in total_list:
39 print(*i)
40
41
42 if __name__ == "__main__":
43 n = 4
44 k = 2
45 total_list = generate_all_combinations(n, k)
46 print_all_state(total_list)
47
[end of backtracking/all_combinations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py
--- a/backtracking/all_combinations.py
+++ b/backtracking/all_combinations.py
@@ -1,15 +1,40 @@
"""
In this problem, we want to determine all possible combinations of k
numbers out of 1 ... n. We use backtracking to solve this problem.
- Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!)))
+
+ Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))),
"""
from __future__ import annotations
+from itertools import combinations
+
+
+def combination_lists(n: int, k: int) -> list[list[int]]:
+ """
+ >>> combination_lists(n=4, k=2)
+ [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
+ """
+ return [list(x) for x in combinations(range(1, n + 1), k)]
+
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
"""
>>> generate_all_combinations(n=4, k=2)
[[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
+ >>> generate_all_combinations(n=0, k=0)
+ [[]]
+ >>> generate_all_combinations(n=10, k=-1)
+ Traceback (most recent call last):
+ ...
+ RecursionError: maximum recursion depth exceeded
+ >>> generate_all_combinations(n=-1, k=10)
+ []
+ >>> generate_all_combinations(n=5, k=4)
+ [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]]
+ >>> from itertools import combinations
+ >>> all(generate_all_combinations(n, k) == combination_lists(n, k)
+ ... for n in range(1, 6) for k in range(1, 6))
+ True
"""
result: list[list[int]] = []
@@ -34,13 +59,17 @@
current_list.pop()
-def print_all_state(total_list: list[list[int]]) -> None:
- for i in total_list:
- print(*i)
+if __name__ == "__main__":
+ from doctest import testmod
+ testmod()
+ print(generate_all_combinations(n=4, k=2))
+ tests = ((n, k) for n in range(1, 5) for k in range(1, 5))
+ for n, k in tests:
+ print(n, k, generate_all_combinations(n, k) == combination_lists(n, k))
-if __name__ == "__main__":
- n = 4
- k = 2
- total_list = generate_all_combinations(n, k)
- print_all_state(total_list)
+ print("Benchmark:")
+ from timeit import timeit
+
+ for func in ("combination_lists", "generate_all_combinations"):
+ print(f"{func:>25}(): {timeit(f'{func}(n=4, k = 2)', globals=globals())}")
| {"golden_diff": "diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py\n--- a/backtracking/all_combinations.py\n+++ b/backtracking/all_combinations.py\n@@ -1,15 +1,40 @@\n \"\"\"\n In this problem, we want to determine all possible combinations of k\n numbers out of 1 ... n. We use backtracking to solve this problem.\n- Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!)))\n+\n+ Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))),\n \"\"\"\n from __future__ import annotations\n \n+from itertools import combinations\n+\n+\n+def combination_lists(n: int, k: int) -> list[list[int]]:\n+ \"\"\"\n+ >>> combination_lists(n=4, k=2)\n+ [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]\n+ \"\"\"\n+ return [list(x) for x in combinations(range(1, n + 1), k)]\n+\n \n def generate_all_combinations(n: int, k: int) -> list[list[int]]:\n \"\"\"\n >>> generate_all_combinations(n=4, k=2)\n [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]\n+ >>> generate_all_combinations(n=0, k=0)\n+ [[]]\n+ >>> generate_all_combinations(n=10, k=-1)\n+ Traceback (most recent call last):\n+ ...\n+ RecursionError: maximum recursion depth exceeded\n+ >>> generate_all_combinations(n=-1, k=10)\n+ []\n+ >>> generate_all_combinations(n=5, k=4)\n+ [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]]\n+ >>> from itertools import combinations\n+ >>> all(generate_all_combinations(n, k) == combination_lists(n, k)\n+ ... for n in range(1, 6) for k in range(1, 6))\n+ True\n \"\"\"\n \n result: list[list[int]] = []\n@@ -34,13 +59,17 @@\n current_list.pop()\n \n \n-def print_all_state(total_list: list[list[int]]) -> None:\n- for i in total_list:\n- print(*i)\n+if __name__ == \"__main__\":\n+ from doctest import testmod\n \n+ testmod()\n+ print(generate_all_combinations(n=4, k=2))\n+ tests = ((n, k) for n in range(1, 5) for k in range(1, 5))\n+ for n, k in tests:\n+ print(n, k, generate_all_combinations(n, k) == combination_lists(n, k))\n \n-if __name__ == \"__main__\":\n- n = 4\n- k = 2\n- total_list = generate_all_combinations(n, k)\n- print_all_state(total_list)\n+ print(\"Benchmark:\")\n+ from timeit import timeit\n+\n+ for func in (\"combination_lists\", \"generate_all_combinations\"):\n+ print(f\"{func:>25}(): {timeit(f'{func}(n=4, k = 2)', globals=globals())}\")\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. 
Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\n In this problem, we want to determine all possible combinations of k\n numbers out of 1 ... n. We use backtracking to solve this problem.\n Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! 
* (n - k)!)))\n\"\"\"\nfrom __future__ import annotations\n\n\ndef generate_all_combinations(n: int, k: int) -> list[list[int]]:\n \"\"\"\n >>> generate_all_combinations(n=4, k=2)\n [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]\n \"\"\"\n\n result: list[list[int]] = []\n create_all_state(1, n, k, [], result)\n return result\n\n\ndef create_all_state(\n increment: int,\n total_number: int,\n level: int,\n current_list: list[int],\n total_list: list[list[int]],\n) -> None:\n if level == 0:\n total_list.append(current_list[:])\n return\n\n for i in range(increment, total_number - level + 2):\n current_list.append(i)\n create_all_state(i + 1, total_number, level - 1, current_list, total_list)\n current_list.pop()\n\n\ndef print_all_state(total_list: list[list[int]]) -> None:\n for i in total_list:\n print(*i)\n\n\nif __name__ == \"__main__\":\n n = 4\n k = 2\n total_list = generate_all_combinations(n, k)\n print_all_state(total_list)\n", "path": "backtracking/all_combinations.py"}]} | 1,820 | 810 |
gh_patches_debug_12088 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-639

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Explicitly set encoding for reading history file.
Fixes build in C locale. Otherwise I see:
Traceback (most recent call last):
File "setup.py", line 24, in <module>
history = history_file.read().replace('.. :changelog:', '')
File "/usr/pkg/lib/python3.5/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 6348: ordinal not in range(128)
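As a minimal sketch of the pattern this change applies (not necessarily the exact final code): on Python 3, plain `open()` falls back to `locale.getpreferredencoding()`, which is ASCII under the C locale, so the read needs an explicit encoding:

```python
import io

# io.open accepts an explicit encoding on both Python 2 and Python 3,
# so reading the file no longer depends on the current locale.
with io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
```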
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5
6 from setuptools import setup
7
8 version = "1.3.0"
9
10 if sys.argv[-1] == 'publish':
11 os.system('python setup.py sdist upload')
12 os.system('python setup.py bdist_wheel upload')
13 sys.exit()
14
15 if sys.argv[-1] == 'tag':
16 os.system("git tag -a %s -m 'version %s'" % (version, version))
17 os.system("git push --tags")
18 sys.exit()
19
20 with open('README.rst') as readme_file:
21 readme = readme_file.read()
22
23 with open('HISTORY.rst') as history_file:
24 history = history_file.read().replace('.. :changelog:', '')
25
26 requirements = [
27 'future>=0.15.2',
28 'binaryornot>=0.2.0',
29 'jinja2>=2.7',
30 'click>=5.0',
31 'whichcraft>=0.1.1',
32 'poyo>=0.1.0'
33 ]
34
35 long_description = readme + '\n\n' + history
36
37 if sys.argv[-1] == 'readme':
38 print(long_description)
39 sys.exit()
40
41
42 setup(
43 name='cookiecutter',
44 version=version,
45 description=('A command-line utility that creates projects from project '
46 'templates, e.g. creating a Python package project from a '
47 'Python package project template.'),
48 long_description=long_description,
49 author='Audrey Roy',
50 author_email='[email protected]',
51 url='https://github.com/audreyr/cookiecutter',
52 packages=[
53 'cookiecutter',
54 ],
55 package_dir={'cookiecutter': 'cookiecutter'},
56 entry_points={
57 'console_scripts': [
58 'cookiecutter = cookiecutter.cli:main',
59 ]
60 },
61 include_package_data=True,
62 install_requires=requirements,
63 license='BSD',
64 zip_safe=False,
65 classifiers=[
66 'Development Status :: 5 - Production/Stable',
67 'Environment :: Console',
68 'Intended Audience :: Developers',
69 'Natural Language :: English',
70 'License :: OSI Approved :: BSD License',
71 'Programming Language :: Python',
72 'Programming Language :: Python :: 2',
73 'Programming Language :: Python :: 2.7',
74 'Programming Language :: Python :: 3',
75 'Programming Language :: Python :: 3.3',
76 'Programming Language :: Python :: 3.4',
77 'Programming Language :: Python :: 3.5',
78 'Programming Language :: Python :: Implementation :: CPython',
79 'Programming Language :: Python :: Implementation :: PyPy',
80 'Topic :: Software Development',
81 ],
82 keywords=(
83 'cookiecutter, Python, projects, project templates, Jinja2, '
84 'skeleton, scaffolding, project directory, setup.py, package, '
85 'packaging'
86 ),
87 )
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import os
+import io
import sys
from setuptools import setup
@@ -17,10 +18,10 @@
os.system("git push --tags")
sys.exit()
-with open('README.rst') as readme_file:
+with io.open('README.rst', 'r', encoding='utf-8') as readme_file:
readme = readme_file.read()
-with open('HISTORY.rst') as history_file:
+with io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,7 @@\n #!/usr/bin/env python\n \n import os\n+import io\n import sys\n \n from setuptools import setup\n@@ -17,10 +18,10 @@\n os.system(\"git push --tags\")\n sys.exit()\n \n-with open('README.rst') as readme_file:\n+with io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n \n-with open('HISTORY.rst') as history_file:\n+with io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n \n requirements = [\n", "issue": "Explicitly set encoding for reading history file.\nFixes build in C locale. Otherwise I see:\n\nTraceback (most recent call last):\n File \"setup.py\", line 24, in <module>\n history = history_file.read().replace('.. :changelog:', '')\n File \"/usr/pkg/lib/python3.5/encodings/ascii.py\", line 26, in decode\n return codecs.ascii_decode(input, self.errors)[0]\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 6348: ordinal not in range(128)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.3.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.1.1',\n 'poyo>=0.1.0'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.cli:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]} | 1,453 | 176 |
gh_patches_debug_8011 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No more pagination in the members list
Offending URL: http://beta.zestedesavoir.com/membres/
Only 100 registered members are shown, even though there are more.
</issue>
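The listing below contains the culprit: `paginate_queryset` is called twice in `get_context_data`, and the second call only receives the objects of the already-computed first page, so the paginator ends up believing there is a single page of at most 100 members. A hypothetical, stripped-down illustration (none of these names come from the repository); the accepted fix at the end of this record simply drops the duplicated call:

```python
# Hypothetical illustration of double pagination: the second Paginator only
# sees the objects of the first page, so num_pages collapses to 1.
from django.core.paginator import Paginator

members = list(range(250))        # stand-in for the full member queryset
page_size = 100

first_page = Paginator(members, page_size).page(1)           # intended pagination
repaginated = Paginator(first_page.object_list, page_size)   # accidental second pass
print(repaginated.num_pages)      # -> 1, even though 250 members exist
```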
<code>
[start of zds/utils/paginator.py]
1 # coding: utf-8
2
3 from django.views.generic import ListView
4 from django.views.generic.list import MultipleObjectMixin
5
6 from zds.settings import ZDS_APP
7
8
9 class ZdSPagingListView(ListView):
10 def get_context_data(self, **kwargs):
11 """
12 Get the context for this view. This method is surcharged to modify the paginator
13 and information given at the template.
14 """
15 queryset = kwargs.pop('object_list', self.object_list)
16 page_size = self.get_paginate_by(queryset)
17 context_object_name = self.get_context_object_name(queryset)
18 paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
19 if page_size:
20 paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
21 context = {
22 'paginator': paginator,
23 'page_obj': page,
24 'is_paginated': is_paginated,
25 'object_list': queryset,
26 'pages': paginator_range(page.number, paginator.num_pages),
27 }
28 else:
29 context = {
30 'paginator': None,
31 'page_obj': None,
32 'is_paginated': False,
33 'object_list': queryset,
34 'pages': [],
35 }
36 if context_object_name is not None:
37 context[context_object_name] = queryset
38 context.update(kwargs)
39 return super(MultipleObjectMixin, self).get_context_data(**context)
40
41
42 def paginator_range(current, stop, start=1):
43 assert (current <= stop)
44
45 # Basic case when no folding
46 if stop - start <= ZDS_APP['paginator']['folding_limit']:
47 return range(start, stop + 1)
48
49 # Complex case when folding
50 lst = []
51 for page_number in range(start, stop + 1):
52 # Bounds
53 if page_number == start or page_number == stop:
54 lst.append(page_number)
55 if page_number == start and current - start > 2:
56 lst.append(None)
57 # Neighbors
58 elif abs(page_number - current) == 1:
59 lst.append(page_number)
60 if page_number - current > 0 and stop - page_number > 2:
61 lst.append(None)
62 # Current
63 elif page_number == current:
64 lst.append(page_number)
65 # Put some
66 elif page_number == stop - 1 and current == stop - 3:
67 lst.append(page_number)
68 # And ignore all other numbers
69
70 return lst
71
[end of zds/utils/paginator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/paginator.py b/zds/utils/paginator.py
--- a/zds/utils/paginator.py
+++ b/zds/utils/paginator.py
@@ -17,7 +17,6 @@
context_object_name = self.get_context_object_name(queryset)
paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
if page_size:
- paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
context = {
'paginator': paginator,
'page_obj': page,
| {"golden_diff": "diff --git a/zds/utils/paginator.py b/zds/utils/paginator.py\n--- a/zds/utils/paginator.py\n+++ b/zds/utils/paginator.py\n@@ -17,7 +17,6 @@\n context_object_name = self.get_context_object_name(queryset)\n paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n if page_size:\n- paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n context = {\n 'paginator': paginator,\n 'page_obj': page,\n", "issue": "Plus de pagination dans la liste des membres\nUrl incrimin\u00e9e : http://beta.zestedesavoir.com/membres/\n\nOn ne voit que 100 membres inscrit, alors qu'il y'en a plus.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom django.views.generic import ListView\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom zds.settings import ZDS_APP\n\n\nclass ZdSPagingListView(ListView):\n def get_context_data(self, **kwargs):\n \"\"\"\n Get the context for this view. This method is surcharged to modify the paginator\n and information given at the template.\n \"\"\"\n queryset = kwargs.pop('object_list', self.object_list)\n page_size = self.get_paginate_by(queryset)\n context_object_name = self.get_context_object_name(queryset)\n paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n if page_size:\n paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)\n context = {\n 'paginator': paginator,\n 'page_obj': page,\n 'is_paginated': is_paginated,\n 'object_list': queryset,\n 'pages': paginator_range(page.number, paginator.num_pages),\n }\n else:\n context = {\n 'paginator': None,\n 'page_obj': None,\n 'is_paginated': False,\n 'object_list': queryset,\n 'pages': [],\n }\n if context_object_name is not None:\n context[context_object_name] = queryset\n context.update(kwargs)\n return super(MultipleObjectMixin, self).get_context_data(**context)\n\n\ndef paginator_range(current, stop, start=1):\n assert (current <= stop)\n\n # Basic case when no folding\n if stop - start <= ZDS_APP['paginator']['folding_limit']:\n return range(start, stop + 1)\n\n # Complex case when folding\n lst = []\n for page_number in range(start, stop + 1):\n # Bounds\n if page_number == start or page_number == stop:\n lst.append(page_number)\n if page_number == start and current - start > 2:\n lst.append(None)\n # Neighbors\n elif abs(page_number - current) == 1:\n lst.append(page_number)\n if page_number - current > 0 and stop - page_number > 2:\n lst.append(None)\n # Current\n elif page_number == current:\n lst.append(page_number)\n # Put some\n elif page_number == stop - 1 and current == stop - 3:\n lst.append(page_number)\n # And ignore all other numbers\n\n return lst\n", "path": "zds/utils/paginator.py"}]} | 1,248 | 127 |
gh_patches_debug_18462 | rasdani/github-patches | git_diff | aio-libs__aiohttp-5118 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
option to disable automatic client response body decompression
enhancement for https://github.com/aio-libs/aiohttp/issues/1992
</issue>
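The request above is a single line, so for context: aiohttp's client session does expose a switch for this. The sketch below assumes the keyword is `auto_decompress`; treat the parameter name and the URL as assumptions rather than something confirmed by this record:

```python
# Assumed usage: opt out of automatic Content-Encoding handling so the body is
# returned exactly as the server sent it (parameter name is an assumption).
import asyncio

import aiohttp


async def fetch_raw(url: str) -> bytes:
    async with aiohttp.ClientSession(auto_decompress=False) as session:
        async with session.get(url) as resp:
            return await resp.read()   # still gzip/deflate-compressed if the server compressed it


raw = asyncio.run(fetch_raw("https://example.org/"))
```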
<code>
[start of aiohttp/resolver.py]
1 import socket
2 from typing import Any, Dict, List
3
4 from .abc import AbstractResolver
5 from .helpers import get_running_loop
6
7 __all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver")
8
9 try:
10 import aiodns
11
12 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
13 except ImportError: # pragma: no cover
14 aiodns = None
15
16 aiodns_default = False
17
18
19 class ThreadedResolver(AbstractResolver):
20 """Use Executor for synchronous getaddrinfo() calls, which defaults to
21 concurrent.futures.ThreadPoolExecutor.
22 """
23
24 def __init__(self) -> None:
25 self._loop = get_running_loop()
26
27 async def resolve(
28 self, host: str, port: int = 0, family: int = socket.AF_INET
29 ) -> List[Dict[str, Any]]:
30 infos = await self._loop.getaddrinfo(
31 host, port, type=socket.SOCK_STREAM, family=family
32 )
33
34 hosts = []
35 for family, _, proto, _, address in infos:
36 if family == socket.AF_INET6 and address[3]: # type: ignore
37 # This is essential for link-local IPv6 addresses.
38 # LL IPv6 is a VERY rare case. Strictly speaking, we should use
39 # getnameinfo() unconditionally, but performance makes sense.
40 host, _port = socket.getnameinfo(
41 address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
42 )
43 port = int(_port)
44 else:
45 host, port = address[:2]
46 hosts.append(
47 {
48 "hostname": host,
49 "host": host,
50 "port": port,
51 "family": family,
52 "proto": proto,
53 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
54 }
55 )
56
57 return hosts
58
59 async def close(self) -> None:
60 pass
61
62
63 class AsyncResolver(AbstractResolver):
64 """Use the `aiodns` package to make asynchronous DNS lookups"""
65
66 def __init__(self, *args: Any, **kwargs: Any) -> None:
67 if aiodns is None:
68 raise RuntimeError("Resolver requires aiodns library")
69
70 self._loop = get_running_loop()
71 self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
72
73 async def resolve(
74 self, host: str, port: int = 0, family: int = socket.AF_INET
75 ) -> List[Dict[str, Any]]:
76 try:
77 resp = await self._resolver.gethostbyname(host, family)
78 except aiodns.error.DNSError as exc:
79 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
80 raise OSError(msg) from exc
81 hosts = []
82 for address in resp.addresses:
83 hosts.append(
84 {
85 "hostname": host,
86 "host": address,
87 "port": port,
88 "family": family,
89 "proto": 0,
90 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
91 }
92 )
93
94 if not hosts:
95 raise OSError("DNS lookup failed")
96
97 return hosts
98
99 async def close(self) -> None:
100 return self._resolver.cancel()
101
102
103 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
104
[end of aiohttp/resolver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py
--- a/aiohttp/resolver.py
+++ b/aiohttp/resolver.py
@@ -25,10 +25,10 @@
self._loop = get_running_loop()
async def resolve(
- self, host: str, port: int = 0, family: int = socket.AF_INET
+ self, hostname: str, port: int = 0, family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
infos = await self._loop.getaddrinfo(
- host, port, type=socket.SOCK_STREAM, family=family
+ hostname, port, type=socket.SOCK_STREAM, family=family
)
hosts = []
@@ -45,7 +45,7 @@
host, port = address[:2]
hosts.append(
{
- "hostname": host,
+ "hostname": hostname,
"host": host,
"port": port,
"family": family,
| {"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -25,10 +25,10 @@\n self._loop = get_running_loop()\n \n async def resolve(\n- self, host: str, port: int = 0, family: int = socket.AF_INET\n+ self, hostname: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n- host, port, type=socket.SOCK_STREAM, family=family\n+ hostname, port, type=socket.SOCK_STREAM, family=family\n )\n \n hosts = []\n@@ -45,7 +45,7 @@\n host, port = address[:2]\n hosts.append(\n {\n- \"hostname\": host,\n+ \"hostname\": hostname,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n", "issue": "option to disable automatic client response body decompression\nenhancement for https://github.com/aio-libs/aiohttp/issues/1992\n", "before_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = (\"ThreadedResolver\", \"AsyncResolver\", \"DefaultResolver\")\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family\n )\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV\n )\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append(\n {\n \"hostname\": host,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n \"proto\": proto,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": address,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]} | 1,512 | 231 |
gh_patches_debug_44753 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] ShowTypingMiddleware middleware in a python bot not functioning
## Version
`botbuilder-core 4.7.1`
`botbuilder-schema 4.7.1`
## Describe the bug
``` python
#app.py
ADAPTER = BotFrameworkAdapter(SETTINGS)
# show typing indicator on long activities
ADAPTER.use(ShowTypingMiddleware(delay=0.5, period=2.0))
```
``` python
#bot.py
...
async def on_message_activity(self, turn_context: TurnContext):
if turn_context.activity.text == "middleware":
await asyncio.sleep(10) # mock getting some data
await turn_context.send_activity("done")
...
```
## Expected behavior
I expect that calling the middleware
- shows a TI for activities taking longer than .5 seconds
- repeat sending a TI to the client every 2 seconds
## Actual results :
- TI is sent one time only
- no repeat TI are sent
- a runtime warning is shown:
```
c:\develop\x\pybot1\.venv\lib\site-packages\botbuilder\core\show_typing_middleware.py:79:
RuntimeWarning: coroutine 'ShowTypingMiddleware.on_turn.<locals>.start_interval' was never awaited
start_interval(context, period, period)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
```
In the emulator log it is clear that only one TI indicator is sent , and no repeats are to be seen
```
[16:55:12]<- messageYou said 'middleware'
[16:55:12]POST200conversations.:conversationId.activities.:activityId
[16:55:12]POST201directline.conversations.:conversationId.activities
[16:55:43]-> messagemiddleware
[16:55:44]<- typing
[16:55:44]POST200conversations.:conversationId.activities.:activityId
[16:55:54]<- messagedone
[16:55:54]POST200conversations.:conversationId.activities.:activityId
[16:55:54]POST201directline.conversations.:conversationId.activities
```
## Additional context
also see Question on [SO](https://stackoverflow.com/posts/60467080/edit)
</issue>
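The RuntimeWarning quoted above is the key clue: `start_interval` is a coroutine that is created but never awaited, and the `time.sleep` based timer blocks instead of repeating. The accepted diff further down rebuilds the timer on top of `asyncio`; a condensed, self-contained sketch of that pattern follows, with `print` standing in for sending the typing activity:

```python
# Condensed sketch of the asyncio-based repeating timer used by the fix:
# wait with asyncio.sleep instead of blocking time.sleep, and re-arm the timer
# with the repeat period until the bot logic completes.
import asyncio


class Timer:
    clear_timer = False

    def set_timeout(self, func, span):
        async def delayed_call():
            await asyncio.sleep(span)
            if not self.clear_timer:
                await func()
        asyncio.ensure_future(delayed_call())

    def set_clear_timer(self):
        self.clear_timer = True


async def demo():
    timer = Timer()

    async def send_typing():
        print("<- typing")                      # stand-in for adapter.send_activities(...)
        timer.set_timeout(send_typing, 2.0)     # re-arm with the repeat period

    timer.set_timeout(send_typing, 0.5)         # first indicator after the initial delay
    await asyncio.sleep(10)                     # stand-in for the slow bot logic
    timer.set_clear_timer()                     # stop once the reply is ready
    await asyncio.sleep(2.0)                    # let the last armed timer observe the flag


asyncio.run(demo())
```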
<code>
[start of libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import time
5 from functools import wraps
6 from typing import Awaitable, Callable
7
8 from botbuilder.schema import Activity, ActivityTypes
9
10 from .middleware_set import Middleware
11 from .turn_context import TurnContext
12
13
14 def delay(span=0.0):
15 def wrap(func):
16 @wraps(func)
17 async def delayed():
18 time.sleep(span)
19 await func()
20
21 return delayed
22
23 return wrap
24
25
26 class Timer:
27 clear_timer = False
28
29 async def set_timeout(self, func, time):
30 is_invocation_cancelled = False
31
32 @delay(time)
33 async def some_fn(): # pylint: disable=function-redefined
34 if not self.clear_timer:
35 await func()
36
37 await some_fn()
38 return is_invocation_cancelled
39
40 def set_clear_timer(self):
41 self.clear_timer = True
42
43
44 class ShowTypingMiddleware(Middleware):
45 def __init__(self, delay: float = 0.5, period: float = 2.0):
46 if delay < 0:
47 raise ValueError("Delay must be greater than or equal to zero")
48
49 if period <= 0:
50 raise ValueError("Repeat period must be greater than zero")
51
52 self._delay = delay
53 self._period = period
54
55 async def on_turn(
56 self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
57 ):
58 finished = False
59 timer = Timer()
60
61 async def start_interval(context: TurnContext, delay: int, period: int):
62 async def aux():
63 if not finished:
64 typing_activity = Activity(
65 type=ActivityTypes.typing,
66 relates_to=context.activity.relates_to,
67 )
68
69 conversation_reference = TurnContext.get_conversation_reference(
70 context.activity
71 )
72
73 typing_activity = TurnContext.apply_conversation_reference(
74 typing_activity, conversation_reference
75 )
76
77 await context.adapter.send_activities(context, [typing_activity])
78
79 start_interval(context, period, period)
80
81 await timer.set_timeout(aux, delay)
82
83 def stop_interval():
84 nonlocal finished
85 finished = True
86 timer.set_clear_timer()
87
88 if context.activity.type == ActivityTypes.message:
89 finished = False
90 await start_interval(context, self._delay, self._period)
91
92 result = await logic()
93 stop_interval()
94
95 return result
96
[end of libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
@@ -1,8 +1,6 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
-
-import time
-from functools import wraps
+import asyncio
from typing import Awaitable, Callable
from botbuilder.schema import Activity, ActivityTypes
@@ -11,38 +9,38 @@
from .turn_context import TurnContext
-def delay(span=0.0):
- def wrap(func):
- @wraps(func)
- async def delayed():
- time.sleep(span)
- await func()
-
- return delayed
-
- return wrap
-
-
class Timer:
clear_timer = False
- async def set_timeout(self, func, time):
- is_invocation_cancelled = False
-
- @delay(time)
+ def set_timeout(self, func, span):
async def some_fn(): # pylint: disable=function-redefined
+ await asyncio.sleep(span)
if not self.clear_timer:
await func()
- await some_fn()
- return is_invocation_cancelled
+ asyncio.ensure_future(some_fn())
def set_clear_timer(self):
self.clear_timer = True
class ShowTypingMiddleware(Middleware):
+ """
+ When added, this middleware will send typing activities back to the user when a Message activity
+ is received to let them know that the bot has received the message and is working on the response.
+ You can specify a delay before the first typing activity is sent and then a frequency, which
+ determines how often another typing activity is sent. Typing activities will continue to be sent
+ until your bot sends another message back to the user.
+ """
+
def __init__(self, delay: float = 0.5, period: float = 2.0):
+ """
+ Initializes the middleware.
+
+ :param delay: Delay in seconds for the first typing indicator to be sent.
+ :param period: Delay in seconds for subsequent typing indicators.
+ """
+
if delay < 0:
raise ValueError("Delay must be greater than or equal to zero")
@@ -55,41 +53,43 @@
async def on_turn(
self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
):
- finished = False
timer = Timer()
- async def start_interval(context: TurnContext, delay: int, period: int):
+ def start_interval(context: TurnContext, delay, period):
async def aux():
- if not finished:
- typing_activity = Activity(
- type=ActivityTypes.typing,
- relates_to=context.activity.relates_to,
- )
+ typing_activity = Activity(
+ type=ActivityTypes.typing, relates_to=context.activity.relates_to,
+ )
- conversation_reference = TurnContext.get_conversation_reference(
- context.activity
- )
+ conversation_reference = TurnContext.get_conversation_reference(
+ context.activity
+ )
- typing_activity = TurnContext.apply_conversation_reference(
- typing_activity, conversation_reference
- )
+ typing_activity = TurnContext.apply_conversation_reference(
+ typing_activity, conversation_reference
+ )
- await context.adapter.send_activities(context, [typing_activity])
+ asyncio.ensure_future(
+ context.adapter.send_activities(context, [typing_activity])
+ )
- start_interval(context, period, period)
+ # restart the timer, with the 'period' value for the delay
+ timer.set_timeout(aux, period)
- await timer.set_timeout(aux, delay)
+ # first time through we use the 'delay' value for the timer.
+ timer.set_timeout(aux, delay)
def stop_interval():
- nonlocal finished
- finished = True
timer.set_clear_timer()
+ # if it's a message, start sending typing activities until the
+ # bot logic is done.
if context.activity.type == ActivityTypes.message:
- finished = False
- await start_interval(context, self._delay, self._period)
+ start_interval(context, self._delay, self._period)
+ # call the bot logic
result = await logic()
+
stop_interval()
return result
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n@@ -1,8 +1,6 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\r\n # Licensed under the MIT License.\r\n-\r\n-import time\r\n-from functools import wraps\r\n+import asyncio\r\n from typing import Awaitable, Callable\r\n \r\n from botbuilder.schema import Activity, ActivityTypes\r\n@@ -11,38 +9,38 @@\n from .turn_context import TurnContext\r\n \r\n \r\n-def delay(span=0.0):\r\n- def wrap(func):\r\n- @wraps(func)\r\n- async def delayed():\r\n- time.sleep(span)\r\n- await func()\r\n-\r\n- return delayed\r\n-\r\n- return wrap\r\n-\r\n-\r\n class Timer:\r\n clear_timer = False\r\n \r\n- async def set_timeout(self, func, time):\r\n- is_invocation_cancelled = False\r\n-\r\n- @delay(time)\r\n+ def set_timeout(self, func, span):\r\n async def some_fn(): # pylint: disable=function-redefined\r\n+ await asyncio.sleep(span)\r\n if not self.clear_timer:\r\n await func()\r\n \r\n- await some_fn()\r\n- return is_invocation_cancelled\r\n+ asyncio.ensure_future(some_fn())\r\n \r\n def set_clear_timer(self):\r\n self.clear_timer = True\r\n \r\n \r\n class ShowTypingMiddleware(Middleware):\r\n+ \"\"\"\r\n+ When added, this middleware will send typing activities back to the user when a Message activity\r\n+ is received to let them know that the bot has received the message and is working on the response.\r\n+ You can specify a delay before the first typing activity is sent and then a frequency, which\r\n+ determines how often another typing activity is sent. 
Typing activities will continue to be sent\r\n+ until your bot sends another message back to the user.\r\n+ \"\"\"\r\n+\r\n def __init__(self, delay: float = 0.5, period: float = 2.0):\r\n+ \"\"\"\r\n+ Initializes the middleware.\r\n+\r\n+ :param delay: Delay in seconds for the first typing indicator to be sent.\r\n+ :param period: Delay in seconds for subsequent typing indicators.\r\n+ \"\"\"\r\n+\r\n if delay < 0:\r\n raise ValueError(\"Delay must be greater than or equal to zero\")\r\n \r\n@@ -55,41 +53,43 @@\n async def on_turn(\r\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\r\n ):\r\n- finished = False\r\n timer = Timer()\r\n \r\n- async def start_interval(context: TurnContext, delay: int, period: int):\r\n+ def start_interval(context: TurnContext, delay, period):\r\n async def aux():\r\n- if not finished:\r\n- typing_activity = Activity(\r\n- type=ActivityTypes.typing,\r\n- relates_to=context.activity.relates_to,\r\n- )\r\n+ typing_activity = Activity(\r\n+ type=ActivityTypes.typing, relates_to=context.activity.relates_to,\r\n+ )\r\n \r\n- conversation_reference = TurnContext.get_conversation_reference(\r\n- context.activity\r\n- )\r\n+ conversation_reference = TurnContext.get_conversation_reference(\r\n+ context.activity\r\n+ )\r\n \r\n- typing_activity = TurnContext.apply_conversation_reference(\r\n- typing_activity, conversation_reference\r\n- )\r\n+ typing_activity = TurnContext.apply_conversation_reference(\r\n+ typing_activity, conversation_reference\r\n+ )\r\n \r\n- await context.adapter.send_activities(context, [typing_activity])\r\n+ asyncio.ensure_future(\r\n+ context.adapter.send_activities(context, [typing_activity])\r\n+ )\r\n \r\n- start_interval(context, period, period)\r\n+ # restart the timer, with the 'period' value for the delay\r\n+ timer.set_timeout(aux, period)\r\n \r\n- await timer.set_timeout(aux, delay)\r\n+ # first time through we use the 'delay' value for the timer.\r\n+ timer.set_timeout(aux, delay)\r\n \r\n def stop_interval():\r\n- nonlocal finished\r\n- finished = True\r\n timer.set_clear_timer()\r\n \r\n+ # if it's a message, start sending typing activities until the\r\n+ # bot logic is done.\r\n if context.activity.type == ActivityTypes.message:\r\n- finished = False\r\n- await start_interval(context, self._delay, self._period)\r\n+ start_interval(context, self._delay, self._period)\r\n \r\n+ # call the bot logic\r\n result = await logic()\r\n+\r\n stop_interval()\r\n \r\n return result\n", "issue": "[bug] ShowTypingMiddleware middleware in a python bot not functioning\n## Version\r\n`botbuilder-core 4.7.1` \r\n`botbuilder-schema 4.7.1`\r\n\r\n## Describe the bug\r\n\r\n\r\n``` python\r\n#app.py \r\nADAPTER = BotFrameworkAdapter(SETTINGS)\r\n# show typing indicator on long activities\r\nADAPTER.use(ShowTypingMiddleware(delay=0.5, period=2.0))\r\n```\r\n\r\n``` python\r\n#bot.py \r\n...\r\n\r\n async def on_message_activity(self, turn_context: TurnContext):\r\n if turn_context.activity.text == \"middleware\":\r\n await asyncio.sleep(10) # mock getting some data \r\n await turn_context.send_activity(\"done\")\r\n\r\n...\r\n```\r\n\r\n## Expected behavior\r\n\r\nI expect that calling the middleware \r\n- shows a TI for activities taking longer than .5 seconds \r\n- repeat sending a TI to the client every 2 seconds \r\n\r\n## Actual results : \r\n\r\n - TI is sent one time only\r\n - no repeat TI are sent \r\n - a runtime warning is shown:\r\n```\r\n 
c:\\develop\\x\\pybot1\\.venv\\lib\\site-packages\\botbuilder\\core\\show_typing_middleware.py:79: \r\nRuntimeWarning: coroutine 'ShowTypingMiddleware.on_turn.<locals>.start_interval' was never awaited\r\n start_interval(context, period, period)\r\nRuntimeWarning: Enable tracemalloc to get the object allocation traceback\r\n```\r\n\r\nIn the emulator log it is clear that only one TI indicator is sent , and no repeats are to be seen\r\n```\r\n[16:55:12]<- messageYou said 'middleware'\r\n[16:55:12]POST200conversations.:conversationId.activities.:activityId\r\n[16:55:12]POST201directline.conversations.:conversationId.activities\r\n[16:55:43]-> messagemiddleware\r\n[16:55:44]<- typing\r\n[16:55:44]POST200conversations.:conversationId.activities.:activityId\r\n[16:55:54]<- messagedone\r\n[16:55:54]POST200conversations.:conversationId.activities.:activityId\r\n[16:55:54]POST201directline.conversations.:conversationId.activities\r\n```\r\n\r\n## Additional context\r\nalso see Question on [SO](https://stackoverflow.com/posts/60467080/edit)\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nimport time\r\nfrom functools import wraps\r\nfrom typing import Awaitable, Callable\r\n\r\nfrom botbuilder.schema import Activity, ActivityTypes\r\n\r\nfrom .middleware_set import Middleware\r\nfrom .turn_context import TurnContext\r\n\r\n\r\ndef delay(span=0.0):\r\n def wrap(func):\r\n @wraps(func)\r\n async def delayed():\r\n time.sleep(span)\r\n await func()\r\n\r\n return delayed\r\n\r\n return wrap\r\n\r\n\r\nclass Timer:\r\n clear_timer = False\r\n\r\n async def set_timeout(self, func, time):\r\n is_invocation_cancelled = False\r\n\r\n @delay(time)\r\n async def some_fn(): # pylint: disable=function-redefined\r\n if not self.clear_timer:\r\n await func()\r\n\r\n await some_fn()\r\n return is_invocation_cancelled\r\n\r\n def set_clear_timer(self):\r\n self.clear_timer = True\r\n\r\n\r\nclass ShowTypingMiddleware(Middleware):\r\n def __init__(self, delay: float = 0.5, period: float = 2.0):\r\n if delay < 0:\r\n raise ValueError(\"Delay must be greater than or equal to zero\")\r\n\r\n if period <= 0:\r\n raise ValueError(\"Repeat period must be greater than zero\")\r\n\r\n self._delay = delay\r\n self._period = period\r\n\r\n async def on_turn(\r\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\r\n ):\r\n finished = False\r\n timer = Timer()\r\n\r\n async def start_interval(context: TurnContext, delay: int, period: int):\r\n async def aux():\r\n if not finished:\r\n typing_activity = Activity(\r\n type=ActivityTypes.typing,\r\n relates_to=context.activity.relates_to,\r\n )\r\n\r\n conversation_reference = TurnContext.get_conversation_reference(\r\n context.activity\r\n )\r\n\r\n typing_activity = TurnContext.apply_conversation_reference(\r\n typing_activity, conversation_reference\r\n )\r\n\r\n await context.adapter.send_activities(context, [typing_activity])\r\n\r\n start_interval(context, period, period)\r\n\r\n await timer.set_timeout(aux, delay)\r\n\r\n def stop_interval():\r\n nonlocal finished\r\n finished = True\r\n timer.set_clear_timer()\r\n\r\n if context.activity.type == ActivityTypes.message:\r\n finished = False\r\n await start_interval(context, self._delay, self._period)\r\n\r\n result = await logic()\r\n stop_interval()\r\n\r\n return result\r\n", "path": "libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py"}]} | 1,795 | 1,013 |
gh_patches_debug_10155 | rasdani/github-patches | git_diff | Mailu__Mailu-1885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Postfix no longer starts correctly in kubernetes
## Environment & Versions
### Environment
- [ ] docker-compose
- [X] kubernetes
- [ ] docker swarm
### Versions
1.8
## Description
After installing mailu 1.8 via helm on Kubernetes, the `mailu-postfix` container runs but never reaches a healthy state and smtp functionality is impaired.
After digging into it, I believe the container is failing to become healthy because of the following change to the postfix container's startup script (https://github.com/Mailu/Mailu/commit/1d65529c94f54de3cb49ed9584ed95f7860c26fa) and a known issue with the musl resolver that the alpine base image uses (https://github.com/kubernetes/kubernetes/issues/64924).
Resolving the mailu installation hostname never succeeds because of the aforementioned bug, and `socrate.system.resolve_hostname` simply retries until the pod's failure threshold is exceeded and the pod is restarted.
There's a couple different ways I believe this could be resolved:
1. Pass a FQDN to `system.resolve_hostname()`, which avoids the resolver bug with search lists, i.e. `domain.com.` with a trailing dot.
2. Update the deployment manifest in the mailu helm chart to use `dnsConfig.options` on the pod spec to set a more agreeable `ndots` value for `/etc/resolv.conf`
3. Use a different base image for mailu containers that is not affected by this issue.
I would be happy to investigate further and file a PR with the appropriate changes based on feedback. Thanks!
</issue>
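Of the three options listed above, the first one is what the accepted patch at the end of this record implements: make the name absolute before resolving it, so the musl resolver never walks the cluster's search list. A minimal sketch of that idea, with an illustrative hostname value:

```python
# Force a fully-qualified lookup by appending the trailing dot before calling
# the resolver helper already used in start.py (hostname value illustrative).
from socrate import system

outclean = "mail.example.com"       # first entry of HOSTNAMES in a real deployment
if not outclean.endswith("."):
    outclean += "."                 # "mail.example.com." bypasses the search list
outclean_address = system.resolve_hostname(outclean)
```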
<code>
[start of core/postfix/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9
10 from podop import run_server
11 from socrate import system, conf
12
13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
14
15 def start_podop():
16 os.setuid(100)
17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
18 # TODO: Remove verbosity setting from Podop?
19 run_server(0, "postfix", "/tmp/podop.socket", [
20 ("transport", "url", url + "transport/§"),
21 ("alias", "url", url + "alias/§"),
22 ("domain", "url", url + "domain/§"),
23 ("mailbox", "url", url + "mailbox/§"),
24 ("recipientmap", "url", url + "recipient/map/§"),
25 ("sendermap", "url", url + "sender/map/§"),
26 ("senderaccess", "url", url + "sender/access/§"),
27 ("senderlogin", "url", url + "sender/login/§")
28 ])
29
30 def is_valid_postconf_line(line):
31 return not line.startswith("#") \
32 and not line == ''
33
34 # Actual startup script
35 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
36 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
37 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
38 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
39 os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0]
40 try:
41 os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(os.environ["OUTCLEAN"])
42 except:
43 os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10"
44
45 for postfix_file in glob.glob("/conf/*.cf"):
46 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
47
48 if os.path.exists("/overrides/postfix.cf"):
49 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
50 if is_valid_postconf_line(line):
51 os.system('postconf -e "{}"'.format(line))
52
53 if os.path.exists("/overrides/postfix.master"):
54 for line in open("/overrides/postfix.master").read().strip().split("\n"):
55 if is_valid_postconf_line(line):
56 os.system('postconf -Me "{}"'.format(line))
57
58 for map_file in glob.glob("/overrides/*.map"):
59 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
60 shutil.copyfile(map_file, destination)
61 os.system("postmap {}".format(destination))
62 os.remove(destination)
63
64 if "RELAYUSER" in os.environ:
65 path = "/etc/postfix/sasl_passwd"
66 conf.jinja("/conf/sasl_passwd", os.environ, path)
67 os.system("postmap {}".format(path))
68
69 # Run Podop and Postfix
70 multiprocessing.Process(target=start_podop).start()
71 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
72 # Before starting postfix, we need to check permissions on /queue
73 # in the event that postfix,postdrop id have changed
74 os.system("postfix set-permissions")
75 os.system("postfix start-fg")
76
[end of core/postfix/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -38,7 +38,11 @@
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0]
try:
- os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(os.environ["OUTCLEAN"])
+ _to_lookup = os.environ["OUTCLEAN"]
+ # Ensure we lookup a FQDN: @see #1884
+ if not _to_lookup.endswith('.'):
+ _to_lookup += '.'
+ os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(_to_lookup)
except:
os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10"
| {"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -38,7 +38,11 @@\n os.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\n os.environ[\"OUTCLEAN\"] = os.environ[\"HOSTNAMES\"].split(\",\")[0]\n try:\n- os.environ[\"OUTCLEAN_ADDRESS\"] = system.resolve_hostname(os.environ[\"OUTCLEAN\"])\n+ _to_lookup = os.environ[\"OUTCLEAN\"]\n+ # Ensure we lookup a FQDN: @see #1884\n+ if not _to_lookup.endswith('.'):\n+ _to_lookup += '.'\n+ os.environ[\"OUTCLEAN_ADDRESS\"] = system.resolve_hostname(_to_lookup)\n except:\n os.environ[\"OUTCLEAN_ADDRESS\"] = \"10.10.10.10\"\n", "issue": "Postfix no longer starts correctly in kubernetes\n## Environment & Versions\r\n### Environment\r\n - [ ] docker-compose\r\n - [X] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\n1.8\r\n\r\n## Description\r\nAfter installing mailu 1.8 via helm on Kubernetes, the `mailu-postfix` container runs but never reaches a healthy state and smtp functionality is impaired.\r\n\r\nAfter digging into it I believe the container is failing to become healthy because the following change to the postfix container's startup script (https://github.com/Mailu/Mailu/commit/1d65529c94f54de3cb49ed9584ed95f7860c26fa) and a known issue with the musl resolver the alpine base image uses (https://github.com/kubernetes/kubernetes/issues/64924).\r\n\r\nResolving the mailu installation hostname never succeeds because of the aforementioned bug, and `socrates.system.resolve_hostname` simply retries until the pod's failure threshold is exceeded and is restarted.\r\n\r\nThere's a couple different ways I believe this could be resolved:\r\n\r\n1. Pass a FQDN to `system.resolve_hostname()`, which avoids the resolver bug with search lists, i.e. `domain.com.` with a trailing dot.\r\n\r\n2. Update the deployment manifest in the mailu helm chart to use `dnsConfig.options` on the pod spec to set a more agreeable `ndots` value for `/etc/resolv.conf`\r\n\r\n3. Use a different base image for mailu containers that is not affected by this issue.\r\n\r\nI would be happy to investigate further and file a PR with the appropriate changes based on feedback. 
Thanks!\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\nos.environ[\"OUTCLEAN\"] = os.environ[\"HOSTNAMES\"].split(\",\")[0]\ntry:\n os.environ[\"OUTCLEAN_ADDRESS\"] = system.resolve_hostname(os.environ[\"OUTCLEAN\"])\nexcept:\n os.environ[\"OUTCLEAN_ADDRESS\"] = \"10.10.10.10\"\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]} | 1,808 | 208 |
gh_patches_debug_8254 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2448 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add review count to book search listing to concentrate reviews
Often when I'm searching for a book to e.g. mark as having started reading, or to figure out if any other wyrms have reviewed it, I'll have more than one search result.

_two results for wayfarer no 2_
I typically don't really care so much about reviewing a given edition (I read a lot of non-scholarly ebooks). So instead of finding a particular edition, I want to find the one that has been reviewed by people I follow & whose judgement I trust. Similarly, I'd want to contribute _my_ review to that growing pile of context around a given book.
To aid this, I suggest adding some light information markers to the search results. # of reviews would be one concrete suggestion; another would be to display which ones people I'm following have reviewed. Basically use whatever makes sense from a fast query perspective imo :)
Thanks again for bookwyrm! It's a delightful space and I've found _so_ many books over the soon-to-be 2 years since I joined!! u rok
</issue>
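A lightweight way to surface the suggested review count is a template filter that counts the non-deleted reviews attached to a book, which is what the accepted patch below adds. A sketch of the filter, plus an assumed way to use it in a search-result template:

```python
# Sketch of a review-count template filter (mirrors the accepted patch): count
# the non-deleted Review objects that reference the given book.
from django import template

from bookwyrm import models

register = template.Library()


@register.filter(name="review_count")
def get_review_count(book):
    """how many reviews?"""
    return models.Review.objects.filter(deleted=False, book=book).count()
```

In a search-result template this could then be rendered as `{{ book|review_count }}`; the template name and exact placement are assumptions, not taken from this record.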
<code>
[start of bookwyrm/templatetags/book_display_tags.py]
1 """ template filters """
2 from django import template
3
4
5 register = template.Library()
6
7
8 @register.filter(name="book_description")
9 def get_book_description(book):
10 """use the work's text if the book doesn't have it"""
11 if book.description:
12 return book.description
13 if book.parent_work:
14 # this shoud always be true
15 return book.parent_work.description
16 return None
17
18
19 @register.simple_tag(takes_context=False)
20 def get_book_file_links(book):
21 """links for a book"""
22 return book.file_links.filter(domain__status="approved")
23
24
25 @register.filter(name="author_edition")
26 def get_author_edition(book, author):
27 """default edition for a book on the author page"""
28 return book.author_edition(author)
29
[end of bookwyrm/templatetags/book_display_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/templatetags/book_display_tags.py b/bookwyrm/templatetags/book_display_tags.py
--- a/bookwyrm/templatetags/book_display_tags.py
+++ b/bookwyrm/templatetags/book_display_tags.py
@@ -1,10 +1,17 @@
""" template filters """
from django import template
+from bookwyrm import models
register = template.Library()
[email protected](name="review_count")
+def get_review_count(book):
+ """how many reviews?"""
+ return models.Review.objects.filter(deleted=False, book=book).count()
+
+
@register.filter(name="book_description")
def get_book_description(book):
"""use the work's text if the book doesn't have it"""
| {"golden_diff": "diff --git a/bookwyrm/templatetags/book_display_tags.py b/bookwyrm/templatetags/book_display_tags.py\n--- a/bookwyrm/templatetags/book_display_tags.py\n+++ b/bookwyrm/templatetags/book_display_tags.py\n@@ -1,10 +1,17 @@\n \"\"\" template filters \"\"\"\n from django import template\n+from bookwyrm import models\n \n \n register = template.Library()\n \n \[email protected](name=\"review_count\")\n+def get_review_count(book):\n+ \"\"\"how many reviews?\"\"\"\n+ return models.Review.objects.filter(deleted=False, book=book).count()\n+\n+\n @register.filter(name=\"book_description\")\n def get_book_description(book):\n \"\"\"use the work's text if the book doesn't have it\"\"\"\n", "issue": "Add review count to book search listing to concentrate reviews\nOften when I'm searching for a book to e.g. mark as having started reading, or to figure out if any other wyrms have reviewed it, I'll have more than one search result. \r\n\r\n\r\n_two results for wayfarer no 2_\r\n\r\nI typically don't really care so much about reviewing a given edition (I read a lot of non-scholarly ebooks). So instead of finding a particular edition, I want to find the one that has been reviewed by people I follow & whose judgement I trust. Similarly, I'd want to contribute _my_ review to that growing pile of context around a given book.\r\n\r\nTo aid this, I suggest adding some light information markers to the search results. # of reviews would be one concrete suggestions, another would be to display which ones people I'm following have reviewed. Basically use whatever makes sense from a fast query perspective imo :)\r\n\r\nThanks again for bookwyrm! It's a delightful space and I've found _so_ many books over the soon-to-be 2 years since I joined!! u rok\n", "before_files": [{"content": "\"\"\" template filters \"\"\"\nfrom django import template\n\n\nregister = template.Library()\n\n\[email protected](name=\"book_description\")\ndef get_book_description(book):\n \"\"\"use the work's text if the book doesn't have it\"\"\"\n if book.description:\n return book.description\n if book.parent_work:\n # this shoud always be true\n return book.parent_work.description\n return None\n\n\[email protected]_tag(takes_context=False)\ndef get_book_file_links(book):\n \"\"\"links for a book\"\"\"\n return book.file_links.filter(domain__status=\"approved\")\n\n\[email protected](name=\"author_edition\")\ndef get_author_edition(book, author):\n \"\"\"default edition for a book on the author page\"\"\"\n return book.author_edition(author)\n", "path": "bookwyrm/templatetags/book_display_tags.py"}]} | 1,050 | 170 |
gh_patches_debug_11492 | rasdani/github-patches | git_diff | cobbler__cobbler-3607 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Backport] SafeConfigParser removal
### Original feature issue
Issue #3551 PR #3552
### Target release
- [x] release33
- [ ] release32
- [ ] release30
### Reason
This is needed for Fedora
</issue>
<code>
[start of cobbler/modules/authorization/configfile.py]
1 """
2 Authorization module that allow users listed in
3 /etc/cobbler/users.conf to be permitted to access resources.
4 For instance, when using authz_ldap, you want to use authn_configfile,
5 not authz_allowall, which will most likely NOT do what you want.
6 """
7 # SPDX-License-Identifier: GPL-2.0-or-later
8 # SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others
9 # SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
10
11
12 from configparser import SafeConfigParser
13
14 import os
15 from typing import Dict
16
17 CONFIG_FILE = '/etc/cobbler/users.conf'
18
19
20 def register() -> str:
21 """
22 The mandatory Cobbler module registration hook.
23
24 :return: Always "authz".
25 """
26 return "authz"
27
28
29 def __parse_config() -> Dict[str, dict]:
30 """
31 Parse the the users.conf file.
32
33 :return: The data of the config file.
34 """
35 if not os.path.exists(CONFIG_FILE):
36 return {}
37 config = SafeConfigParser()
38 config.read(CONFIG_FILE)
39 alldata = {}
40 groups = config.sections()
41 for g in groups:
42 alldata[str(g)] = {}
43 opts = config.options(g)
44 for o in opts:
45 alldata[g][o] = 1
46 return alldata
47
48
49 def authorize(api_handle, user: str, resource: str, arg1=None, arg2=None) -> int:
50 """
51 Validate a user against a resource. All users in the file are permitted by this module.
52
53 :param api_handle: This parameter is not used currently.
54 :param user: The user to authorize.
55 :param resource: This parameter is not used currently.
56 :param arg1: This parameter is not used currently.
57 :param arg2: This parameter is not used currently.
58 :return: "0" if no authorized, "1" if authorized.
59 """
60 # FIXME: this must be modified to use the new ACL engine
61
62 data = __parse_config()
63 for g in data:
64 if user.lower() in data[g]:
65 return 1
66 return 0
67
[end of cobbler/modules/authorization/configfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/modules/authorization/configfile.py b/cobbler/modules/authorization/configfile.py
--- a/cobbler/modules/authorization/configfile.py
+++ b/cobbler/modules/authorization/configfile.py
@@ -9,7 +9,7 @@
# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
-from configparser import SafeConfigParser
+from configparser import ConfigParser
import os
from typing import Dict
@@ -34,7 +34,7 @@
"""
if not os.path.exists(CONFIG_FILE):
return {}
- config = SafeConfigParser()
+ config = ConfigParser()
config.read(CONFIG_FILE)
alldata = {}
groups = config.sections()
| {"golden_diff": "diff --git a/cobbler/modules/authorization/configfile.py b/cobbler/modules/authorization/configfile.py\n--- a/cobbler/modules/authorization/configfile.py\n+++ b/cobbler/modules/authorization/configfile.py\n@@ -9,7 +9,7 @@\n # SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n \n \n-from configparser import SafeConfigParser\n+from configparser import ConfigParser\n \n import os\n from typing import Dict\n@@ -34,7 +34,7 @@\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n- config = SafeConfigParser()\n+ config = ConfigParser()\n config.read(CONFIG_FILE)\n alldata = {}\n groups = config.sections()\n", "issue": "[Backport] SafeConfigParser removal\n### Original feature issue\r\n\r\nIssue #3551 PR #3552 \r\n\r\n### Target release\r\n\r\n- [x] release33\r\n- [ ] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nThis is needed for Fedora\n", "before_files": [{"content": "\"\"\"\nAuthorization module that allow users listed in\n/etc/cobbler/users.conf to be permitted to access resources.\nFor instance, when using authz_ldap, you want to use authn_configfile,\nnot authz_allowall, which will most likely NOT do what you want.\n\"\"\"\n# SPDX-License-Identifier: GPL-2.0-or-later\n# SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others\n# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n\n\nfrom configparser import SafeConfigParser\n\nimport os\nfrom typing import Dict\n\nCONFIG_FILE = '/etc/cobbler/users.conf'\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n\n :return: Always \"authz\".\n \"\"\"\n return \"authz\"\n\n\ndef __parse_config() -> Dict[str, dict]:\n \"\"\"\n Parse the the users.conf file.\n\n :return: The data of the config file.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n config = SafeConfigParser()\n config.read(CONFIG_FILE)\n alldata = {}\n groups = config.sections()\n for g in groups:\n alldata[str(g)] = {}\n opts = config.options(g)\n for o in opts:\n alldata[g][o] = 1\n return alldata\n\n\ndef authorize(api_handle, user: str, resource: str, arg1=None, arg2=None) -> int:\n \"\"\"\n Validate a user against a resource. All users in the file are permitted by this module.\n\n :param api_handle: This parameter is not used currently.\n :param user: The user to authorize.\n :param resource: This parameter is not used currently.\n :param arg1: This parameter is not used currently.\n :param arg2: This parameter is not used currently.\n :return: \"0\" if no authorized, \"1\" if authorized.\n \"\"\"\n # FIXME: this must be modified to use the new ACL engine\n\n data = __parse_config()\n for g in data:\n if user.lower() in data[g]:\n return 1\n return 0\n", "path": "cobbler/modules/authorization/configfile.py"}]} | 1,209 | 160 |
gh_patches_debug_2487 | rasdani/github-patches | git_diff | bokeh__bokeh-10308 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bokehjs' version has duplicated dev suffix
```sh
$ jq '.version' bokehjs/package.json
"2.2.0dev4-dev.4"
```
Should be `2.2.0-dev.4` instead.
</issue>
<code>
[start of release/config.py]
1 # -----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 # -----------------------------------------------------------------------------
7 """
8
9 """
10
11 # Standard library imports
12 import re
13 from typing import Dict, Optional, Tuple
14
15 # Bokeh imports
16 from .enums import VersionType
17 from .logger import LOG, Scrubber
18
19 __all__ = ("Config",)
20
21 # This excludes "local" build versions, e.g. 0.12.4+19.gf85560a
22 ANY_VERSION = re.compile(r"^((\d+)\.(\d+)\.(\d+))((dev|rc)(\d+))?$")
23
24 FULL_VERSION = re.compile(r"^(\d+\.\d+\.\d+)$")
25
26
27 class Config(object):
28 def __init__(self, version: str) -> None:
29 m = ANY_VERSION.match(version)
30 if not m:
31 raise ValueError(f"Invalid version for Bokeh build/release {version!r}")
32 groups = m.groups()
33
34 self.version: str = version
35
36 self.base_version: str = groups[0]
37 self.base_version_tuple: Tuple[str, ...] = tuple(groups[1:4])
38 self.ext: Optional[str] = groups[4]
39 self.ext_type: str = groups[5]
40 self.ext_number: str = groups[6]
41
42 self._secrets: Dict[str, str] = {}
43
44 def add_secret(self, name: str, secret: str) -> None:
45 """
46
47 """
48 if name in self._secrets:
49 raise RuntimeError()
50 LOG.add_scrubber(Scrubber(secret, name=name))
51 self._secrets[name] = secret
52
53 @property
54 def secrets(self) -> Dict[str, str]:
55 return self._secrets
56
57 @property
58 def prerelease(self) -> bool:
59 return self.version_type != VersionType.FULL
60
61 @property
62 def version_type(self) -> VersionType:
63 if "rc" in self.version:
64 return VersionType.RC
65 elif "dev" in self.version:
66 return VersionType.DEV
67 else:
68 return VersionType.FULL
69
70 @property
71 def js_version(self) -> str:
72 if self.ext is None:
73 return self.version
74 return f"{self.version}-{self.ext_type}.{self.ext_number}"
75
76 @property
77 def release_level(self) -> str:
78 major, minor = self.base_version_tuple[:2]
79 return f"{major}.{minor}"
80
81 @property
82 def staging_branch(self) -> str:
83 return f"staging-{self.version}"
84
85 @property
86 def base_branch(self) -> str:
87 return f"branch-{self.release_level}"
88
[end of release/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/release/config.py b/release/config.py
--- a/release/config.py
+++ b/release/config.py
@@ -71,7 +71,7 @@
def js_version(self) -> str:
if self.ext is None:
return self.version
- return f"{self.version}-{self.ext_type}.{self.ext_number}"
+ return f"{self.base_version}-{self.ext_type}.{self.ext_number}"
@property
def release_level(self) -> str:
| {"golden_diff": "diff --git a/release/config.py b/release/config.py\n--- a/release/config.py\n+++ b/release/config.py\n@@ -71,7 +71,7 @@\n def js_version(self) -> str:\n if self.ext is None:\n return self.version\n- return f\"{self.version}-{self.ext_type}.{self.ext_number}\"\n+ return f\"{self.base_version}-{self.ext_type}.{self.ext_number}\"\n \n @property\n def release_level(self) -> str:\n", "issue": "bokehjs' version has duplicated dev suffix\n```sh\r\n$ jq '.version' bokehjs/package.json\r\n\"2.2.0dev4-dev.4\"\r\n```\r\nShould be `2.2.0-dev.4` instead.\n", "before_files": [{"content": "# -----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n# -----------------------------------------------------------------------------\n\"\"\"\n\n\"\"\"\n\n# Standard library imports\nimport re\nfrom typing import Dict, Optional, Tuple\n\n# Bokeh imports\nfrom .enums import VersionType\nfrom .logger import LOG, Scrubber\n\n__all__ = (\"Config\",)\n\n# This excludes \"local\" build versions, e.g. 0.12.4+19.gf85560a\nANY_VERSION = re.compile(r\"^((\\d+)\\.(\\d+)\\.(\\d+))((dev|rc)(\\d+))?$\")\n\nFULL_VERSION = re.compile(r\"^(\\d+\\.\\d+\\.\\d+)$\")\n\n\nclass Config(object):\n def __init__(self, version: str) -> None:\n m = ANY_VERSION.match(version)\n if not m:\n raise ValueError(f\"Invalid version for Bokeh build/release {version!r}\")\n groups = m.groups()\n\n self.version: str = version\n\n self.base_version: str = groups[0]\n self.base_version_tuple: Tuple[str, ...] = tuple(groups[1:4])\n self.ext: Optional[str] = groups[4]\n self.ext_type: str = groups[5]\n self.ext_number: str = groups[6]\n\n self._secrets: Dict[str, str] = {}\n\n def add_secret(self, name: str, secret: str) -> None:\n \"\"\"\n\n \"\"\"\n if name in self._secrets:\n raise RuntimeError()\n LOG.add_scrubber(Scrubber(secret, name=name))\n self._secrets[name] = secret\n\n @property\n def secrets(self) -> Dict[str, str]:\n return self._secrets\n\n @property\n def prerelease(self) -> bool:\n return self.version_type != VersionType.FULL\n\n @property\n def version_type(self) -> VersionType:\n if \"rc\" in self.version:\n return VersionType.RC\n elif \"dev\" in self.version:\n return VersionType.DEV\n else:\n return VersionType.FULL\n\n @property\n def js_version(self) -> str:\n if self.ext is None:\n return self.version\n return f\"{self.version}-{self.ext_type}.{self.ext_number}\"\n\n @property\n def release_level(self) -> str:\n major, minor = self.base_version_tuple[:2]\n return f\"{major}.{minor}\"\n\n @property\n def staging_branch(self) -> str:\n return f\"staging-{self.version}\"\n\n @property\n def base_branch(self) -> str:\n return f\"branch-{self.release_level}\"\n", "path": "release/config.py"}]} | 1,369 | 105 |
gh_patches_debug_2660 | rasdani/github-patches | git_diff | techmatters__terraso-backend-81 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add photo field to the User model
## Description
The user profile photo might be automatically fetched from the third-party account system (Google or Apple), or it can also be uploaded from by the user. Since the file itself might be stored on an external storage service, this field will be used to store the location of the file.
In this issue, it's important to consider the flow front-end → back-end for photo upload.
## Suggested subtasks
- [ ] Design the overall flow to upload photo considering front-end → back-end flow
- [ ] Add the new field on model with proper support to the external storage service (upload) and update DB migrations
- [ ] Implement upload feature to update photo
- [ ] Add support to present the proper photo URL from external services
- [ ] Add the new photo field on user API
This issue depends on:
- #21
</issue>
<code>
[start of terraso_backend/apps/graphql/schema/users.py]
1 import graphene
2 from graphene import relay
3 from graphene_django import DjangoObjectType
4
5 from apps.core.models import User
6
7 from .commons import BaseDeleteMutation
8
9
10 class UserNode(DjangoObjectType):
11 id = graphene.ID(source="pk", required=True)
12
13 class Meta:
14 model = User
15 filter_fields = {
16 "email": ["exact", "icontains"],
17 "first_name": ["icontains"],
18 "last_name": ["icontains"],
19 }
20 fields = ("email", "first_name", "last_name", "memberships")
21 interfaces = (relay.Node,)
22
23
24 class UserAddMutation(relay.ClientIDMutation):
25 user = graphene.Field(UserNode)
26
27 class Input:
28 first_name = graphene.String()
29 last_name = graphene.String()
30 email = graphene.String(required=True)
31 password = graphene.String(required=True)
32
33 @classmethod
34 def mutate_and_get_payload(cls, root, info, **kwargs):
35 user = User.objects.create_user(
36 kwargs.pop("email"), password=kwargs.pop("password"), **kwargs
37 )
38
39 return cls(user=user)
40
41
42 class UserUpdateMutation(relay.ClientIDMutation):
43 user = graphene.Field(UserNode)
44
45 model_class = User
46
47 class Input:
48 id = graphene.ID(required=True)
49 first_name = graphene.String()
50 last_name = graphene.String()
51 email = graphene.String()
52 password = graphene.String()
53
54 @classmethod
55 def mutate_and_get_payload(cls, root, info, **kwargs):
56 _id = kwargs.pop("id")
57
58 user = User.objects.get(pk=_id)
59 new_password = kwargs.pop("password", None)
60
61 if new_password:
62 user.set_password(new_password)
63
64 for attr, value in kwargs.items():
65 setattr(user, attr, value)
66
67 user.save()
68
69 return cls(user=user)
70
71
72 class UserDeleteMutation(BaseDeleteMutation):
73 user = graphene.Field(UserNode)
74 model_class = User
75
76 class Input:
77 id = graphene.ID()
78
[end of terraso_backend/apps/graphql/schema/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/terraso_backend/apps/graphql/schema/users.py b/terraso_backend/apps/graphql/schema/users.py
--- a/terraso_backend/apps/graphql/schema/users.py
+++ b/terraso_backend/apps/graphql/schema/users.py
@@ -17,7 +17,7 @@
"first_name": ["icontains"],
"last_name": ["icontains"],
}
- fields = ("email", "first_name", "last_name", "memberships")
+ fields = ("email", "first_name", "last_name", "profile_image", "memberships")
interfaces = (relay.Node,)
| {"golden_diff": "diff --git a/terraso_backend/apps/graphql/schema/users.py b/terraso_backend/apps/graphql/schema/users.py\n--- a/terraso_backend/apps/graphql/schema/users.py\n+++ b/terraso_backend/apps/graphql/schema/users.py\n@@ -17,7 +17,7 @@\n \"first_name\": [\"icontains\"],\n \"last_name\": [\"icontains\"],\n }\n- fields = (\"email\", \"first_name\", \"last_name\", \"memberships\")\n+ fields = (\"email\", \"first_name\", \"last_name\", \"profile_image\", \"memberships\")\n interfaces = (relay.Node,)\n", "issue": "Add photo field to the User model\n## Description\r\nThe user profile photo might be automatically fetched from the third-party account system (Google or Apple), or it can also be uploaded from by the user. Since the file itself might be stored on an external storage service, this field will be used to store the location of the file.\r\n\r\nIn this issue, it's important to consider the flow front-end \u2192 back-end for photo upload.\r\n\r\n## Suggested subtasks\r\n- [ ] Design the overall flow to upload photo considering front-end \u2192 back-end flow\r\n- [ ] Add the new field on model with proper support to the external storage service (upload) and update DB migrations\r\n- [ ] Implement upload feature to update photo\r\n- [ ] Add support to present the proper photo URL from external services\r\n- [ ] Add the new photo field on user API\r\n\r\nThis issue depends on:\r\n- #21 \n", "before_files": [{"content": "import graphene\nfrom graphene import relay\nfrom graphene_django import DjangoObjectType\n\nfrom apps.core.models import User\n\nfrom .commons import BaseDeleteMutation\n\n\nclass UserNode(DjangoObjectType):\n id = graphene.ID(source=\"pk\", required=True)\n\n class Meta:\n model = User\n filter_fields = {\n \"email\": [\"exact\", \"icontains\"],\n \"first_name\": [\"icontains\"],\n \"last_name\": [\"icontains\"],\n }\n fields = (\"email\", \"first_name\", \"last_name\", \"memberships\")\n interfaces = (relay.Node,)\n\n\nclass UserAddMutation(relay.ClientIDMutation):\n user = graphene.Field(UserNode)\n\n class Input:\n first_name = graphene.String()\n last_name = graphene.String()\n email = graphene.String(required=True)\n password = graphene.String(required=True)\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, **kwargs):\n user = User.objects.create_user(\n kwargs.pop(\"email\"), password=kwargs.pop(\"password\"), **kwargs\n )\n\n return cls(user=user)\n\n\nclass UserUpdateMutation(relay.ClientIDMutation):\n user = graphene.Field(UserNode)\n\n model_class = User\n\n class Input:\n id = graphene.ID(required=True)\n first_name = graphene.String()\n last_name = graphene.String()\n email = graphene.String()\n password = graphene.String()\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, **kwargs):\n _id = kwargs.pop(\"id\")\n\n user = User.objects.get(pk=_id)\n new_password = kwargs.pop(\"password\", None)\n\n if new_password:\n user.set_password(new_password)\n\n for attr, value in kwargs.items():\n setattr(user, attr, value)\n\n user.save()\n\n return cls(user=user)\n\n\nclass UserDeleteMutation(BaseDeleteMutation):\n user = graphene.Field(UserNode)\n model_class = User\n\n class Input:\n id = graphene.ID()\n", "path": "terraso_backend/apps/graphql/schema/users.py"}]} | 1,297 | 132 |
gh_patches_debug_12406 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-57 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeEncodeError when the prompt string contains non ascii characters.
The call prompt fails, if the template settings contains non-ASCII characters.
cookiecutter.json example:
```
{
"full_name": "Jindřich Smitka",
...
}
```
</issue>
<code>
[start of cookiecutter/prompt.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.prompt
6 ---------------------
7
8 Functions for prompting the user for project info.
9 """
10
11 import sys
12
13 PY3 = sys.version > '3'
14 if PY3:
15 iteritems = lambda d: iter(d.items())
16 else:
17 input = raw_input
18 iteritems = lambda d: d.iteritems()
19
20 def prompt_for_config(context):
21 """
22 Prompts the user to enter new config, using context as a source for the
23 field names and sample values.
24 """
25 cookiecutter_dict = {}
26
27 for key, val in iteritems(context['cookiecutter']):
28 prompt = "{0} (default is \"{1}\")? ".format(key, val)
29 new_val = input(prompt)
30 new_val = new_val.strip()
31
32 if new_val == '':
33 new_val = val
34
35 if PY3:
36 cookiecutter_dict[key] = new_val
37 else:
38 cookiecutter_dict[key] = new_val.decode('utf-8')
39 return cookiecutter_dict
40
41
42 def query_yes_no(question, default="yes"):
43 """
44 Ask a yes/no question via `raw_input()` and return their answer.
45
46 :param question: A string that is presented to the user.
47 :param default: The presumed answer if the user just hits <Enter>.
48 It must be "yes" (the default), "no" or None (meaning
49 an answer is required of the user).
50
51 The "answer" return value is one of "yes" or "no".
52
53 Adapted from
54 http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
55 http://code.activestate.com/recipes/577058/
56
57 """
58 valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
59 if default is None:
60 prompt = " [y/n] "
61 elif default == "yes":
62 prompt = " [Y/n] "
63 elif default == "no":
64 prompt = " [y/N] "
65 else:
66 raise ValueError("invalid default answer: '%s'" % default)
67
68 while True:
69 sys.stdout.write(question + prompt)
70 choice = input().lower()
71
72 if default is not None and choice == '':
73 return valid[default]
74 elif choice in valid:
75 return valid[choice]
76 else:
77 sys.stdout.write("Please respond with 'yes' or 'no' "
78 "(or 'y' or 'n').\n")
79
[end of cookiecutter/prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -23,15 +23,15 @@
field names and sample values.
"""
cookiecutter_dict = {}
-
+
for key, val in iteritems(context['cookiecutter']):
- prompt = "{0} (default is \"{1}\")? ".format(key, val)
- new_val = input(prompt)
+ prompt = u"{0} (default is \"{1}\")? ".format(key, val)
+ new_val = input(prompt.encode('utf-8'))
new_val = new_val.strip()
if new_val == '':
new_val = val
-
+
if PY3:
cookiecutter_dict[key] = new_val
else:
| {"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -23,15 +23,15 @@\n field names and sample values.\n \"\"\"\n cookiecutter_dict = {}\n- \n+\n for key, val in iteritems(context['cookiecutter']):\n- prompt = \"{0} (default is \\\"{1}\\\")? \".format(key, val)\n- new_val = input(prompt)\n+ prompt = u\"{0} (default is \\\"{1}\\\")? \".format(key, val)\n+ new_val = input(prompt.encode('utf-8'))\n new_val = new_val.strip()\n \n if new_val == '':\n new_val = val\n- \n+\n if PY3:\n cookiecutter_dict[key] = new_val\n else:\n", "issue": "UnicodeEncodeError when the prompt string contains non ascii characters.\nThe call prompt fails, if the template settings contains non-ASCII characters.\n\ncookiecutter.json example:\n\n```\n{\n \"full_name\": \"Jind\u0159ich Smitka\",\n ...\n}\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nimport sys\n\nPY3 = sys.version > '3'\nif PY3:\n iteritems = lambda d: iter(d.items())\nelse:\n input = raw_input\n iteritems = lambda d: d.iteritems()\n\ndef prompt_for_config(context):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n \"\"\"\n cookiecutter_dict = {}\n \n for key, val in iteritems(context['cookiecutter']):\n prompt = \"{0} (default is \\\"{1}\\\")? \".format(key, val)\n new_val = input(prompt)\n new_val = new_val.strip()\n\n if new_val == '':\n new_val = val\n \n if PY3:\n cookiecutter_dict[key] = new_val\n else:\n cookiecutter_dict[key] = new_val.decode('utf-8')\n return cookiecutter_dict\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"\n Ask a yes/no question via `raw_input()` and return their answer.\n\n :param question: A string that is presented to the user.\n :param default: The presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n\n Adapted from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input\n http://code.activestate.com/recipes/577058/\n\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n", "path": "cookiecutter/prompt.py"}]} | 1,301 | 195 |
gh_patches_debug_25360 | rasdani/github-patches | git_diff | ansible__ansible-lint-436 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
false positive on apt_key with data field
# Issue Type
- Bug report
# Ansible and Ansible Lint details
```
$ ansible --version
ansible 2.7.4
config file = /home/lae/.ansible.cfg
configured module search path = ['/home/lae/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.7.1 (default, Oct 22 2018, 10:41:28) [GCC 8.2.1 20180831]
$ ansible-lint --version
ansible-lint 4.0.0a1
```
- ansible installation method: OS
- ansible-lint installation method: pip
# Desired Behaviour
Rule 405 is meant for remote connections but using `apt_key` with the `data` field doesn't require network connectivity (not sure if there's an appropriate network lookup, but if so, that would be an exception).
# Actual Behaviour
```yaml
- name: Trust Proxmox' packaging key
apt_key:
data: "{{ lookup('file', pve_release_key) }}"
id: "{{ pve_release_key_id }}"
state: present
```
The above results in the following.
```
[405] Remote package tasks should have a retry
/home/lae/src/ansible-role-proxmox/tasks/main.yml:47
Task/Handler: Trust Proxmox' packaging key
```
</issue>
<code>
[start of lib/ansiblelint/rules/PackageHasRetryRule.py]
1 # Copyright (c) 2016, Will Thames and contributors
2 # Copyright (c) 2018, Ansible Project
3
4 from ansiblelint import AnsibleLintRule
5
6
7 class PackageHasRetryRule(AnsibleLintRule):
8 id = '405'
9 shortdesc = 'Remote package tasks should have a retry'
10 description = (
11 'Package operations are unreliable as they require '
12 'network communication and the availability of remote '
13 'servers. To mitigate the potential problems, retries '
14 'should be used via '
15 '``register: my_result`` and ``until: my_result is succeeded``'
16 )
17 severity = 'LOW'
18 tags = ['module', 'reliability']
19 version_added = 'v4.0.0'
20
21 # module list generated with:
22 # find lib/ansible/modules/packaging/ -type f -printf '%f\n' \
23 # | sort | awk -F '/' \
24 # '/__|dpkg|_repo|_facts|_sub|_chan/{next} {split($NF, words, ".");
25 # print "\""words[1]"\","}'
26 _package_modules = [
27 "apk",
28 "apt_key",
29 "apt",
30 "apt_rpm",
31 "bower",
32 "bundler",
33 "composer",
34 "cpanm",
35 "dnf",
36 "easy_install",
37 "flatpak",
38 "flatpak_remote",
39 "gem",
40 "homebrew_cask",
41 "homebrew",
42 "homebrew_tap",
43 "layman",
44 "macports",
45 "maven_artifact",
46 "npm",
47 "openbsd_pkg",
48 "opkg",
49 "package",
50 "pacman",
51 "pear",
52 "pip",
53 "pkg5_publisher",
54 "pkg5",
55 "pkgin",
56 "pkgng",
57 "pkgutil",
58 "portage",
59 "portinstall",
60 "rhn_register",
61 "rpm_key",
62 "slackpkg",
63 "snap",
64 "sorcery",
65 "svr4pkg",
66 "swdepot",
67 "swupd",
68 "urpmi",
69 "xbps",
70 "yarn",
71 "yum",
72 "zypper",
73 ]
74
75 _module_ignore_states = [
76 "absent",
77 ]
78
79 _package_name_keys = [
80 "name",
81 "package",
82 "pkg",
83 "deb",
84 ]
85
86 # attempt to find package name
87 def get_package_name(self, action):
88 for key in self._package_name_keys:
89 found_package_name = action.get(key)
90 if found_package_name:
91 break
92 return found_package_name
93
94 def matchtask(self, file, task):
95 module = task["action"]["__ansible_module__"]
96
97 if module not in self._package_modules:
98 return False
99
100 is_task_retryable = 'until' in task
101 if is_task_retryable:
102 return False
103
104 is_state_whitelisted = task['action'].get('state') in self._module_ignore_states
105 if is_state_whitelisted:
106 return False
107
108 found_package_name = self.get_package_name(task['action'])
109 if not found_package_name:
110 return True
111
112 is_package_file = '.' in found_package_name
113 is_package_html = '://' in found_package_name
114 is_local_package_file = is_package_file and not is_package_html
115 if is_local_package_file:
116 return False
117
118 return True
119
[end of lib/ansiblelint/rules/PackageHasRetryRule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansiblelint/rules/PackageHasRetryRule.py b/lib/ansiblelint/rules/PackageHasRetryRule.py
--- a/lib/ansiblelint/rules/PackageHasRetryRule.py
+++ b/lib/ansiblelint/rules/PackageHasRetryRule.py
@@ -76,19 +76,24 @@
"absent",
]
+ _module_ignore_parameters = [
+ "data",
+ ]
+
_package_name_keys = [
"name",
"package",
"pkg",
"deb",
+ "key",
]
- # attempt to find package name
def get_package_name(self, action):
+ """Attempt to find package name."""
for key in self._package_name_keys:
found_package_name = action.get(key)
if found_package_name:
- break
+ return found_package_name
return found_package_name
def matchtask(self, file, task):
@@ -105,6 +110,12 @@
if is_state_whitelisted:
return False
+ has_whitelisted_parameter = (
+ set(self._module_ignore_parameters).intersection(set(task['action']))
+ )
+ if has_whitelisted_parameter:
+ return False
+
found_package_name = self.get_package_name(task['action'])
if not found_package_name:
return True
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/PackageHasRetryRule.py b/lib/ansiblelint/rules/PackageHasRetryRule.py\n--- a/lib/ansiblelint/rules/PackageHasRetryRule.py\n+++ b/lib/ansiblelint/rules/PackageHasRetryRule.py\n@@ -76,19 +76,24 @@\n \"absent\",\n ]\n \n+ _module_ignore_parameters = [\n+ \"data\",\n+ ]\n+\n _package_name_keys = [\n \"name\",\n \"package\",\n \"pkg\",\n \"deb\",\n+ \"key\",\n ]\n \n- # attempt to find package name\n def get_package_name(self, action):\n+ \"\"\"Attempt to find package name.\"\"\"\n for key in self._package_name_keys:\n found_package_name = action.get(key)\n if found_package_name:\n- break\n+ return found_package_name\n return found_package_name\n \n def matchtask(self, file, task):\n@@ -105,6 +110,12 @@\n if is_state_whitelisted:\n return False\n \n+ has_whitelisted_parameter = (\n+ set(self._module_ignore_parameters).intersection(set(task['action']))\n+ )\n+ if has_whitelisted_parameter:\n+ return False\n+\n found_package_name = self.get_package_name(task['action'])\n if not found_package_name:\n return True\n", "issue": "false positive on apt_key with data field\n# Issue Type\r\n- Bug report\r\n\r\n# Ansible and Ansible Lint details\r\n\r\n```\r\n$ ansible --version\r\nansible 2.7.4\r\n config file = /home/lae/.ansible.cfg\r\n configured module search path = ['/home/lae/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 3.7.1 (default, Oct 22 2018, 10:41:28) [GCC 8.2.1 20180831]\r\n$ ansible-lint --version\r\nansible-lint 4.0.0a1\r\n```\r\n\r\n- ansible installation method: OS\r\n- ansible-lint installation method: pip\r\n\r\n# Desired Behaviour\r\n\r\nRule 405 is meant for remote connections but using `apt_key` with the `data` field doesn't require network connectivity (not sure if there's an appropriate network lookup, but if so, that would be an exception).\r\n\r\n# Actual Behaviour\r\n\r\n```yaml\r\n- name: Trust Proxmox' packaging key\r\n apt_key:\r\n data: \"{{ lookup('file', pve_release_key) }}\"\r\n id: \"{{ pve_release_key_id }}\"\r\n state: present\r\n```\r\n\r\nThe above results in the following.\r\n\r\n```\r\n[405] Remote package tasks should have a retry\r\n/home/lae/src/ansible-role-proxmox/tasks/main.yml:47\r\nTask/Handler: Trust Proxmox' packaging key\r\n```\n", "before_files": [{"content": "# Copyright (c) 2016, Will Thames and contributors\n# Copyright (c) 2018, Ansible Project\n\nfrom ansiblelint import AnsibleLintRule\n\n\nclass PackageHasRetryRule(AnsibleLintRule):\n id = '405'\n shortdesc = 'Remote package tasks should have a retry'\n description = (\n 'Package operations are unreliable as they require '\n 'network communication and the availability of remote '\n 'servers. 
To mitigate the potential problems, retries '\n 'should be used via '\n '``register: my_result`` and ``until: my_result is succeeded``'\n )\n severity = 'LOW'\n tags = ['module', 'reliability']\n version_added = 'v4.0.0'\n\n # module list generated with:\n # find lib/ansible/modules/packaging/ -type f -printf '%f\\n' \\\n # | sort | awk -F '/' \\\n # '/__|dpkg|_repo|_facts|_sub|_chan/{next} {split($NF, words, \".\");\n # print \"\\\"\"words[1]\"\\\",\"}'\n _package_modules = [\n \"apk\",\n \"apt_key\",\n \"apt\",\n \"apt_rpm\",\n \"bower\",\n \"bundler\",\n \"composer\",\n \"cpanm\",\n \"dnf\",\n \"easy_install\",\n \"flatpak\",\n \"flatpak_remote\",\n \"gem\",\n \"homebrew_cask\",\n \"homebrew\",\n \"homebrew_tap\",\n \"layman\",\n \"macports\",\n \"maven_artifact\",\n \"npm\",\n \"openbsd_pkg\",\n \"opkg\",\n \"package\",\n \"pacman\",\n \"pear\",\n \"pip\",\n \"pkg5_publisher\",\n \"pkg5\",\n \"pkgin\",\n \"pkgng\",\n \"pkgutil\",\n \"portage\",\n \"portinstall\",\n \"rhn_register\",\n \"rpm_key\",\n \"slackpkg\",\n \"snap\",\n \"sorcery\",\n \"svr4pkg\",\n \"swdepot\",\n \"swupd\",\n \"urpmi\",\n \"xbps\",\n \"yarn\",\n \"yum\",\n \"zypper\",\n ]\n\n _module_ignore_states = [\n \"absent\",\n ]\n\n _package_name_keys = [\n \"name\",\n \"package\",\n \"pkg\",\n \"deb\",\n ]\n\n # attempt to find package name\n def get_package_name(self, action):\n for key in self._package_name_keys:\n found_package_name = action.get(key)\n if found_package_name:\n break\n return found_package_name\n\n def matchtask(self, file, task):\n module = task[\"action\"][\"__ansible_module__\"]\n\n if module not in self._package_modules:\n return False\n\n is_task_retryable = 'until' in task\n if is_task_retryable:\n return False\n\n is_state_whitelisted = task['action'].get('state') in self._module_ignore_states\n if is_state_whitelisted:\n return False\n\n found_package_name = self.get_package_name(task['action'])\n if not found_package_name:\n return True\n\n is_package_file = '.' in found_package_name\n is_package_html = '://' in found_package_name\n is_local_package_file = is_package_file and not is_package_html\n if is_local_package_file:\n return False\n\n return True\n", "path": "lib/ansiblelint/rules/PackageHasRetryRule.py"}]} | 1,918 | 299 |
gh_patches_debug_15277 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3316 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider verizon is broken
During the global build at 2021-07-14-14-42-22, spider **verizon** failed with **4611 features** and **1645 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/verizon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/verizon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/verizon.geojson))
</issue>
<code>
[start of locations/spiders/verizon.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7 from locations.hours import OpeningHours
8
9
10 class VerizonSpider(scrapy.Spider):
11 name = "verizon"
12 item_attributes = { 'brand': "Verizon" }
13 allowed_domains = ["www.verizonwireless.com"]
14 start_urls = (
15 'https://www.verizonwireless.com/sitemap_storelocator.xml',
16 )
17 custom_settings = {
18 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
19 }
20
21 def parse_hours(self, store_hours):
22 opening_hours = OpeningHours()
23 for store_day in store_hours['dayOfWeek']:
24 if store_day.lower() == 'closed':
25 continue
26 else:
27 day, open_close = store_day.split('-')
28 day = day.strip()[:2]
29 open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])
30 if open_time.split(' ')[0].lower() == 'closed':
31 continue
32 elif open_time.split(' ')[0].lower() == 'null':
33 continue
34 else:
35 if open_close.strip().count(' ') == 1:
36 open_time, close_time = open_time.split(' ')
37 opening_hours.add_range(day=day,
38 open_time=open_time,
39 close_time=close_time,
40 time_format='%I:%M%p'
41 )
42 elif open_close.strip().count(' ') == 2:
43 open_time = open_close.strip().split(' ')[0]
44 close_time = ''.join(open_close.strip().split(' ')[1:3])
45 opening_hours.add_range(day=day,
46 open_time=open_time,
47 close_time=close_time,
48 time_format='%I:%M%p'
49 )
50 else:
51 close_time = open_close.strip().split(' ', 2)[2]
52 opening_hours.add_range(day=day,
53 open_time=open_time,
54 close_time=close_time,
55 time_format='%I:%M %p'
56 )
57
58 return opening_hours.as_opening_hours()
59
60 def parse(self, response):
61 response.selector.remove_namespaces()
62 urls = response.xpath('//url/loc/text()').extract()
63
64 for url in urls:
65 yield scrapy.Request(url, callback=self.parse_store)
66
67 def parse_store(self, response):
68 script = response.xpath('//script[contains(text(), "storeJSON")]/text()').extract_first()
69 store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))
70
71 properties = {
72 'name': store_data["storeName"],
73 'ref': store_data["storeNumber"],
74 'addr_full': store_data["address"]["streetAddress"],
75 'city': store_data["address"]["addressLocality"],
76 'state': store_data["address"]["addressRegion"],
77 'postcode': store_data["address"]["postalCode"],
78 'country': store_data["address"]["addressCountry"],
79 'phone': store_data.get("telephone"),
80 'website': store_data.get("url") or response.url,
81 'lat': store_data["geo"].get("latitude"),
82 'lon': store_data["geo"].get("longitude"),
83 'extras': {
84 'business_name': store_data.get('posStoreDetail').get('businessName'),
85 'retail_id': store_data.get('retailId'),
86 'store_type': store_data.get('posStoreDetail').get('storeType'),
87 'store_type_note': store_data.get('typeOfStore')
88 }
89 }
90
91 hours = self.parse_hours(store_data.get("openingHoursSpecification"))
92 if hours:
93 properties["opening_hours"] = hours
94
95 yield GeojsonPointItem(**properties)
96
[end of locations/spiders/verizon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/verizon.py b/locations/spiders/verizon.py
--- a/locations/spiders/verizon.py
+++ b/locations/spiders/verizon.py
@@ -62,10 +62,15 @@
urls = response.xpath('//url/loc/text()').extract()
for url in urls:
- yield scrapy.Request(url, callback=self.parse_store)
+ if url.split('/')[-2].split('-')[-1].isdigit():
+ # Store pages have a number at the end of their URL
+ yield scrapy.Request(url, callback=self.parse_store)
def parse_store(self, response):
script = response.xpath('//script[contains(text(), "storeJSON")]/text()').extract_first()
+ if not script:
+ return
+
store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))
properties = {
| {"golden_diff": "diff --git a/locations/spiders/verizon.py b/locations/spiders/verizon.py\n--- a/locations/spiders/verizon.py\n+++ b/locations/spiders/verizon.py\n@@ -62,10 +62,15 @@\n urls = response.xpath('//url/loc/text()').extract()\n \n for url in urls:\n- yield scrapy.Request(url, callback=self.parse_store)\n+ if url.split('/')[-2].split('-')[-1].isdigit():\n+ # Store pages have a number at the end of their URL\n+ yield scrapy.Request(url, callback=self.parse_store)\n \n def parse_store(self, response):\n script = response.xpath('//script[contains(text(), \"storeJSON\")]/text()').extract_first()\n+ if not script:\n+ return\n+\n store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))\n \n properties = {\n", "issue": "Spider verizon is broken\nDuring the global build at 2021-07-14-14-42-22, spider **verizon** failed with **4611 features** and **1645 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/verizon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/verizon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/verizon.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass VerizonSpider(scrapy.Spider):\n name = \"verizon\"\n item_attributes = { 'brand': \"Verizon\" }\n allowed_domains = [\"www.verizonwireless.com\"]\n start_urls = (\n 'https://www.verizonwireless.com/sitemap_storelocator.xml',\n )\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n for store_day in store_hours['dayOfWeek']:\n if store_day.lower() == 'closed':\n continue\n else:\n day, open_close = store_day.split('-')\n day = day.strip()[:2]\n open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])\n if open_time.split(' ')[0].lower() == 'closed':\n continue\n elif open_time.split(' ')[0].lower() == 'null':\n continue\n else:\n if open_close.strip().count(' ') == 1:\n open_time, close_time = open_time.split(' ')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p'\n )\n elif open_close.strip().count(' ') == 2:\n open_time = open_close.strip().split(' ')[0]\n close_time = ''.join(open_close.strip().split(' ')[1:3])\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p'\n )\n else:\n close_time = open_close.strip().split(' ', 2)[2]\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M %p'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n\n for url in urls:\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n script = response.xpath('//script[contains(text(), \"storeJSON\")]/text()').extract_first()\n store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))\n\n properties = {\n 'name': store_data[\"storeName\"],\n 'ref': store_data[\"storeNumber\"],\n 'addr_full': store_data[\"address\"][\"streetAddress\"],\n 'city': store_data[\"address\"][\"addressLocality\"],\n 'state': 
store_data[\"address\"][\"addressRegion\"],\n 'postcode': store_data[\"address\"][\"postalCode\"],\n 'country': store_data[\"address\"][\"addressCountry\"],\n 'phone': store_data.get(\"telephone\"),\n 'website': store_data.get(\"url\") or response.url,\n 'lat': store_data[\"geo\"].get(\"latitude\"),\n 'lon': store_data[\"geo\"].get(\"longitude\"),\n 'extras': {\n 'business_name': store_data.get('posStoreDetail').get('businessName'),\n 'retail_id': store_data.get('retailId'),\n 'store_type': store_data.get('posStoreDetail').get('storeType'),\n 'store_type_note': store_data.get('typeOfStore')\n }\n }\n\n hours = self.parse_hours(store_data.get(\"openingHoursSpecification\"))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/verizon.py"}]} | 1,749 | 205 |