problem_id stringlengths 18 22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13 58 | prompt stringlengths 1.71k 9.01k | golden_diff stringlengths 151 4.94k | verification_info stringlengths 465 11.3k | num_tokens_prompt int64 557 2.05k | num_tokens_diff int64 48 1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_825 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1905 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bump boto3 to the latest version
**Describe the bug**
I am trying to installing checkov and the latest boto3 version within an environment. However, checkov depends on version 1.17.*
Could you please bump boto3 to the latest version?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2>=0.3.24",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3==1.17.*",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "detect-secrets",
57 "policyuniverse",
58 "typing-extensions",
59 "cachetools",
60 "cyclonedx-python-lib==0.6.2"
61 ],
62 license="Apache License 2.0",
63 name="checkov",
64 version=version,
65 python_requires=">=3.7",
66 description="Infrastructure as code static analysis",
67 author="bridgecrew",
68 author_email="[email protected]",
69 url="https://github.com/bridgecrewio/checkov",
70 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
71 include_package_data=True,
72 package_dir={
73 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks"
74 },
75 package_data={
76 "checkov.terraform.checks.graph_checks": [
77 "aws/*.yaml",
78 "gcp/*.yaml",
79 "azure/*.yaml",
80 ]
81 },
82 scripts=["bin/checkov", "bin/checkov.cmd"],
83 long_description=long_description,
84 long_description_content_type="text/markdown",
85 classifiers=[
86 "Environment :: Console",
87 "Intended Audience :: Developers",
88 "Intended Audience :: System Administrators",
89 "Programming Language :: Python :: 3.7",
90 "Programming Language :: Python :: 3.8",
91 "Programming Language :: Python :: 3.9",
92 "Topic :: Security",
93 "Topic :: Software Development :: Build Tools",
94 ],
95 )
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@
"junit-xml>=1.9",
"dpath>=1.5.0,<2",
"pyyaml>=5.4.1",
- "boto3==1.17.*",
+ "boto3>=1.17",
"GitPython",
"jmespath",
"tqdm",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n- \"boto3==1.17.*\",\n+ \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n", "issue": "Bump boto3 to the latest version\n**Describe the bug**\r\nI am trying to installing checkov and the latest boto3 version within an environment. However, checkov depends on version 1.17.* \r\n\r\nCould you please bump boto3 to the latest version?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.*\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib==0.6.2\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}]} | 1,443 | 110 |
gh_patches_debug_16881 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-160 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Python 3.9
## 🚀 Feature
Add support for python 3.9. This shouldn't require any code changes, but the dependencies may not updating.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 #
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 #
5 # This source code is licensed under the MIT license found in the
6 # LICENSE file in the root directory of this source tree.
7
8 import distutils.util
9 import io
10
11 import setuptools
12
13 with open("VERSION") as f:
14 version = f.read().strip()
15 with open("README.md") as f:
16 # Force UTF-8 file encoding to support non-ascii characters in the readme.
17 with io.open("README.md", encoding="utf-8") as f:
18 long_description = f.read()
19 with open("compiler_gym/requirements.txt") as f:
20 requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
21
22 # When building a bdist_wheel we need to set the appropriate tags: this package
23 # includes compiled binaries, and does not include compiled python extensions.
24 try:
25 from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
26
27 class bdist_wheel(_bdist_wheel):
28 def finalize_options(self):
29 _bdist_wheel.finalize_options(self)
30 self.root_is_pure = False
31
32 def get_tag(self):
33 python, abi, plat = _bdist_wheel.get_tag(self)
34 python, abi = "py3", "none"
35 return python, abi, plat
36
37
38 except ImportError:
39 bdist_wheel = None
40
41 setuptools.setup(
42 name="compiler_gym",
43 version=version,
44 description="Reinforcement learning environments for compiler research",
45 author="Facebook AI Research",
46 long_description=long_description,
47 long_description_content_type="text/markdown",
48 url="https://github.com/facebookresearch/CompilerGym",
49 license="MIT",
50 packages=[
51 "compiler_gym",
52 "compiler_gym.bin",
53 "compiler_gym.datasets",
54 "compiler_gym.envs",
55 "compiler_gym.envs.llvm",
56 "compiler_gym.envs.llvm.service",
57 "compiler_gym.envs.llvm.service.passes",
58 "compiler_gym.service",
59 "compiler_gym.service.proto",
60 "compiler_gym.spaces",
61 "compiler_gym.third_party",
62 "compiler_gym.third_party.autophase",
63 "compiler_gym.third_party.llvm",
64 "compiler_gym.third_party.inst2vec",
65 "compiler_gym.util",
66 "compiler_gym.util.flags",
67 "compiler_gym.views",
68 ],
69 package_dir={
70 "": "bazel-bin/package.runfiles/CompilerGym",
71 },
72 package_data={
73 "compiler_gym": [
74 "envs/llvm/service/passes/*.txt",
75 "envs/llvm/service/compiler_gym-llvm-service",
76 "envs/llvm/service/libLLVMPolly.so",
77 "third_party/inst2vec/*.pickle",
78 "third_party/cBench/benchmarks.txt",
79 "third_party/cBench/cBench-v*/*",
80 "third_party/cBench/runtime_data/**/*",
81 ]
82 },
83 install_requires=requirements,
84 include_package_data=True,
85 python_requires=">=3.6",
86 classifiers=[
87 "Development Status :: 2 - Pre-Alpha",
88 "Environment :: Console",
89 "Intended Audience :: Developers",
90 "Intended Audience :: Science/Research",
91 "License :: OSI Approved :: MIT License",
92 "Topic :: Scientific/Engineering :: Artificial Intelligence",
93 "Topic :: Software Development :: Compilers",
94 ],
95 cmdclass={"bdist_wheel": bdist_wheel},
96 platforms=[distutils.util.get_platform()],
97 zip_safe=False,
98 )
99
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -84,11 +84,16 @@
include_package_data=True,
python_requires=">=3.6",
classifiers=[
- "Development Status :: 2 - Pre-Alpha",
+ "Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Compilers",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -84,11 +84,16 @@\n include_package_data=True,\n python_requires=\">=3.6\",\n classifiers=[\n- \"Development Status :: 2 - Pre-Alpha\",\n+ \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n+ \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Compilers\",\n ],\n", "issue": "Add support for Python 3.9\n## \ud83d\ude80 Feature\r\n\r\nAdd support for python 3.9. This shouldn't require any code changes, but the dependencies may not updating.\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport distutils.util\nimport io\n\nimport setuptools\n\nwith open(\"VERSION\") as f:\n version = f.read().strip()\nwith open(\"README.md\") as f:\n # Force UTF-8 file encoding to support non-ascii characters in the readme.\n with io.open(\"README.md\", encoding=\"utf-8\") as f:\n long_description = f.read()\nwith open(\"compiler_gym/requirements.txt\") as f:\n requirements = [ln.split(\"#\")[0].rstrip() for ln in f.readlines()]\n\n# When building a bdist_wheel we need to set the appropriate tags: this package\n# includes compiled binaries, and does not include compiled python extensions.\ntry:\n from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n\n class bdist_wheel(_bdist_wheel):\n def finalize_options(self):\n _bdist_wheel.finalize_options(self)\n self.root_is_pure = False\n\n def get_tag(self):\n python, abi, plat = _bdist_wheel.get_tag(self)\n python, abi = \"py3\", \"none\"\n return python, abi, plat\n\n\nexcept ImportError:\n bdist_wheel = None\n\nsetuptools.setup(\n name=\"compiler_gym\",\n version=version,\n description=\"Reinforcement learning environments for compiler research\",\n author=\"Facebook AI Research\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/CompilerGym\",\n license=\"MIT\",\n packages=[\n \"compiler_gym\",\n \"compiler_gym.bin\",\n \"compiler_gym.datasets\",\n \"compiler_gym.envs\",\n \"compiler_gym.envs.llvm\",\n \"compiler_gym.envs.llvm.service\",\n \"compiler_gym.envs.llvm.service.passes\",\n \"compiler_gym.service\",\n \"compiler_gym.service.proto\",\n \"compiler_gym.spaces\",\n \"compiler_gym.third_party\",\n \"compiler_gym.third_party.autophase\",\n \"compiler_gym.third_party.llvm\",\n \"compiler_gym.third_party.inst2vec\",\n \"compiler_gym.util\",\n \"compiler_gym.util.flags\",\n \"compiler_gym.views\",\n ],\n package_dir={\n \"\": \"bazel-bin/package.runfiles/CompilerGym\",\n },\n package_data={\n \"compiler_gym\": [\n \"envs/llvm/service/passes/*.txt\",\n \"envs/llvm/service/compiler_gym-llvm-service\",\n \"envs/llvm/service/libLLVMPolly.so\",\n \"third_party/inst2vec/*.pickle\",\n \"third_party/cBench/benchmarks.txt\",\n \"third_party/cBench/cBench-v*/*\",\n \"third_party/cBench/runtime_data/**/*\",\n ]\n },\n install_requires=requirements,\n include_package_data=True,\n 
python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Compilers\",\n ],\n cmdclass={\"bdist_wheel\": bdist_wheel},\n platforms=[distutils.util.get_platform()],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,531 | 197 |
gh_patches_debug_9649 | rasdani/github-patches | git_diff | OBOFoundry__OBOFoundry.github.io-1980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OBO prefix map should include prefixes of obsolete ontologies
I think OBO prefix map should contain prefixes of obsolete ontologies. Sometimes, like [here](https://github.com/OBOFoundry/OBOFoundry.github.io/pull/1974/files) one ontology is merged into another, with identifiers and all - it would still be useful to be able to loop these up.
@jamesaoverton objects to that?
</issue>
<code>
[start of util/make-shacl-prefixes.py]
1 #!/usr/bin/env python3
2
3 import csv
4 import sys
5 from argparse import ArgumentParser
6
7 import yaml
8
9
10 def main(args):
11 """
12 Takes ontologies.yml file and makes a triple file with SHACL prefixes.
13
14 For example, for uberon it will generate:
15
16 [ sh:prefix "UBERON" ; sh:namespace "http://purl.obolibrary.org/obo/UBERON_"]
17
18 We always assume the CURIE prefix is uppercase, unless 'preferred_prefix' is specified
19 (for mixed-case prefixes, e.g. FBbt)
20
21 This can be useful for converting an OBO class PURL to a prefix without assumption-embedding string conversions.
22 It can be used to interconvert PURLs to CURIEs.
23
24 Note that while prefixes can sometimes be seen in RDF files, this is part of the syntax and not part of the data,
25 the prefixes are expanded at parse time. The obo_prefixes.ttl file makes these explicit.
26
27 We use the SHACL vocabulary since it provides convenient predicates for putting prefixes in the domain of discourse;
28 however, it does not entail any use of SHACL
29
30 """
31 parser = ArgumentParser(
32 description="""
33 Takes ontologies.yml file and makes a triple file with shacl prefixes"""
34 )
35 parser.add_argument("input")
36 args = parser.parse_args()
37 stream = open(args.input, "r")
38 data = yaml.load(stream, Loader=yaml.SafeLoader)
39
40 print("@prefix sh: <http://www.w3.org/ns/shacl#> .")
41 print("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .")
42 print("[")
43 print(" sh:declare")
44 sep = ""
45 for ont in data["ontologies"]:
46 if ont.get("is_obsolete", False):
47 continue
48 prefix = ont.get("preferredPrefix", ont["id"].upper())
49 print(
50 f'{sep}[ sh:prefix "{prefix}" ; sh:namespace "http://purl.obolibrary.org/obo/{prefix}_"]'
51 )
52 sep = ","
53 print("] .")
54
55
56 if __name__ == "__main__":
57 main(sys.argv)
58
[end of util/make-shacl-prefixes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/util/make-shacl-prefixes.py b/util/make-shacl-prefixes.py
--- a/util/make-shacl-prefixes.py
+++ b/util/make-shacl-prefixes.py
@@ -43,8 +43,9 @@
print(" sh:declare")
sep = ""
for ont in data["ontologies"]:
- if ont.get("is_obsolete", False):
- continue
+ # if ont.get("is_obsolete", False):
+ # continue
+ # See https://github.com/OBOFoundry/OBOFoundry.github.io/issues/1976
prefix = ont.get("preferredPrefix", ont["id"].upper())
print(
f'{sep}[ sh:prefix "{prefix}" ; sh:namespace "http://purl.obolibrary.org/obo/{prefix}_"]'
| {"golden_diff": "diff --git a/util/make-shacl-prefixes.py b/util/make-shacl-prefixes.py\n--- a/util/make-shacl-prefixes.py\n+++ b/util/make-shacl-prefixes.py\n@@ -43,8 +43,9 @@\n print(\" sh:declare\")\n sep = \"\"\n for ont in data[\"ontologies\"]:\n- if ont.get(\"is_obsolete\", False):\n- continue\n+ # if ont.get(\"is_obsolete\", False):\n+ # continue\n+ # See https://github.com/OBOFoundry/OBOFoundry.github.io/issues/1976\n prefix = ont.get(\"preferredPrefix\", ont[\"id\"].upper())\n print(\n f'{sep}[ sh:prefix \"{prefix}\" ; sh:namespace \"http://purl.obolibrary.org/obo/{prefix}_\"]'\n", "issue": "OBO prefix map should include prefixes of obsolete ontologies\nI think OBO prefix map should contain prefixes of obsolete ontologies. Sometimes, like [here](https://github.com/OBOFoundry/OBOFoundry.github.io/pull/1974/files) one ontology is merged into another, with identifiers and all - it would still be useful to be able to loop these up.\r\n\r\n@jamesaoverton objects to that?\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport csv\nimport sys\nfrom argparse import ArgumentParser\n\nimport yaml\n\n\ndef main(args):\n \"\"\"\n Takes ontologies.yml file and makes a triple file with SHACL prefixes.\n\n For example, for uberon it will generate:\n\n [ sh:prefix \"UBERON\" ; sh:namespace \"http://purl.obolibrary.org/obo/UBERON_\"]\n\n We always assume the CURIE prefix is uppercase, unless 'preferred_prefix' is specified\n (for mixed-case prefixes, e.g. FBbt)\n\n This can be useful for converting an OBO class PURL to a prefix without assumption-embedding string conversions.\n It can be used to interconvert PURLs to CURIEs.\n\n Note that while prefixes can sometimes be seen in RDF files, this is part of the syntax and not part of the data,\n the prefixes are expanded at parse time. The obo_prefixes.ttl file makes these explicit.\n\n We use the SHACL vocabulary since it provides convenient predicates for putting prefixes in the domain of discourse;\n however, it does not entail any use of SHACL\n\n \"\"\"\n parser = ArgumentParser(\n description=\"\"\"\n Takes ontologies.yml file and makes a triple file with shacl prefixes\"\"\"\n )\n parser.add_argument(\"input\")\n args = parser.parse_args()\n stream = open(args.input, \"r\")\n data = yaml.load(stream, Loader=yaml.SafeLoader)\n\n print(\"@prefix sh:\t<http://www.w3.org/ns/shacl#> .\")\n print(\"@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\")\n print(\"[\")\n print(\" sh:declare\")\n sep = \"\"\n for ont in data[\"ontologies\"]:\n if ont.get(\"is_obsolete\", False):\n continue\n prefix = ont.get(\"preferredPrefix\", ont[\"id\"].upper())\n print(\n f'{sep}[ sh:prefix \"{prefix}\" ; sh:namespace \"http://purl.obolibrary.org/obo/{prefix}_\"]'\n )\n sep = \",\"\n print(\"] .\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "util/make-shacl-prefixes.py"}]} | 1,217 | 186 |
gh_patches_debug_12388 | rasdani/github-patches | git_diff | safe-global__safe-config-service-30 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cache GET /safe-apps/ endpoint
The endpoint `GET /safe-apps` returns a list of safe-apps that can be cached (eg.: 1h) due to the nature of the endpoint (frequency of updating the app list is low). Updating the list should invalidate the cache.
</issue>
<code>
[start of src/safe_apps/views.py]
1 from rest_framework.generics import ListAPIView
2
3 from .models import SafeApp
4 from .serializers import SafeAppsResponseSerializer
5
6
7 class SafeAppsListView(ListAPIView):
8 serializer_class = SafeAppsResponseSerializer
9
10 def get_queryset(self):
11 queryset = SafeApp.objects.all()
12
13 network_id = self.request.query_params.get("network_id")
14 if network_id is not None and network_id.isdigit():
15 queryset = queryset.filter(networks__contains=[network_id])
16
17 return queryset
18
[end of src/safe_apps/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/safe_apps/views.py b/src/safe_apps/views.py
--- a/src/safe_apps/views.py
+++ b/src/safe_apps/views.py
@@ -1,3 +1,5 @@
+from django.utils.decorators import method_decorator
+from django.views.decorators.cache import cache_page
from rest_framework.generics import ListAPIView
from .models import SafeApp
@@ -7,6 +9,10 @@
class SafeAppsListView(ListAPIView):
serializer_class = SafeAppsResponseSerializer
+ @method_decorator(cache_page(60 * 10)) # Cache 10 minutes
+ def get(self, request, *args, **kwargs):
+ return super().get(self, request, *args, **kwargs)
+
def get_queryset(self):
queryset = SafeApp.objects.all()
| {"golden_diff": "diff --git a/src/safe_apps/views.py b/src/safe_apps/views.py\n--- a/src/safe_apps/views.py\n+++ b/src/safe_apps/views.py\n@@ -1,3 +1,5 @@\n+from django.utils.decorators import method_decorator\n+from django.views.decorators.cache import cache_page\n from rest_framework.generics import ListAPIView\n \n from .models import SafeApp\n@@ -7,6 +9,10 @@\n class SafeAppsListView(ListAPIView):\n serializer_class = SafeAppsResponseSerializer\n \n+ @method_decorator(cache_page(60 * 10)) # Cache 10 minutes\n+ def get(self, request, *args, **kwargs):\n+ return super().get(self, request, *args, **kwargs)\n+\n def get_queryset(self):\n queryset = SafeApp.objects.all()\n", "issue": "Cache GET /safe-apps/ endpoint\nThe endpoint `GET /safe-apps` returns a list of safe-apps that can be cached (eg.: 1h) due to the nature of the endpoint (frequency of updating the app list is low). Updating the list should invalidate the cache.\n", "before_files": [{"content": "from rest_framework.generics import ListAPIView\n\nfrom .models import SafeApp\nfrom .serializers import SafeAppsResponseSerializer\n\n\nclass SafeAppsListView(ListAPIView):\n serializer_class = SafeAppsResponseSerializer\n\n def get_queryset(self):\n queryset = SafeApp.objects.all()\n\n network_id = self.request.query_params.get(\"network_id\")\n if network_id is not None and network_id.isdigit():\n queryset = queryset.filter(networks__contains=[network_id])\n\n return queryset\n", "path": "src/safe_apps/views.py"}]} | 730 | 177 |
gh_patches_debug_7339 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1684 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ebola Static Page: static page extension
Create configuration and deploy of static page extention/plugin.
This is a blocker for the other issues.
- populate the list of datasets of the controller
</issue>
<code>
[start of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py]
1 '''
2 Created on Nov 3, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 import datetime as dt
8
9 import pylons.config as config
10 import logging
11
12 import ckan.lib.base as base
13 import ckan.logic as logic
14 import ckan.model as model
15 import ckan.common as common
16 import ckan.lib.helpers as h
17
18 render = base.render
19 get_action = logic.get_action
20 c = common.c
21 request = common.request
22
23 log = logging.getLogger(__name__)
24
25
26 class CrisisController(base.BaseController):
27
28 def show(self):
29
30 context = {'model': model, 'session': model.Session,
31 'user': c.user or c.author, 'for_view': True,
32 'auth_user_obj': c.userobj}
33
34 datastore_resource_id = self._get_datastore_resource_id(
35 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))
36 if datastore_resource_id:
37 c.top_line_items = self._get_top_line_items(
38 context, datastore_resource_id)
39
40 limit = 25
41 c.q = u'ebola'
42
43 page = int(request.params.get('page', 1))
44 data_dict = {'sort': u'metadata_modified desc',
45 'fq': '+dataset_type:dataset',
46 'rows': limit,
47 'q': c.q,
48 'start': (page - 1) * limit
49 }
50 query = get_action("package_search")(context, data_dict)
51
52 def pager_url(q=None, page=None):
53 return h.url_for('show_crisis', page=page)
54
55 c.page = h.Page(
56 collection=query['results'],
57 page=page,
58 url=pager_url,
59 item_count=query['count'],
60 items_per_page=limit
61 )
62 c.items = query['results']
63 c.item_count = query['count']
64
65 c.other_links = {}
66 c.other_links['show_more'] = h.url_for(
67 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc',
68 'ext_indicator': '0'})
69
70 return render('crisis/crisis.html')
71
72 def _get_top_line_items(self, context, datastore_resource_id):
73 result = get_action('datastore_search')(
74 context, {'resource_id': datastore_resource_id})
75 if 'records' in result:
76 for r in result['records']:
77 d = dt.datetime.strptime(
78 r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')
79 r[u'latest_date'] = dt.datetime.strftime(d, '%d-%b-%Y')
80 return result['records']
81 return []
82
83 def _get_datastore_resource_id(self, context, dataset_id, resource_name):
84 try:
85 dataset = get_action('package_show')(
86 context, {'id': dataset_id})
87
88 if 'resources' in dataset:
89 for r in dataset['resources']:
90 if 'datastore_active' in r and r['datastore_active'] \
91 and r['name'] == resource_name:
92 return r['id']
93 re
94 except:
95 log.warning('No dataset with id ' + dataset_id)
96 return None
97
[end of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -90,7 +90,7 @@
if 'datastore_active' in r and r['datastore_active'] \
and r['name'] == resource_name:
return r['id']
- re
+ return None
except:
log.warning('No dataset with id ' + dataset_id)
return None
| {"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -90,7 +90,7 @@\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n- re\n+ return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "issue": "Ebola Static Page: static page extension\nCreate configuration and deploy of static page extention/plugin.\nThis is a blocker for the other issues.\n- populate the list of datasets of the controller\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport datetime as dt\n\nimport pylons.config as config\nimport logging\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n result = get_action('datastore_search')(\n context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n for r in result['records']:\n d = dt.datetime.strptime(\n r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%d-%b-%Y')\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n dataset = get_action('package_show')(\n context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n re\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}]} | 1,501 | 183 |
gh_patches_debug_5006 | rasdani/github-patches | git_diff | Textualize__textual-2621 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pushing a screen should send Leave message
If you have an action that opens a screen, it leaves the footer stuck in the highlight state.
I think we need to call `_set_mouse_over(None)` on the current screen when pushing another screen.
</issue>
<code>
[start of src/textual/widgets/_footer.py]
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from typing import ClassVar, Optional
5
6 import rich.repr
7 from rich.console import RenderableType
8 from rich.text import Text
9
10 from .. import events
11 from ..reactive import reactive
12 from ..widget import Widget
13
14
15 @rich.repr.auto
16 class Footer(Widget):
17 """A simple footer widget which docks itself to the bottom of the parent container."""
18
19 COMPONENT_CLASSES: ClassVar[set[str]] = {
20 "footer--description",
21 "footer--key",
22 "footer--highlight",
23 "footer--highlight-key",
24 }
25 """
26 | Class | Description |
27 | :- | :- |
28 | `footer--description` | Targets the descriptions of the key bindings. |
29 | `footer--highlight` | Targets the highlighted key binding. |
30 | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |
31 | `footer--key` | Targets the key portions of the key bindings. |
32 """
33
34 DEFAULT_CSS = """
35 Footer {
36 background: $accent;
37 color: $text;
38 dock: bottom;
39 height: 1;
40 }
41 Footer > .footer--highlight {
42 background: $accent-darken-1;
43 }
44
45 Footer > .footer--highlight-key {
46 background: $secondary;
47 text-style: bold;
48 }
49
50 Footer > .footer--key {
51 text-style: bold;
52 background: $accent-darken-2;
53 }
54 """
55
56 highlight_key: reactive[str | None] = reactive[Optional[str]](None)
57
58 def __init__(self) -> None:
59 super().__init__()
60 self._key_text: Text | None = None
61 self.auto_links = False
62
63 async def watch_highlight_key(self) -> None:
64 """If highlight key changes we need to regenerate the text."""
65 self._key_text = None
66 self.refresh()
67
68 def _on_mount(self, _: events.Mount) -> None:
69 self.watch(self.screen, "focused", self._bindings_changed)
70 self.watch(self.screen, "stack_updates", self._bindings_changed)
71
72 def _bindings_changed(self, _: Widget | None) -> None:
73 self._key_text = None
74 self.refresh()
75
76 def _on_mouse_move(self, event: events.MouseMove) -> None:
77 """Store any key we are moving over."""
78 self.highlight_key = event.style.meta.get("key")
79
80 def _on_leave(self, _: events.Leave) -> None:
81 """Clear any highlight when the mouse leaves the widget"""
82 if self.screen.is_current:
83 self.highlight_key = None
84
85 def __rich_repr__(self) -> rich.repr.Result:
86 yield from super().__rich_repr__()
87
88 def _make_key_text(self) -> Text:
89 """Create text containing all the keys."""
90 base_style = self.rich_style
91 text = Text(
92 style=self.rich_style,
93 no_wrap=True,
94 overflow="ellipsis",
95 justify="left",
96 end="",
97 )
98 highlight_style = self.get_component_rich_style("footer--highlight")
99 highlight_key_style = self.get_component_rich_style("footer--highlight-key")
100 key_style = self.get_component_rich_style("footer--key")
101 description_style = self.get_component_rich_style("footer--description")
102
103 bindings = [
104 binding
105 for (_, binding) in self.app.namespace_bindings.values()
106 if binding.show
107 ]
108
109 action_to_bindings = defaultdict(list)
110 for binding in bindings:
111 action_to_bindings[binding.action].append(binding)
112
113 for _, bindings in action_to_bindings.items():
114 binding = bindings[0]
115 if binding.key_display is None:
116 key_display = self.app.get_key_display(binding.key)
117 if key_display is None:
118 key_display = binding.key.upper()
119 else:
120 key_display = binding.key_display
121 hovered = self.highlight_key == binding.key
122 key_text = Text.assemble(
123 (f" {key_display} ", highlight_key_style if hovered else key_style),
124 (
125 f" {binding.description} ",
126 highlight_style if hovered else base_style + description_style,
127 ),
128 meta={
129 "@click": f"app.check_bindings('{binding.key}')",
130 "key": binding.key,
131 },
132 )
133 text.append_text(key_text)
134 return text
135
136 def notify_style_update(self) -> None:
137 self._key_text = None
138
139 def post_render(self, renderable):
140 return renderable
141
142 def render(self) -> RenderableType:
143 if self._key_text is None:
144 self._key_text = self._make_key_text()
145 return self._key_text
146
[end of src/textual/widgets/_footer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py
--- a/src/textual/widgets/_footer.py
+++ b/src/textual/widgets/_footer.py
@@ -79,8 +79,7 @@
def _on_leave(self, _: events.Leave) -> None:
"""Clear any highlight when the mouse leaves the widget"""
- if self.screen.is_current:
- self.highlight_key = None
+ self.highlight_key = None
def __rich_repr__(self) -> rich.repr.Result:
yield from super().__rich_repr__()
| {"golden_diff": "diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py\n--- a/src/textual/widgets/_footer.py\n+++ b/src/textual/widgets/_footer.py\n@@ -79,8 +79,7 @@\n \n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n- if self.screen.is_current:\n- self.highlight_key = None\n+ self.highlight_key = None\n \n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n", "issue": "Pushing a screen should send Leave message\nIf you have an action that opens a screen, it leaves the footer stuck in the highlight state.\n\nI think we need to call `_set_mouse_over(None)` on the current screen when pushing another screen.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import ClassVar, Optional\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n\[email protected]\nclass Footer(Widget):\n \"\"\"A simple footer widget which docks itself to the bottom of the parent container.\"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"footer--description\",\n \"footer--key\",\n \"footer--highlight\",\n \"footer--highlight-key\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `footer--description` | Targets the descriptions of the key bindings. |\n | `footer--highlight` | Targets the highlighted key binding. |\n | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |\n | `footer--key` | Targets the key portions of the key bindings. |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Footer {\n background: $accent;\n color: $text;\n dock: bottom;\n height: 1;\n }\n Footer > .footer--highlight {\n background: $accent-darken-1;\n }\n\n Footer > .footer--highlight-key {\n background: $secondary;\n text-style: bold;\n }\n\n Footer > .footer--key {\n text-style: bold;\n background: $accent-darken-2;\n }\n \"\"\"\n\n highlight_key: reactive[str | None] = reactive[Optional[str]](None)\n\n def __init__(self) -> None:\n super().__init__()\n self._key_text: Text | None = None\n self.auto_links = False\n\n async def watch_highlight_key(self) -> None:\n \"\"\"If highlight key changes we need to regenerate the text.\"\"\"\n self._key_text = None\n self.refresh()\n\n def _on_mount(self, _: events.Mount) -> None:\n self.watch(self.screen, \"focused\", self._bindings_changed)\n self.watch(self.screen, \"stack_updates\", self._bindings_changed)\n\n def _bindings_changed(self, _: Widget | None) -> None:\n self._key_text = None\n self.refresh()\n\n def _on_mouse_move(self, event: events.MouseMove) -> None:\n \"\"\"Store any key we are moving over.\"\"\"\n self.highlight_key = event.style.meta.get(\"key\")\n\n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n if self.screen.is_current:\n self.highlight_key = None\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n\n def _make_key_text(self) -> Text:\n \"\"\"Create text containing all the keys.\"\"\"\n base_style = self.rich_style\n text = Text(\n style=self.rich_style,\n no_wrap=True,\n overflow=\"ellipsis\",\n justify=\"left\",\n end=\"\",\n )\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = 
self.get_component_rich_style(\"footer--key\")\n description_style = self.get_component_rich_style(\"footer--description\")\n\n bindings = [\n binding\n for (_, binding) in self.app.namespace_bindings.values()\n if binding.show\n ]\n\n action_to_bindings = defaultdict(list)\n for binding in bindings:\n action_to_bindings[binding.action].append(binding)\n\n for _, bindings in action_to_bindings.items():\n binding = bindings[0]\n if binding.key_display is None:\n key_display = self.app.get_key_display(binding.key)\n if key_display is None:\n key_display = binding.key.upper()\n else:\n key_display = binding.key_display\n hovered = self.highlight_key == binding.key\n key_text = Text.assemble(\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n highlight_style if hovered else base_style + description_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n \"key\": binding.key,\n },\n )\n text.append_text(key_text)\n return text\n\n def notify_style_update(self) -> None:\n self._key_text = None\n\n def post_render(self, renderable):\n return renderable\n\n def render(self) -> RenderableType:\n if self._key_text is None:\n self._key_text = self._make_key_text()\n return self._key_text\n", "path": "src/textual/widgets/_footer.py"}]} | 1,926 | 127 |
gh_patches_debug_27148 | rasdani/github-patches | git_diff | beeware__toga-2086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Gtk] WebView uses deprecated run_javascript api
### Describe the bug
The Gtk backend for `toga.WebView` uses the `run_javascript()` API to evaluate JavaScript. This was deprecated in WebView v2.40 in favour of `evaluate_javascript()`. Since we depend on WebKit v4.0 or later, we should switch to the non-deprecated API. See https://webkitgtk.org/reference/webkit2gtk/stable/method.WebView.run_javascript.html.
### Steps to reproduce
Evaluate some JavaScript using `toga.WebView` on Linux with the Gtk backend. See the followig deprecation warning:
```
/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:106: DeprecationWarning: WebKit2.WebView.run_javascript is deprecated
self.native.run_javascript(javascript, None, gtk_js_finished)
/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:86: DeprecationWarning: WebKit2.WebView.run_javascript_finish is deprecated
js_result = webview.run_javascript_finish(task)
```
### Expected behavior
Don't use deprecated APIs unless required for backward compatibility.
### Screenshots
_No response_
### Environment
- Operating System: Ubuntu 22.04
- Python version: Python 3.10
- Software versions:
- Toga: 0.3.2.dev804+g609682318
### Logs
_No response_
### Additional context
_No response_
</issue>
<code>
[start of gtk/src/toga_gtk/widgets/webview.py]
1 from travertino.size import at_least
2
3 from toga.widgets.webview import JavaScriptResult
4
5 from ..libs import GLib, WebKit2
6 from .base import Widget
7
8
9 class WebView(Widget):
10 """GTK WebView implementation."""
11
12 def create(self):
13 if WebKit2 is None: # pragma: no cover
14 raise RuntimeError(
15 "Unable to import WebKit2. Ensure that the system package "
16 "providing Webkit2 and its GTK bindings have been installed."
17 )
18
19 self.native = WebKit2.WebView()
20
21 settings = self.native.get_settings()
22 settings.set_property("enable-developer-extras", True)
23
24 # The default cache model is WEB_BROWSER, which will
25 # use the backing cache to minimize hits on the web server.
26 # This can result in stale web content being served, even if
27 # the source document (and the web server response) changes.
28 context = self.native.get_context()
29 context.set_cache_model(WebKit2.CacheModel.DOCUMENT_VIEWER)
30
31 self.native.connect("load-changed", self.gtk_on_load_changed)
32
33 self.load_future = None
34
35 def gtk_on_load_changed(self, widget, load_event, *args):
36 if load_event == WebKit2.LoadEvent.FINISHED:
37 self.interface.on_webview_load(None)
38
39 if self.load_future:
40 self.load_future.set_result(None)
41 self.load_future = None
42
43 def get_url(self):
44 url = self.native.get_uri()
45 return None if url == "about:blank" else url
46
47 def _loaded(self, data):
48 # Internal method to fake a load event.
49 self.native.emit("load-changed", WebKit2.LoadEvent.FINISHED)
50 return False
51
52 def set_url(self, value, future=None):
53 if value:
54 self.native.load_uri(value)
55 else:
56 self.native.load_plain_text("")
57 # GTK doesn't emit a load-changed signal when plain text is loaded; so we
58 # fake it. We can't emit the signal directly because it will be handled
59 # immediately. During creation of an empty webview, the URL is set to None,
60 # which means an event can be triggered before the widget instance has
61 # finished construction. So, we defer the call with a 0 timeout.
62 GLib.timeout_add(0, self._loaded, None)
63
64 self.load_future = future
65
66 def get_user_agent(self):
67 return self.native.get_settings().props.user_agent
68
69 def set_user_agent(self, value):
70 # replace user agent of webview (webview has own one)
71 self.native.get_settings().props.user_agent = value
72
73 def set_content(self, root_url, content):
74 self.native.load_html(content, root_url)
75
76 def evaluate_javascript(self, javascript, on_result=None):
77 # Construct a future on the event loop
78 result = JavaScriptResult()
79
80 # Define a callback that will update the future when
81 # the Javascript is complete.
82 def gtk_js_finished(webview, task, *user_data):
83 """If `run_javascript_finish` from GTK returns a result, unmarshal it, and
84 call back with the result."""
85 try:
86 js_result = webview.run_javascript_finish(task)
87 value = js_result.get_js_value()
88 if value.is_boolean():
89 value = value.to_boolean()
90 elif value.is_number():
91 value = value.to_double()
92 else:
93 value = value.to_string()
94
95 result.future.set_result(value)
96 if on_result:
97 on_result(value)
98 except Exception as e:
99 exc = RuntimeError(str(e))
100 result.future.set_exception(exc)
101 if on_result:
102 on_result(None, exception=exc)
103
104 # Invoke the javascript method, with a callback that will set
105 # the future when a result is available.
106 self.native.run_javascript(javascript, None, gtk_js_finished)
107
108 # wait for the future, and return the result
109 return result
110
111 def rehint(self):
112 self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)
113 self.interface.intrinsic.height = at_least(self.interface._MIN_HEIGHT)
114
[end of gtk/src/toga_gtk/widgets/webview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gtk/src/toga_gtk/widgets/webview.py b/gtk/src/toga_gtk/widgets/webview.py
--- a/gtk/src/toga_gtk/widgets/webview.py
+++ b/gtk/src/toga_gtk/widgets/webview.py
@@ -80,11 +80,10 @@
# Define a callback that will update the future when
# the Javascript is complete.
def gtk_js_finished(webview, task, *user_data):
- """If `run_javascript_finish` from GTK returns a result, unmarshal it, and
+ """If `evaluate_javascript_finish` from GTK returns a result, unmarshal it, and
call back with the result."""
try:
- js_result = webview.run_javascript_finish(task)
- value = js_result.get_js_value()
+ value = webview.evaluate_javascript_finish(task)
if value.is_boolean():
value = value.to_boolean()
elif value.is_number():
@@ -103,7 +102,14 @@
# Invoke the javascript method, with a callback that will set
# the future when a result is available.
- self.native.run_javascript(javascript, None, gtk_js_finished)
+ self.native.evaluate_javascript(
+ script=javascript,
+ length=len(javascript),
+ world_name=None,
+ source_uri=None,
+ cancellable=None,
+ callback=gtk_js_finished,
+ )
# wait for the future, and return the result
return result
| {"golden_diff": "diff --git a/gtk/src/toga_gtk/widgets/webview.py b/gtk/src/toga_gtk/widgets/webview.py\n--- a/gtk/src/toga_gtk/widgets/webview.py\n+++ b/gtk/src/toga_gtk/widgets/webview.py\n@@ -80,11 +80,10 @@\n # Define a callback that will update the future when\n # the Javascript is complete.\n def gtk_js_finished(webview, task, *user_data):\n- \"\"\"If `run_javascript_finish` from GTK returns a result, unmarshal it, and\n+ \"\"\"If `evaluate_javascript_finish` from GTK returns a result, unmarshal it, and\n call back with the result.\"\"\"\n try:\n- js_result = webview.run_javascript_finish(task)\n- value = js_result.get_js_value()\n+ value = webview.evaluate_javascript_finish(task)\n if value.is_boolean():\n value = value.to_boolean()\n elif value.is_number():\n@@ -103,7 +102,14 @@\n \n # Invoke the javascript method, with a callback that will set\n # the future when a result is available.\n- self.native.run_javascript(javascript, None, gtk_js_finished)\n+ self.native.evaluate_javascript(\n+ script=javascript,\n+ length=len(javascript),\n+ world_name=None,\n+ source_uri=None,\n+ cancellable=None,\n+ callback=gtk_js_finished,\n+ )\n \n # wait for the future, and return the result\n return result\n", "issue": "[Gtk] WebView uses deprecated run_javascript api\n### Describe the bug\r\n\r\nThe Gtk backend for `toga.WebView` uses the `run_javascript()` API to evaluate JavaScript. This was deprecated in WebView v2.40 in favour of `evaluate_javascript()`. Since we depend on WebKit v4.0 or later, we should switch to the non-deprecated API. See https://webkitgtk.org/reference/webkit2gtk/stable/method.WebView.run_javascript.html.\r\n\r\n### Steps to reproduce\r\n\r\nEvaluate some JavaScript using `toga.WebView` on Linux with the Gtk backend. See the followig deprecation warning:\r\n\r\n```\r\n/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:106: DeprecationWarning: WebKit2.WebView.run_javascript is deprecated\r\n self.native.run_javascript(javascript, None, gtk_js_finished)\r\n/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:86: DeprecationWarning: WebKit2.WebView.run_javascript_finish is deprecated\r\n js_result = webview.run_javascript_finish(task)\r\n```\r\n\r\n### Expected behavior\r\n\r\nDon't use deprecated APIs unless required for backward compatibility.\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n- Operating System: Ubuntu 22.04\r\n- Python version: Python 3.10\r\n- Software versions:\r\n - Toga: 0.3.2.dev804+g609682318\r\n\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga.widgets.webview import JavaScriptResult\n\nfrom ..libs import GLib, WebKit2\nfrom .base import Widget\n\n\nclass WebView(Widget):\n \"\"\"GTK WebView implementation.\"\"\"\n\n def create(self):\n if WebKit2 is None: # pragma: no cover\n raise RuntimeError(\n \"Unable to import WebKit2. 
Ensure that the system package \"\n \"providing Webkit2 and its GTK bindings have been installed.\"\n )\n\n self.native = WebKit2.WebView()\n\n settings = self.native.get_settings()\n settings.set_property(\"enable-developer-extras\", True)\n\n # The default cache model is WEB_BROWSER, which will\n # use the backing cache to minimize hits on the web server.\n # This can result in stale web content being served, even if\n # the source document (and the web server response) changes.\n context = self.native.get_context()\n context.set_cache_model(WebKit2.CacheModel.DOCUMENT_VIEWER)\n\n self.native.connect(\"load-changed\", self.gtk_on_load_changed)\n\n self.load_future = None\n\n def gtk_on_load_changed(self, widget, load_event, *args):\n if load_event == WebKit2.LoadEvent.FINISHED:\n self.interface.on_webview_load(None)\n\n if self.load_future:\n self.load_future.set_result(None)\n self.load_future = None\n\n def get_url(self):\n url = self.native.get_uri()\n return None if url == \"about:blank\" else url\n\n def _loaded(self, data):\n # Internal method to fake a load event.\n self.native.emit(\"load-changed\", WebKit2.LoadEvent.FINISHED)\n return False\n\n def set_url(self, value, future=None):\n if value:\n self.native.load_uri(value)\n else:\n self.native.load_plain_text(\"\")\n # GTK doesn't emit a load-changed signal when plain text is loaded; so we\n # fake it. We can't emit the signal directly because it will be handled\n # immediately. During creation of an empty webview, the URL is set to None,\n # which means an event can be triggered before the widget instance has\n # finished construction. So, we defer the call with a 0 timeout.\n GLib.timeout_add(0, self._loaded, None)\n\n self.load_future = future\n\n def get_user_agent(self):\n return self.native.get_settings().props.user_agent\n\n def set_user_agent(self, value):\n # replace user agent of webview (webview has own one)\n self.native.get_settings().props.user_agent = value\n\n def set_content(self, root_url, content):\n self.native.load_html(content, root_url)\n\n def evaluate_javascript(self, javascript, on_result=None):\n # Construct a future on the event loop\n result = JavaScriptResult()\n\n # Define a callback that will update the future when\n # the Javascript is complete.\n def gtk_js_finished(webview, task, *user_data):\n \"\"\"If `run_javascript_finish` from GTK returns a result, unmarshal it, and\n call back with the result.\"\"\"\n try:\n js_result = webview.run_javascript_finish(task)\n value = js_result.get_js_value()\n if value.is_boolean():\n value = value.to_boolean()\n elif value.is_number():\n value = value.to_double()\n else:\n value = value.to_string()\n\n result.future.set_result(value)\n if on_result:\n on_result(value)\n except Exception as e:\n exc = RuntimeError(str(e))\n result.future.set_exception(exc)\n if on_result:\n on_result(None, exception=exc)\n\n # Invoke the javascript method, with a callback that will set\n # the future when a result is available.\n self.native.run_javascript(javascript, None, gtk_js_finished)\n\n # wait for the future, and return the result\n return result\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface._MIN_HEIGHT)\n", "path": "gtk/src/toga_gtk/widgets/webview.py"}]} | 2,022 | 335 |
gh_patches_debug_4483 | rasdani/github-patches | git_diff | kserve__kserve-2134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kfserving-samples bucket on GCS was gone
/kind bug
**What steps did you take and what happened:**
The existing codebase runs some test code with a prebuilt model file located in GCS, specifically `gs://kfserving-samples/models/tensorflow/flowers`, but the bucket now seems to be gone.
Do we have alternative buckets, or should this bucket live forever?
```
gsutil cp -r gs://kfserving-samples/models/tensorflow/flowers flowers
BucketNotFoundException: 404 gs://kfserving-samples bucket does not exist.
```
**What did you expect to happen:**
the model file should be downloaded.
**Anything else you would like to add:**
No
**Environment:**
not related to environment.
</issue>
<code>
[start of docs/samples/pipelines/sample-tf-pipeline.py]
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import kfp.compiler as compiler
15 import kfp.dsl as dsl
16 from kfp import components
17
18 # kfserving_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'
19 # 'master/components/kubeflow/kfserving/component.yaml')
20 kserve_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'
21 'master/components/kserve/component.yaml')
22
23
24 @dsl.pipeline(
25 name='KServe pipeline',
26 description='A pipeline for KServe.'
27 )
28 def kservePipeline(
29 action='apply',
30 model_name='tensorflow-sample',
31 model_uri='gs://kfserving-samples/models/tensorflow/flowers',
32 namespace='anonymous',
33 framework='tensorflow'):
34 kserve_op(action=action,
35 model_name=model_name,
36 model_uri=model_uri,
37 namespace=namespace,
38 framework=framework)
39
40
41 if __name__ == '__main__':
42 compiler.Compiler().compile(kservePipeline, __file__ + '.tar.gz')
43
[end of docs/samples/pipelines/sample-tf-pipeline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/samples/pipelines/sample-tf-pipeline.py b/docs/samples/pipelines/sample-tf-pipeline.py
--- a/docs/samples/pipelines/sample-tf-pipeline.py
+++ b/docs/samples/pipelines/sample-tf-pipeline.py
@@ -28,7 +28,7 @@
def kservePipeline(
action='apply',
model_name='tensorflow-sample',
- model_uri='gs://kfserving-samples/models/tensorflow/flowers',
+ model_uri='gs://kfserving-examples/models/tensorflow/flowers',
namespace='anonymous',
framework='tensorflow'):
kserve_op(action=action,
| {"golden_diff": "diff --git a/docs/samples/pipelines/sample-tf-pipeline.py b/docs/samples/pipelines/sample-tf-pipeline.py\n--- a/docs/samples/pipelines/sample-tf-pipeline.py\n+++ b/docs/samples/pipelines/sample-tf-pipeline.py\n@@ -28,7 +28,7 @@\n def kservePipeline(\n action='apply',\n model_name='tensorflow-sample',\n- model_uri='gs://kfserving-samples/models/tensorflow/flowers',\n+ model_uri='gs://kfserving-examples/models/tensorflow/flowers',\n namespace='anonymous',\n framework='tensorflow'):\n kserve_op(action=action,\n", "issue": "kfserving-samples bucket on GCS was gone\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nThe existing codebase is running some test code with a prebuild model file located in GCS, especially `gs://kfserving-samples/models/tensorflow/flowers` but now seems the bucket is gone already. \r\nDo we have other alternative buckets? or this bucket should be live forever?\r\n\r\n```\r\ngsutil cp -r gs://kfserving-samples/models/tensorflow/flowers flowers\r\n\r\nBucketNotFoundException: 404 gs://kfserving-samples bucket does not exist.\r\n```\r\n\r\n\r\n\r\n**What did you expect to happen:**\r\nthe model file should be downloaded.\r\n\r\n\r\n**Anything else you would like to add:**\r\nNo\r\n\r\n**Environment:**\r\nnot related to environment.\r\n\n", "before_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.compiler as compiler\nimport kfp.dsl as dsl\nfrom kfp import components\n\n# kfserving_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'\n# 'master/components/kubeflow/kfserving/component.yaml')\nkserve_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'\n 'master/components/kserve/component.yaml')\n\n\[email protected](\n name='KServe pipeline',\n description='A pipeline for KServe.'\n)\ndef kservePipeline(\n action='apply',\n model_name='tensorflow-sample',\n model_uri='gs://kfserving-samples/models/tensorflow/flowers',\n namespace='anonymous',\n framework='tensorflow'):\n kserve_op(action=action,\n model_name=model_name,\n model_uri=model_uri,\n namespace=namespace,\n framework=framework)\n\n\nif __name__ == '__main__':\n compiler.Compiler().compile(kservePipeline, __file__ + '.tar.gz')\n", "path": "docs/samples/pipelines/sample-tf-pipeline.py"}]} | 1,135 | 142 |
gh_patches_debug_751 | rasdani/github-patches | git_diff | pytorch__TensorRT-74 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create some sort of serialization / deserialization functionality
With INT8 about to land, it would be a pain to have to calibrate from scratch every time. There should be some mechanism to save and load modules with the TRT engine included.
</issue>
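One way to read the request: once a compiled module carries its TRT engine, plain TorchScript serialization should round-trip it, provided the engine ops are registered before loading. A minimal usage sketch, assuming `trt_module` was previously produced by `trtorch.compile` and that importing `trtorch` is what registers those ops (both assumptions, not shown in the code below):

```python
import torch
import trtorch  # assumed to register the TensorRT engine ops with TorchScript on import

def save_and_reload(trt_module: torch.jit.ScriptModule, path: str) -> torch.jit.ScriptModule:
    torch.jit.save(trt_module, path)  # the serialized module carries the embedded TRT engine
    return torch.jit.load(path)       # reload later without re-running INT8 calibration
```

The accepted patch below covers the registration half of this: it adds a helper that loads `lib/trtorch.so` via `torch.ops.load_library`.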
<code>
[start of py/trtorch/__init__.py]
1 import os
2 import sys
3
4 if sys.version_info < (3,):
5 raise Exception("Python 2 has reached end-of-life and is not supported by TRTorch")
6
7 import ctypes
8 import torch
9
10 from trtorch._version import __version__
11 from trtorch._compiler import *
12 from trtorch._types import *
13 from trtorch import logging
14
[end of py/trtorch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/trtorch/__init__.py b/py/trtorch/__init__.py
--- a/py/trtorch/__init__.py
+++ b/py/trtorch/__init__.py
@@ -11,3 +11,7 @@
from trtorch._compiler import *
from trtorch._types import *
from trtorch import logging
+
+def _register_with_torch():
+ trtorch_dir = os.path.dirname(__file__)
+ torch.ops.load_library(trtorch_dir + '/lib/trtorch.so')
\ No newline at end of file
| {"golden_diff": "diff --git a/py/trtorch/__init__.py b/py/trtorch/__init__.py\n--- a/py/trtorch/__init__.py\n+++ b/py/trtorch/__init__.py\n@@ -11,3 +11,7 @@\n from trtorch._compiler import *\n from trtorch._types import *\n from trtorch import logging\n+\n+def _register_with_torch():\n+ trtorch_dir = os.path.dirname(__file__)\n+ torch.ops.load_library(trtorch_dir + '/lib/trtorch.so')\n\\ No newline at end of file\n", "issue": "Create some sort of serialization / deserialization functionality\nWith INT8 about to land, would be a pain to have to calibrate from scratch every time. There should be some mechanism to save and load modules with the TRT engine included. \n", "before_files": [{"content": "import os\nimport sys\n\nif sys.version_info < (3,):\n raise Exception(\"Python 2 has reached end-of-life and is not supported by TRTorch\")\n\nimport ctypes\nimport torch\n\nfrom trtorch._version import __version__\nfrom trtorch._compiler import *\nfrom trtorch._types import *\nfrom trtorch import logging\n", "path": "py/trtorch/__init__.py"}]} | 682 | 118 |
gh_patches_debug_39214 | rasdani/github-patches | git_diff | pytorch__TensorRT-2372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🐛 [Bug] Bug in `aten.where` converter for Numpy array inputs
## Bug Description
- When applying the converter to NumPy arrays or constants, compilation fails due to use of the `expand` operator, which only applies to Torch tensors
</issue>
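The failure mode suggests the converter reaches for `Tensor.expand` on inputs that may be NumPy constants, which have no such method. A minimal sketch of the type-dispatching broadcast the fix needs (the helper name is illustrative, not part of the repository):

```python
import numpy as np
import torch

def broadcast_like(value, output_shape):
    # torch.Tensor exposes .expand(); a NumPy constant needs np.broadcast_to() instead
    if isinstance(value, torch.Tensor):
        return value.expand(output_shape)
    return np.broadcast_to(value, output_shape)
```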
<code>
[start of py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py]
1 from typing import Optional
2
3 import numpy as np
4 import tensorrt as trt
5 import torch
6 from torch.fx.node import Target
7 from torch_tensorrt.dynamo._SourceIR import SourceIR
8 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
9 from torch_tensorrt.dynamo.conversion.converter_utils import (
10 broadcastable,
11 get_trt_tensor,
12 )
13 from torch_tensorrt.dynamo.conversion.impl.slice import expand
14 from torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name
15 from torch_tensorrt.fx.types import TRTTensor
16
17
18 def where(
19 ctx: ConversionContext,
20 target: Target,
21 source_ir: Optional[SourceIR],
22 name: str,
23 input: TRTTensor,
24 other: TRTTensor,
25 condition: TRTTensor,
26 ) -> TRTTensor:
27 if not (broadcastable(input, other)):
28 assert "The two torch tensors should be broadcastable"
29
30 # get output shape
31 # purpose of this is to bring input and other rank same as
32 # output_shape to input it to the add_expand operation
33 # condition will have dimension of either input or other
34 input, other = broadcast(ctx.net, input, other, f"{name}_x", f"{name}_y")
35 if len(tuple(condition.shape)) != len(tuple(input.shape)):
36 condition, input = broadcast(
37 ctx.net, condition, input, f"{name}_condition", f"{name}_x"
38 )
39
40 x_shape = list(input.shape)
41 y_shape = list(other.shape)
42 condition_shape = list(condition.shape)
43
44 output_shape = list(torch.broadcast_shapes(condition_shape, x_shape, y_shape))
45
46 # expand shape
47 if not isinstance(condition, TRTTensor):
48 assert condition.dtype in (torch.bool, np.bool_), "condition dtype is not bool"
49 if condition_shape != output_shape:
50 condition = (
51 condition.expand(output_shape)
52 if isinstance(condition, torch.Tensor)
53 else np.broadcast_to(condition, output_shape)
54 )
55 condition_val = get_trt_tensor(ctx, condition, f"{name}_condition")
56 else:
57 assert condition.dtype == trt.bool, "mask dtype is not bool!"
58 if condition_shape != output_shape:
59 condition_val = expand(
60 ctx, target, source_ir, f"{name}_expand", condition, output_shape
61 )
62 else:
63 condition_val = condition
64
65 if not isinstance(input, TRTTensor):
66 if x_shape != output_shape:
67 # special case where 1 element in input
68 if len(input.shape) == 0:
69 input = (
70 input.unsqueeze(0)
71 if isinstance(input, torch.Tensor)
72 else np.expand_dims(input, axis=0)
73 )
74 input = input.expand(output_shape)
75 x_val = get_trt_tensor(ctx, input, f"{name}_x")
76 else:
77 x_val = input
78 if x_shape != output_shape:
79 x_val = expand(
80 ctx, target, source_ir, f"{name}_x_expand", input, output_shape
81 )
82
83 if not isinstance(other, TRTTensor):
84 if y_shape != output_shape:
85 # special case where 1 element in other
86 if len(other.shape) == 0:
87 other = (
88 other.unsqueeze(0)
89 if isinstance(other, torch.Tensor)
90 else np.expand_dims(other, axis=0)
91 )
92 other = other.expand(output_shape)
93 y_val = get_trt_tensor(ctx, other, f"{name}_y")
94 else:
95 y_val = other
96 if y_shape != output_shape:
97 y_val = expand(
98 ctx, target, source_ir, f"{name}_y_expand", y_val, output_shape
99 )
100
101 select_layer = ctx.net.add_select(condition_val, x_val, y_val)
102
103 set_layer_name(select_layer, target, f"{name}_select")
104
105 return select_layer.get_output(0)
106
[end of py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py
--- a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py
+++ b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, Union
import numpy as np
import tensorrt as trt
@@ -11,7 +11,7 @@
get_trt_tensor,
)
from torch_tensorrt.dynamo.conversion.impl.slice import expand
-from torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name
+from torch_tensorrt.fx.converters.converter_utils import set_layer_name
from torch_tensorrt.fx.types import TRTTensor
@@ -20,23 +20,13 @@
target: Target,
source_ir: Optional[SourceIR],
name: str,
- input: TRTTensor,
- other: TRTTensor,
- condition: TRTTensor,
+ input: Union[TRTTensor, np.ndarray, torch.Tensor],
+ other: Union[TRTTensor, np.ndarray, torch.Tensor],
+ condition: Union[TRTTensor, np.ndarray, torch.Tensor],
) -> TRTTensor:
if not (broadcastable(input, other)):
assert "The two torch tensors should be broadcastable"
- # get output shape
- # purpose of this is to bring input and other rank same as
- # output_shape to input it to the add_expand operation
- # condition will have dimension of either input or other
- input, other = broadcast(ctx.net, input, other, f"{name}_x", f"{name}_y")
- if len(tuple(condition.shape)) != len(tuple(input.shape)):
- condition, input = broadcast(
- ctx.net, condition, input, f"{name}_condition", f"{name}_x"
- )
-
x_shape = list(input.shape)
y_shape = list(other.shape)
condition_shape = list(condition.shape)
@@ -71,7 +61,11 @@
if isinstance(input, torch.Tensor)
else np.expand_dims(input, axis=0)
)
- input = input.expand(output_shape)
+ input = (
+ input.expand(output_shape)
+ if isinstance(input, torch.Tensor)
+ else np.broadcast_to(input, output_shape)
+ )
x_val = get_trt_tensor(ctx, input, f"{name}_x")
else:
x_val = input
@@ -89,7 +83,11 @@
if isinstance(other, torch.Tensor)
else np.expand_dims(other, axis=0)
)
- other = other.expand(output_shape)
+ other = (
+ other.expand(output_shape)
+ if isinstance(other, torch.Tensor)
+ else np.broadcast_to(other, output_shape)
+ )
y_val = get_trt_tensor(ctx, other, f"{name}_y")
else:
y_val = other
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py\n--- a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py\n+++ b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py\n@@ -1,4 +1,4 @@\n-from typing import Optional\n+from typing import Optional, Union\n \n import numpy as np\n import tensorrt as trt\n@@ -11,7 +11,7 @@\n get_trt_tensor,\n )\n from torch_tensorrt.dynamo.conversion.impl.slice import expand\n-from torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name\n+from torch_tensorrt.fx.converters.converter_utils import set_layer_name\n from torch_tensorrt.fx.types import TRTTensor\n \n \n@@ -20,23 +20,13 @@\n target: Target,\n source_ir: Optional[SourceIR],\n name: str,\n- input: TRTTensor,\n- other: TRTTensor,\n- condition: TRTTensor,\n+ input: Union[TRTTensor, np.ndarray, torch.Tensor],\n+ other: Union[TRTTensor, np.ndarray, torch.Tensor],\n+ condition: Union[TRTTensor, np.ndarray, torch.Tensor],\n ) -> TRTTensor:\n if not (broadcastable(input, other)):\n assert \"The two torch tensors should be broadcastable\"\n \n- # get output shape\n- # purpose of this is to bring input and other rank same as\n- # output_shape to input it to the add_expand operation\n- # condition will have dimension of either input or other\n- input, other = broadcast(ctx.net, input, other, f\"{name}_x\", f\"{name}_y\")\n- if len(tuple(condition.shape)) != len(tuple(input.shape)):\n- condition, input = broadcast(\n- ctx.net, condition, input, f\"{name}_condition\", f\"{name}_x\"\n- )\n-\n x_shape = list(input.shape)\n y_shape = list(other.shape)\n condition_shape = list(condition.shape)\n@@ -71,7 +61,11 @@\n if isinstance(input, torch.Tensor)\n else np.expand_dims(input, axis=0)\n )\n- input = input.expand(output_shape)\n+ input = (\n+ input.expand(output_shape)\n+ if isinstance(input, torch.Tensor)\n+ else np.broadcast_to(input, output_shape)\n+ )\n x_val = get_trt_tensor(ctx, input, f\"{name}_x\")\n else:\n x_val = input\n@@ -89,7 +83,11 @@\n if isinstance(other, torch.Tensor)\n else np.expand_dims(other, axis=0)\n )\n- other = other.expand(output_shape)\n+ other = (\n+ other.expand(output_shape)\n+ if isinstance(other, torch.Tensor)\n+ else np.broadcast_to(other, output_shape)\n+ )\n y_val = get_trt_tensor(ctx, other, f\"{name}_y\")\n else:\n y_val = other\n", "issue": "\ud83d\udc1b [Bug] Bug in `aten.where` converter for Numpy array inputs\n## Bug Description\r\n- When applying converter to Numpy arrays or constants, the compilation fails due to use of the `expand` operator, which only applies to Torch tensors\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport numpy as np\nimport tensorrt as trt\nimport torch\nfrom torch.fx.node import Target\nfrom torch_tensorrt.dynamo._SourceIR import SourceIR\nfrom torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext\nfrom torch_tensorrt.dynamo.conversion.converter_utils import (\n broadcastable,\n get_trt_tensor,\n)\nfrom torch_tensorrt.dynamo.conversion.impl.slice import expand\nfrom torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name\nfrom torch_tensorrt.fx.types import TRTTensor\n\n\ndef where(\n ctx: ConversionContext,\n target: Target,\n source_ir: Optional[SourceIR],\n name: str,\n input: TRTTensor,\n other: TRTTensor,\n condition: TRTTensor,\n) -> TRTTensor:\n if not (broadcastable(input, other)):\n assert \"The two torch tensors should be broadcastable\"\n\n 
# get output shape\n # purpose of this is to bring input and other rank same as\n # output_shape to input it to the add_expand operation\n # condition will have dimension of either input or other\n input, other = broadcast(ctx.net, input, other, f\"{name}_x\", f\"{name}_y\")\n if len(tuple(condition.shape)) != len(tuple(input.shape)):\n condition, input = broadcast(\n ctx.net, condition, input, f\"{name}_condition\", f\"{name}_x\"\n )\n\n x_shape = list(input.shape)\n y_shape = list(other.shape)\n condition_shape = list(condition.shape)\n\n output_shape = list(torch.broadcast_shapes(condition_shape, x_shape, y_shape))\n\n # expand shape\n if not isinstance(condition, TRTTensor):\n assert condition.dtype in (torch.bool, np.bool_), \"condition dtype is not bool\"\n if condition_shape != output_shape:\n condition = (\n condition.expand(output_shape)\n if isinstance(condition, torch.Tensor)\n else np.broadcast_to(condition, output_shape)\n )\n condition_val = get_trt_tensor(ctx, condition, f\"{name}_condition\")\n else:\n assert condition.dtype == trt.bool, \"mask dtype is not bool!\"\n if condition_shape != output_shape:\n condition_val = expand(\n ctx, target, source_ir, f\"{name}_expand\", condition, output_shape\n )\n else:\n condition_val = condition\n\n if not isinstance(input, TRTTensor):\n if x_shape != output_shape:\n # special case where 1 element in input\n if len(input.shape) == 0:\n input = (\n input.unsqueeze(0)\n if isinstance(input, torch.Tensor)\n else np.expand_dims(input, axis=0)\n )\n input = input.expand(output_shape)\n x_val = get_trt_tensor(ctx, input, f\"{name}_x\")\n else:\n x_val = input\n if x_shape != output_shape:\n x_val = expand(\n ctx, target, source_ir, f\"{name}_x_expand\", input, output_shape\n )\n\n if not isinstance(other, TRTTensor):\n if y_shape != output_shape:\n # special case where 1 element in other\n if len(other.shape) == 0:\n other = (\n other.unsqueeze(0)\n if isinstance(other, torch.Tensor)\n else np.expand_dims(other, axis=0)\n )\n other = other.expand(output_shape)\n y_val = get_trt_tensor(ctx, other, f\"{name}_y\")\n else:\n y_val = other\n if y_shape != output_shape:\n y_val = expand(\n ctx, target, source_ir, f\"{name}_y_expand\", y_val, output_shape\n )\n\n select_layer = ctx.net.add_select(condition_val, x_val, y_val)\n\n set_layer_name(select_layer, target, f\"{name}_select\")\n\n return select_layer.get_output(0)\n", "path": "py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py"}]} | 1,660 | 678 |
gh_patches_debug_3522 | rasdani/github-patches | git_diff | pytorch__TensorRT-1953 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
✨[Converter] Implement aten::addmm
Torch op:
func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
Aten op: torch.ops.addmm.default
</issue>
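For reference, `addmm` computes `beta * self + alpha * (mat1 @ mat2)`, so it can be lowered onto existing mul/matmul/add support via a decomposition. A small sketch of that algebra in plain PyTorch (illustrative only, not the converter or decomposition registration itself):

```python
import torch

def addmm_reference(input_, mat1, mat2, *, beta=1, alpha=1):
    # beta * input_ + alpha * (mat1 @ mat2), spelled out with elementwise ops
    return torch.add(torch.mul(input_, beta), torch.mul(torch.matmul(mat1, mat2), alpha))
```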
<code>
[start of py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py]
1 import torch
2 from torch._decomp import register_decomposition, core_aten_decompositions
3
4
5 DECOMPOSITIONS = {**core_aten_decompositions()}
6
7 aten = torch.ops.aten
8
9
10 def replace_inplace_op(aten_op, outplace_op):
11 """Replace inplace operation with functional equivalent
12 Adapted from:
13 https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361
14 """
15
16 @register_decomposition(aten_op, registry=DECOMPOSITIONS)
17 def inplace_op(*args, **kwargs):
18 out = outplace_op(*args, **kwargs)
19 return args[0].copy_(out)
20
21 return inplace_op
22
23
24 replace_inplace_op(aten.add_, aten.add)
25 replace_inplace_op(aten.addbmm_, aten.addbmm)
26 replace_inplace_op(aten.addmm_, aten.addmm)
27 replace_inplace_op(aten.addmv_, aten.addmv)
28 replace_inplace_op(aten.baddbmm_, aten.baddbmm)
29 replace_inplace_op(aten.cumprod_, aten.cumprod)
30 replace_inplace_op(aten.fill_, aten.fill)
31 replace_inplace_op(aten.gelu_, aten.gelu)
32 replace_inplace_op(aten.hardsigmoid_, aten.hardsigmoid)
33 replace_inplace_op(aten.index_put_, aten.index_put)
34 replace_inplace_op(aten.index_reduce_, aten.index_reduce)
35 replace_inplace_op(aten.logit_, aten.logit)
36 replace_inplace_op(aten.relu_, aten.relu)
37 replace_inplace_op(aten.renorm_, aten.renorm)
38 replace_inplace_op(aten.round_, aten.round)
39 replace_inplace_op(aten.scatter_, aten.scatter)
40 replace_inplace_op(aten.scatter_add_, aten.scatter_add)
41 replace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)
42
43
44 @register_decomposition(aten.std, registry=DECOMPOSITIONS)
45 def std_replacement(*args, **kwargs) -> torch.Tensor:
46 return torch.sqrt(torch.var(*args, **kwargs))
47
48
49 @register_decomposition(aten.rsqrt, registry=DECOMPOSITIONS)
50 def rsqrt_replacement(*args, **kwargs) -> torch.Tensor:
51 return torch.reciprocal(torch.sqrt(*args, **kwargs))
52
53
54 @register_decomposition(aten.alias, registry=DECOMPOSITIONS)
55 def alias_replacement(x: torch.Tensor) -> torch.Tensor:
56 return x
57
58
59 def get_decompositions():
60 return DECOMPOSITIONS
61
[end of py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
--- a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
+++ b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
@@ -56,5 +56,14 @@
return x
+@register_decomposition(torch.ops.aten.addmm, registry=DECOMPOSITIONS)
+def addmm_replacement(
+ input_: torch.Tensor, mat1: torch.Tensor, mat2: torch.Tensor, *, beta=1, alpha=1
+) -> torch.Tensor:
+ return torch.add(
+ torch.mul(input_, beta), torch.mul(torch.matmul(mat1, mat2), alpha)
+ )
+
+
def get_decompositions():
return DECOMPOSITIONS
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py\n--- a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py\n+++ b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py\n@@ -56,5 +56,14 @@\n return x\n \n \n+@register_decomposition(torch.ops.aten.addmm, registry=DECOMPOSITIONS)\n+def addmm_replacement(\n+ input_: torch.Tensor, mat1: torch.Tensor, mat2: torch.Tensor, *, beta=1, alpha=1\n+) -> torch.Tensor:\n+ return torch.add(\n+ torch.mul(input_, beta), torch.mul(torch.matmul(mat1, mat2), alpha)\n+ )\n+\n+\n def get_decompositions():\n return DECOMPOSITIONS\n", "issue": "\u2728[Converter] Implement aten::addmm\nTorch op:\r\nfunc: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor\r\nAten op: torch.ops.addmm.default\n", "before_files": [{"content": "import torch\nfrom torch._decomp import register_decomposition, core_aten_decompositions\n\n\nDECOMPOSITIONS = {**core_aten_decompositions()}\n\naten = torch.ops.aten\n\n\ndef replace_inplace_op(aten_op, outplace_op):\n \"\"\"Replace inplace operation with functional equivalent\n Adapted from:\n https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361\n \"\"\"\n\n @register_decomposition(aten_op, registry=DECOMPOSITIONS)\n def inplace_op(*args, **kwargs):\n out = outplace_op(*args, **kwargs)\n return args[0].copy_(out)\n\n return inplace_op\n\n\nreplace_inplace_op(aten.add_, aten.add)\nreplace_inplace_op(aten.addbmm_, aten.addbmm)\nreplace_inplace_op(aten.addmm_, aten.addmm)\nreplace_inplace_op(aten.addmv_, aten.addmv)\nreplace_inplace_op(aten.baddbmm_, aten.baddbmm)\nreplace_inplace_op(aten.cumprod_, aten.cumprod)\nreplace_inplace_op(aten.fill_, aten.fill)\nreplace_inplace_op(aten.gelu_, aten.gelu)\nreplace_inplace_op(aten.hardsigmoid_, aten.hardsigmoid)\nreplace_inplace_op(aten.index_put_, aten.index_put)\nreplace_inplace_op(aten.index_reduce_, aten.index_reduce)\nreplace_inplace_op(aten.logit_, aten.logit)\nreplace_inplace_op(aten.relu_, aten.relu)\nreplace_inplace_op(aten.renorm_, aten.renorm)\nreplace_inplace_op(aten.round_, aten.round)\nreplace_inplace_op(aten.scatter_, aten.scatter)\nreplace_inplace_op(aten.scatter_add_, aten.scatter_add)\nreplace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)\n\n\n@register_decomposition(aten.std, registry=DECOMPOSITIONS)\ndef std_replacement(*args, **kwargs) -> torch.Tensor:\n return torch.sqrt(torch.var(*args, **kwargs))\n\n\n@register_decomposition(aten.rsqrt, registry=DECOMPOSITIONS)\ndef rsqrt_replacement(*args, **kwargs) -> torch.Tensor:\n return torch.reciprocal(torch.sqrt(*args, **kwargs))\n\n\n@register_decomposition(aten.alias, registry=DECOMPOSITIONS)\ndef alias_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\ndef get_decompositions():\n return DECOMPOSITIONS\n", "path": "py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py"}]} | 1,315 | 194 |
gh_patches_debug_26328 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1020 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Run tests on windows and OSX
Part of the reason we wanted tests with remote databases was to be able to test mindsdb on windows and OSX.
This is currently being done for native but not for mindsdb. Current issues that stop us from testing on OSX and Windows:
* `psutil.net_connections` requires root privileges on osx/windows
* `ssh` command will fail on windows
* ???
</issue>
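On the first bullet: `psutil.net_connections()` needs elevated privileges on macOS/Windows, but iterating processes and asking each one for its own connections usually does not, as long as permission errors are skipped. A rough sketch of that workaround, with names chosen here purely for illustration:

```python
import psutil

def listening_ports():
    """Collect LISTEN ports per process, avoiding the privileged net_connections() call."""
    ports = []
    for proc in psutil.process_iter(['pid']):
        try:
            for conn in psutil.Process(proc.pid).connections():
                if conn.status == 'LISTEN':
                    ports.append(conn.laddr.port)
        except (psutil.AccessDenied, psutil.NoSuchProcess, psutil.ZombieProcess):
            pass
    return sorted(ports)
```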
<code>
[start of mindsdb/utilities/ps.py]
1 import psutil
2 import time
3
4
5 def is_port_in_use(port_num):
6 portsinuse = []
7 conns = psutil.net_connections()
8 portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
9 portsinuse.sort()
10 return int(port_num) in portsinuse
11
12
13 def wait_func_is_true(func, timeout, *args, **kwargs):
14 start_time = time.time()
15
16 result = func(*args, **kwargs)
17 while result is False and (time.time() - start_time) < timeout:
18 time.sleep(2)
19 result = func(*args, **kwargs)
20
21 return result
22
23
24 def wait_port(port_num, timeout):
25 return wait_func_is_true(func=is_port_in_use, timeout=timeout, port_num=port_num)
26
27
28 def get_listen_ports(pid):
29 try:
30 p = psutil.Process(pid)
31 cons = p.connections()
32 cons = [x.laddr.port for x in cons]
33 except Exception:
34 return []
35 return cons
36
37
38 def is_pid_listen_port(pid, port):
39 ports = get_listen_ports(pid)
40 return int(port) in ports
41
[end of mindsdb/utilities/ps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/utilities/ps.py b/mindsdb/utilities/ps.py
--- a/mindsdb/utilities/ps.py
+++ b/mindsdb/utilities/ps.py
@@ -1,11 +1,44 @@
-import psutil
+import sys
import time
+from collections import namedtuple
+import psutil
+
+
+def net_connections():
+ """Cross-platform psutil.net_connections like interface"""
+ if sys.platform.lower().startswith('linux'):
+ return psutil.net_connections()
+
+ all_connections = []
+ Pconn = None
+ for p in psutil.process_iter(['pid']):
+ try:
+ process = psutil.Process(p.pid)
+ connections = process.connections()
+ if connections:
+ for conn in connections:
+ # Adding pid to the returned instance
+ # for consistency with psutil.net_connections()
+ if Pconn is None:
+ fields = list(conn._fields)
+ fields.append('pid')
+ _conn = namedtuple('Pconn', fields)
+ for attr in conn._fields:
+ setattr(_conn, attr, getattr(conn, attr))
+ _conn.pid = p.pid
+ all_connections.append(_conn)
+
+ except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):
+ pass
+ return all_connections
def is_port_in_use(port_num):
- portsinuse = []
- conns = psutil.net_connections()
- portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
+ """Check does any of child process uses specified port."""
+ parent_process = psutil.Process()
+ child_pids = [x.pid for x in parent_process.children(recursive=True)]
+ conns = net_connections()
+ portsinuse = [x.laddr[1] for x in conns if x.pid in child_pids and x.status == 'LISTEN']
portsinuse.sort()
return int(port_num) in portsinuse
| {"golden_diff": "diff --git a/mindsdb/utilities/ps.py b/mindsdb/utilities/ps.py\n--- a/mindsdb/utilities/ps.py\n+++ b/mindsdb/utilities/ps.py\n@@ -1,11 +1,44 @@\n-import psutil\n+import sys\n import time\n+from collections import namedtuple\n+import psutil\n+\n+\n+def net_connections():\n+ \"\"\"Cross-platform psutil.net_connections like interface\"\"\"\n+ if sys.platform.lower().startswith('linux'):\n+ return psutil.net_connections()\n+\n+ all_connections = []\n+ Pconn = None\n+ for p in psutil.process_iter(['pid']):\n+ try:\n+ process = psutil.Process(p.pid)\n+ connections = process.connections()\n+ if connections:\n+ for conn in connections:\n+ # Adding pid to the returned instance\n+ # for consistency with psutil.net_connections()\n+ if Pconn is None:\n+ fields = list(conn._fields)\n+ fields.append('pid')\n+ _conn = namedtuple('Pconn', fields)\n+ for attr in conn._fields:\n+ setattr(_conn, attr, getattr(conn, attr))\n+ _conn.pid = p.pid\n+ all_connections.append(_conn)\n+\n+ except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):\n+ pass\n+ return all_connections\n \n \n def is_port_in_use(port_num):\n- portsinuse = []\n- conns = psutil.net_connections()\n- portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']\n+ \"\"\"Check does any of child process uses specified port.\"\"\"\n+ parent_process = psutil.Process()\n+ child_pids = [x.pid for x in parent_process.children(recursive=True)]\n+ conns = net_connections()\n+ portsinuse = [x.laddr[1] for x in conns if x.pid in child_pids and x.status == 'LISTEN']\n portsinuse.sort()\n return int(port_num) in portsinuse\n", "issue": "Run tests on windows and OSX\nPart of the reason we wanted tests with remote databases was to be able to test mindsdb on windows and OSX.\r\n\r\nThis is currently being done for native but not for mindsdb, current issues that stop us from testing on OSX and windows:\r\n\r\n* `psutil.net_connections` requires root privileges on osx/windows\r\n* `ssh` command will fail on windows\r\n* ??? \n", "before_files": [{"content": "import psutil\nimport time\n\n\ndef is_port_in_use(port_num):\n portsinuse = []\n conns = psutil.net_connections()\n portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']\n portsinuse.sort()\n return int(port_num) in portsinuse\n\n\ndef wait_func_is_true(func, timeout, *args, **kwargs):\n start_time = time.time()\n\n result = func(*args, **kwargs)\n while result is False and (time.time() - start_time) < timeout:\n time.sleep(2)\n result = func(*args, **kwargs)\n\n return result\n\n\ndef wait_port(port_num, timeout):\n return wait_func_is_true(func=is_port_in_use, timeout=timeout, port_num=port_num)\n\n\ndef get_listen_ports(pid):\n try:\n p = psutil.Process(pid)\n cons = p.connections()\n cons = [x.laddr.port for x in cons]\n except Exception:\n return []\n return cons\n\n\ndef is_pid_listen_port(pid, port):\n ports = get_listen_ports(pid)\n return int(port) in ports\n", "path": "mindsdb/utilities/ps.py"}]} | 952 | 454 |
gh_patches_debug_29878 | rasdani/github-patches | git_diff | translate__pootle-4260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable sorting by contribution in contributor command
Currently contributors are sorted in alphabetical order. This is great for crediting. But it would be more helpful to allow sorting by contribution in cases where you want to use the list to make other decisions around the amount of contribution.
Thus add `--sort-by-contribution` and `--sort-by-name` options.
</issue>
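Since the per-user counts already live in a `collections.Counter`, the two orderings fall out almost for free: `sorted()` for the alphabetical listing and `Counter.most_common()` for contribution order. A tiny illustration with made-up data:

```python
from collections import Counter

contribs = Counter({"alice": 12, "bob": 30, "carol": 7})

by_name = sorted(contribs.items())        # [('alice', 12), ('bob', 30), ('carol', 7)]
by_contribution = contribs.most_common()  # [('bob', 30), ('alice', 12), ('carol', 7)]
```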
<code>
[start of pootle/apps/pootle_app/management/commands/contributors.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import os
11 from collections import Counter
12 from optparse import make_option
13
14 os.environ["DJANGO_SETTINGS_MODULE"] = "pootle.settings"
15
16 from django.contrib.auth import get_user_model
17
18 from pootle_store.models import Unit
19
20 from . import PootleCommand
21
22
23 User = get_user_model()
24
25
26 class Command(PootleCommand):
27 option_list = PootleCommand.option_list + (
28 make_option(
29 "--from-revision",
30 type=int,
31 default=0,
32 dest="revision",
33 help="Only count contributions newer than this revision",
34 ),
35 )
36
37 help = "Print a list of contributors."
38
39 def handle_all(self, **options):
40 system_user = User.objects.get_system_user()
41 units = Unit.objects.exclude(submitted_by=system_user) \
42 .exclude(submitted_by=None)
43
44 if options["revision"]:
45 units = units.filter(revision__gte=options["revision"])
46
47 if self.projects:
48 units = units.filter(
49 store__translation_project__project__code__in=self.projects,
50 )
51
52 if self.languages:
53 units = units.filter(
54 store__translation_project__language__code__in=self.languages,
55 )
56
57 contribs = Counter()
58 for v in units.values("submitted_by"):
59 contribs.update((v["submitted_by"], ))
60
61 self.list_contributions(contribs)
62
63 def list_contributions(self, contribs):
64 out = []
65 for id, count in contribs.items():
66 user = User.objects.get(id=id)
67 name = user.display_name
68 if user.email:
69 name += " <%s>" % (user.email)
70 out.append("%s (%i contributions)" % (name, count))
71
72 # Sort users alphabetically
73 for line in sorted(out):
74 self.stdout.write(line)
75
[end of pootle/apps/pootle_app/management/commands/contributors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/management/commands/contributors.py b/pootle/apps/pootle_app/management/commands/contributors.py
--- a/pootle/apps/pootle_app/management/commands/contributors.py
+++ b/pootle/apps/pootle_app/management/commands/contributors.py
@@ -32,6 +32,15 @@
dest="revision",
help="Only count contributions newer than this revision",
),
+ make_option(
+ "--sort-by",
+ type="choice",
+ default="name",
+ choices=["name", "contributions"],
+ dest="sort_by",
+ help="Sort by specified item. Accepts name and contributions. "
+ "Default: %default",
+ ),
)
help = "Print a list of contributors."
@@ -58,17 +67,25 @@
for v in units.values("submitted_by"):
contribs.update((v["submitted_by"], ))
- self.list_contributions(contribs)
+ self.list_contributions(contribs, options["sort_by"])
+
+ def list_contributions(self, contribs, sort_by):
+ if sort_by == "name":
+ contributions = contribs.items()
+ else:
+ contributions = contribs.most_common()
- def list_contributions(self, contribs):
out = []
- for id, count in contribs.items():
+ for id, count in contributions:
user = User.objects.get(id=id)
name = user.display_name
if user.email:
name += " <%s>" % (user.email)
out.append("%s (%i contributions)" % (name, count))
- # Sort users alphabetically
- for line in sorted(out):
+ if sort_by == "name":
+ # Sort users alphabetically
+ out = sorted(out)
+
+ for line in out:
self.stdout.write(line)
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/contributors.py b/pootle/apps/pootle_app/management/commands/contributors.py\n--- a/pootle/apps/pootle_app/management/commands/contributors.py\n+++ b/pootle/apps/pootle_app/management/commands/contributors.py\n@@ -32,6 +32,15 @@\n dest=\"revision\",\n help=\"Only count contributions newer than this revision\",\n ),\n+ make_option(\n+ \"--sort-by\",\n+ type=\"choice\",\n+ default=\"name\",\n+ choices=[\"name\", \"contributions\"],\n+ dest=\"sort_by\",\n+ help=\"Sort by specified item. Accepts name and contributions. \"\n+ \"Default: %default\",\n+ ),\n )\n \n help = \"Print a list of contributors.\"\n@@ -58,17 +67,25 @@\n for v in units.values(\"submitted_by\"):\n contribs.update((v[\"submitted_by\"], ))\n \n- self.list_contributions(contribs)\n+ self.list_contributions(contribs, options[\"sort_by\"])\n+\n+ def list_contributions(self, contribs, sort_by):\n+ if sort_by == \"name\":\n+ contributions = contribs.items()\n+ else:\n+ contributions = contribs.most_common()\n \n- def list_contributions(self, contribs):\n out = []\n- for id, count in contribs.items():\n+ for id, count in contributions:\n user = User.objects.get(id=id)\n name = user.display_name\n if user.email:\n name += \" <%s>\" % (user.email)\n out.append(\"%s (%i contributions)\" % (name, count))\n \n- # Sort users alphabetically\n- for line in sorted(out):\n+ if sort_by == \"name\":\n+ # Sort users alphabetically\n+ out = sorted(out)\n+\n+ for line in out:\n self.stdout.write(line)\n", "issue": "Enable sorting by contribution in contributor command\nCurrently contributors are sorted in alphabetical order. This is great for crediting. But it would be more helpful to allow sorting by contribution in cases where you want to use to list to make other decisions around the amount of contribution.\n\nThus add `--sort-by-contribution` and `--sort-by-name` options.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nfrom collections import Counter\nfrom optparse import make_option\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"pootle.settings\"\n\nfrom django.contrib.auth import get_user_model\n\nfrom pootle_store.models import Unit\n\nfrom . 
import PootleCommand\n\n\nUser = get_user_model()\n\n\nclass Command(PootleCommand):\n option_list = PootleCommand.option_list + (\n make_option(\n \"--from-revision\",\n type=int,\n default=0,\n dest=\"revision\",\n help=\"Only count contributions newer than this revision\",\n ),\n )\n\n help = \"Print a list of contributors.\"\n\n def handle_all(self, **options):\n system_user = User.objects.get_system_user()\n units = Unit.objects.exclude(submitted_by=system_user) \\\n .exclude(submitted_by=None)\n\n if options[\"revision\"]:\n units = units.filter(revision__gte=options[\"revision\"])\n\n if self.projects:\n units = units.filter(\n store__translation_project__project__code__in=self.projects,\n )\n\n if self.languages:\n units = units.filter(\n store__translation_project__language__code__in=self.languages,\n )\n\n contribs = Counter()\n for v in units.values(\"submitted_by\"):\n contribs.update((v[\"submitted_by\"], ))\n\n self.list_contributions(contribs)\n\n def list_contributions(self, contribs):\n out = []\n for id, count in contribs.items():\n user = User.objects.get(id=id)\n name = user.display_name\n if user.email:\n name += \" <%s>\" % (user.email)\n out.append(\"%s (%i contributions)\" % (name, count))\n\n # Sort users alphabetically\n for line in sorted(out):\n self.stdout.write(line)\n", "path": "pootle/apps/pootle_app/management/commands/contributors.py"}]} | 1,238 | 434 |
gh_patches_debug_21888 | rasdani/github-patches | git_diff | wagtail__wagtail-651 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"wagtail start project_name" fails to run on Windows 7
Hi. Everything compiled perfectly inside the virtualenv, and I'm trying to start a new project.
```
$ wagtail start wagtailtest
Creating a wagtail project called wagtailtest
Traceback (most recent call last):
File "d:\VirtualEnvs\wagtail_env\Scripts\wagtail-script.py", line 9, in <module>
load_entry_point('wagtail==0.6', 'console_scripts', 'wagtail')()
File "d:\VirtualEnvs\wagtail_env\lib\site-packages\wagtail\bin\wagtail.py", line 75, in main
COMMANDS[command](parser, options, args)
File "d:\VirtualEnvs\wagtail_env\lib\site-packages\wagtail\bin\wagtail.py", line 51, in create_project
project_name
File "C:\Python27\Lib\subprocess.py", line 522, in call
return Popen(*popenargs, **kwargs).wait()
File "C:\Python27\Lib\subprocess.py", line 710, in __init__
errread, errwrite)
File "C:\Python27\Lib\subprocess.py", line 958, in _execute_child
startupinfo)
WindowsError: [Error 193] %1 is not a valid Win32 application
```
Windows 7 x64, Python 2.7 x32.
</issue>
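The traceback shows `subprocess` trying to execute `django-admin.py` directly, which Windows refuses because a `.py` file is not a Win32 executable. Invoking Django's management machinery in the same process avoids spawning anything; a hedged sketch, where the template path and project name are placeholders:

```python
from django.core.management import ManagementUtility

# Run "startproject" in-process; no shell, so no WindowsError about executing a .py file.
utility = ManagementUtility([
    'django-admin.py', 'startproject',
    '--template=/path/to/wagtail/project_template',  # illustrative path
    '--name=Vagrantfile', '--ext=html,rst',
    'myproject',
])
utility.execute()
```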
<code>
[start of wagtail/bin/wagtail.py]
1 #!/usr/bin/env python
2 from __future__ import print_function, absolute_import
3
4 import os
5 import subprocess
6 import errno
7 import sys
8
9 from optparse import OptionParser
10
11
12 def create_project(parser, options, args):
13 # Validate args
14 if len(args) < 2:
15 parser.error("Please specify a name for your wagtail installation")
16 elif len(args) > 2:
17 parser.error("Too many arguments")
18
19 project_name = args[1]
20
21 # Make sure given name is not already in use by another python package/module.
22 try:
23 __import__(project_name)
24 except ImportError:
25 pass
26 else:
27 parser.error("'%s' conflicts with the name of an existing "
28 "Python module and cannot be used as a project "
29 "name. Please try another name." % project_name)
30
31 # Make sure directory does not already exist
32 if os.path.exists(project_name):
33 print('A directory called %(project_name)s already exists. \
34 Please choose another name for your wagtail project or remove the existing directory.' % {'project_name': project_name})
35 sys.exit(errno.EEXIST)
36
37 print("Creating a wagtail project called %(project_name)s" % {'project_name': project_name})
38
39 # Create the project from the wagtail template using startapp
40
41 # First find the path to wagtail
42 import wagtail
43 wagtail_path = os.path.dirname(wagtail.__file__)
44 template_path = os.path.join(wagtail_path, 'project_template')
45
46 # Call django-admin startproject
47 result = subprocess.call([
48 'django-admin.py', 'startproject',
49 '--template=' + template_path,
50 '--name=Vagrantfile', '--ext=html,rst',
51 project_name
52 ])
53
54 if result == 0:
55 print("Success! %(project_name)s is created" % {'project_name': project_name})
56
57
58 COMMANDS = {
59 'start': create_project,
60 }
61
62 def main():
63 # Parse options
64 parser = OptionParser(usage="Usage: %prog start project_name")
65 (options, args) = parser.parse_args()
66
67 # Find command
68 try:
69 command = args[0]
70 except IndexError:
71 parser.print_help()
72 return
73
74 if command in COMMANDS:
75 COMMANDS[command](parser, options, args)
76 else:
77 parser.error("Unrecognised command: " + command)
78
79 if __name__ == "__main__":
80 main()
81
[end of wagtail/bin/wagtail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/bin/wagtail.py b/wagtail/bin/wagtail.py
--- a/wagtail/bin/wagtail.py
+++ b/wagtail/bin/wagtail.py
@@ -2,11 +2,11 @@
from __future__ import print_function, absolute_import
import os
-import subprocess
import errno
import sys
from optparse import OptionParser
+from django.core.management import ManagementUtility
def create_project(parser, options, args):
@@ -44,15 +44,15 @@
template_path = os.path.join(wagtail_path, 'project_template')
# Call django-admin startproject
- result = subprocess.call([
+ utility = ManagementUtility([
'django-admin.py', 'startproject',
'--template=' + template_path,
'--name=Vagrantfile', '--ext=html,rst',
project_name
])
+ utility.execute()
- if result == 0:
- print("Success! %(project_name)s is created" % {'project_name': project_name})
+ print("Success! %(project_name)s is created" % {'project_name': project_name})
COMMANDS = {
| {"golden_diff": "diff --git a/wagtail/bin/wagtail.py b/wagtail/bin/wagtail.py\n--- a/wagtail/bin/wagtail.py\n+++ b/wagtail/bin/wagtail.py\n@@ -2,11 +2,11 @@\n from __future__ import print_function, absolute_import\n \n import os\n-import subprocess\n import errno\n import sys\n \n from optparse import OptionParser\n+from django.core.management import ManagementUtility\n \n \n def create_project(parser, options, args):\n@@ -44,15 +44,15 @@\n template_path = os.path.join(wagtail_path, 'project_template')\n \n # Call django-admin startproject\n- result = subprocess.call([\n+ utility = ManagementUtility([\n 'django-admin.py', 'startproject',\n '--template=' + template_path,\n '--name=Vagrantfile', '--ext=html,rst',\n project_name\n ])\n+ utility.execute()\n \n- if result == 0:\n- print(\"Success! %(project_name)s is created\" % {'project_name': project_name})\n+ print(\"Success! %(project_name)s is created\" % {'project_name': project_name})\n \n \n COMMANDS = {\n", "issue": "\"wagtail start project_name\" fails to run on Windows 7\nHi. So everything is compiled perfectly inside the virtualenv and I'm trying to start a new project.\n\n```\n$ wagtail start wagtailtest\nCreating a wagtail project called wagtailtest\nTraceback (most recent call last):\n File \"d:\\VirtualEnvs\\wagtail_env\\Scripts\\wagtail-script.py\", line 9, in <module>\n load_entry_point('wagtail==0.6', 'console_scripts', 'wagtail')()\n File \"d:\\VirtualEnvs\\wagtail_env\\lib\\site-packages\\wagtail\\bin\\wagtail.py\", line 75, in main\n COMMANDS[command](parser, options, args)\n File \"d:\\VirtualEnvs\\wagtail_env\\lib\\site-packages\\wagtail\\bin\\wagtail.py\", line 51, in create_project\n project_name\n File \"C:\\Python27\\Lib\\subprocess.py\", line 522, in call\n return Popen(*popenargs, **kwargs).wait()\n File \"C:\\Python27\\Lib\\subprocess.py\", line 710, in __init__\n errread, errwrite)\n File \"C:\\Python27\\Lib\\subprocess.py\", line 958, in _execute_child\n startupinfo)\nWindowsError: [Error 193] %1 is not a valid Win32 application\n```\n\nWindows 7 x64, Python 2.7 x32.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport subprocess\nimport errno\nimport sys\n\nfrom optparse import OptionParser\n\n\ndef create_project(parser, options, args):\n # Validate args\n if len(args) < 2:\n parser.error(\"Please specify a name for your wagtail installation\")\n elif len(args) > 2:\n parser.error(\"Too many arguments\")\n\n project_name = args[1]\n\n # Make sure given name is not already in use by another python package/module.\n try:\n __import__(project_name)\n except ImportError:\n pass\n else:\n parser.error(\"'%s' conflicts with the name of an existing \"\n \"Python module and cannot be used as a project \"\n \"name. Please try another name.\" % project_name)\n\n # Make sure directory does not already exist\n if os.path.exists(project_name):\n print('A directory called %(project_name)s already exists. \\\n Please choose another name for your wagtail project or remove the existing directory.' 
% {'project_name': project_name})\n sys.exit(errno.EEXIST)\n\n print(\"Creating a wagtail project called %(project_name)s\" % {'project_name': project_name})\n\n # Create the project from the wagtail template using startapp\n\n # First find the path to wagtail\n import wagtail\n wagtail_path = os.path.dirname(wagtail.__file__)\n template_path = os.path.join(wagtail_path, 'project_template')\n\n # Call django-admin startproject\n result = subprocess.call([\n 'django-admin.py', 'startproject',\n '--template=' + template_path,\n '--name=Vagrantfile', '--ext=html,rst',\n project_name\n ])\n\n if result == 0:\n print(\"Success! %(project_name)s is created\" % {'project_name': project_name})\n\n\nCOMMANDS = {\n 'start': create_project,\n}\n\ndef main():\n # Parse options\n parser = OptionParser(usage=\"Usage: %prog start project_name\")\n (options, args) = parser.parse_args()\n\n # Find command\n try:\n command = args[0]\n except IndexError:\n parser.print_help()\n return\n\n if command in COMMANDS:\n COMMANDS[command](parser, options, args)\n else:\n parser.error(\"Unrecognised command: \" + command)\n\nif __name__ == \"__main__\":\n main()\n", "path": "wagtail/bin/wagtail.py"}]} | 1,577 | 259 |
gh_patches_debug_30556 | rasdani/github-patches | git_diff | mdn__kuma-6929 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
T - the API to support subscription management
The backend pieces needed for https://github.com/mdn/kuma/issues/6703
That way, we can Reactify the subscription management page.
What we need is two endpoints:
1. Getting your subscriptions (plural but it's probably never more than 1)
2. Cancel a subscription (or cancel them all if that's easier)
</issue>
<code>
[start of kuma/users/stripe_utils.py]
1 from datetime import datetime
2
3 import stripe
4 from django.conf import settings
5 from django.utils import timezone
6
7 from kuma.core.urlresolvers import reverse
8 from kuma.wiki.templatetags.jinja_helpers import absolutify
9
10 from .models import UserSubscription
11
12
13 def retrieve_stripe_subscription(customer):
14 for subscription in customer.subscriptions.list().auto_paging_iter():
15 # We have to use array indexing syntax, as stripe uses dicts to
16 # represent its objects (dicts come with an .items method)
17 for item in subscription["items"].auto_paging_iter():
18 if item.plan.id == settings.STRIPE_PLAN_ID:
19 return subscription
20
21 return None
22
23
24 def retrieve_and_synchronize_subscription_info(user):
25 """For the given user, if it has as 'stripe_customer_id' retrieve the info
26 about the subscription if it's there. All packaged in a way that is
27 practical for the stripe_subscription.html template.
28
29 Also, whilst doing this check, we also verify that the UserSubscription record
30 for this user is right. Doing that check is a second-layer check in case
31 our webhooks have failed us.
32 """
33 subscription_info = None
34 stripe_customer = get_stripe_customer(user)
35 if stripe_customer:
36 stripe_subscription_info = get_stripe_subscription_info(stripe_customer)
37 if stripe_subscription_info:
38 source = stripe_customer.default_source
39 if source.object == "card":
40 card = source
41 elif source.object == "source":
42 card = source.card
43 else:
44 raise ValueError(
45 f"unexpected stripe customer default_source of type {source.object!r}"
46 )
47
48 subscription_info = {
49 "id": stripe_subscription_info.id,
50 "amount": stripe_subscription_info.plan.amount,
51 "brand": card.brand,
52 "expires_at": f"{card.exp_month}/{card.exp_year}",
53 "last4": card.last4,
54 # Cards that are part of a "source" don't have a zip
55 "zip": card.get("address_zip", None),
56 # TODO: Deprecated. Only used in the Edit Profile view
57 "next_payment_at": datetime.fromtimestamp(
58 stripe_subscription_info.current_period_end
59 ),
60 }
61
62 # To perfect the synchronization, take this opportunity to make sure
63 # we have an up-to-date record of this.
64 UserSubscription.set_active(user, stripe_subscription_info.id)
65 else:
66 # The user has a stripe_customer_id but no active subscription
67 # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled
68 # and not updated in our own records.
69 for user_subscription in UserSubscription.objects.filter(
70 user=user, canceled__isnull=True
71 ):
72 user_subscription.canceled = timezone.now()
73 user_subscription.save()
74
75 return subscription_info
76
77
78 def create_stripe_customer_and_subscription_for_user(user, email, stripe_token):
79 customer = (
80 stripe.Customer.retrieve(user.stripe_customer_id)
81 if user.stripe_customer_id
82 else None
83 )
84 if not customer or customer.email != email:
85 customer = stripe.Customer.create(email=email, source=stripe_token,)
86 user.stripe_customer_id = customer.id
87 user.save()
88
89 subscription = retrieve_stripe_subscription(customer)
90 if not subscription:
91 subscription = stripe.Subscription.create(
92 customer=customer.id, items=[{"plan": settings.STRIPE_PLAN_ID}],
93 )
94
95 UserSubscription.set_active(user, subscription.id)
96
97
98 def cancel_stripe_customer_subscriptions(user):
99 """Delete all subscriptions for a Stripe customer."""
100 assert user.stripe_customer_id
101 customer = stripe.Customer.retrieve(user.stripe_customer_id)
102 for sub in customer.subscriptions.data:
103 s = stripe.Subscription.retrieve(sub.id)
104 UserSubscription.set_canceled(user, s.id)
105 s.delete()
106
107
108 def get_stripe_customer(user):
109 if settings.STRIPE_PLAN_ID and user.stripe_customer_id:
110 return stripe.Customer.retrieve(
111 user.stripe_customer_id, expand=["default_source"]
112 )
113
114
115 def get_stripe_subscription_info(stripe_customer):
116 return retrieve_stripe_subscription(stripe_customer)
117
118
119 def create_missing_stripe_webhook():
120 url_path = reverse("users.stripe_hooks")
121 url = (
122 "https://" + settings.STRIPE_WEBHOOK_HOSTNAME + url_path
123 if settings.STRIPE_WEBHOOK_HOSTNAME
124 else absolutify(url_path)
125 )
126
127 # From https://stripe.com/docs/api/webhook_endpoints/create
128 events = (
129 # "Occurs whenever an invoice payment attempt succeeds."
130 "invoice.payment_succeeded",
131 # "Occurs whenever a customer’s subscription ends."
132 # Also, if you go into the Stripe Dashboard, click Billing, Subscriptions,
133 # and find a customer and click the "Cancel subscription" button, this
134 # triggers.
135 "customer.subscription.deleted",
136 )
137
138 for webhook in stripe.WebhookEndpoint.list().auto_paging_iter():
139 if webhook.url == url and set(events) == set(webhook.enabled_events):
140 return
141
142 stripe.WebhookEndpoint.create(
143 url=url, enabled_events=events,
144 )
145
[end of kuma/users/stripe_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/users/stripe_utils.py b/kuma/users/stripe_utils.py
--- a/kuma/users/stripe_utils.py
+++ b/kuma/users/stripe_utils.py
@@ -64,7 +64,7 @@
UserSubscription.set_active(user, stripe_subscription_info.id)
else:
# The user has a stripe_customer_id but no active subscription
- # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled
+ # on the current settings.STRIPE_PLAN_ID! Perhaps it has been canceled
# and not updated in our own records.
for user_subscription in UserSubscription.objects.filter(
user=user, canceled__isnull=True
@@ -82,7 +82,7 @@
else None
)
if not customer or customer.email != email:
- customer = stripe.Customer.create(email=email, source=stripe_token,)
+ customer = stripe.Customer.create(email=email, source=stripe_token)
user.stripe_customer_id = customer.id
user.save()
@@ -99,10 +99,13 @@
"""Delete all subscriptions for a Stripe customer."""
assert user.stripe_customer_id
customer = stripe.Customer.retrieve(user.stripe_customer_id)
+ canceled = []
for sub in customer.subscriptions.data:
s = stripe.Subscription.retrieve(sub.id)
UserSubscription.set_canceled(user, s.id)
s.delete()
+ canceled.append(s)
+ return canceled
def get_stripe_customer(user):
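For context on why the fix makes `cancel_stripe_customer_subscriptions` return the canceled subscriptions: the management API requested in the issue can then report exactly what was canceled. The view below is a hypothetical sketch, not code from the kuma repository:

```python
# Hypothetical Django view; assumes the patched helper returns the list of
# canceled Stripe subscription objects, as shown in the diff above.
from django.http import JsonResponse

from kuma.users.stripe_utils import cancel_stripe_customer_subscriptions

def cancel_subscriptions(request):
    canceled = cancel_stripe_customer_subscriptions(request.user)
    # Return just the subscription ids so the frontend can confirm the result.
    return JsonResponse({"canceled": [sub.id for sub in canceled]})
```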
| {"golden_diff": "diff --git a/kuma/users/stripe_utils.py b/kuma/users/stripe_utils.py\n--- a/kuma/users/stripe_utils.py\n+++ b/kuma/users/stripe_utils.py\n@@ -64,7 +64,7 @@\n UserSubscription.set_active(user, stripe_subscription_info.id)\n else:\n # The user has a stripe_customer_id but no active subscription\n- # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled\n+ # on the current settings.STRIPE_PLAN_ID! Perhaps it has been canceled\n # and not updated in our own records.\n for user_subscription in UserSubscription.objects.filter(\n user=user, canceled__isnull=True\n@@ -82,7 +82,7 @@\n else None\n )\n if not customer or customer.email != email:\n- customer = stripe.Customer.create(email=email, source=stripe_token,)\n+ customer = stripe.Customer.create(email=email, source=stripe_token)\n user.stripe_customer_id = customer.id\n user.save()\n \n@@ -99,10 +99,13 @@\n \"\"\"Delete all subscriptions for a Stripe customer.\"\"\"\n assert user.stripe_customer_id\n customer = stripe.Customer.retrieve(user.stripe_customer_id)\n+ canceled = []\n for sub in customer.subscriptions.data:\n s = stripe.Subscription.retrieve(sub.id)\n UserSubscription.set_canceled(user, s.id)\n s.delete()\n+ canceled.append(s)\n+ return canceled\n \n \n def get_stripe_customer(user):\n", "issue": "T - the API to support subscription management\nThe backend pieces needed for https://github.com/mdn/kuma/issues/6703\r\n\r\nThat way we, can Reactify the subscription management page. \r\n\r\nWhat we need is to endpoints:\r\n\r\n1. Getting your subscriptions (plural but it's probably never more than 1)\r\n2. Cancel a subscription (or cancel them all if that's easier)\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.wiki.templatetags.jinja_helpers import absolutify\n\nfrom .models import UserSubscription\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return None\n\n\ndef retrieve_and_synchronize_subscription_info(user):\n \"\"\"For the given user, if it has as 'stripe_customer_id' retrieve the info\n about the subscription if it's there. All packaged in a way that is\n practical for the stripe_subscription.html template.\n\n Also, whilst doing this check, we also verify that the UserSubscription record\n for this user is right. 
Doing that check is a second-layer check in case\n our webhooks have failed us.\n \"\"\"\n subscription_info = None\n stripe_customer = get_stripe_customer(user)\n if stripe_customer:\n stripe_subscription_info = get_stripe_subscription_info(stripe_customer)\n if stripe_subscription_info:\n source = stripe_customer.default_source\n if source.object == \"card\":\n card = source\n elif source.object == \"source\":\n card = source.card\n else:\n raise ValueError(\n f\"unexpected stripe customer default_source of type {source.object!r}\"\n )\n\n subscription_info = {\n \"id\": stripe_subscription_info.id,\n \"amount\": stripe_subscription_info.plan.amount,\n \"brand\": card.brand,\n \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n \"last4\": card.last4,\n # Cards that are part of a \"source\" don't have a zip\n \"zip\": card.get(\"address_zip\", None),\n # TODO: Deprecated. Only used in the Edit Profile view\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription_info.current_period_end\n ),\n }\n\n # To perfect the synchronization, take this opportunity to make sure\n # we have an up-to-date record of this.\n UserSubscription.set_active(user, stripe_subscription_info.id)\n else:\n # The user has a stripe_customer_id but no active subscription\n # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled\n # and not updated in our own records.\n for user_subscription in UserSubscription.objects.filter(\n user=user, canceled__isnull=True\n ):\n user_subscription.canceled = timezone.now()\n user_subscription.save()\n\n return subscription_info\n\n\ndef create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n subscription = retrieve_stripe_subscription(customer)\n if not subscription:\n subscription = stripe.Subscription.create(\n customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n UserSubscription.set_active(user, subscription.id)\n\n\ndef cancel_stripe_customer_subscriptions(user):\n \"\"\"Delete all subscriptions for a Stripe customer.\"\"\"\n assert user.stripe_customer_id\n customer = stripe.Customer.retrieve(user.stripe_customer_id)\n for sub in customer.subscriptions.data:\n s = stripe.Subscription.retrieve(sub.id)\n UserSubscription.set_canceled(user, s.id)\n s.delete()\n\n\ndef get_stripe_customer(user):\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id:\n return stripe.Customer.retrieve(\n user.stripe_customer_id, expand=[\"default_source\"]\n )\n\n\ndef get_stripe_subscription_info(stripe_customer):\n return retrieve_stripe_subscription(stripe_customer)\n\n\ndef create_missing_stripe_webhook():\n url_path = reverse(\"users.stripe_hooks\")\n url = (\n \"https://\" + settings.STRIPE_WEBHOOK_HOSTNAME + url_path\n if settings.STRIPE_WEBHOOK_HOSTNAME\n else absolutify(url_path)\n )\n\n # From https://stripe.com/docs/api/webhook_endpoints/create\n events = (\n # \"Occurs whenever an invoice payment attempt succeeds.\"\n \"invoice.payment_succeeded\",\n # \"Occurs whenever a customer\u2019s subscription ends.\"\n # Also, if you go into the Stripe Dashboard, click Billing, Subscriptions,\n # and find a customer and click the \"Cancel subscription\" button, this\n # triggers.\n \"customer.subscription.deleted\",\n )\n\n for webhook 
in stripe.WebhookEndpoint.list().auto_paging_iter():\n if webhook.url == url and set(events) == set(webhook.enabled_events):\n return\n\n stripe.WebhookEndpoint.create(\n url=url, enabled_events=events,\n )\n", "path": "kuma/users/stripe_utils.py"}]} | 2,037 | 324 |
gh_patches_debug_572 | rasdani/github-patches | git_diff | hylang__hy-1343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REPL history is lost on (quit)
REPL history is not flushed to disk if the REPL is exited using `(quit)`.
A workaround is to remember to use `CTRL-D` to exit the REPL.
It would be nice if `(quit)` also worked.
</issue>
<code>
[start of hy/completer.py]
1 # Copyright 2017 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 import contextlib
6 import os
7 import re
8 import sys
9
10 import hy.macros
11 import hy.compiler
12 from hy._compat import builtins, string_types
13
14
15 docomplete = True
16
17 try:
18 import readline
19 except ImportError:
20 try:
21 import pyreadline.rlmain
22 import pyreadline.unicode_helper # NOQA
23 import readline
24 except ImportError:
25 docomplete = False
26
27 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
28 readline_bind = "bind ^I rl_complete"
29 else:
30 readline_bind = "tab: complete"
31
32
33 class Completer(object):
34
35 def __init__(self, namespace={}):
36 if not isinstance(namespace, dict):
37 raise TypeError('namespace must be a dictionary')
38 self.namespace = namespace
39 self.path = [hy.compiler._compile_table,
40 builtins.__dict__,
41 hy.macros._hy_macros[None],
42 namespace]
43 self.tag_path = [hy.macros._hy_tag[None]]
44 if '__name__' in namespace:
45 module_name = namespace['__name__']
46 self.path.append(hy.macros._hy_macros[module_name])
47 self.tag_path.append(hy.macros._hy_tag[module_name])
48
49 def attr_matches(self, text):
50 # Borrowed from IPython's completer
51 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
52
53 if m:
54 expr, attr = m.group(1, 3)
55 attr = attr.replace("-", "_")
56 expr = expr.replace("-", "_")
57 else:
58 return []
59
60 try:
61 obj = eval(expr, self.namespace)
62 words = dir(obj)
63 except Exception:
64 return []
65
66 n = len(attr)
67 matches = []
68 for w in words:
69 if w[:n] == attr:
70 matches.append("{}.{}".format(
71 expr.replace("_", "-"), w.replace("_", "-")))
72 return matches
73
74 def global_matches(self, text):
75 matches = []
76 for p in self.path:
77 for k in p.keys():
78 if isinstance(k, string_types):
79 k = k.replace("_", "-")
80 if k.startswith(text):
81 matches.append(k)
82 return matches
83
84 def tag_matches(self, text):
85 text = text[1:]
86 matches = []
87 for p in self.tag_path:
88 for k in p.keys():
89 if isinstance(k, string_types):
90 if k.startswith(text):
91 matches.append("#{}".format(k))
92 return matches
93
94 def complete(self, text, state):
95 if text.startswith("#"):
96 matches = self.tag_matches(text)
97 elif "." in text:
98 matches = self.attr_matches(text)
99 else:
100 matches = self.global_matches(text)
101 try:
102 return matches[state]
103 except IndexError:
104 return None
105
106
107 @contextlib.contextmanager
108 def completion(completer=None):
109 delims = "()[]{} "
110 if not completer:
111 completer = Completer()
112
113 if docomplete:
114 readline.set_completer(completer.complete)
115 readline.set_completer_delims(delims)
116
117 history = os.path.expanduser("~/.hy-history")
118 readline.parse_and_bind("set blink-matching-paren on")
119
120 try:
121 readline.read_history_file(history)
122 except IOError:
123 open(history, 'a').close()
124
125 readline.parse_and_bind(readline_bind)
126
127 yield
128
129 if docomplete:
130 readline.write_history_file(history)
131
[end of hy/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -124,7 +124,8 @@
readline.parse_and_bind(readline_bind)
- yield
-
- if docomplete:
- readline.write_history_file(history)
+ try:
+ yield
+ finally:
+ if docomplete:
+ readline.write_history_file(history)
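The essential point of the fix is that history writing moves into a `finally` block, so it still runs when the REPL exits via `(quit)` (which raises `SystemExit`) rather than only on a clean EOF. A stripped-down illustration of the pattern, not the actual Hy source:

```python
# Minimal sketch of the try/finally contextmanager pattern used by the fix.
import contextlib

@contextlib.contextmanager
def completion_sketch(write_history):
    # ... readline setup and history loading would happen here ...
    try:
        yield
    finally:
        # Runs on normal exit *and* when the REPL body raises SystemExit,
        # e.g. from (quit), so the history file is always flushed.
        write_history()
```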
| {"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -124,7 +124,8 @@\n \n readline.parse_and_bind(readline_bind)\n \n- yield\n-\n- if docomplete:\n- readline.write_history_file(history)\n+ try:\n+ yield\n+ finally:\n+ if docomplete:\n+ readline.write_history_file(history)\n", "issue": "REPL history is lost on (quit)\nREPL history is not flushed to disk if the REPL is exited using `(quit)`. \r\n\r\nA workaround is to remember to use `CTRL-D` to exit the REPL.\r\n\r\nWould be nice if `(quit)` also worked.\n", "before_files": [{"content": "# Copyright 2017 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\n\nimport hy.macros\nimport hy.compiler\nfrom hy._compat import builtins, string_types\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\nelse:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._compile_table,\n builtins.__dict__,\n hy.macros._hy_macros[None],\n namespace]\n self.tag_path = [hy.macros._hy_tag[None]]\n if '__name__' in namespace:\n module_name = namespace['__name__']\n self.path.append(hy.macros._hy_macros[module_name])\n self.tag_path.append(hy.macros._hy_tag[module_name])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, string_types):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, string_types):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.path.expanduser(\"~/.hy-history\")\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n open(history, 'a').close()\n\n readline.parse_and_bind(readline_bind)\n\n yield\n\n if docomplete:\n 
readline.write_history_file(history)\n", "path": "hy/completer.py"}]} | 1,670 | 109 |
gh_patches_debug_21107 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3043 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checks IDs changing
Hi Team,
We would like to request that check IDs not be changed, since we allowlist some checks that we run in our environment.
E.g., https://docs.bridgecrew.io/docs/bc_aws_iam_45 says the check ID is CKV_AWS_61,
whereas the code for CKV_AWS_61 is now different.
Thanks!
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
1 import re
2
3 from checkov.common.models.enums import CheckResult, CheckCategories
4 from checkov.common.util.type_forcers import extract_policy_dict
5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
6 from typing import List
7
8
9 class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
10
11 def __init__(self):
12 name = "Ensure IAM role allows only specific principals in account to assume it"
13 id = "CKV_AWS_61"
14 supported_resources = ['aws_iam_role']
15 categories = [CheckCategories.IAM]
16 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
17
18 def scan_resource_conf(self, conf):
19 try:
20 assume_role_block = extract_policy_dict(conf['assume_role_policy'][0])
21 if assume_role_block and 'Statement' in assume_role_block.keys() \
22 and 'Principal' in assume_role_block['Statement'][0] \
23 and 'AWS' in assume_role_block['Statement'][0]['Principal']:
24 account_access = re.compile(r'\d{12}|arn:aws:iam::\d{12}:root')
25 if re.match(account_access, assume_role_block['Statement'][0]['Principal']['AWS']):
26 return CheckResult.FAILED
27 except Exception: # nosec
28 pass
29 return CheckResult.PASSED
30
31 def get_evaluated_keys(self) -> List[str]:
32 return ['assume_role_policy']
33
34
35 check = IAMRoleAllowAssumeFromAccount()
36
[end of checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
[start of checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
1 import json
2 import re
3
4 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
5 from checkov.common.models.enums import CheckResult, CheckCategories
6
7 ACCOUNT_ACCESS = re.compile(r'\d{12}|arn:aws:iam::\d{12}:root')
8
9 class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
10 def __init__(self):
11 name = "Ensure IAM role allows only specific principals in account to assume it"
12 id = "CKV_AWS_61"
13 supported_resources = ['AWS::IAM::Role']
14 categories = [CheckCategories.IAM]
15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
16
17 def scan_resource_conf(self, conf):
18 if 'AssumeRolePolicyDocument' in conf['Properties']:
19 assume_role_policy_doc = conf['Properties']['AssumeRolePolicyDocument']
20 if isinstance(assume_role_policy_doc, dict) and 'Fn::Sub' in assume_role_policy_doc.keys():
21 policy_fn_sub_block = assume_role_policy_doc['Fn::Sub']
22 if isinstance(policy_fn_sub_block, list) and len(policy_fn_sub_block) == 2:
23 assume_role_block = json.loads(policy_fn_sub_block[0])
24 else:
25 assume_role_block = json.loads(policy_fn_sub_block)
26 elif isinstance(assume_role_policy_doc, str):
27 assume_role_block = json.loads(assume_role_policy_doc)
28 else:
29 assume_role_block = assume_role_policy_doc
30 else:
31 return CheckResult.UNKNOWN
32
33 if 'Statement' in assume_role_block.keys():
34 if isinstance(assume_role_block['Statement'], list) and 'Principal' in \
35 assume_role_block['Statement'][0]:
36 if 'AWS' in assume_role_block['Statement'][0]['Principal']:
37 if isinstance(assume_role_block['Statement'][0]['Principal']['AWS'],list) \
38 and isinstance(assume_role_block['Statement'][0]['Principal']['AWS'][0], str):
39 if re.match(ACCOUNT_ACCESS, assume_role_block['Statement'][0]['Principal']['AWS'][0]):
40 return CheckResult.FAILED
41
42 return CheckResult.PASSED
43
44
45 check = IAMRoleAllowAssumeFromAccount()
46
[end of checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
--- a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
+++ b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
@@ -8,7 +8,7 @@
class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
def __init__(self):
- name = "Ensure IAM role allows only specific principals in account to assume it"
+ name = "Ensure AWS IAM policy does not allow assume role permission across all services"
id = "CKV_AWS_61"
supported_resources = ['AWS::IAM::Role']
categories = [CheckCategories.IAM]
diff --git a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
--- a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
+++ b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
@@ -9,7 +9,7 @@
class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
def __init__(self):
- name = "Ensure IAM role allows only specific principals in account to assume it"
+ name = "Ensure AWS IAM policy does not allow assume role permission across all services"
id = "CKV_AWS_61"
supported_resources = ['aws_iam_role']
categories = [CheckCategories.IAM]
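Worth noting for the issue's concern: the fix only rewrites the human-readable `name`; the machine-readable `id` stays `CKV_AWS_61`, which is what allowlists reference. A hedged illustration of that convention (hypothetical check class, mirroring the constructor fields shown above):

```python
# Hypothetical example: the id is the stable contract, the name is descriptive.
class ExampleIamCheck:
    def __init__(self):
        # Referenced by --check/--skip-check allowlists, so it must not change.
        self.id = "CKV_AWS_61"
        # Free to be reworded without breaking existing configurations.
        self.name = (
            "Ensure AWS IAM policy does not allow assume role "
            "permission across all services"
        )
```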
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n--- a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n+++ b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n@@ -8,7 +8,7 @@\n \n class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n def __init__(self):\n- name = \"Ensure IAM role allows only specific principals in account to assume it\"\n+ name = \"Ensure AWS IAM policy does not allow assume role permission across all services\"\n id = \"CKV_AWS_61\"\n supported_resources = ['AWS::IAM::Role']\n categories = [CheckCategories.IAM]\ndiff --git a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n--- a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n+++ b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n@@ -9,7 +9,7 @@\n class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n \n def __init__(self):\n- name = \"Ensure IAM role allows only specific principals in account to assume it\"\n+ name = \"Ensure AWS IAM policy does not allow assume role permission across all services\"\n id = \"CKV_AWS_61\"\n supported_resources = ['aws_iam_role']\n categories = [CheckCategories.IAM]\n", "issue": "Checks IDs changing\nHi Team,\r\n\r\nwould like to request the check IDs do not get changed since we allowlist some checks which we run in our environment.\r\nEg : https://docs.bridgecrew.io/docs/bc_aws_iam_45 Check ID says CKV_AWS_61\r\n\r\nwhereas code is different for CKV_AWS_61.\r\nThanks!\r\n\n", "before_files": [{"content": "import re\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import extract_policy_dict\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom typing import List\n\n\nclass IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure IAM role allows only specific principals in account to assume it\"\n id = \"CKV_AWS_61\"\n supported_resources = ['aws_iam_role']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n try:\n assume_role_block = extract_policy_dict(conf['assume_role_policy'][0])\n if assume_role_block and 'Statement' in assume_role_block.keys() \\\n and 'Principal' in assume_role_block['Statement'][0] \\\n and 'AWS' in assume_role_block['Statement'][0]['Principal']:\n account_access = re.compile(r'\\d{12}|arn:aws:iam::\\d{12}:root')\n if re.match(account_access, assume_role_block['Statement'][0]['Principal']['AWS']):\n return CheckResult.FAILED\n except Exception: # nosec\n pass\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['assume_role_policy']\n\n\ncheck = IAMRoleAllowAssumeFromAccount()\n", "path": "checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py"}, {"content": "import json\nimport re\n\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\nACCOUNT_ACCESS = re.compile(r'\\d{12}|arn:aws:iam::\\d{12}:root')\n\nclass IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure IAM role allows 
only specific principals in account to assume it\"\n id = \"CKV_AWS_61\"\n supported_resources = ['AWS::IAM::Role']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'AssumeRolePolicyDocument' in conf['Properties']:\n assume_role_policy_doc = conf['Properties']['AssumeRolePolicyDocument']\n if isinstance(assume_role_policy_doc, dict) and 'Fn::Sub' in assume_role_policy_doc.keys():\n policy_fn_sub_block = assume_role_policy_doc['Fn::Sub']\n if isinstance(policy_fn_sub_block, list) and len(policy_fn_sub_block) == 2:\n assume_role_block = json.loads(policy_fn_sub_block[0])\n else:\n assume_role_block = json.loads(policy_fn_sub_block)\n elif isinstance(assume_role_policy_doc, str):\n assume_role_block = json.loads(assume_role_policy_doc)\n else:\n assume_role_block = assume_role_policy_doc\n else:\n return CheckResult.UNKNOWN\n\n if 'Statement' in assume_role_block.keys():\n if isinstance(assume_role_block['Statement'], list) and 'Principal' in \\\n assume_role_block['Statement'][0]:\n if 'AWS' in assume_role_block['Statement'][0]['Principal']:\n if isinstance(assume_role_block['Statement'][0]['Principal']['AWS'],list) \\\n and isinstance(assume_role_block['Statement'][0]['Principal']['AWS'][0], str):\n if re.match(ACCOUNT_ACCESS, assume_role_block['Statement'][0]['Principal']['AWS'][0]):\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = IAMRoleAllowAssumeFromAccount()\n", "path": "checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py"}]} | 1,623 | 358 |
gh_patches_debug_13349 | rasdani/github-patches | git_diff | python-poetry__poetry-3583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--short has no effect when `poetry version` is passed a new version
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: Ubuntu 16.04
- **Poetry version**: 1.1.4
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: n/a
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
Hi there!
Assuming the current version of a project is 0.1.0, this is what happens when trying to update to the next patch version with the `--short` option:
```console
$ poetry version patch --short
Bumping version from 0.1.0 to 0.1.1
```
Instead, I would like this to be output (only the new version number):
```console
$ poetry version patch --short
0.1.1
```
My use case is scripting: if the user only provides a bump rule to my script, I'd like to be able to easily parse the new version computed by poetry so that I can, e.g., create a new git tag, and so on.
Thanks!
</issue>
<code>
[start of poetry/console/commands/version.py]
1 from cleo import argument
2 from cleo import option
3
4 from .command import Command
5
6
7 class VersionCommand(Command):
8
9 name = "version"
10 description = (
11 "Shows the version of the project or bumps it when a valid "
12 "bump rule is provided."
13 )
14
15 arguments = [
16 argument(
17 "version",
18 "The version number or the rule to update the version.",
19 optional=True,
20 )
21 ]
22 options = [option("short", "s", "Output the version number only")]
23
24 help = """\
25 The version command shows the current version of the project or bumps the version of
26 the project and writes the new version back to <comment>pyproject.toml</> if a valid
27 bump rule is provided.
28
29 The new version should ideally be a valid semver string or a valid bump rule:
30 patch, minor, major, prepatch, preminor, premajor, prerelease.
31 """
32
33 RESERVED = {
34 "major",
35 "minor",
36 "patch",
37 "premajor",
38 "preminor",
39 "prepatch",
40 "prerelease",
41 }
42
43 def handle(self):
44 version = self.argument("version")
45
46 if version:
47 version = self.increment_version(
48 self.poetry.package.pretty_version, version
49 )
50
51 self.line(
52 "Bumping version from <b>{}</> to <fg=green>{}</>".format(
53 self.poetry.package.pretty_version, version
54 )
55 )
56
57 content = self.poetry.file.read()
58 poetry_content = content["tool"]["poetry"]
59 poetry_content["version"] = version.text
60
61 self.poetry.file.write(content)
62 else:
63 if self.option("short"):
64 self.line("{}".format(self.poetry.package.pretty_version))
65 else:
66 self.line(
67 "<comment>{}</> <info>{}</>".format(
68 self.poetry.package.name, self.poetry.package.pretty_version
69 )
70 )
71
72 def increment_version(self, version, rule):
73 from poetry.core.semver import Version
74
75 try:
76 version = Version.parse(version)
77 except ValueError:
78 raise ValueError("The project's version doesn't seem to follow semver")
79
80 if rule in {"major", "premajor"}:
81 new = version.next_major
82 if rule == "premajor":
83 new = new.first_prerelease
84 elif rule in {"minor", "preminor"}:
85 new = version.next_minor
86 if rule == "preminor":
87 new = new.first_prerelease
88 elif rule in {"patch", "prepatch"}:
89 new = version.next_patch
90 if rule == "prepatch":
91 new = new.first_prerelease
92 elif rule == "prerelease":
93 if version.is_prerelease():
94 pre = version.prerelease
95 new_prerelease = int(pre[1]) + 1
96 new = Version.parse(
97 "{}.{}.{}-{}".format(
98 version.major,
99 version.minor,
100 version.patch,
101 ".".join([pre[0], str(new_prerelease)]),
102 )
103 )
104 else:
105 new = version.next_patch.first_prerelease
106 else:
107 new = Version.parse(rule)
108
109 return new
110
[end of poetry/console/commands/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/console/commands/version.py b/poetry/console/commands/version.py
--- a/poetry/console/commands/version.py
+++ b/poetry/console/commands/version.py
@@ -48,11 +48,14 @@
self.poetry.package.pretty_version, version
)
- self.line(
- "Bumping version from <b>{}</> to <fg=green>{}</>".format(
- self.poetry.package.pretty_version, version
+ if self.option("short"):
+ self.line("{}".format(version))
+ else:
+ self.line(
+ "Bumping version from <b>{}</> to <fg=green>{}</>".format(
+ self.poetry.package.pretty_version, version
+ )
)
- )
content = self.poetry.file.read()
poetry_content = content["tool"]["poetry"]
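With the change above, the issue's scripting use case becomes straightforward, since `--short` now suppresses the "Bumping version..." message when a bump rule is given. A hedged example of consuming the output from a release script (assumes a Poetry build containing this fix is on PATH):

```python
# Hypothetical release script snippet: capture only the new version number.
import subprocess

new_version = subprocess.run(
    ["poetry", "version", "patch", "--short"],
    capture_output=True, text=True, check=True,
).stdout.strip()

print(f"Tagging v{new_version}")  # e.g. "Tagging v0.1.1"
```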
| {"golden_diff": "diff --git a/poetry/console/commands/version.py b/poetry/console/commands/version.py\n--- a/poetry/console/commands/version.py\n+++ b/poetry/console/commands/version.py\n@@ -48,11 +48,14 @@\n self.poetry.package.pretty_version, version\n )\n \n- self.line(\n- \"Bumping version from <b>{}</> to <fg=green>{}</>\".format(\n- self.poetry.package.pretty_version, version\n+ if self.option(\"short\"):\n+ self.line(\"{}\".format(version))\n+ else:\n+ self.line(\n+ \"Bumping version from <b>{}</> to <fg=green>{}</>\".format(\n+ self.poetry.package.pretty_version, version\n+ )\n )\n- )\n \n content = self.poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n", "issue": "--short has no effect when `poetry version` is passed a new version\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Ubuntu 16.04\r\n- **Poetry version**: 1.1.4\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: n/a\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\n\r\nHi there!\r\n\r\nAssuming the current version of a project is 0.1.0, this is what happens when trying to update to the next patch version with the `--short` option:\r\n\r\n```console\r\n$ poetry version patch --short\r\nBumping version from 0.1.0 to 0.1.1\r\n```\r\n\r\nInstead, I would like this to be output (only the new version number):\r\n\r\n```console\r\n$ poetry version patch --short\r\n0.1.1\r\n```\r\n\r\nMy use case is scripting: if the user only provides a bump rule to my script, I'd like to be able to easily parse the new version computed by poetry so that I can e.g. 
create a new git tag easily, and so on.\r\n\r\nThanks!\n", "before_files": [{"content": "from cleo import argument\nfrom cleo import option\n\nfrom .command import Command\n\n\nclass VersionCommand(Command):\n\n name = \"version\"\n description = (\n \"Shows the version of the project or bumps it when a valid \"\n \"bump rule is provided.\"\n )\n\n arguments = [\n argument(\n \"version\",\n \"The version number or the rule to update the version.\",\n optional=True,\n )\n ]\n options = [option(\"short\", \"s\", \"Output the version number only\")]\n\n help = \"\"\"\\\nThe version command shows the current version of the project or bumps the version of\nthe project and writes the new version back to <comment>pyproject.toml</> if a valid\nbump rule is provided.\n\nThe new version should ideally be a valid semver string or a valid bump rule:\npatch, minor, major, prepatch, preminor, premajor, prerelease.\n\"\"\"\n\n RESERVED = {\n \"major\",\n \"minor\",\n \"patch\",\n \"premajor\",\n \"preminor\",\n \"prepatch\",\n \"prerelease\",\n }\n\n def handle(self):\n version = self.argument(\"version\")\n\n if version:\n version = self.increment_version(\n self.poetry.package.pretty_version, version\n )\n\n self.line(\n \"Bumping version from <b>{}</> to <fg=green>{}</>\".format(\n self.poetry.package.pretty_version, version\n )\n )\n\n content = self.poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n poetry_content[\"version\"] = version.text\n\n self.poetry.file.write(content)\n else:\n if self.option(\"short\"):\n self.line(\"{}\".format(self.poetry.package.pretty_version))\n else:\n self.line(\n \"<comment>{}</> <info>{}</>\".format(\n self.poetry.package.name, self.poetry.package.pretty_version\n )\n )\n\n def increment_version(self, version, rule):\n from poetry.core.semver import Version\n\n try:\n version = Version.parse(version)\n except ValueError:\n raise ValueError(\"The project's version doesn't seem to follow semver\")\n\n if rule in {\"major\", \"premajor\"}:\n new = version.next_major\n if rule == \"premajor\":\n new = new.first_prerelease\n elif rule in {\"minor\", \"preminor\"}:\n new = version.next_minor\n if rule == \"preminor\":\n new = new.first_prerelease\n elif rule in {\"patch\", \"prepatch\"}:\n new = version.next_patch\n if rule == \"prepatch\":\n new = new.first_prerelease\n elif rule == \"prerelease\":\n if version.is_prerelease():\n pre = version.prerelease\n new_prerelease = int(pre[1]) + 1\n new = Version.parse(\n \"{}.{}.{}-{}\".format(\n version.major,\n version.minor,\n version.patch,\n \".\".join([pre[0], str(new_prerelease)]),\n )\n )\n else:\n new = version.next_patch.first_prerelease\n else:\n new = Version.parse(rule)\n\n return new\n", "path": "poetry/console/commands/version.py"}]} | 1,908 | 198 |
gh_patches_debug_3951 | rasdani/github-patches | git_diff | ARM-DOE__ACT-837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AmeriFlux Documentation is not showing up in the API
The new act.io.ameriflux code is not showing up in the documentation.
</issue>
<code>
[start of act/io/__init__.py]
1 """
2 This module contains procedures for reading and writing various ARM datasets.
3
4 """
5
6 import lazy_loader as lazy
7
8 __getattr__, __dir__, __all__ = lazy.attach(
9 __name__,
10 submodules=[
11 'arm',
12 'ameriflux',
13 'text',
14 'icartt',
15 'mpl',
16 'neon',
17 'noaagml',
18 'noaapsl',
19 'pysp2',
20 'hysplit',
21 ],
22 submod_attrs={
23 'arm': [
24 'WriteDataset',
25 'check_arm_standards',
26 'create_ds_from_arm_dod',
27 'read_arm_netcdf',
28 'check_if_tar_gz_file',
29 'read_arm_mmcr',
30 ],
31 'ameriflux': ['format_as_ameriflux'],
32 'text': ['read_csv'],
33 'icartt': ['read_icartt'],
34 'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
35 'neon': ['read_neon_csv'],
36 'noaagml': [
37 'read_gml',
38 'read_gml_co2',
39 'read_gml_halo',
40 'read_gml_met',
41 'read_gml_ozone',
42 'read_gml_radiation',
43 'read_surfrad',
44 ],
45 'noaapsl': [
46 'read_psl_wind_profiler',
47 'read_psl_wind_profiler_temperature',
48 'read_psl_parsivel',
49 'read_psl_radar_fmcw_moment',
50 'read_psl_surface_met',
51 ],
52 'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
53 'sodar': ['read_mfas_sodar'],
54 'hysplit': ['read_hysplit'],
55 },
56 )
57
[end of act/io/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/act/io/__init__.py b/act/io/__init__.py
--- a/act/io/__init__.py
+++ b/act/io/__init__.py
@@ -28,7 +28,7 @@
'check_if_tar_gz_file',
'read_arm_mmcr',
],
- 'ameriflux': ['format_as_ameriflux'],
+ 'ameriflux': ['convert_to_ameriflux'],
'text': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
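The mapping matters because `lazy_loader` only exposes, and the API docs only pick up, the attribute names listed in `submod_attrs`; the old entry listed `format_as_ameriflux`, which presumably no longer matches the function actually defined in `act.io.ameriflux`. A small, hedged check of the corrected export (assumes ACT is installed):

```python
# Illustrative only: after the fix, the lazily exported name resolves.
import act.io

print(hasattr(act.io.ameriflux, "convert_to_ameriflux"))  # expected: True
```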
| {"golden_diff": "diff --git a/act/io/__init__.py b/act/io/__init__.py\n--- a/act/io/__init__.py\n+++ b/act/io/__init__.py\n@@ -28,7 +28,7 @@\n 'check_if_tar_gz_file',\n 'read_arm_mmcr',\n ],\n- 'ameriflux': ['format_as_ameriflux'],\n+ 'ameriflux': ['convert_to_ameriflux'],\n 'text': ['read_csv'],\n 'icartt': ['read_icartt'],\n 'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],\n", "issue": "AmeriFlux Documentation is not showing up in the API\nThe new act.io.ameriflux code is not showing up in the documentation.\n", "before_files": [{"content": "\"\"\"\nThis module contains procedures for reading and writing various ARM datasets.\n\n\"\"\"\n\nimport lazy_loader as lazy\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=[\n 'arm',\n 'ameriflux',\n 'text',\n 'icartt',\n 'mpl',\n 'neon',\n 'noaagml',\n 'noaapsl',\n 'pysp2',\n 'hysplit',\n ],\n submod_attrs={\n 'arm': [\n 'WriteDataset',\n 'check_arm_standards',\n 'create_ds_from_arm_dod',\n 'read_arm_netcdf',\n 'check_if_tar_gz_file',\n 'read_arm_mmcr',\n ],\n 'ameriflux': ['format_as_ameriflux'],\n 'text': ['read_csv'],\n 'icartt': ['read_icartt'],\n 'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],\n 'neon': ['read_neon_csv'],\n 'noaagml': [\n 'read_gml',\n 'read_gml_co2',\n 'read_gml_halo',\n 'read_gml_met',\n 'read_gml_ozone',\n 'read_gml_radiation',\n 'read_surfrad',\n ],\n 'noaapsl': [\n 'read_psl_wind_profiler',\n 'read_psl_wind_profiler_temperature',\n 'read_psl_parsivel',\n 'read_psl_radar_fmcw_moment',\n 'read_psl_surface_met',\n ],\n 'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],\n 'sodar': ['read_mfas_sodar'],\n 'hysplit': ['read_hysplit'],\n },\n)\n", "path": "act/io/__init__.py"}]} | 1,072 | 142 |
gh_patches_debug_64988 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(Berliner Stadtreinigung) BSR not working
Hi there, for the last day or two the BSR integration hasn't been pulling data into HA.
Any idea what might be wrong? Just FYI the data from abfall.io (ALBA Berlin) is working just fine.
Sorry for not posting code or logs, but I'm unsure what I should be posting.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py]
1 import urllib.parse
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "Berliner Stadtreinigungsbetriebe"
8 DESCRIPTION = "Source for Berliner Stadtreinigungsbetriebe waste collection."
9 URL = "https://bsr.de"
10 TEST_CASES = {
11 "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)": {
12 "abf_strasse": "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)",
13 "abf_hausnr": 1,
14 },
15 "Am Ried, 13467 Berlin (Reinickendorf)": {
16 "abf_strasse": "Am Ried, 13467 Berlin (Reinickendorf)",
17 "abf_hausnr": "11G",
18 },
19 }
20
21
22 def myquote(s):
23 # bsr uses strange quoting
24 return urllib.parse.quote(s, safe=",()")
25
26
27 class Source:
28 def __init__(self, abf_strasse, abf_hausnr):
29 self._abf_strasse = abf_strasse
30 self._abf_hausnr = abf_hausnr
31 self._ics = ICS()
32
33 def fetch(self):
34 # get cookie
35 r = requests.get("https://www.bsr.de/abfuhrkalender-20520.php")
36 cookies = r.cookies
37
38 # get street name only (without PLZ)
39 street = self._abf_strasse.split(",")[0]
40
41 # start search using string name (without PLZ)
42 args = {"script": "dynamic_search", "step": 1, "q": street}
43 r = requests.get(
44 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
45 )
46
47 # retrieve house number list
48 args = {"script": "dynamic_search", "step": 2, "q": self._abf_strasse}
49 r = requests.get(
50 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
51 )
52
53 args = {
54 "abf_strasse": street,
55 "abf_hausnr": self._abf_hausnr,
56 "tab_control": "Jahr",
57 "abf_config_weihnachtsbaeume": "",
58 "abf_config_restmuell": "on",
59 "abf_config_biogut": "on",
60 "abf_config_wertstoffe": "on",
61 "abf_config_laubtonne": "on",
62 # "abf_selectmonth": "5 2020",
63 # "abf_datepicker": "28.04.2020",
64 # "listitems":7,
65 }
66 r = requests.post(
67 "https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax",
68 data=args,
69 cookies=cookies,
70 )
71
72 args = {
73 "script": "dynamic_iCal_ajax",
74 "abf_strasse": self._abf_strasse,
75 "abf_hausnr": self._abf_hausnr,
76 "tab_control": "Jahr",
77 "abf_config_weihnachtsbaeume": "",
78 "abf_config_restmuell": "on",
79 "abf_config_biogut": "on",
80 "abf_config_wertstoffe": "on",
81 "abf_config_laubtonne": "on",
82 # "abf_selectmonth": "5 2020",
83 # "listitems":7,
84 }
85
86 # create url using private url encoding
87 encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
88 url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
89 r = requests.get(url, cookies=cookies)
90
91 # parse ics file
92 dates = self._ics.convert(r.text)
93
94 entries = []
95 for d in dates:
96 entries.append(Collection(d[0], d[1]))
97 return entries
98
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
@@ -28,7 +28,7 @@
def __init__(self, abf_strasse, abf_hausnr):
self._abf_strasse = abf_strasse
self._abf_hausnr = abf_hausnr
- self._ics = ICS()
+ self._ics = ICS(offset=1)
def fetch(self):
# get cookie
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n@@ -28,7 +28,7 @@\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n- self._ics = ICS()\n+ self._ics = ICS(offset=1)\n \n def fetch(self):\n # get cookie\n", "issue": "(Berliner Stadtreinigung) BSR not working\nHi there, since about a day or two BSR integration isn't pulling data into HA. \r\nAny idea what might be wrong? Just FYI the data from abfall.io (ALBA Berlin) is working just fine. \r\n\r\nSorry for not posting code / log, but I'm unsure, what I should be posting. \n", "before_files": [{"content": "import urllib.parse\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Berliner Stadtreinigungsbetriebe\"\nDESCRIPTION = \"Source for Berliner Stadtreinigungsbetriebe waste collection.\"\nURL = \"https://bsr.de\"\nTEST_CASES = {\n \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\": {\n \"abf_strasse\": \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\",\n \"abf_hausnr\": 1,\n },\n \"Am Ried, 13467 Berlin (Reinickendorf)\": {\n \"abf_strasse\": \"Am Ried, 13467 Berlin (Reinickendorf)\",\n \"abf_hausnr\": \"11G\",\n },\n}\n\n\ndef myquote(s):\n # bsr uses strange quoting\n return urllib.parse.quote(s, safe=\",()\")\n\n\nclass Source:\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n self._ics = ICS()\n\n def fetch(self):\n # get cookie\n r = requests.get(\"https://www.bsr.de/abfuhrkalender-20520.php\")\n cookies = r.cookies\n\n # get street name only (without PLZ)\n street = self._abf_strasse.split(\",\")[0]\n\n # start search using string name (without PLZ)\n args = {\"script\": \"dynamic_search\", \"step\": 1, \"q\": street}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n # retrieve house number list\n args = {\"script\": \"dynamic_search\", \"step\": 2, \"q\": self._abf_strasse}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n args = {\n \"abf_strasse\": street,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"abf_datepicker\": \"28.04.2020\",\n # \"listitems\":7,\n }\n r = requests.post(\n \"https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax\",\n data=args,\n cookies=cookies,\n )\n\n args = {\n \"script\": \"dynamic_iCal_ajax\",\n \"abf_strasse\": self._abf_strasse,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"listitems\":7,\n }\n\n # create url using private url encoding\n encoded = map(lambda key: 
f\"{key}={myquote(str(args[key]))}\", args.keys())\n url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n r = requests.get(url, cookies=cookies)\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py"}]} | 1,768 | 167 |
gh_patches_debug_24298 | rasdani/github-patches | git_diff | airctic__icevision-71 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hub Detr fine tuning
Following the first release of Detr on hub, it would be a very good idea to support fine tuning.
[this](https://github.com/facebookresearch/detr/issues/9) thread should be helpful, and [this](https://gist.github.com/mlk1337/651297e28199b4bb7907fc413c49f58f) gist has the high-level overview on how to implement it.
</issue>
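A rough sketch of the approach outlined in the linked thread and gist: start from the COCO-pretrained checkpoint but drop the classification-head weights, so a model built with a different number of classes can still load it and be fine-tuned. The checkpoint URL and key names follow the public DETR release and should be double-checked against the repo.

```python
import torch

# Download the pretrained DETR-R50 checkpoint (URL assumed from the DETR repo).
checkpoint = torch.hub.load_state_dict_from_url(
    "https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth",
    map_location="cpu",
)
state_dict = checkpoint["model"]

# The classification head is sized for COCO's classes, so remove it before
# loading into a model configured for a new dataset.
for key in ("class_embed.weight", "class_embed.bias"):
    state_dict.pop(key, None)

torch.save({"model": state_dict}, "detr-r50_no-class-head.pth")
# The stripped checkpoint can then be passed to training via --resume.
```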
<code>
[start of examples/detr_wheat.py]
1 import pandas as pd
2 from mantisshrimp.imports import *
3 from mantisshrimp.hub.detr import *
4
5
6 class WheatParser(DetrBBoxParser):
7 def __init__(self, df, source):
8 self.df = df
9 self.source = source
10 self.imageid_map = IDMap()
11
12 def __iter__(self):
13 yield from self.df.itertuples()
14
15 def __len__(self):
16 return len(self.df)
17
18 def prepare(self, o):
19 self.bbox = BBox.from_xywh(*np.fromstring(o.bbox[1:-1], sep=","))
20
21 def imageid(self, o) -> int:
22 return self.imageid_map[o.image_id]
23
24 def filepath(self, o) -> Union[str, Path]:
25 return self.source / f"{o.image_id}.jpg"
26
27 def height(self, o) -> int:
28 return o.height
29
30 def width(self, o) -> int:
31 return o.width
32
33 def label(self, o) -> int:
34 return 1
35
36 def bbox(self, o) -> BBox:
37 return self.bbox
38
39 def area(self, o) -> float:
40 return self.bbox.area
41
42 def iscrowd(self, o) -> bool:
43 return 0
44
45
46 def get_datasets(args):
47 # parse records
48 source = Path(args.data_path)
49 df = pd.read_csv(source / "train.csv")
50 data_splitter = RandomSplitter([0.8, 0.2])
51 parser = WheatParser(df, source / "train")
52 train_rs, valid_rs = parser.parse(data_splitter)
53 # We use the transforms defined by the authors
54 train_tfm = detr_transform("train")
55 valid_tfm = detr_transform("val")
56 train_dataset = DetrDataset(train_rs, train_tfm)
57 valid_dataset = DetrDataset(valid_rs, valid_tfm)
58 return train_dataset, valid_dataset
59
60
61 if __name__ == "__main__":
62 # adds new arguments to original args_parser
63 args_parser = get_args_parser()
64 args_parser.add_argument("--data_path", type=str)
65 args = args_parser.parse_args()
66
67 train_dataset, valid_dataset = get_datasets(args)
68 run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)
69
[end of examples/detr_wheat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/detr_wheat.py b/examples/detr_wheat.py
--- a/examples/detr_wheat.py
+++ b/examples/detr_wheat.py
@@ -30,17 +30,17 @@
def width(self, o) -> int:
return o.width
- def label(self, o) -> int:
- return 1
+ def label(self, o) -> List[int]:
+ return [1]
- def bbox(self, o) -> BBox:
- return self.bbox
+ def bbox(self, o) -> List[BBox]:
+ return [self.bbox]
- def area(self, o) -> float:
- return self.bbox.area
+ def area(self, o) -> List[float]:
+ return [self.bbox.area]
- def iscrowd(self, o) -> bool:
- return 0
+ def iscrowd(self, o) -> List[bool]:
+ return [0]
def get_datasets(args):
@@ -62,7 +62,12 @@
# adds new arguments to original args_parser
args_parser = get_args_parser()
args_parser.add_argument("--data_path", type=str)
+ args_parser.add_argument("--num_classes", type=int, default=None)
+ args_parser.add_argument("--fine_tune", action="store_true")
args = args_parser.parse_args()
+ if args.fine_tune:
+ args.resume = detr_pretrained_checkpoint_base()
+
train_dataset, valid_dataset = get_datasets(args)
run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)
| {"golden_diff": "diff --git a/examples/detr_wheat.py b/examples/detr_wheat.py\n--- a/examples/detr_wheat.py\n+++ b/examples/detr_wheat.py\n@@ -30,17 +30,17 @@\n def width(self, o) -> int:\n return o.width\n \n- def label(self, o) -> int:\n- return 1\n+ def label(self, o) -> List[int]:\n+ return [1]\n \n- def bbox(self, o) -> BBox:\n- return self.bbox\n+ def bbox(self, o) -> List[BBox]:\n+ return [self.bbox]\n \n- def area(self, o) -> float:\n- return self.bbox.area\n+ def area(self, o) -> List[float]:\n+ return [self.bbox.area]\n \n- def iscrowd(self, o) -> bool:\n- return 0\n+ def iscrowd(self, o) -> List[bool]:\n+ return [0]\n \n \n def get_datasets(args):\n@@ -62,7 +62,12 @@\n # adds new arguments to original args_parser\n args_parser = get_args_parser()\n args_parser.add_argument(\"--data_path\", type=str)\n+ args_parser.add_argument(\"--num_classes\", type=int, default=None)\n+ args_parser.add_argument(\"--fine_tune\", action=\"store_true\")\n args = args_parser.parse_args()\n \n+ if args.fine_tune:\n+ args.resume = detr_pretrained_checkpoint_base()\n+\n train_dataset, valid_dataset = get_datasets(args)\n run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)\n", "issue": "Hub Detr fine tuning\nFollowing the first relase of Detr on hub, it would be a very good idea to support fine tuning.\r\n\r\n[this](https://github.com/facebookresearch/detr/issues/9) thread should be helpful, and [this](https://gist.github.com/mlk1337/651297e28199b4bb7907fc413c49f58f) gist has the high level overview on how to implement it.\n", "before_files": [{"content": "import pandas as pd\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.hub.detr import *\n\n\nclass WheatParser(DetrBBoxParser):\n def __init__(self, df, source):\n self.df = df\n self.source = source\n self.imageid_map = IDMap()\n\n def __iter__(self):\n yield from self.df.itertuples()\n\n def __len__(self):\n return len(self.df)\n\n def prepare(self, o):\n self.bbox = BBox.from_xywh(*np.fromstring(o.bbox[1:-1], sep=\",\"))\n\n def imageid(self, o) -> int:\n return self.imageid_map[o.image_id]\n\n def filepath(self, o) -> Union[str, Path]:\n return self.source / f\"{o.image_id}.jpg\"\n\n def height(self, o) -> int:\n return o.height\n\n def width(self, o) -> int:\n return o.width\n\n def label(self, o) -> int:\n return 1\n\n def bbox(self, o) -> BBox:\n return self.bbox\n\n def area(self, o) -> float:\n return self.bbox.area\n\n def iscrowd(self, o) -> bool:\n return 0\n\n\ndef get_datasets(args):\n # parse records\n source = Path(args.data_path)\n df = pd.read_csv(source / \"train.csv\")\n data_splitter = RandomSplitter([0.8, 0.2])\n parser = WheatParser(df, source / \"train\")\n train_rs, valid_rs = parser.parse(data_splitter)\n # We use the transforms defined by the authors\n train_tfm = detr_transform(\"train\")\n valid_tfm = detr_transform(\"val\")\n train_dataset = DetrDataset(train_rs, train_tfm)\n valid_dataset = DetrDataset(valid_rs, valid_tfm)\n return train_dataset, valid_dataset\n\n\nif __name__ == \"__main__\":\n # adds new arguments to original args_parser\n args_parser = get_args_parser()\n args_parser.add_argument(\"--data_path\", type=str)\n args = args_parser.parse_args()\n\n train_dataset, valid_dataset = get_datasets(args)\n run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)\n", "path": "examples/detr_wheat.py"}]} | 1,277 | 364 |
gh_patches_debug_4773 | rasdani/github-patches | git_diff | mozilla__bugbug-476 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
defect_enhancement_task Taskcluster task is missing the artifact
From the task () logs:
```
[taskcluster 2019-05-21 08:37:51.436Z] === Task Finished ===
[taskcluster 2019-05-21 08:37:51.519Z] Artifact "public/defectenhancementtaskmodel.xz" not found at "/defectenhancementtaskmodel.xz"
[taskcluster 2019-05-21 08:37:51.927Z] Successful task run with exit code: 0 completed in 471.275 seconds
```
</issue>
<code>
[start of scripts/trainer.py]
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import lzma
5 import os
6 import shutil
7 from logging import INFO, basicConfig, getLogger
8 from urllib.request import urlretrieve
9
10 from bugbug.models import get_model_class
11
12 basicConfig(level=INFO)
13 logger = getLogger(__name__)
14
15 BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public"
16
17
18 class Trainer(object):
19 def decompress_file(self, path):
20 with lzma.open(f"{path}.xz", "rb") as input_f:
21 with open(path, "wb") as output_f:
22 shutil.copyfileobj(input_f, output_f)
23
24 def compress_file(self, path):
25 with open(path, "rb") as input_f:
26 with lzma.open(f"{path}.xz", "wb") as output_f:
27 shutil.copyfileobj(input_f, output_f)
28
29 def go(self, model_name):
30 # Download datasets that were built by bugbug_data.
31 os.makedirs("data", exist_ok=True)
32
33 # Bugs.json
34 logger.info("Downloading bugs database")
35 bugs_url = BASE_URL.format("bugs")
36 urlretrieve(f"{bugs_url}/bugs.json.xz", "data/bugs.json.xz")
37 logger.info("Decompressing bugs database")
38 self.decompress_file("data/bugs.json")
39
40 logger.info(f"Training *{model_name}* model")
41
42 model_class = get_model_class(model_name)
43 model = model_class()
44 model.train()
45
46 model_file_name = f"{model_name}model"
47 self.compress_file(model_file_name)
48
49
50 def main():
51 description = "Train the models"
52 parser = argparse.ArgumentParser(description=description)
53
54 parser.add_argument("model", help="Which model to train.")
55
56 args = parser.parse_args()
57
58 retriever = Trainer()
59 retriever.go(args.model)
60
[end of scripts/trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/trainer.py b/scripts/trainer.py
--- a/scripts/trainer.py
+++ b/scripts/trainer.py
@@ -43,9 +43,14 @@
model = model_class()
model.train()
+ logger.info(f"Training done")
+
model_file_name = f"{model_name}model"
+ assert os.path.exists(model_file_name)
self.compress_file(model_file_name)
+ logger.info(f"Model compressed")
+
def main():
description = "Train the models"
| {"golden_diff": "diff --git a/scripts/trainer.py b/scripts/trainer.py\n--- a/scripts/trainer.py\n+++ b/scripts/trainer.py\n@@ -43,9 +43,14 @@\n model = model_class()\n model.train()\n \n+ logger.info(f\"Training done\")\n+\n model_file_name = f\"{model_name}model\"\n+ assert os.path.exists(model_file_name)\n self.compress_file(model_file_name)\n \n+ logger.info(f\"Model compressed\")\n+\n \n def main():\n description = \"Train the models\"\n", "issue": "defect_enhancement_task Taskcluster task is missing the artifact\nFrom the task () logs:\r\n```\r\n[taskcluster 2019-05-21 08:37:51.436Z] === Task Finished ===\r\n[taskcluster 2019-05-21 08:37:51.519Z] Artifact \"public/defectenhancementtaskmodel.xz\" not found at \"/defectenhancementtaskmodel.xz\"\r\n[taskcluster 2019-05-21 08:37:51.927Z] Successful task run with exit code: 0 completed in 471.275 seconds\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models import get_model_class\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def go(self, model_name):\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n logger.info(f\"Training *{model_name}* model\")\n\n model_class = get_model_class(model_name)\n model = model_class()\n model.train()\n\n model_file_name = f\"{model_name}model\"\n self.compress_file(model_file_name)\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}]} | 1,217 | 116 |
gh_patches_debug_10249 | rasdani/github-patches | git_diff | tensorflow__addons-248 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build fails because of optimizers
Some tests for optimizers fail because of this commit https://github.com/tensorflow/tensorflow/commit/2cb745ef1e0b4082a618c81274fca39be0cb4fc6. It can be fixed by replacing `self._get_hyper('epsilon')` with `self.epsilon` when trying to access the value of epsilon. For more details, please refer to https://github.com/tensorflow/tensorflow/commit/2cb745ef1e0b4082a618c81274fca39be0cb4fc6.
</issue>
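A minimal sketch of what changed, assuming a TensorFlow 2.x build where the linked commit has landed: `epsilon` is stored as a plain Python attribute on the optimizer instead of a registered hyperparameter, so it has to be read directly and converted to a tensor.

```python
import tensorflow as tf

opt = tf.keras.optimizers.Adam(epsilon=1e-7)
# Reading epsilon through the hyperparameter machinery fails on these builds;
# the plain attribute still works.
print(opt.epsilon)                                     # 1e-07
eps_t = tf.convert_to_tensor(opt.epsilon, tf.float32)  # cast to the variable dtype
print(eps_t)
```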
<code>
[start of tensorflow_addons/optimizers/lazy_adam.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Variant of the Adam optimizer that handles sparse updates more efficiently.
16
17 Compared with the original Adam optimizer, the one in this file can
18 provide a large improvement in model training throughput for some
19 applications. However, it provides slightly different semantics than the
20 original Adam algorithm, and may lead to different empirical results.
21 """
22
23 from __future__ import absolute_import
24 from __future__ import division
25 from __future__ import print_function
26
27 import tensorflow as tf
28 from tensorflow_addons.utils import keras_utils
29
30
31 @keras_utils.register_keras_custom_object
32 class LazyAdam(tf.keras.optimizers.Adam):
33 """Variant of the Adam optimizer that handles sparse updates more
34 efficiently.
35
36 The original Adam algorithm maintains two moving-average accumulators for
37 each trainable variable; the accumulators are updated at every step.
38 This class provides lazier handling of gradient updates for sparse
39 variables. It only updates moving-average accumulators for sparse variable
40 indices that appear in the current batch, rather than updating the
41 accumulators for all indices. Compared with the original Adam optimizer,
42 it can provide large improvements in model training throughput for some
43 applications. However, it provides slightly different semantics than the
44 original Adam algorithm, and may lead to different empirical results.
45
46 Note, amsgrad is currently not supported and the argument can only be
47 False.
48 """
49
50 def _resource_apply_sparse(self, grad, var, indices):
51 var_dtype = var.dtype.base_dtype
52 lr_t = self._decayed_lr(var_dtype)
53 beta_1_t = self._get_hyper('beta_1', var_dtype)
54 beta_2_t = self._get_hyper('beta_2', var_dtype)
55 local_step = tf.cast(self.iterations + 1, var_dtype)
56 beta_1_power = tf.math.pow(beta_1_t, local_step)
57 beta_2_power = tf.math.pow(beta_2_t, local_step)
58 epsilon_t = self._get_hyper('epsilon', var_dtype)
59 lr = (lr_t * tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power))
60
61 # \\(m := beta1 * m + (1 - beta1) * g_t\\)
62 m = self.get_slot(var, "m")
63 m_t_slice = beta_1_t * tf.gather(m, indices) + (1 - beta_1_t) * grad
64
65 m_update_kwargs = {
66 'resource': m.handle,
67 'indices': indices,
68 'updates': m_t_slice
69 }
70 m_update_op = tf.raw_ops.ResourceScatterUpdate(**m_update_kwargs)
71
72 # \\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\)
73 v = self.get_slot(var, "v")
74 v_t_slice = (beta_2_t * tf.gather(v, indices) +
75 (1 - beta_2_t) * tf.math.square(grad))
76
77 v_update_kwargs = {
78 'resource': v.handle,
79 'indices': indices,
80 'updates': v_t_slice
81 }
82 v_update_op = tf.raw_ops.ResourceScatterUpdate(**v_update_kwargs)
83
84 # \\(variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\\)
85 var_slice = lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)
86
87 var_update_kwargs = {
88 'resource': var.handle,
89 'indices': indices,
90 'updates': var_slice
91 }
92 var_update_op = tf.raw_ops.ResourceScatterSub(**var_update_kwargs)
93
94 return tf.group(*[var_update_op, m_update_op, v_update_op])
95
[end of tensorflow_addons/optimizers/lazy_adam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/optimizers/lazy_adam.py b/tensorflow_addons/optimizers/lazy_adam.py
--- a/tensorflow_addons/optimizers/lazy_adam.py
+++ b/tensorflow_addons/optimizers/lazy_adam.py
@@ -55,7 +55,7 @@
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_power = tf.math.pow(beta_1_t, local_step)
beta_2_power = tf.math.pow(beta_2_t, local_step)
- epsilon_t = self._get_hyper('epsilon', var_dtype)
+ epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
lr = (lr_t * tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power))
# \\(m := beta1 * m + (1 - beta1) * g_t\\)
| {"golden_diff": "diff --git a/tensorflow_addons/optimizers/lazy_adam.py b/tensorflow_addons/optimizers/lazy_adam.py\n--- a/tensorflow_addons/optimizers/lazy_adam.py\n+++ b/tensorflow_addons/optimizers/lazy_adam.py\n@@ -55,7 +55,7 @@\n local_step = tf.cast(self.iterations + 1, var_dtype)\n beta_1_power = tf.math.pow(beta_1_t, local_step)\n beta_2_power = tf.math.pow(beta_2_t, local_step)\n- epsilon_t = self._get_hyper('epsilon', var_dtype)\n+ epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)\n lr = (lr_t * tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power))\n \n # \\\\(m := beta1 * m + (1 - beta1) * g_t\\\\)\n", "issue": "Build fails because of optimizers\nSome tests for optimizers fail because of this commit https://github.com/tensorflow/tensorflow/commit/2cb745ef1e0b4082a618c81274fca39be0cb4fc6. It can be fixed by replacing `self._get_hyper('epsilon')` with `self.epsilon` when trying to access the value of epsilon. For more details, please refer to https://github.com/tensorflow/tensorflow/commit/2cb745ef1e0b4082a618c81274fca39be0cb4fc6.\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Variant of the Adam optimizer that handles sparse updates more efficiently.\n\nCompared with the original Adam optimizer, the one in this file can\nprovide a large improvement in model training throughput for some\napplications. However, it provides slightly different semantics than the\noriginal Adam algorithm, and may lead to different empirical results.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils import keras_utils\n\n\n@keras_utils.register_keras_custom_object\nclass LazyAdam(tf.keras.optimizers.Adam):\n \"\"\"Variant of the Adam optimizer that handles sparse updates more\n efficiently.\n\n The original Adam algorithm maintains two moving-average accumulators for\n each trainable variable; the accumulators are updated at every step.\n This class provides lazier handling of gradient updates for sparse\n variables. It only updates moving-average accumulators for sparse variable\n indices that appear in the current batch, rather than updating the\n accumulators for all indices. Compared with the original Adam optimizer,\n it can provide large improvements in model training throughput for some\n applications. 
However, it provides slightly different semantics than the\n original Adam algorithm, and may lead to different empirical results.\n\n Note, amsgrad is currently not supported and the argument can only be\n False.\n \"\"\"\n\n def _resource_apply_sparse(self, grad, var, indices):\n var_dtype = var.dtype.base_dtype\n lr_t = self._decayed_lr(var_dtype)\n beta_1_t = self._get_hyper('beta_1', var_dtype)\n beta_2_t = self._get_hyper('beta_2', var_dtype)\n local_step = tf.cast(self.iterations + 1, var_dtype)\n beta_1_power = tf.math.pow(beta_1_t, local_step)\n beta_2_power = tf.math.pow(beta_2_t, local_step)\n epsilon_t = self._get_hyper('epsilon', var_dtype)\n lr = (lr_t * tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power))\n\n # \\\\(m := beta1 * m + (1 - beta1) * g_t\\\\)\n m = self.get_slot(var, \"m\")\n m_t_slice = beta_1_t * tf.gather(m, indices) + (1 - beta_1_t) * grad\n\n m_update_kwargs = {\n 'resource': m.handle,\n 'indices': indices,\n 'updates': m_t_slice\n }\n m_update_op = tf.raw_ops.ResourceScatterUpdate(**m_update_kwargs)\n\n # \\\\(v := beta2 * v + (1 - beta2) * (g_t * g_t)\\\\)\n v = self.get_slot(var, \"v\")\n v_t_slice = (beta_2_t * tf.gather(v, indices) +\n (1 - beta_2_t) * tf.math.square(grad))\n\n v_update_kwargs = {\n 'resource': v.handle,\n 'indices': indices,\n 'updates': v_t_slice\n }\n v_update_op = tf.raw_ops.ResourceScatterUpdate(**v_update_kwargs)\n\n # \\\\(variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\\\\)\n var_slice = lr * m_t_slice / (tf.math.sqrt(v_t_slice) + epsilon_t)\n\n var_update_kwargs = {\n 'resource': var.handle,\n 'indices': indices,\n 'updates': var_slice\n }\n var_update_op = tf.raw_ops.ResourceScatterSub(**var_update_kwargs)\n\n return tf.group(*[var_update_op, m_update_op, v_update_op])\n", "path": "tensorflow_addons/optimizers/lazy_adam.py"}]} | 1,807 | 207 |
gh_patches_debug_20695 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-699 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DOC: Website <->README sync
Following @hoechenberger's PRs I think our README.md (which ends up on PyPI) looks nicer / does better advertising than our docs landing page does
| https://pypi.org/project/mne-bids-pipeline/#description | https://mne.tools/mne-bids-pipeline/1.0/index.html |
| -- | -- |
|  |  |
I propose that we should add the tag line and bullet points from the README.md to the landing page for mne-bids-pipeline, and also double check that our project description matches. This would also make it a bit more like MNE-Python, where our landing page tries to quickly convey "This package can do a lot of cool stuff that you want!".
I propose that we should modify `build_docs.sh` to have a `gen_index.py` that takes the bullet points of README.md and pastes them into the `index.md` for the website.
@hoechenberger @drammock WDYT?
</issue>
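A minimal sketch of the proposed `gen_index.py`, assuming the highlights in README.md are plain markdown bullets and that the landing page lives at `docs/source/index.md` (both are assumptions about the layout, not the final implementation):

```python
# gen_index.py -- copy the bullet points from README.md into the docs landing page.
from pathlib import Path

readme_lines = Path("README.md").read_text(encoding="utf-8").splitlines()
# Keep only top-level markdown bullets; adjust the markers to match the README.
highlights = [line for line in readme_lines if line.startswith(("- ", "* "))]

index = Path("docs/source/index.md")  # assumed location of the landing page
index.write_text(
    index.read_text(encoding="utf-8") + "\n" + "\n".join(highlights) + "\n",
    encoding="utf-8",
)
```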
<code>
[start of docs/hooks.py]
1 import logging
2 from typing import Dict, Any
3
4 from mkdocs.config.defaults import MkDocsConfig
5
6 logger = logging.getLogger("mkdocs")
7
8 config_updated = False
9
10
11 # Ideally there would be a better hook, but it's unclear if context can
12 # be obtained any earlier
13 def on_template_context(
14 context: Dict[str, Any],
15 template_name: str,
16 config: MkDocsConfig,
17 ) -> None:
18 """Update the copyright in the footer."""
19 global config_updated
20 if not config_updated:
21 config_updated = True
22 now = context["build_date_utc"].strftime("%Y/%m/%d")
23 config.copyright = f"{config.copyright}, last updated {now}"
24 logger.info(f"Updated copyright to {config.copyright}")
25
[end of docs/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/hooks.py b/docs/hooks.py
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -2,6 +2,8 @@
from typing import Dict, Any
from mkdocs.config.defaults import MkDocsConfig
+from mkdocs.structure.pages import Page
+from mkdocs.structure.files import Files
logger = logging.getLogger("mkdocs")
@@ -22,3 +24,29 @@
now = context["build_date_utc"].strftime("%Y/%m/%d")
config.copyright = f"{config.copyright}, last updated {now}"
logger.info(f"Updated copyright to {config.copyright}")
+
+
+_EMOJI_MAP = {
+ "🏆": ":trophy:",
+ "🛠️": ":tools:",
+ "📘": ":blue_book:",
+ "🧑🤝🧑": ":people_holding_hands_tone1:",
+ "💻": ":computer:",
+ "🆘": ":sos:",
+ "👣": ":footprints:",
+ "⏩": ":fast_forward:",
+ "⏏️": ":eject:",
+ "☁️": ":cloud:",
+}
+
+
+def on_page_markdown(
+ markdown: str,
+ page: Page,
+ config: MkDocsConfig,
+ files: Files,
+) -> str:
+ if page.file.name == "index" and page.title == "Home":
+ for rd, md in _EMOJI_MAP.items():
+ markdown = markdown.replace(rd, md)
+ return markdown
| {"golden_diff": "diff --git a/docs/hooks.py b/docs/hooks.py\n--- a/docs/hooks.py\n+++ b/docs/hooks.py\n@@ -2,6 +2,8 @@\n from typing import Dict, Any\n \n from mkdocs.config.defaults import MkDocsConfig\n+from mkdocs.structure.pages import Page\n+from mkdocs.structure.files import Files\n \n logger = logging.getLogger(\"mkdocs\")\n \n@@ -22,3 +24,29 @@\n now = context[\"build_date_utc\"].strftime(\"%Y/%m/%d\")\n config.copyright = f\"{config.copyright}, last updated {now}\"\n logger.info(f\"Updated copyright to {config.copyright}\")\n+\n+\n+_EMOJI_MAP = {\n+ \"\ud83c\udfc6\": \":trophy:\",\n+ \"\ud83d\udee0\ufe0f\": \":tools:\",\n+ \"\ud83d\udcd8\": \":blue_book:\",\n+ \"\ud83e\uddd1\u200d\ud83e\udd1d\u200d\ud83e\uddd1\": \":people_holding_hands_tone1:\",\n+ \"\ud83d\udcbb\": \":computer:\",\n+ \"\ud83c\udd98\": \":sos:\",\n+ \"\ud83d\udc63\": \":footprints:\",\n+ \"\u23e9\": \":fast_forward:\",\n+ \"\u23cf\ufe0f\": \":eject:\",\n+ \"\u2601\ufe0f\": \":cloud:\",\n+}\n+\n+\n+def on_page_markdown(\n+ markdown: str,\n+ page: Page,\n+ config: MkDocsConfig,\n+ files: Files,\n+) -> str:\n+ if page.file.name == \"index\" and page.title == \"Home\":\n+ for rd, md in _EMOJI_MAP.items():\n+ markdown = markdown.replace(rd, md)\n+ return markdown\n", "issue": "DOC: Website <->README sync\nFollowing @hoechenberger's PRs I think our README.md (which ends up on PyPI) looks nicer / does better advertising than our docs landing page does\r\n\r\n| https://pypi.org/project/mne-bids-pipeline/#description | https://mne.tools/mne-bids-pipeline/1.0/index.html |\r\n| -- | -- |\r\n|  |  |\r\n\r\nI propose that we should add the tag line and bullet points from the README.md to the landing page for mne-bids-pipeline, and also double check that our project description matches. This would also make it a bit more like MNE-Python, where our landing page tries to quickly convey \"This package can do a lot of cool stuff that you want!\".\r\n\r\nI propose that we should modify `build_docs.sh` to have a `gen_index.py` that takes the bullet points of README.md and pastes them into the `index.md` for the website.\r\n\r\n@hoechenberger @drammock WDYT?\n", "before_files": [{"content": "import logging\nfrom typing import Dict, Any\n\nfrom mkdocs.config.defaults import MkDocsConfig\n\nlogger = logging.getLogger(\"mkdocs\")\n\nconfig_updated = False\n\n\n# Ideally there would be a better hook, but it's unclear if context can\n# be obtained any earlier\ndef on_template_context(\n context: Dict[str, Any],\n template_name: str,\n config: MkDocsConfig,\n) -> None:\n \"\"\"Update the copyright in the footer.\"\"\"\n global config_updated\n if not config_updated:\n config_updated = True\n now = context[\"build_date_utc\"].strftime(\"%Y/%m/%d\")\n config.copyright = f\"{config.copyright}, last updated {now}\"\n logger.info(f\"Updated copyright to {config.copyright}\")\n", "path": "docs/hooks.py"}]} | 1,110 | 336 |
gh_patches_debug_48777 | rasdani/github-patches | git_diff | kymatio__kymatio-288 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG 3D benchmark fails on GPU
It doesn't call the `cuda()` function (now required) when giving a CUDA tensor input, so the scattering transform errors.
</issue>
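A minimal sketch of the missing step, mirroring the benchmark's parameters: with the `torch` backend the scattering object itself has to be moved to the GPU before it is fed CUDA tensors.

```python
import torch
from kymatio import HarmonicScattering3D

M, N, O, J, L = 128, 128, 128, 2, 2
scattering = HarmonicScattering3D(J, shape=(M, N, O), L=L, sigma_0=1)
scattering.method = 'integral'
scattering.integral_powers = [1., 2.]

x = torch.randn(2, M, N, O, dtype=torch.float32)
if torch.cuda.is_available():
    scattering.cuda()  # move the transform's filters to the GPU as well
    x = x.cuda()
scattering.forward(x)
```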
<code>
[start of examples/3d/compute_speed.py]
1 """
2 3D scattering transform benchmark
3 =================================
4 We compute scattering transforms for volume maps of size `128`-by-`128`-by-
5 `128`, with averaging scale `2**2 = 4` and maximum spherical harmonic
6 order `L = 2`. The volumes are stacked into batches of size `batch_size = 8`
7 and the transforms are computed `10` times to get an average running time.
8 """
9
10 ###############################################################################
11 # Preliminaries
12 # -------------
13 # Since kymatio handles PyTorch arrays, we first import `torch`.
14
15 import torch
16
17 ###############################################################################
18 # To measure the running time of the implementation, we use the `time` package.
19
20 import time
21
22 ###############################################################################
23 # The performance of the implementation depends on which "backend" is used. We
24 # therefore want to report the name of the backend when presenting the results.
25 # Certain backends are also GPU-only, so we want to detect that before running
26 # the benchmark.
27
28 import kymatio.scattering3d.backend as backend
29
30 ###############################################################################
31 # Finally, we import the `HarmonicScattering3D` class that computes the scattering
32 # transform.
33
34 from kymatio import HarmonicScattering3D
35 ###############################################################################
36 # Benchmark setup
37 # --------------------
38 # First, we set up some basic parameters: the volume width `M`, height `N`,
39 # and depth 'O', the maximum number of the spherical harmonics `L`, and the
40 # maximum scale `2**J`. Here, we consider cubic volumes of size `128`, with
41 # a maximum scale of `2**2 = 4` and maximum spherical harmonic order of `2`.
42
43 M, N, O = 128, 128, 128
44 J = 2
45 L = 2
46
47 integral_powers = [1., 2.]
48 sigma_0 = 1
49
50 ###############################################################################
51 # To squeeze the maximum performance out of the implementation, we apply it to
52 # a batch of `8` volumes. Larger batch sizes do not yield increased efficiency,
53 # but smaller values increases the influence of overhead on the running time.
54
55 batch_size = 8
56
57 ###############################################################################
58 # We repeat the benchmark `10` times and compute the average running time to
59 # get a reasonable estimate.
60
61 times = 10
62
63 ###############################################################################
64 # Determine which devices (CPU or GPU) that are supported by the current
65 # backend.
66
67 if backend.NAME == 'torch':
68 devices = ['cpu', 'gpu']
69 elif backend.NAME == 'skcuda':
70 devices = ['gpu']
71
72 ###############################################################################
73 # Set up the scattering object and the test data
74 # ----------------------------------------------
75
76 ###############################################################################
77 # Create the `HarmonicScattering3D` object using the given parameters and generate
78 # some compatible test data with the specified batch size.
79
80 scattering = HarmonicScattering3D(J, shape=(M, N, O), L=L, sigma_0=sigma_0)
81
82 x = torch.randn(batch_size, M, N, O, dtype=torch.float32)
83
84 ###############################################################################
85 # Run the benchmark
86 # -----------------
87 # For each device, we need to convert the Tensor `x` to the appropriate type,
88 # invoke `times` calls to `scattering.forward` and print the running times.
89 # Before the timer starts, we add an extra `scattering.forward` call to ensure
90 # any first-time overhead, such as memory allocation and CUDA kernel
91 # compilation, is not counted. If the benchmark is running on the GPU, we also
92 # need to call `torch.cuda.synchronize()` before and after the benchmark to
93 # make sure that all CUDA kernels have finished executing.
94
95 for device in devices:
96 fmt_str = '==> Testing Float32 with {} backend, on {}, forward'
97 print(fmt_str.format(backend.NAME, device.upper()))
98
99 if device == 'gpu':
100 x = x.cuda()
101 else:
102 x = x.cpu()
103
104 scattering.method = 'integral'
105 scattering.integral_powers = integral_powers
106
107 scattering.forward(x)
108
109 if device == 'gpu':
110 torch.cuda.synchronize()
111
112 t_start = time.time()
113 for _ in range(times):
114 scattering.forward(x)
115
116 if device == 'gpu':
117 torch.cuda.synchronize()
118
119 t_elapsed = time.time() - t_start
120
121 fmt_str = 'Elapsed time: {:2f} [s / {:d} evals], avg: {:.2f} (s/batch)'
122 print(fmt_str.format(t_elapsed, times, t_elapsed/times))
123
124 ###############################################################################
125 # The resulting output should be something like
126 #
127 # .. code-block:: text
128 #
129 # ==> Testing Float32 with torch backend, on CPU, forward
130 # Elapsed time: 109.739110 [s / 10 evals], avg: 10.97 (s/batch)
131 # ==> Testing Float32 with torch backend, on GPU, forward
132 # Elapsed time: 60.476041 [s / 10 evals], avg: 6.05 (s/batch)
133
[end of examples/3d/compute_speed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/3d/compute_speed.py b/examples/3d/compute_speed.py
--- a/examples/3d/compute_speed.py
+++ b/examples/3d/compute_speed.py
@@ -97,8 +97,10 @@
print(fmt_str.format(backend.NAME, device.upper()))
if device == 'gpu':
+ scattering.cuda()
x = x.cuda()
else:
+ scattering.cpu()
x = x.cpu()
scattering.method = 'integral'
| {"golden_diff": "diff --git a/examples/3d/compute_speed.py b/examples/3d/compute_speed.py\n--- a/examples/3d/compute_speed.py\n+++ b/examples/3d/compute_speed.py\n@@ -97,8 +97,10 @@\n print(fmt_str.format(backend.NAME, device.upper()))\n \n if device == 'gpu':\n+ scattering.cuda()\n x = x.cuda()\n else:\n+ scattering.cpu()\n x = x.cpu()\n \n scattering.method = 'integral'\n", "issue": "BUG 3D benchmark fails on GPU\nIt doesn't call the `cuda()` function (now required) when giving a CUDA tensor input, so the scattering transform errors.\n", "before_files": [{"content": "\"\"\"\n3D scattering transform benchmark\n=================================\nWe compute scattering transforms for volume maps of size `128`-by-`128`-by-\n`128`, with averaging scale `2**2 = 4` and maximum spherical harmonic\norder `L = 2`. The volumes are stacked into batches of size `batch_size = 8`\nand the transforms are computed `10` times to get an average running time.\n\"\"\"\n\n###############################################################################\n# Preliminaries\n# -------------\n# Since kymatio handles PyTorch arrays, we first import `torch`.\n\nimport torch\n\n###############################################################################\n# To measure the running time of the implementation, we use the `time` package.\n\nimport time\n\n###############################################################################\n# The performance of the implementation depends on which \"backend\" is used. We\n# therefore want to report the name of the backend when presenting the results.\n# Certain backends are also GPU-only, we we want to detect that before running\n# the benchmark.\n\nimport kymatio.scattering3d.backend as backend\n\n###############################################################################\n# Finally, we import the `HarmonicScattering3D` class that computes the scattering\n# transform.\n\nfrom kymatio import HarmonicScattering3D\n###############################################################################\n# Benchmark setup\n# --------------------\n# First, we set up some basic parameters: the volume width `M`, height `N`,\n# and depth 'O', the maximum number of the spherical harmonics `L`, and the\n# maximum scale `2**J`. Here, we consider cubic volumes of size `128`, with\n# a maximum scale of `2**2 = 4` and maximum spherical harmonic order of `2`.\n\nM, N, O = 128, 128, 128\nJ = 2\nL = 2\n\nintegral_powers = [1., 2.]\nsigma_0 = 1\n\n###############################################################################\n# To squeeze the maximum performance out of the implementation, we apply it to\n# a batch of `8` volumes. 
Larger batch sizes do not yield increased efficiency,\n# but smaller values increases the influence of overhead on the running time.\n\nbatch_size = 8\n\n###############################################################################\n# We repeat the benchmark `10` times and compute the average running time to\n# get a reasonable estimate.\n\ntimes = 10\n\n###############################################################################\n# Determine which devices (CPU or GPU) that are supported by the current\n# backend.\n\nif backend.NAME == 'torch':\n devices = ['cpu', 'gpu']\nelif backend.NAME == 'skcuda':\n devices = ['gpu']\n\n###############################################################################\n# Set up the scattering object and the test data\n# ----------------------------------------------\n\n###############################################################################\n# Create the `HarmonicScattering3D` object using the given parameters and generate\n# some compatible test data with the specified batch size.\n\nscattering = HarmonicScattering3D(J, shape=(M, N, O), L=L, sigma_0=sigma_0)\n\nx = torch.randn(batch_size, M, N, O, dtype=torch.float32)\n\n###############################################################################\n# Run the benchmark\n# -----------------\n# For each device, we need to convert the Tensor `x` to the appropriate type,\n# invoke `times` calls to `scattering.forward` and print the running times.\n# Before the timer starts, we add an extra `scattering.forward` call to ensure\n# any first-time overhead, such as memory allocation and CUDA kernel\n# compilation, is not counted. If the benchmark is running on the GPU, we also\n# need to call `torch.cuda.synchronize()` before and after the benchmark to\n# make sure that all CUDA kernels have finished executing.\n\nfor device in devices:\n fmt_str = '==> Testing Float32 with {} backend, on {}, forward'\n print(fmt_str.format(backend.NAME, device.upper()))\n\n if device == 'gpu':\n x = x.cuda()\n else:\n x = x.cpu()\n\n scattering.method = 'integral'\n scattering.integral_powers = integral_powers\n\n scattering.forward(x)\n\n if device == 'gpu':\n torch.cuda.synchronize()\n\n t_start = time.time()\n for _ in range(times):\n scattering.forward(x)\n\n if device == 'gpu':\n torch.cuda.synchronize()\n\n t_elapsed = time.time() - t_start\n\n fmt_str = 'Elapsed time: {:2f} [s / {:d} evals], avg: {:.2f} (s/batch)'\n print(fmt_str.format(t_elapsed, times, t_elapsed/times))\n\n###############################################################################\n# The resulting output should be something like\n#\n# .. code-block:: text\n#\n# ==> Testing Float32 with torch backend, on CPU, forward\n# Elapsed time: 109.739110 [s / 10 evals], avg: 10.97 (s/batch)\n# ==> Testing Float32 with torch backend, on GPU, forward\n# Elapsed time: 60.476041 [s / 10 evals], avg: 6.05 (s/batch)\n", "path": "examples/3d/compute_speed.py"}]} | 1,969 | 111 |
gh_patches_debug_1004 | rasdani/github-patches | git_diff | cloudtools__troposphere-2238 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update DLM Interval Rule Values
Update DLM valid intervals. `1` has been added.
[DLM CreateRule interval documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-dlm-lifecyclepolicy-createrule.html)
</issue>
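Once `1` is added to the valid set, an hourly policy passes validation; a quick usage check (only meaningful after the change is applied):

```python
from troposphere.validators.dlm import validate_interval

validate_interval(1)   # hourly snapshots, newly allowed by the AWS documentation
validate_interval(24)  # still valid
```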
<code>
[start of troposphere/validators/dlm.py]
1 # Copyright (c) 2012-2022, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6
7 from . import tags_or_list
8
9
10 def validate_tags_or_list(x):
11 """
12 Property: LifecyclePolicy.Tags
13 Property: PolicyDetails.TargetTags
14 Property: Schedule.TagsToAdd
15 """
16 return tags_or_list(x)
17
18
19 def validate_interval(interval):
20 """
21 Interval validation rule.
22 Property: CreateRule.Interval
23 """
24
25 VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24)
26
27 if interval not in VALID_INTERVALS:
28 raise ValueError(
29 "Interval must be one of : %s"
30 % ", ".join([str(i) for i in VALID_INTERVALS])
31 )
32 return interval
33
34
35 def validate_interval_unit(interval_unit):
36 """
37 Interval unit validation rule.
38 Property: CreateRule.IntervalUnit
39 """
40
41 VALID_INTERVAL_UNITS = ("HOURS",)
42
43 if interval_unit not in VALID_INTERVAL_UNITS:
44 raise ValueError(
45 "Interval unit must be one of : %s" % ", ".join(VALID_INTERVAL_UNITS)
46 )
47 return interval_unit
48
49
50 def validate_state(state):
51 """
52 State validation rule.
53 Property: LifecyclePolicy.State
54 """
55
56 VALID_STATES = ("ENABLED", "DISABLED")
57
58 if state not in VALID_STATES:
59 raise ValueError("State must be one of : %s" % ", ".join(VALID_STATES))
60 return state
61
[end of troposphere/validators/dlm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/validators/dlm.py b/troposphere/validators/dlm.py
--- a/troposphere/validators/dlm.py
+++ b/troposphere/validators/dlm.py
@@ -22,7 +22,7 @@
Property: CreateRule.Interval
"""
- VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24)
+ VALID_INTERVALS = (1, 2, 3, 4, 6, 8, 12, 24)
if interval not in VALID_INTERVALS:
raise ValueError(
| {"golden_diff": "diff --git a/troposphere/validators/dlm.py b/troposphere/validators/dlm.py\n--- a/troposphere/validators/dlm.py\n+++ b/troposphere/validators/dlm.py\n@@ -22,7 +22,7 @@\n Property: CreateRule.Interval\n \"\"\"\n \n- VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24)\n+ VALID_INTERVALS = (1, 2, 3, 4, 6, 8, 12, 24)\n \n if interval not in VALID_INTERVALS:\n raise ValueError(\n", "issue": "Update DLM Interval Rule Values\nUpdate DLM valid intervals. `1` has been added.\r\n\r\n[DLM interval rule allows ](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-dlm-lifecyclepolicy-createrule.html)\n", "before_files": [{"content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\n\nfrom . import tags_or_list\n\n\ndef validate_tags_or_list(x):\n \"\"\"\n Property: LifecyclePolicy.Tags\n Property: PolicyDetails.TargetTags\n Property: Schedule.TagsToAdd\n \"\"\"\n return tags_or_list(x)\n\n\ndef validate_interval(interval):\n \"\"\"\n Interval validation rule.\n Property: CreateRule.Interval\n \"\"\"\n\n VALID_INTERVALS = (2, 3, 4, 6, 8, 12, 24)\n\n if interval not in VALID_INTERVALS:\n raise ValueError(\n \"Interval must be one of : %s\"\n % \", \".join([str(i) for i in VALID_INTERVALS])\n )\n return interval\n\n\ndef validate_interval_unit(interval_unit):\n \"\"\"\n Interval unit validation rule.\n Property: CreateRule.IntervalUnit\n \"\"\"\n\n VALID_INTERVAL_UNITS = (\"HOURS\",)\n\n if interval_unit not in VALID_INTERVAL_UNITS:\n raise ValueError(\n \"Interval unit must be one of : %s\" % \", \".join(VALID_INTERVAL_UNITS)\n )\n return interval_unit\n\n\ndef validate_state(state):\n \"\"\"\n State validation rule.\n Property: LifecyclePolicy.State\n \"\"\"\n\n VALID_STATES = (\"ENABLED\", \"DISABLED\")\n\n if state not in VALID_STATES:\n raise ValueError(\"State must be one of : %s\" % \", \".join(VALID_STATES))\n return state\n", "path": "troposphere/validators/dlm.py"}]} | 1,046 | 142 |
gh_patches_debug_23227 | rasdani/github-patches | git_diff | deepchecks__deepchecks-405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: model info check fails when model is sklearn pipeline
to reproduce:
https://www.kaggle.com/itay94/notebook6f16624759

</issue>
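The crash comes from the check re-instantiating `type(model)()` to read default parameters, which fails for a `Pipeline` (it cannot be constructed without `steps`). A rough sketch of the usual workaround — report on the pipeline's final estimator instead — using plain scikit-learn (names illustrative):

```python
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

model = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression(C=0.5))])

# Unwrap the final step when the model is a Pipeline, otherwise use it as-is.
estimator = model.steps[-1][1] if isinstance(model, Pipeline) else model
print(type(estimator).__name__)        # LogisticRegression
print(estimator.get_params())          # estimator parameters
print(type(estimator)().get_params())  # default params -- works, unlike Pipeline()
```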
<code>
[start of deepchecks/checks/overview/model_info.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module contains model_info check."""
12 import pandas as pd
13 from sklearn.base import BaseEstimator
14
15 from deepchecks import ModelOnlyBaseCheck, CheckResult
16 from deepchecks.utils.validation import model_type_validation
17
18
19 __all__ = ['ModelInfo']
20
21
22 class ModelInfo(ModelOnlyBaseCheck):
23 """Summarize given model parameters."""
24
25 def run(self, model: BaseEstimator) -> CheckResult:
26 """Run check.
27
28 Args:
29 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance
30
31 Returns:
32 CheckResult: value is dictionary in format {type: <model_type>, params: <model_params_dict>}
33 """
34 return self._model_info(model)
35
36 def _model_info(self, model: BaseEstimator):
37 model_type_validation(model)
38 model_type = type(model).__name__
39 model_params = model.get_params()
40 default_params = type(model)().get_params()
41
42 # Create dataframe to show
43 model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])
44 model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))
45
46 def highlight_not_default(data):
47 n = len(data)
48 if data['Value'] != data['Default']:
49 return n * ['background-color: lightblue']
50 else:
51 return n * ['']
52
53 model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()
54
55 value = {'type': model_type, 'params': model_params}
56 footnote = '<p style="font-size:0.7em"><i>Colored rows are parameters with non-default values</i></p>'
57 display = [f'Model Type: {model_type}', model_param_df, footnote]
58
59 return CheckResult(value, header='Model Info', display=display)
60
[end of deepchecks/checks/overview/model_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/checks/overview/model_info.py b/deepchecks/checks/overview/model_info.py
--- a/deepchecks/checks/overview/model_info.py
+++ b/deepchecks/checks/overview/model_info.py
@@ -14,6 +14,7 @@
from deepchecks import ModelOnlyBaseCheck, CheckResult
from deepchecks.utils.validation import model_type_validation
+from deepchecks.utils.model import get_model_of_pipeline
__all__ = ['ModelInfo']
@@ -31,13 +32,11 @@
Returns:
CheckResult: value is dictionary in format {type: <model_type>, params: <model_params_dict>}
"""
- return self._model_info(model)
-
- def _model_info(self, model: BaseEstimator):
model_type_validation(model)
- model_type = type(model).__name__
- model_params = model.get_params()
- default_params = type(model)().get_params()
+ estimator = get_model_of_pipeline(model)
+ model_type = type(estimator).__name__
+ model_params = estimator.get_params()
+ default_params = type(estimator)().get_params()
# Create dataframe to show
model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])
| {"golden_diff": "diff --git a/deepchecks/checks/overview/model_info.py b/deepchecks/checks/overview/model_info.py\n--- a/deepchecks/checks/overview/model_info.py\n+++ b/deepchecks/checks/overview/model_info.py\n@@ -14,6 +14,7 @@\n \n from deepchecks import ModelOnlyBaseCheck, CheckResult\n from deepchecks.utils.validation import model_type_validation\n+from deepchecks.utils.model import get_model_of_pipeline\n \n \n __all__ = ['ModelInfo']\n@@ -31,13 +32,11 @@\n Returns:\n CheckResult: value is dictionary in format {type: <model_type>, params: <model_params_dict>}\n \"\"\"\n- return self._model_info(model)\n-\n- def _model_info(self, model: BaseEstimator):\n model_type_validation(model)\n- model_type = type(model).__name__\n- model_params = model.get_params()\n- default_params = type(model)().get_params()\n+ estimator = get_model_of_pipeline(model)\n+ model_type = type(estimator).__name__\n+ model_params = estimator.get_params()\n+ default_params = type(estimator)().get_params()\n \n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\n", "issue": "BUG: model info check fails when model is sklearn pipeline\nto reproduce:\r\nhttps://www.kaggle.com/itay94/notebook6f16624759\r\n\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module contains model_info check.\"\"\"\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\n\nfrom deepchecks import ModelOnlyBaseCheck, CheckResult\nfrom deepchecks.utils.validation import model_type_validation\n\n\n__all__ = ['ModelInfo']\n\n\nclass ModelInfo(ModelOnlyBaseCheck):\n \"\"\"Summarize given model parameters.\"\"\"\n\n def run(self, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n\n Returns:\n CheckResult: value is dictionary in format {type: <model_type>, params: <model_params_dict>}\n \"\"\"\n return self._model_info(model)\n\n def _model_info(self, model: BaseEstimator):\n model_type_validation(model)\n model_type = type(model).__name__\n model_params = model.get_params()\n default_params = type(model)().get_params()\n\n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\n model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))\n\n def highlight_not_default(data):\n n = len(data)\n if data['Value'] != data['Default']:\n return n * ['background-color: lightblue']\n else:\n return n * ['']\n\n model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()\n\n value = {'type': model_type, 'params': model_params}\n footnote = '<p style=\"font-size:0.7em\"><i>Colored rows are parameters with non-default values</i></p>'\n display = [f'Model Type: {model_type}', model_param_df, footnote]\n\n return CheckResult(value, header='Model Info', display=display)\n", "path": "deepchecks/checks/overview/model_info.py"}]} | 1,254 | 281 |
gh_patches_debug_25992 | rasdani/github-patches | git_diff | joke2k__faker-314 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Param switches on faker.password() don't guarantee valid password
The format switches on `faker.password()` (`special_chars, digits, upper_case, lower_case`) don't always return passwords matching those rules.
This is problematic when using generated passwords in unit tests: where passwords must conform to validity rules (e.g. "must contain numbers"), tests can randomly fail.

I expected that these switches would guarantee the function returns a conforming password, e.g. `faker.password(digits=True)` always returns a password containing digits, but this is not the case.
</issue>
<code>
[start of faker/providers/misc/__init__.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4 import hashlib
5 import string
6 import uuid
7
8 from faker.generator import random
9 from faker.providers.date_time import Provider as DatetimeProvider
10
11 from .. import BaseProvider
12
13
14 class Provider(BaseProvider):
15 language_codes = ('cn', 'de', 'el', 'en', 'es', 'fr', 'it', 'pt', 'ru')
16
17 @classmethod
18 def boolean(cls, chance_of_getting_true=50):
19 return random.randint(1, 100) <= chance_of_getting_true
20
21 @classmethod
22 def null_boolean(cls):
23 return {
24 0: None,
25 1: True,
26 -1: False
27 }[random.randint(-1, 1)]
28
29 @classmethod
30 def md5(cls, raw_output=False):
31 """
32 Calculates the md5 hash of a given string
33 :example 'cfcd208495d565ef66e7dff9f98764da'
34 """
35 res = hashlib.md5(str(random.random()).encode('utf-8'))
36 if raw_output:
37 return res.digest()
38 return res.hexdigest()
39
40 @classmethod
41 def sha1(cls, raw_output=False):
42 """
43 Calculates the sha1 hash of a given string
44 :example 'b5d86317c2a144cd04d0d7c03b2b02666fafadf2'
45 """
46 res = hashlib.sha1(str(random.random()).encode('utf-8'))
47 if raw_output:
48 return res.digest()
49 return res.hexdigest()
50
51 @classmethod
52 def sha256(cls, raw_output=False):
53 """
54 Calculates the sha256 hash of a given string
55 :example '85086017559ccc40638fcde2fecaf295e0de7ca51b7517b6aebeaaf75b4d4654'
56 """
57 res = hashlib.sha256(str(random.random()).encode('utf-8'))
58 if raw_output:
59 return res.digest()
60 return res.hexdigest()
61
62 def locale(self):
63 return self.language_code() + '_' + self.country_code()
64
65 @classmethod
66 def country_code(cls):
67 return cls.random_element(DatetimeProvider.countries)['code']
68
69 @classmethod
70 def language_code(cls):
71 return cls.random_element(cls.language_codes)
72
73 @classmethod
74 def uuid4(cls):
75 """
76 Generates a random UUID4 string.
77 """
78 return str(uuid.uuid4())
79
80 @classmethod
81 def password(cls, length=10, special_chars=True, digits=True, upper_case=True, lower_case=True):
82 """
83 Generates a random password.
84 @param length: Integer. Length of a password
85 @param special_chars: Boolean. Whether to use special characters !@#$%^&*()_+
86 @param digits: Boolean. Whether to use digits
87 @param upper_case: Boolean. Whether to use upper letters
88 @param lower_case: Boolean. Whether to use lower letters
89 @return: String. Random password
90 """
91 chars = ""
92 if special_chars:
93 chars += "!@#$%^&*()_+"
94 if digits:
95 chars += string.digits
96 if upper_case:
97 chars += string.ascii_uppercase
98 if lower_case:
99 chars += string.ascii_lowercase
100 return ''.join(random.choice(chars) for x in range(length))
101
[end of faker/providers/misc/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/misc/__init__.py b/faker/providers/misc/__init__.py
--- a/faker/providers/misc/__init__.py
+++ b/faker/providers/misc/__init__.py
@@ -88,13 +88,33 @@
@param lower_case: Boolean. Whether to use lower letters
@return: String. Random password
"""
- chars = ""
+ choices = ""
+ required_tokens = []
if special_chars:
- chars += "!@#$%^&*()_+"
+ required_tokens.append(random.choice("!@#$%^&*()_+"))
+ choices += "!@#$%^&*()_+"
if digits:
- chars += string.digits
+ required_tokens.append(random.choice(string.digits))
+ choices += string.digits
if upper_case:
- chars += string.ascii_uppercase
+ required_tokens.append(random.choice(string.ascii_uppercase))
+ choices += string.ascii_uppercase
if lower_case:
- chars += string.ascii_lowercase
- return ''.join(random.choice(chars) for x in range(length))
+ required_tokens.append(random.choice(string.ascii_lowercase))
+ choices += string.ascii_lowercase
+
+ assert len(required_tokens) <= length, "Required length is shorter than required characters"
+
+ # Generate a first version of the password
+ chars = [random.choice(choices) for x in range(length)]
+
+ # Pick some unique locations
+ random_indexes = set()
+ while len(random_indexes) < len(required_tokens):
+ random_indexes.add(random.randint(0, len(chars) - 1))
+
+ # Replace them with the required characters
+ for i, index in enumerate(random_indexes):
+ chars[index] = required_tokens[i]
+
+ return ''.join(chars)
| {"golden_diff": "diff --git a/faker/providers/misc/__init__.py b/faker/providers/misc/__init__.py\n--- a/faker/providers/misc/__init__.py\n+++ b/faker/providers/misc/__init__.py\n@@ -88,13 +88,33 @@\n @param lower_case: Boolean. Whether to use lower letters\n @return: String. Random password\n \"\"\"\n- chars = \"\"\n+ choices = \"\"\n+ required_tokens = []\n if special_chars:\n- chars += \"!@#$%^&*()_+\"\n+ required_tokens.append(random.choice(\"!@#$%^&*()_+\"))\n+ choices += \"!@#$%^&*()_+\"\n if digits:\n- chars += string.digits\n+ required_tokens.append(random.choice(string.digits))\n+ choices += string.digits\n if upper_case:\n- chars += string.ascii_uppercase\n+ required_tokens.append(random.choice(string.ascii_uppercase))\n+ choices += string.ascii_uppercase\n if lower_case:\n- chars += string.ascii_lowercase\n- return ''.join(random.choice(chars) for x in range(length))\n+ required_tokens.append(random.choice(string.ascii_lowercase))\n+ choices += string.ascii_lowercase\n+\n+ assert len(required_tokens) <= length, \"Required length is shorter than required characters\"\n+\n+ # Generate a first version of the password\n+ chars = [random.choice(choices) for x in range(length)]\n+\n+ # Pick some unique locations\n+ random_indexes = set()\n+ while len(random_indexes) < len(required_tokens):\n+ random_indexes.add(random.randint(0, len(chars) - 1))\n+\n+ # Replace them with the required characters\n+ for i, index in enumerate(random_indexes):\n+ chars[index] = required_tokens[i]\n+\n+ return ''.join(chars)\n", "issue": "Param switches on faker.password() don't guarantee valid password\nThe format switches on `faker.password()` (`special_chars, digits, upper_case, lower_case`) don't always return passwords matching those rules.\n\nThis is problematic as when using generated passwords in unit tests, where passwords must conform to validity rules (e.g. \"must contain numbers\"), tests can randomly fail.\n\nI expected that these switches would guarantee the function returns a conforming password. e.g. `faker.password(digits=True)` always returns a password containing digits, but this is not the case.\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nimport hashlib\nimport string\nimport uuid\n\nfrom faker.generator import random\nfrom faker.providers.date_time import Provider as DatetimeProvider\n\nfrom .. 
import BaseProvider\n\n\nclass Provider(BaseProvider):\n language_codes = ('cn', 'de', 'el', 'en', 'es', 'fr', 'it', 'pt', 'ru')\n\n @classmethod\n def boolean(cls, chance_of_getting_true=50):\n return random.randint(1, 100) <= chance_of_getting_true\n\n @classmethod\n def null_boolean(cls):\n return {\n 0: None,\n 1: True,\n -1: False\n }[random.randint(-1, 1)]\n\n @classmethod\n def md5(cls, raw_output=False):\n \"\"\"\n Calculates the md5 hash of a given string\n :example 'cfcd208495d565ef66e7dff9f98764da'\n \"\"\"\n res = hashlib.md5(str(random.random()).encode('utf-8'))\n if raw_output:\n return res.digest()\n return res.hexdigest()\n\n @classmethod\n def sha1(cls, raw_output=False):\n \"\"\"\n Calculates the sha1 hash of a given string\n :example 'b5d86317c2a144cd04d0d7c03b2b02666fafadf2'\n \"\"\"\n res = hashlib.sha1(str(random.random()).encode('utf-8'))\n if raw_output:\n return res.digest()\n return res.hexdigest()\n\n @classmethod\n def sha256(cls, raw_output=False):\n \"\"\"\n Calculates the sha256 hash of a given string\n :example '85086017559ccc40638fcde2fecaf295e0de7ca51b7517b6aebeaaf75b4d4654'\n \"\"\"\n res = hashlib.sha256(str(random.random()).encode('utf-8'))\n if raw_output:\n return res.digest()\n return res.hexdigest()\n\n def locale(self):\n return self.language_code() + '_' + self.country_code()\n\n @classmethod\n def country_code(cls):\n return cls.random_element(DatetimeProvider.countries)['code']\n\n @classmethod\n def language_code(cls):\n return cls.random_element(cls.language_codes)\n\n @classmethod\n def uuid4(cls):\n \"\"\"\n Generates a random UUID4 string.\n \"\"\"\n return str(uuid.uuid4())\n\n @classmethod\n def password(cls, length=10, special_chars=True, digits=True, upper_case=True, lower_case=True):\n \"\"\"\n Generates a random password.\n @param length: Integer. Length of a password\n @param special_chars: Boolean. Whether to use special characters !@#$%^&*()_+\n @param digits: Boolean. Whether to use digits\n @param upper_case: Boolean. Whether to use upper letters\n @param lower_case: Boolean. Whether to use lower letters\n @return: String. Random password\n \"\"\"\n chars = \"\"\n if special_chars:\n chars += \"!@#$%^&*()_+\"\n if digits:\n chars += string.digits\n if upper_case:\n chars += string.ascii_uppercase\n if lower_case:\n chars += string.ascii_lowercase\n return ''.join(random.choice(chars) for x in range(length))\n", "path": "faker/providers/misc/__init__.py"}]} | 1,624 | 395 |
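A rough property check for the guarantee the faker patch above is meant to provide; it assumes a Faker build that includes the fix and keeps the same `password()` signature.

```python
# Sketch only: verifies every requested character class actually appears.
import string
from faker import Faker

fake = Faker()
for _ in range(100):
    pw = fake.password(length=10, special_chars=True, digits=True,
                       upper_case=True, lower_case=True)
    assert any(c in "!@#$%^&*()_+" for c in pw)
    assert any(c in string.digits for c in pw)
    assert any(c in string.ascii_uppercase for c in pw)
    assert any(c in string.ascii_lowercase for c in pw)
```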
gh_patches_debug_42011 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update OSSEC to v3.0
## Description
OSSEC 3.0 was released on July 17th 2018[0], containing a large number of bug fixes (including 2 security fixes) as well as major new functionality. Of note, it supports whitelisting syscheck md5 hashes in a sqlite database, potentially reducing notification noise.
## User Research Evidence
Users like up-to-date packages
## User Stories
As a SecureDrop administrator, I would like to have all packages updated and would like to minimize alerts/noise.
[0] https://github.com/ossec/ossec-hids/releases
</issue>
<code>
[start of install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py]
1 #!/usr/bin/env python
2 DOCUMENTATION = '''
3 ---
4 module: ossec_urls
5 short_description: Gather facts for OSSEC download URLs
6 description:
7 - Gather version, checksum, and URL info for OSSEC downloads
8 author:
9 - Conor Schaefer (@conorsch)
10 - Freedom of the Press Foundation (@freedomofpress)
11 requirements:
12 - requests
13 options:
14 ossec_version:
15 description:
16 - version number of release to download
17 default: "2.8.2"
18 required: no
19 notes:
20 - The OSSEC version to download is hardcoded to avoid surprises.
21 If you want a newer version than the current default, you should
22 pass the version in via I(ossec_version).
23 '''
24 EXAMPLES = '''
25 - ossec_urls:
26 ossec_version: "2.8.2"
27 '''
28
29 import re # noqa: E402
30
31
32 HAS_REQUESTS = True
33 try:
34 import requests
35 except ImportError:
36 HAS_REQUESTS = False
37
38
39 class OSSECURLs():
40
41 def __init__(self, ossec_version):
42 self.ossec_version = ossec_version
43
44 checksums = self.parse_checksums()
45
46 self.ansible_facts = dict(
47 ossec_version=self.ossec_version,
48 ossec_tarball_filename=self.ossec_tarball_filename,
49 ossec_tarball_url=self.ossec_tarball_url,
50 ossec_checksum_filename=self.ossec_checksum_filename,
51 ossec_checksum_url=self.ossec_checksum_url,
52 )
53
54 self.ansible_facts.update(checksums)
55
56 @property
57 def ossec_tarball_filename(self):
58 return "ossec-hids-{}.tar.gz".format(self.ossec_version)
59
60 @property
61 def ossec_tarball_url(self):
62 return "https://github.com/ossec/ossec-hids/archive/{}.tar.gz".format(
63 self.ossec_version)
64
65 @property
66 def ossec_checksum_url(self):
67 return "https://github.com/ossec/ossec-hids/releases/download/{}/{}".format( # noqa: E501
68 self.ossec_version, self.ossec_checksum_filename)
69
70 @property
71 def ossec_checksum_filename(self):
72 return "{}-checksum.txt".format(self.ossec_tarball_filename)
73
74 def parse_checksums(self):
75 r = requests.get(self.ossec_checksum_url)
76 checksum_regex = re.compile(r'''
77 ^MD5\(
78 '''
79 + re.escape(self.ossec_tarball_filename) +
80 r'''\)=\s+(?P<ossec_md5_checksum>[0-9a-f]{32})\s+
81 SHA1\(
82 '''
83 + re.escape(self.ossec_tarball_filename) +
84 r'''\)=\s+(?P<ossec_sha1_checksum>[0-9a-f]{40})$
85 ''', re.VERBOSE | re.MULTILINE
86 )
87 checksum_list = r.content.rstrip()
88 results = re.match(checksum_regex, checksum_list).groupdict()
89 return results
90
91
92 def main():
93 module = AnsibleModule( # noqa: F405
94 argument_spec=dict(
95 ossec_version=dict(default="2.8.2"),
96 ),
97 supports_check_mode=False
98 )
99 if not HAS_REQUESTS:
100 module.fail_json(msg='requests required for this module')
101
102 ossec_version = module.params['ossec_version']
103 try:
104 ossec_config = OSSECURLs(ossec_version=ossec_version)
105 except: # noqa: E722
106 msg = ("Failed to find checksum information for OSSEC v{}."
107 "Ensure you have the proper release specified, "
108 "and check the download page to confirm: "
109 "http://www.ossec.net/?page_id=19".format(ossec_version))
110 module.fail_json(msg=msg)
111
112 results = ossec_config.ansible_facts
113
114 if results:
115 module.exit_json(changed=False, ansible_facts=results)
116 else:
117 msg = "Failed to fetch OSSEC URL facts."
118 module.fail_json(msg=msg)
119
120
121 from ansible.module_utils.basic import * # noqa E402,F403
122 main()
123
[end of install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py b/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py
--- a/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py
+++ b/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py
@@ -14,7 +14,7 @@
ossec_version:
description:
- version number of release to download
- default: "2.8.2"
+ default: "3.0.0"
required: no
notes:
- The OSSEC version to download is hardcoded to avoid surprises.
@@ -23,15 +23,15 @@
'''
EXAMPLES = '''
- ossec_urls:
- ossec_version: "2.8.2"
+ ossec_version: "3.0.0"
'''
-import re # noqa: E402
+import re # noqa: F401
HAS_REQUESTS = True
try:
- import requests
+ import requests # noqa: F401
except ImportError:
HAS_REQUESTS = False
@@ -39,60 +39,38 @@
class OSSECURLs():
def __init__(self, ossec_version):
+ self.REPO_URL = "https://github.com/ossec/ossec-hids"
self.ossec_version = ossec_version
-
- checksums = self.parse_checksums()
-
self.ansible_facts = dict(
ossec_version=self.ossec_version,
ossec_tarball_filename=self.ossec_tarball_filename,
ossec_tarball_url=self.ossec_tarball_url,
- ossec_checksum_filename=self.ossec_checksum_filename,
- ossec_checksum_url=self.ossec_checksum_url,
+ ossec_signature_filename=self.ossec_signature_filename,
+ ossec_signature_url=self.ossec_signature_url,
)
- self.ansible_facts.update(checksums)
-
@property
def ossec_tarball_filename(self):
return "ossec-hids-{}.tar.gz".format(self.ossec_version)
@property
def ossec_tarball_url(self):
- return "https://github.com/ossec/ossec-hids/archive/{}.tar.gz".format(
- self.ossec_version)
+ return self.REPO_URL + "/archive/{}.tar.gz".format(self.ossec_version)
@property
- def ossec_checksum_url(self):
- return "https://github.com/ossec/ossec-hids/releases/download/{}/{}".format( # noqa: E501
- self.ossec_version, self.ossec_checksum_filename)
+ def ossec_signature_url(self):
+ return self.REPO_URL + "/releases/download/{}/{}".format(
+ self.ossec_version, self.ossec_signature_filename)
@property
- def ossec_checksum_filename(self):
- return "{}-checksum.txt".format(self.ossec_tarball_filename)
-
- def parse_checksums(self):
- r = requests.get(self.ossec_checksum_url)
- checksum_regex = re.compile(r'''
- ^MD5\(
- '''
- + re.escape(self.ossec_tarball_filename) +
- r'''\)=\s+(?P<ossec_md5_checksum>[0-9a-f]{32})\s+
- SHA1\(
- '''
- + re.escape(self.ossec_tarball_filename) +
- r'''\)=\s+(?P<ossec_sha1_checksum>[0-9a-f]{40})$
- ''', re.VERBOSE | re.MULTILINE
- )
- checksum_list = r.content.rstrip()
- results = re.match(checksum_regex, checksum_list).groupdict()
- return results
+ def ossec_signature_filename(self):
+ return "ossec-hids-{}.tar.gz.asc".format(self.ossec_version)
def main():
module = AnsibleModule( # noqa: F405
argument_spec=dict(
- ossec_version=dict(default="2.8.2"),
+ ossec_version=dict(default="3.0.0"),
),
supports_check_mode=False
)
| {"golden_diff": "diff --git a/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py b/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py\n--- a/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py\n+++ b/install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py\n@@ -14,7 +14,7 @@\n ossec_version:\n description:\n - version number of release to download\n- default: \"2.8.2\"\n+ default: \"3.0.0\"\n required: no\n notes:\n - The OSSEC version to download is hardcoded to avoid surprises.\n@@ -23,15 +23,15 @@\n '''\n EXAMPLES = '''\n - ossec_urls:\n- ossec_version: \"2.8.2\"\n+ ossec_version: \"3.0.0\"\n '''\n \n-import re # noqa: E402\n+import re # noqa: F401\n \n \n HAS_REQUESTS = True\n try:\n- import requests\n+ import requests # noqa: F401\n except ImportError:\n HAS_REQUESTS = False\n \n@@ -39,60 +39,38 @@\n class OSSECURLs():\n \n def __init__(self, ossec_version):\n+ self.REPO_URL = \"https://github.com/ossec/ossec-hids\"\n self.ossec_version = ossec_version\n-\n- checksums = self.parse_checksums()\n-\n self.ansible_facts = dict(\n ossec_version=self.ossec_version,\n ossec_tarball_filename=self.ossec_tarball_filename,\n ossec_tarball_url=self.ossec_tarball_url,\n- ossec_checksum_filename=self.ossec_checksum_filename,\n- ossec_checksum_url=self.ossec_checksum_url,\n+ ossec_signature_filename=self.ossec_signature_filename,\n+ ossec_signature_url=self.ossec_signature_url,\n )\n \n- self.ansible_facts.update(checksums)\n-\n @property\n def ossec_tarball_filename(self):\n return \"ossec-hids-{}.tar.gz\".format(self.ossec_version)\n \n @property\n def ossec_tarball_url(self):\n- return \"https://github.com/ossec/ossec-hids/archive/{}.tar.gz\".format(\n- self.ossec_version)\n+ return self.REPO_URL + \"/archive/{}.tar.gz\".format(self.ossec_version)\n \n @property\n- def ossec_checksum_url(self):\n- return \"https://github.com/ossec/ossec-hids/releases/download/{}/{}\".format( # noqa: E501\n- self.ossec_version, self.ossec_checksum_filename)\n+ def ossec_signature_url(self):\n+ return self.REPO_URL + \"/releases/download/{}/{}\".format(\n+ self.ossec_version, self.ossec_signature_filename)\n \n @property\n- def ossec_checksum_filename(self):\n- return \"{}-checksum.txt\".format(self.ossec_tarball_filename)\n-\n- def parse_checksums(self):\n- r = requests.get(self.ossec_checksum_url)\n- checksum_regex = re.compile(r'''\n- ^MD5\\(\n- '''\n- + re.escape(self.ossec_tarball_filename) +\n- r'''\\)=\\s+(?P<ossec_md5_checksum>[0-9a-f]{32})\\s+\n- SHA1\\(\n- '''\n- + re.escape(self.ossec_tarball_filename) +\n- r'''\\)=\\s+(?P<ossec_sha1_checksum>[0-9a-f]{40})$\n- ''', re.VERBOSE | re.MULTILINE\n- )\n- checksum_list = r.content.rstrip()\n- results = re.match(checksum_regex, checksum_list).groupdict()\n- return results\n+ def ossec_signature_filename(self):\n+ return \"ossec-hids-{}.tar.gz.asc\".format(self.ossec_version)\n \n \n def main():\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n- ossec_version=dict(default=\"2.8.2\"),\n+ ossec_version=dict(default=\"3.0.0\"),\n ),\n supports_check_mode=False\n )\n", "issue": "Update OSSEC to v3.0\n## Description\r\n\r\nOSSEC 3.0 was released on July 17th 2018[0], containing a large amount of bug fixes (including 2 security fixes) as well as new major functionality. 
Of note, it supports whitelisting syscheck md5 hashes in a sqlite database, potentially reducing notification noise.\r\n\r\n## User Research Evidence\r\n\r\nUsers like up-to-date packages\r\n\r\n## User Stories\r\n\r\nAs a SecureDrop administrator, I would like to have all packages updated and would like to minimize alerts/noise.\r\n\r\n[0] https://github.com/ossec/ossec-hids/releases\n", "before_files": [{"content": "#!/usr/bin/env python\nDOCUMENTATION = '''\n---\nmodule: ossec_urls\nshort_description: Gather facts for OSSEC download URLs\ndescription:\n - Gather version, checksum, and URL info for OSSEC downloads\nauthor:\n - Conor Schaefer (@conorsch)\n - Freedom of the Press Foundation (@freedomofpress)\nrequirements:\n - requests\noptions:\n ossec_version:\n description:\n - version number of release to download\n default: \"2.8.2\"\n required: no\nnotes:\n - The OSSEC version to download is hardcoded to avoid surprises.\n If you want a newer version than the current default, you should\n pass the version in via I(ossec_version).\n'''\nEXAMPLES = '''\n- ossec_urls:\n ossec_version: \"2.8.2\"\n'''\n\nimport re # noqa: E402\n\n\nHAS_REQUESTS = True\ntry:\n import requests\nexcept ImportError:\n HAS_REQUESTS = False\n\n\nclass OSSECURLs():\n\n def __init__(self, ossec_version):\n self.ossec_version = ossec_version\n\n checksums = self.parse_checksums()\n\n self.ansible_facts = dict(\n ossec_version=self.ossec_version,\n ossec_tarball_filename=self.ossec_tarball_filename,\n ossec_tarball_url=self.ossec_tarball_url,\n ossec_checksum_filename=self.ossec_checksum_filename,\n ossec_checksum_url=self.ossec_checksum_url,\n )\n\n self.ansible_facts.update(checksums)\n\n @property\n def ossec_tarball_filename(self):\n return \"ossec-hids-{}.tar.gz\".format(self.ossec_version)\n\n @property\n def ossec_tarball_url(self):\n return \"https://github.com/ossec/ossec-hids/archive/{}.tar.gz\".format(\n self.ossec_version)\n\n @property\n def ossec_checksum_url(self):\n return \"https://github.com/ossec/ossec-hids/releases/download/{}/{}\".format( # noqa: E501\n self.ossec_version, self.ossec_checksum_filename)\n\n @property\n def ossec_checksum_filename(self):\n return \"{}-checksum.txt\".format(self.ossec_tarball_filename)\n\n def parse_checksums(self):\n r = requests.get(self.ossec_checksum_url)\n checksum_regex = re.compile(r'''\n ^MD5\\(\n '''\n + re.escape(self.ossec_tarball_filename) +\n r'''\\)=\\s+(?P<ossec_md5_checksum>[0-9a-f]{32})\\s+\n SHA1\\(\n '''\n + re.escape(self.ossec_tarball_filename) +\n r'''\\)=\\s+(?P<ossec_sha1_checksum>[0-9a-f]{40})$\n ''', re.VERBOSE | re.MULTILINE\n )\n checksum_list = r.content.rstrip()\n results = re.match(checksum_regex, checksum_list).groupdict()\n return results\n\n\ndef main():\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n ossec_version=dict(default=\"2.8.2\"),\n ),\n supports_check_mode=False\n )\n if not HAS_REQUESTS:\n module.fail_json(msg='requests required for this module')\n\n ossec_version = module.params['ossec_version']\n try:\n ossec_config = OSSECURLs(ossec_version=ossec_version)\n except: # noqa: E722\n msg = (\"Failed to find checksum information for OSSEC v{}.\"\n \"Ensure you have the proper release specified, \"\n \"and check the download page to confirm: \"\n \"http://www.ossec.net/?page_id=19\".format(ossec_version))\n module.fail_json(msg=msg)\n\n results = ossec_config.ansible_facts\n\n if results:\n module.exit_json(changed=False, ansible_facts=results)\n else:\n msg = \"Failed to fetch OSSEC URL facts.\"\n 
module.fail_json(msg=msg)\n\n\nfrom ansible.module_utils.basic import * # noqa E402,F403\nmain()\n", "path": "install_files/ansible-base/roles/build-ossec-deb-pkg/library/ossec_urls.py"}]} | 1,857 | 943 |
gh_patches_debug_62140 | rasdani/github-patches | git_diff | searx__searx-801 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tags <xml> are hidden from result titles
</issue>
<code>
[start of searx/engines/xpath.py]
1 from lxml import html
2 from urllib import urlencode, unquote
3 from urlparse import urlparse, urljoin
4 from lxml.etree import _ElementStringResult, _ElementUnicodeResult
5 from searx.utils import html_to_text
6
7 search_url = None
8 url_xpath = None
9 content_xpath = None
10 title_xpath = None
11 suggestion_xpath = ''
12 results_xpath = ''
13
14 # parameters for engines with paging support
15 #
16 # number of results on each page
17 # (only needed if the site requires not a page number, but an offset)
18 page_size = 1
19 # number of the first page (usually 0 or 1)
20 first_page_num = 1
21
22
23 '''
24 if xpath_results is list, extract the text from each result and concat the list
25 if xpath_results is a xml element, extract all the text node from it
26 ( text_content() method from lxml )
27 if xpath_results is a string element, then it's already done
28 '''
29
30
31 def extract_text(xpath_results):
32 if type(xpath_results) == list:
33 # it's list of result : concat everything using recursive call
34 if not xpath_results:
35 raise Exception('Empty url resultset')
36 result = ''
37 for e in xpath_results:
38 result = result + extract_text(e)
39 return result.strip()
40 elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]:
41 # it's a string
42 return ''.join(xpath_results)
43 else:
44 # it's a element
45 return html_to_text(xpath_results.text_content()).strip()
46
47
48 def extract_url(xpath_results, search_url):
49 url = extract_text(xpath_results)
50
51 if url.startswith('//'):
52 # add http or https to this kind of url //example.com/
53 parsed_search_url = urlparse(search_url)
54 url = parsed_search_url.scheme + url
55 elif url.startswith('/'):
56 # fix relative url to the search engine
57 url = urljoin(search_url, url)
58
59 # normalize url
60 url = normalize_url(url)
61
62 return url
63
64
65 def normalize_url(url):
66 parsed_url = urlparse(url)
67
68 # add a / at this end of the url if there is no path
69 if not parsed_url.netloc:
70 raise Exception('Cannot parse url')
71 if not parsed_url.path:
72 url += '/'
73
74 # FIXME : hack for yahoo
75 if parsed_url.hostname == 'search.yahoo.com'\
76 and parsed_url.path.startswith('/r'):
77 p = parsed_url.path
78 mark = p.find('/**')
79 if mark != -1:
80 return unquote(p[mark + 3:]).decode('utf-8')
81
82 return url
83
84
85 def request(query, params):
86 query = urlencode({'q': query})[2:]
87
88 fp = {'query': query}
89 if paging and search_url.find('{pageno}') >= 0:
90 fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num
91
92 params['url'] = search_url.format(**fp)
93 params['query'] = query
94
95 return params
96
97
98 def response(resp):
99 results = []
100 dom = html.fromstring(resp.text)
101 if results_xpath:
102 for result in dom.xpath(results_xpath):
103 url = extract_url(result.xpath(url_xpath), search_url)
104 title = extract_text(result.xpath(title_xpath)[0])
105 content = extract_text(result.xpath(content_xpath)[0])
106 results.append({'url': url, 'title': title, 'content': content})
107 else:
108 for url, title, content in zip(
109 (extract_url(x, search_url) for
110 x in dom.xpath(url_xpath)),
111 map(extract_text, dom.xpath(title_xpath)),
112 map(extract_text, dom.xpath(content_xpath))
113 ):
114 results.append({'url': url, 'title': title, 'content': content})
115
116 if not suggestion_xpath:
117 return results
118 for suggestion in dom.xpath(suggestion_xpath):
119 results.append({'suggestion': extract_text(suggestion)})
120 return results
121
[end of searx/engines/xpath.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -42,7 +42,9 @@
return ''.join(xpath_results)
else:
# it's a element
- return html_to_text(xpath_results.text_content()).strip()
+ text = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)
+ text = text.strip().replace('\n', ' ')
+ return ' '.join(text.split())
def extract_url(xpath_results, search_url):
| {"golden_diff": "diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py\n--- a/searx/engines/xpath.py\n+++ b/searx/engines/xpath.py\n@@ -42,7 +42,9 @@\n return ''.join(xpath_results)\n else:\n # it's a element\n- return html_to_text(xpath_results.text_content()).strip()\n+ text = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)\n+ text = text.strip().replace('\\n', ' ')\n+ return ' '.join(text.split())\n \n \n def extract_url(xpath_results, search_url):\n", "issue": "Tags <xml> are hidden from result titles\n\n", "before_files": [{"content": "from lxml import html\nfrom urllib import urlencode, unquote\nfrom urlparse import urlparse, urljoin\nfrom lxml.etree import _ElementStringResult, _ElementUnicodeResult\nfrom searx.utils import html_to_text\n\nsearch_url = None\nurl_xpath = None\ncontent_xpath = None\ntitle_xpath = None\nsuggestion_xpath = ''\nresults_xpath = ''\n\n# parameters for engines with paging support\n#\n# number of results on each page\n# (only needed if the site requires not a page number, but an offset)\npage_size = 1\n# number of the first page (usually 0 or 1)\nfirst_page_num = 1\n\n\n'''\nif xpath_results is list, extract the text from each result and concat the list\nif xpath_results is a xml element, extract all the text node from it\n ( text_content() method from lxml )\nif xpath_results is a string element, then it's already done\n'''\n\n\ndef extract_text(xpath_results):\n if type(xpath_results) == list:\n # it's list of result : concat everything using recursive call\n if not xpath_results:\n raise Exception('Empty url resultset')\n result = ''\n for e in xpath_results:\n result = result + extract_text(e)\n return result.strip()\n elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]:\n # it's a string\n return ''.join(xpath_results)\n else:\n # it's a element\n return html_to_text(xpath_results.text_content()).strip()\n\n\ndef extract_url(xpath_results, search_url):\n url = extract_text(xpath_results)\n\n if url.startswith('//'):\n # add http or https to this kind of url //example.com/\n parsed_search_url = urlparse(search_url)\n url = parsed_search_url.scheme + url\n elif url.startswith('/'):\n # fix relative url to the search engine\n url = urljoin(search_url, url)\n\n # normalize url\n url = normalize_url(url)\n\n return url\n\n\ndef normalize_url(url):\n parsed_url = urlparse(url)\n\n # add a / at this end of the url if there is no path\n if not parsed_url.netloc:\n raise Exception('Cannot parse url')\n if not parsed_url.path:\n url += '/'\n\n # FIXME : hack for yahoo\n if parsed_url.hostname == 'search.yahoo.com'\\\n and parsed_url.path.startswith('/r'):\n p = parsed_url.path\n mark = p.find('/**')\n if mark != -1:\n return unquote(p[mark + 3:]).decode('utf-8')\n\n return url\n\n\ndef request(query, params):\n query = urlencode({'q': query})[2:]\n\n fp = {'query': query}\n if paging and search_url.find('{pageno}') >= 0:\n fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num\n\n params['url'] = search_url.format(**fp)\n params['query'] = query\n\n return params\n\n\ndef response(resp):\n results = []\n dom = html.fromstring(resp.text)\n if results_xpath:\n for result in dom.xpath(results_xpath):\n url = extract_url(result.xpath(url_xpath), search_url)\n title = extract_text(result.xpath(title_xpath)[0])\n content = extract_text(result.xpath(content_xpath)[0])\n results.append({'url': url, 'title': title, 'content': content})\n else:\n for url, title, content in zip(\n (extract_url(x, 
search_url) for\n x in dom.xpath(url_xpath)),\n map(extract_text, dom.xpath(title_xpath)),\n map(extract_text, dom.xpath(content_xpath))\n ):\n results.append({'url': url, 'title': title, 'content': content})\n\n if not suggestion_xpath:\n return results\n for suggestion in dom.xpath(suggestion_xpath):\n results.append({'suggestion': extract_text(suggestion)})\n return results\n", "path": "searx/engines/xpath.py"}]} | 1,662 | 145 |
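A small demonstration of the extraction change in the searx diff above: serialising with lxml's text method preserves literal "<xml>"-style text that the old `html_to_text()` round-trip could strip from result titles. The HTML fragment is illustrative only.

```python
# Sketch only: shows the lxml text serialisation used by the patched extract_text().
from lxml import html

fragment = html.fromstring("<h3>Parsing <b><xml></b> documents</h3>")
text = html.tostring(fragment, encoding='unicode', method='text', with_tail=False)
print(' '.join(text.split()))  # Parsing <xml> documents
```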
gh_patches_debug_40192 | rasdani/github-patches | git_diff | conan-io__conan-center-index-18702 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] tree-sitter-c/*: tree-sitter-c needs to be updated to conan v2
### Description
tree-sitter-c needs to work well with conan v2.
### Package and Environment Details
* Package Name/Version: **tree-sitter-c/***
* Operating System+version: **Arch Linux**
* Compiler+version: **GCC 13**
* Docker image: **conanio/gcc8**
* Conan version: **conan 2.0.4**
* Python version: **Python 3.11.3**
### Conan profile
```shell
Configuration for profile default:
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=13
compiler.libcxx=libstdc++11
build_type=Release
[options]
[conf]
[build_requires]
[env]
```
### Steps to reproduce
```shell
conan create all/conanfile.py --version 0.20.2 -pr:b=default -pr:h=default -s build_type=Release -o "tree-sitter-c/0.20.2:shared=False"
```
### Logs
<details><summary>Click to expand log</summary>
```shell
ERROR: Error loading conanfile at '/home/whalien/codebase/misc/conan-center-index/recipes/tree-sitter-c/all/conanfile.py': Unable to load conanfile in /home/whalien/codebase/misc/conan-center-index/recipes/tree-sitter-c/all/conanfile.py
File "<frozen importlib._bootstrap_external>", line 940, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/home/whalien/codebase/misc/conan-center-index/recipes/tree-sitter-c/all/conanfile.py", line 1, in <module>
from conans import CMake, ConanFile, tools
ImportError: cannot import name 'CMake' from 'conans' (/usr/lib/python3.11/site-packages/conans/__init__.py)
```
</details>
</issue>
<code>
[start of recipes/tree-sitter-c/all/conanfile.py]
1 from conans import CMake, ConanFile, tools
2 import functools
3 import os
4
5 required_conan_version = ">=1.33.0"
6
7
8 class TreeSitterCConan(ConanFile):
9 name = "tree-sitter-c"
10 description = "C grammar for tree-sitter."
11 topics = ("parser", "grammar", "tree", "c", "ide")
12 url = "https://github.com/conan-io/conan-center-index"
13 homepage = "https://github.com/tree-sitter/tree-sitter-c"
14 license = "MIT"
15 settings = "os", "arch", "compiler", "build_type"
16 options = {
17 "fPIC": [True, False],
18 "shared": [True, False],
19 }
20 default_options = {
21 "fPIC": True,
22 "shared": False,
23 }
24
25 generators = "cmake", "cmake_find_package_multi"
26 exports_sources = "CMakeLists.txt"
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def config_options(self):
33 if self.settings.os == "Windows":
34 del self.options.fPIC
35
36 def configure(self):
37 if self.options.shared:
38 del self.options.fPIC
39 del self.settings.compiler.libcxx
40 del self.settings.compiler.cppstd
41
42 def requirements(self):
43 self.requires("tree-sitter/0.20.0")
44
45 def source(self):
46 tools.get(**self.conan_data["sources"][self.version],
47 destination=self._source_subfolder, strip_root=True)
48
49 @functools.lru_cache(1)
50 def _configure_cmake(self):
51 cmake = CMake(self)
52 cmake.configure()
53 return cmake
54
55 def _patch_sources(self):
56 if not self.options.shared:
57 tools.replace_in_file(
58 os.path.join(self._source_subfolder, "src", "parser.c"),
59 "__declspec(dllexport)", ""
60 )
61
62 def build(self):
63 self._patch_sources()
64 cmake = self._configure_cmake()
65 cmake.build()
66
67 def package(self):
68 self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
69 cmake = self._configure_cmake()
70 cmake.install()
71
72 def package_info(self):
73 self.cpp_info.libs = ["tree-sitter-c"]
74
[end of recipes/tree-sitter-c/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/tree-sitter-c/all/conanfile.py b/recipes/tree-sitter-c/all/conanfile.py
--- a/recipes/tree-sitter-c/all/conanfile.py
+++ b/recipes/tree-sitter-c/all/conanfile.py
@@ -1,8 +1,10 @@
-from conans import CMake, ConanFile, tools
-import functools
+from conan import ConanFile
+from conan.tools.cmake import CMake
+from conan.tools.files import get, replace_in_file, copy
+from conan.tools.layout import basic_layout
import os
-required_conan_version = ">=1.33.0"
+required_conan_version = ">=1.53.0"
class TreeSitterCConan(ConanFile):
@@ -13,21 +15,22 @@
homepage = "https://github.com/tree-sitter/tree-sitter-c"
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
+ package_type = "library"
+ generators = "CMakeToolchain", "CMakeDeps"
options = {
- "fPIC": [True, False],
"shared": [True, False],
+ "fPIC": [True, False],
}
default_options = {
- "fPIC": True,
"shared": False,
+ "fPIC": True,
}
- generators = "cmake", "cmake_find_package_multi"
+ generators = "CMakeToolchain", "CMakeDeps"
exports_sources = "CMakeLists.txt"
- @property
- def _source_subfolder(self):
- return "source_subfolder"
+ def layout(self):
+ basic_layout(self, src_folder="src")
def config_options(self):
if self.settings.os == "Windows":
@@ -35,38 +38,38 @@
def configure(self):
if self.options.shared:
- del self.options.fPIC
- del self.settings.compiler.libcxx
- del self.settings.compiler.cppstd
-
- def requirements(self):
- self.requires("tree-sitter/0.20.0")
+ self.options.rm_safe("fPIC")
+ self.settings.rm_safe("compiler.cppstd")
+ self.settings.rm_safe("compiler.libcxx")
def source(self):
- tools.get(**self.conan_data["sources"][self.version],
- destination=self._source_subfolder, strip_root=True)
+ get(self, **self.conan_data["sources"][self.version], strip_root=True)
- @functools.lru_cache(1)
- def _configure_cmake(self):
- cmake = CMake(self)
- cmake.configure()
- return cmake
+ def requirements(self):
+ self.requires("tree-sitter/0.20.8", transitive_headers=True, transitive_libs=True)
def _patch_sources(self):
if not self.options.shared:
- tools.replace_in_file(
- os.path.join(self._source_subfolder, "src", "parser.c"),
+ replace_in_file(
+ self,
+ os.path.join(self.source_folder, "src", "parser.c"),
"__declspec(dllexport)", ""
)
def build(self):
self._patch_sources()
- cmake = self._configure_cmake()
+ cmake = CMake(self)
+ cmake.configure(build_script_folder=os.path.join(self.source_folder, os.pardir))
cmake.build()
def package(self):
- self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
- cmake = self._configure_cmake()
+ copy(
+ self,
+ "LICENSE",
+ src=self.source_folder,
+ dst=os.path.join(self.package_folder, "licenses"),
+ )
+ cmake = CMake(self)
cmake.install()
def package_info(self):
| {"golden_diff": "diff --git a/recipes/tree-sitter-c/all/conanfile.py b/recipes/tree-sitter-c/all/conanfile.py\n--- a/recipes/tree-sitter-c/all/conanfile.py\n+++ b/recipes/tree-sitter-c/all/conanfile.py\n@@ -1,8 +1,10 @@\n-from conans import CMake, ConanFile, tools\n-import functools\n+from conan import ConanFile\n+from conan.tools.cmake import CMake\n+from conan.tools.files import get, replace_in_file, copy\n+from conan.tools.layout import basic_layout\n import os\n \n-required_conan_version = \">=1.33.0\"\n+required_conan_version = \">=1.53.0\"\n \n \n class TreeSitterCConan(ConanFile):\n@@ -13,21 +15,22 @@\n homepage = \"https://github.com/tree-sitter/tree-sitter-c\"\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n+ package_type = \"library\"\n+ generators = \"CMakeToolchain\", \"CMakeDeps\"\n options = {\n- \"fPIC\": [True, False],\n \"shared\": [True, False],\n+ \"fPIC\": [True, False],\n }\n default_options = {\n- \"fPIC\": True,\n \"shared\": False,\n+ \"fPIC\": True,\n }\n \n- generators = \"cmake\", \"cmake_find_package_multi\"\n+ generators = \"CMakeToolchain\", \"CMakeDeps\"\n exports_sources = \"CMakeLists.txt\"\n \n- @property\n- def _source_subfolder(self):\n- return \"source_subfolder\"\n+ def layout(self):\n+ basic_layout(self, src_folder=\"src\")\n \n def config_options(self):\n if self.settings.os == \"Windows\":\n@@ -35,38 +38,38 @@\n \n def configure(self):\n if self.options.shared:\n- del self.options.fPIC\n- del self.settings.compiler.libcxx\n- del self.settings.compiler.cppstd\n-\n- def requirements(self):\n- self.requires(\"tree-sitter/0.20.0\")\n+ self.options.rm_safe(\"fPIC\")\n+ self.settings.rm_safe(\"compiler.cppstd\")\n+ self.settings.rm_safe(\"compiler.libcxx\")\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version],\n- destination=self._source_subfolder, strip_root=True)\n+ get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n \n- @functools.lru_cache(1)\n- def _configure_cmake(self):\n- cmake = CMake(self)\n- cmake.configure()\n- return cmake\n+ def requirements(self):\n+ self.requires(\"tree-sitter/0.20.8\", transitive_headers=True, transitive_libs=True)\n \n def _patch_sources(self):\n if not self.options.shared:\n- tools.replace_in_file(\n- os.path.join(self._source_subfolder, \"src\", \"parser.c\"),\n+ replace_in_file(\n+ self,\n+ os.path.join(self.source_folder, \"src\", \"parser.c\"),\n \"__declspec(dllexport)\", \"\"\n )\n \n def build(self):\n self._patch_sources()\n- cmake = self._configure_cmake()\n+ cmake = CMake(self)\n+ cmake.configure(build_script_folder=os.path.join(self.source_folder, os.pardir))\n cmake.build()\n \n def package(self):\n- self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n- cmake = self._configure_cmake()\n+ copy(\n+ self,\n+ \"LICENSE\",\n+ src=self.source_folder,\n+ dst=os.path.join(self.package_folder, \"licenses\"),\n+ )\n+ cmake = CMake(self)\n cmake.install()\n \n def package_info(self):\n", "issue": "[package] tree-sitter-c/*: tree-sitter-c needs to be updated to conan v2\n### Description\r\n\r\ntree-sitter-c needs to wrok well with conan v2.\r\n\r\n### Package and Environment Details\r\n\r\n* Package Name/Version: **tree-sitter-c/***\r\n* Operating System+version: **Arch Linux**\r\n* Compiler+version: **GCC 13**\r\n* Docker image: **conanio/gcc8**\r\n* Conan version: **conan 2.0.4**\r\n* Python version: **Python 3.11.3**\r\n\r\n\r\n### Conan profile\r\n\r\n```shell\r\nConfiguration for profile 
default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=13\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n```shell\r\nconan create all/conanfile.py --version 0.20.2 -pr:b=default -pr:h=default -s build_type=Release -o \"tree-sitter-c/0.20.2:shared=False\"\r\n```\r\n\r\n### Logs\r\n\r\n<details><summary>Click to expand log</summary>\r\n\r\n```shell\r\nERROR: Error loading conanfile at '/home/whalien/codebase/misc/conan-center-index/recipes/tree-sitter-c/all/conanfile.py': Unable to load conanfile in /home/whalien/codebase/misc/conan-center-index/recipes/tree-sitter-c/all/conanfile.py\r\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\r\n File \"/home/whalien/codebase/misc/conan-center-index/recipes/tree-sitter-c/all/conanfile.py\", line 1, in <module>\r\n from conans import CMake, ConanFile, tools\r\nImportError: cannot import name 'CMake' from 'conans' (/usr/lib/python3.11/site-packages/conans/__init__.py)\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import CMake, ConanFile, tools\nimport functools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass TreeSitterCConan(ConanFile):\n name = \"tree-sitter-c\"\n description = \"C grammar for tree-sitter.\"\n topics = (\"parser\", \"grammar\", \"tree\", \"c\", \"ide\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/tree-sitter/tree-sitter-c\"\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"fPIC\": [True, False],\n \"shared\": [True, False],\n }\n default_options = {\n \"fPIC\": True,\n \"shared\": False,\n }\n\n generators = \"cmake\", \"cmake_find_package_multi\"\n exports_sources = \"CMakeLists.txt\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def requirements(self):\n self.requires(\"tree-sitter/0.20.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n @functools.lru_cache(1)\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.configure()\n return cmake\n\n def _patch_sources(self):\n if not self.options.shared:\n tools.replace_in_file(\n os.path.join(self._source_subfolder, \"src\", \"parser.c\"),\n \"__declspec(dllexport)\", \"\"\n )\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [\"tree-sitter-c\"]\n", "path": "recipes/tree-sitter-c/all/conanfile.py"}]} | 1,658 | 862 |
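A hypothetical minimal Conan 2.x recipe fragment showing the option/settings handling pattern that the updated tree-sitter-c recipe above relies on; names and versions here are placeholders, not part of the real recipe.

```python
# Sketch only: illustrates the conan.ConanFile / rm_safe() migration pattern.
from conan import ConanFile

class MinimalRecipe(ConanFile):
    name = "minimal"
    version = "0.1"
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            # Conan 2.x replaces "del self.options.fPIC" with rm_safe()
            self.options.rm_safe("fPIC")
        self.settings.rm_safe("compiler.libcxx")
        self.settings.rm_safe("compiler.cppstd")
```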
gh_patches_debug_24601 | rasdani/github-patches | git_diff | fossasia__open-event-server-835 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session Form: Make Title and Name always required
In session forms in step 5 of the wizard, make "Title" and "Name" always required. Maybe we should display those fields in another color, or change the UI somehow, so it becomes clear that they cannot be edited?

</issue>
<code>
[start of open_event/views/admin/models_views/events.py]
1 import os
2
3 from flask import request, url_for, redirect
4 from flask_admin import expose
5 from flask_admin.contrib.sqla import ModelView
6 from flask.ext import login
7 from ....helpers.data import DataManager, save_to_db
8 from ....helpers.data_getter import DataGetter
9 from datetime import datetime
10 from werkzeug.utils import secure_filename
11 from werkzeug.datastructures import ImmutableMultiDict
12
13 class EventsView(ModelView):
14 def is_accessible(self):
15 return login.current_user.is_authenticated
16
17 def _handle_view(self, name, **kwargs):
18 if not self.is_accessible():
19 return redirect(url_for('admin.login_view', next=request.url))
20
21 @expose('/')
22 def index_view(self):
23 live_events = DataGetter.get_live_events()
24 draft_events = DataGetter.get_draft_events()
25 past_events = DataGetter.get_past_events()
26 all_events = DataGetter.get_all_events()
27 return self.render('/gentelella/admin/event/index.html',
28 live_events=live_events, draft_events=draft_events, past_events=past_events,
29 all_events=all_events)
30
31 @expose('/create/', methods=('GET', 'POST'))
32 def create_view(self):
33 session_columns = DataGetter.get_session_columns()
34 speaker_columns = DataGetter.get_speaker_columns()
35 if request.method == 'POST':
36 imd = ImmutableMultiDict(request.files)
37 for img_file in imd.getlist('sponsors[logo]'):
38 filename = secure_filename(img_file.filename)
39 img_file.save(os.path.join(os.path.realpath('.') + '/static/media/image/', filename))
40 event = DataManager.create_event(request.form, imd)
41 if event:
42 return redirect(url_for('.details_view', event_id=event.id))
43 return redirect(url_for('.index_view'))
44 return self.render('/gentelella/admin/event/new/new.html',
45 session_columns=session_columns,
46 speaker_columns=speaker_columns,
47 event_types=DataGetter.get_event_types(),
48 event_topics=DataGetter.get_event_topics())
49
50 @expose('/<int:event_id>/', methods=('GET', 'POST'))
51 def details_view(self, event_id):
52 event = DataGetter.get_event(event_id)
53
54 return self.render('/gentelella/admin/event/details/details.html', event=event)
55
56 @expose('/<int:event_id>/edit/', methods=('GET', 'POST'))
57 def edit_view(self, event_id):
58 event = DataGetter.get_event(event_id)
59 session_types = DataGetter.get_session_types_by_event_id(event_id)
60 tracks = DataGetter.get_tracks(event_id)
61 social_links = DataGetter.get_social_links_by_event_id(event_id)
62 microlocations = DataGetter.get_microlocations(event_id)
63 call_for_speakers = DataGetter.get_call_for_papers(event_id).first()
64 sponsors = DataGetter.get_sponsors(event_id)
65 session_columns = DataGetter.get_session_columns()
66 speaker_columns = DataGetter.get_speaker_columns()
67 if request.method == 'GET':
68 return self.render('/gentelella/admin/event/edit/edit.html', event=event, session_types=session_types,
69 tracks=tracks, social_links=social_links, microlocations=microlocations,
70 call_for_speakers=call_for_speakers, sponsors=sponsors, session_columns=session_columns,
71 speaker_columns=speaker_columns, event_types=DataGetter.get_event_types(),
72 event_topics=DataGetter.get_event_topics())
73 if request.method == "POST":
74 event = DataManager.edit_event(request.form, event_id, event, session_types, tracks, social_links,
75 microlocations, call_for_speakers, sponsors)
76 return self.render('/gentelella/admin/event/details/details.html', event=event)
77
78 @expose('/<event_id>/delete/', methods=('GET',))
79 def delete_view(self, event_id):
80 if request.method == "GET":
81 DataManager.delete_event(event_id)
82 return redirect(url_for('.index_view'))
83
84 @expose('/<int:event_id>/update/', methods=('POST',))
85 def save_closing_date(self, event_id):
86 event = DataGetter.get_event(event_id)
87 event.closing_datetime = request.form['closing_datetime']
88 save_to_db(event, 'Closing Datetime Updated')
89 return self.render('/gentelella/admin/event/details/details.html', event=event)
90
[end of open_event/views/admin/models_views/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/open_event/views/admin/models_views/events.py b/open_event/views/admin/models_views/events.py
--- a/open_event/views/admin/models_views/events.py
+++ b/open_event/views/admin/models_views/events.py
@@ -29,6 +29,9 @@
def create_view(self):
session_columns = DataGetter.get_session_columns()
speaker_columns = DataGetter.get_speaker_columns()
+ speaker_columns = list(speaker_columns)
+ speaker_columns.insert(2, speaker_columns.pop(4)) # Moving email to the top
+
if request.method == 'POST':
event = DataManager.create_event(request.form)
if event:
@@ -57,6 +60,7 @@
sponsors = DataGetter.get_sponsors(event_id)
session_columns = DataGetter.get_session_columns()
speaker_columns = DataGetter.get_speaker_columns()
+
if request.method == 'GET':
return self.render('/gentelella/admin/event/edit/edit.html', event=event, session_types=session_types,
tracks=tracks, social_links=social_links, microlocations=microlocations,
| {"golden_diff": "diff --git a/open_event/views/admin/models_views/events.py b/open_event/views/admin/models_views/events.py\n--- a/open_event/views/admin/models_views/events.py\n+++ b/open_event/views/admin/models_views/events.py\n@@ -29,6 +29,9 @@\n def create_view(self):\n session_columns = DataGetter.get_session_columns()\n speaker_columns = DataGetter.get_speaker_columns()\n+ speaker_columns = list(speaker_columns)\n+ speaker_columns.insert(2, speaker_columns.pop(4)) # Moving email to the top\n+\n if request.method == 'POST':\n event = DataManager.create_event(request.form)\n if event:\n@@ -57,6 +60,7 @@\n sponsors = DataGetter.get_sponsors(event_id)\n session_columns = DataGetter.get_session_columns()\n speaker_columns = DataGetter.get_speaker_columns()\n+\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/edit/edit.html', event=event, session_types=session_types,\n tracks=tracks, social_links=social_links, microlocations=microlocations,\n", "issue": "Session Form: Make Title and Name always required\nIn session forms in step 5 of the wizard make \"Title\" and \"Name\" always required. Maybe we should make those in another color, so it becomes clear that they cannot be edited or change the UI somehow?\n\n\n\n", "before_files": [{"content": "import os\n\nfrom flask import request, url_for, redirect\nfrom flask_admin import expose\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask.ext import login\nfrom ....helpers.data import DataManager, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom datetime import datetime\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.datastructures import ImmutableMultiDict\n\nclass EventsView(ModelView):\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n\n @expose('/')\n def index_view(self):\n live_events = DataGetter.get_live_events()\n draft_events = DataGetter.get_draft_events()\n past_events = DataGetter.get_past_events()\n all_events = DataGetter.get_all_events()\n return self.render('/gentelella/admin/event/index.html',\n live_events=live_events, draft_events=draft_events, past_events=past_events,\n all_events=all_events)\n\n @expose('/create/', methods=('GET', 'POST'))\n def create_view(self):\n session_columns = DataGetter.get_session_columns()\n speaker_columns = DataGetter.get_speaker_columns()\n if request.method == 'POST':\n imd = ImmutableMultiDict(request.files)\n for img_file in imd.getlist('sponsors[logo]'):\n filename = secure_filename(img_file.filename)\n img_file.save(os.path.join(os.path.realpath('.') + '/static/media/image/', filename))\n event = DataManager.create_event(request.form, imd)\n if event:\n return redirect(url_for('.details_view', event_id=event.id))\n return redirect(url_for('.index_view'))\n return self.render('/gentelella/admin/event/new/new.html',\n session_columns=session_columns,\n speaker_columns=speaker_columns,\n event_types=DataGetter.get_event_types(),\n event_topics=DataGetter.get_event_topics())\n\n @expose('/<int:event_id>/', methods=('GET', 'POST'))\n def details_view(self, event_id):\n event = DataGetter.get_event(event_id)\n\n return self.render('/gentelella/admin/event/details/details.html', event=event)\n\n @expose('/<int:event_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id):\n event = DataGetter.get_event(event_id)\n session_types = 
DataGetter.get_session_types_by_event_id(event_id)\n tracks = DataGetter.get_tracks(event_id)\n social_links = DataGetter.get_social_links_by_event_id(event_id)\n microlocations = DataGetter.get_microlocations(event_id)\n call_for_speakers = DataGetter.get_call_for_papers(event_id).first()\n sponsors = DataGetter.get_sponsors(event_id)\n session_columns = DataGetter.get_session_columns()\n speaker_columns = DataGetter.get_speaker_columns()\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/edit/edit.html', event=event, session_types=session_types,\n tracks=tracks, social_links=social_links, microlocations=microlocations,\n call_for_speakers=call_for_speakers, sponsors=sponsors, session_columns=session_columns,\n speaker_columns=speaker_columns, event_types=DataGetter.get_event_types(),\n event_topics=DataGetter.get_event_topics())\n if request.method == \"POST\":\n event = DataManager.edit_event(request.form, event_id, event, session_types, tracks, social_links,\n microlocations, call_for_speakers, sponsors)\n return self.render('/gentelella/admin/event/details/details.html', event=event)\n\n @expose('/<event_id>/delete/', methods=('GET',))\n def delete_view(self, event_id):\n if request.method == \"GET\":\n DataManager.delete_event(event_id)\n return redirect(url_for('.index_view'))\n\n @expose('/<int:event_id>/update/', methods=('POST',))\n def save_closing_date(self, event_id):\n event = DataGetter.get_event(event_id)\n event.closing_datetime = request.form['closing_datetime']\n save_to_db(event, 'Closing Datetime Updated')\n return self.render('/gentelella/admin/event/details/details.html', event=event)\n", "path": "open_event/views/admin/models_views/events.py"}]} | 1,753 | 231 |
gh_patches_debug_27663 | rasdani/github-patches | git_diff | interlegis__sapl-2580 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix the lexml search box with a broken link
http://sapl3.interlegis.leg.br/generico/lexml_pesquisar
Check with @edwardoliveira which way to go on this!

</issue>
<code>
[start of sapl/lexml/views.py]
1 from django.http import HttpResponse
2
3 from sapl.crud.base import CrudAux
4 from sapl.lexml.OAIServer import OAIServerFactory, get_config
5
6 from .models import LexmlProvedor, LexmlPublicador
7
8 LexmlProvedorCrud = CrudAux.build(LexmlProvedor, 'lexml_provedor')
9 LexmlPublicadorCrud = CrudAux.build(LexmlPublicador, 'lexml_publicador')
10
11
12 def lexml_request(request):
13 config = get_config(request.get_raw_uri(), int(request.GET.get('batch_size', 10)))
14 oai_server = OAIServerFactory(config)
15 r = oai_server.handleRequest(request.GET)
16 response = r.decode('UTF-8')
17 return HttpResponse(response, content_type='text/xml')
18
[end of sapl/lexml/views.py]
[start of sapl/lexml/urls.py]
1 from django.conf.urls import include, url
2
3 from sapl.lexml.views import LexmlProvedorCrud, LexmlPublicadorCrud, lexml_request
4
5 from .apps import AppConfig
6
7 app_name = AppConfig.name
8
9 urlpatterns = [
10 url(r'^sistema/lexml/provedor/',
11 include(LexmlProvedorCrud.get_urls())),
12 url(r'^sistema/lexml/publicador/',
13 include(LexmlPublicadorCrud.get_urls())),
14 url(r'^sistema/lexml', lexml_request, name='lexml_endpoint')
15 ]
16
[end of sapl/lexml/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sapl/lexml/urls.py b/sapl/lexml/urls.py
--- a/sapl/lexml/urls.py
+++ b/sapl/lexml/urls.py
@@ -1,6 +1,6 @@
from django.conf.urls import include, url
-from sapl.lexml.views import LexmlProvedorCrud, LexmlPublicadorCrud, lexml_request
+from sapl.lexml.views import LexmlProvedorCrud, LexmlPublicadorCrud, lexml_request, request_search
from .apps import AppConfig
@@ -11,5 +11,7 @@
include(LexmlProvedorCrud.get_urls())),
url(r'^sistema/lexml/publicador/',
include(LexmlPublicadorCrud.get_urls())),
- url(r'^sistema/lexml', lexml_request, name='lexml_endpoint')
+ url(r'^sistema/lexml/request_search/(?P<keyword>[\w\-]+)/', request_search, name='lexml_search'),
+ url(r'^sistema/lexml', lexml_request, name='lexml_endpoint'),
+
]
diff --git a/sapl/lexml/views.py b/sapl/lexml/views.py
--- a/sapl/lexml/views.py
+++ b/sapl/lexml/views.py
@@ -1,4 +1,5 @@
from django.http import HttpResponse
+from django.shortcuts import render
from sapl.crud.base import CrudAux
from sapl.lexml.OAIServer import OAIServerFactory, get_config
@@ -16,3 +17,6 @@
'metadataPrefix': request.GET.get('metadataPrefix', 'oai_lexml')})
response = r.decode('UTF-8')
return HttpResponse(response, content_type='text/xml')
+
+def request_search(request, keyword):
+ return render(request,"lexml/resultado-pesquisa.html",{"keyword":keyword})
\ No newline at end of file
| {"golden_diff": "diff --git a/sapl/lexml/urls.py b/sapl/lexml/urls.py\n--- a/sapl/lexml/urls.py\n+++ b/sapl/lexml/urls.py\n@@ -1,6 +1,6 @@\n from django.conf.urls import include, url\n \n-from sapl.lexml.views import LexmlProvedorCrud, LexmlPublicadorCrud, lexml_request\n+from sapl.lexml.views import LexmlProvedorCrud, LexmlPublicadorCrud, lexml_request, request_search\n \n from .apps import AppConfig\n \n@@ -11,5 +11,7 @@\n include(LexmlProvedorCrud.get_urls())),\n url(r'^sistema/lexml/publicador/',\n include(LexmlPublicadorCrud.get_urls())),\n- url(r'^sistema/lexml', lexml_request, name='lexml_endpoint')\n+ url(r'^sistema/lexml/request_search/(?P<keyword>[\\w\\-]+)/', request_search, name='lexml_search'),\n+ url(r'^sistema/lexml', lexml_request, name='lexml_endpoint'),\n+\n ]\ndiff --git a/sapl/lexml/views.py b/sapl/lexml/views.py\n--- a/sapl/lexml/views.py\n+++ b/sapl/lexml/views.py\n@@ -1,4 +1,5 @@\n from django.http import HttpResponse\n+from django.shortcuts import render\n \n from sapl.crud.base import CrudAux\n from sapl.lexml.OAIServer import OAIServerFactory, get_config\n@@ -16,3 +17,6 @@\n 'metadataPrefix': request.GET.get('metadataPrefix', 'oai_lexml')})\n response = r.decode('UTF-8')\n return HttpResponse(response, content_type='text/xml')\n+\n+def request_search(request, keyword):\n+ return render(request,\"lexml/resultado-pesquisa.html\",{\"keyword\":keyword})\n\\ No newline at end of file\n", "issue": "Corrigir caixa de busca do lexml com link quebrado\nhttp://sapl3.interlegis.leg.br/generico/lexml_pesquisar\r\nVerificar com @edwardoliveira qual caminho tomar quanto a isso!\r\n\r\n\r\n\n", "before_files": [{"content": "from django.http import HttpResponse\n\nfrom sapl.crud.base import CrudAux\nfrom sapl.lexml.OAIServer import OAIServerFactory, get_config\n\nfrom .models import LexmlProvedor, LexmlPublicador\n\nLexmlProvedorCrud = CrudAux.build(LexmlProvedor, 'lexml_provedor')\nLexmlPublicadorCrud = CrudAux.build(LexmlPublicador, 'lexml_publicador')\n\n\ndef lexml_request(request):\n config = get_config(request.get_raw_uri(), int(request.GET.get('batch_size', 10)))\n oai_server = OAIServerFactory(config)\n r = oai_server.handleRequest(request.GET)\n response = r.decode('UTF-8')\n return HttpResponse(response, content_type='text/xml')\n", "path": "sapl/lexml/views.py"}, {"content": "from django.conf.urls import include, url\n\nfrom sapl.lexml.views import LexmlProvedorCrud, LexmlPublicadorCrud, lexml_request\n\nfrom .apps import AppConfig\n\napp_name = AppConfig.name\n\nurlpatterns = [\n url(r'^sistema/lexml/provedor/',\n include(LexmlProvedorCrud.get_urls())),\n url(r'^sistema/lexml/publicador/',\n include(LexmlPublicadorCrud.get_urls())),\n url(r'^sistema/lexml', lexml_request, name='lexml_endpoint')\n]\n", "path": "sapl/lexml/urls.py"}]} | 1,020 | 424 |
gh_patches_debug_22765 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5858 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chore: Replace deprecated bootstrap features with alternatives
### Description
Throughout the code. As suggested by @NGPixel.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/ietfauth/widgets.py]
1 from django.forms import PasswordInput
2 from django.utils.safestring import mark_safe
3 from django.utils.translation import gettext as _
4
5 # The PasswordStrengthInput and PasswordConfirmationInput widgets come from the
6 # django-password-strength project, https://pypi.org/project/django-password-strength/
7 #
8 # Original license:
9 #
10 # Copyright © 2015 A.J. May and individual contributors. All rights reserved.
11 #
12 # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
13 # following conditions are met:
14 #
15 # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
16 # disclaimer.
17 #
18 # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
19 # following disclaimer in the documentation and/or other materials provided with the distribution.
20 #
21 # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
22 # products derived from this software without specific prior written permission.
23 #
24 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
25 # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
30 # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #
32
33 class PasswordStrengthInput(PasswordInput):
34 """
35 Form widget to show the user how strong his/her password is.
36 """
37
38 def render(self, name, value, attrs=None, renderer=None):
39 strength_markup = """
40 <div style="margin-top: 10px;">
41 <div class="progress" style="margin-bottom: 10px;">
42 <div class="progress-bar progress-bar-warning password_strength_bar" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="5" style="width: 0%%"></div>
43 </div>
44 <p class="text-muted password_strength_info hidden">
45 <span class="label label-danger">
46 %s
47 </span>
48 <span style="margin-left:5px;">
49 %s
50 </span>
51 </p>
52 </div>
53 """ % (
54 _("Warning"),
55 _(
56 'This password would take <em class="password_strength_time"></em> to crack.'
57 ),
58 )
59
60 try:
61 self.attrs["class"] = "%s password_strength".strip() % self.attrs["class"]
62 except KeyError:
63 self.attrs["class"] = "password_strength"
64
65 return mark_safe(
66 super(PasswordInput, self).render(name, value, attrs, renderer)
67 + strength_markup
68 )
69
70 class Media:
71 js = (
72 "ietf/js/zxcvbn.js",
73 "ietf/js/password_strength.js",
74 )
75
76
77 class PasswordConfirmationInput(PasswordInput):
78 """
79 Form widget to confirm the users password by letting him/her type it again.
80 """
81
82 def __init__(self, confirm_with=None, attrs=None, render_value=False):
83 super(PasswordConfirmationInput, self).__init__(attrs, render_value)
84 self.confirm_with = confirm_with
85
86 def render(self, name, value, attrs=None, renderer=None):
87 if self.confirm_with:
88 self.attrs["data-confirm-with"] = "id_%s" % self.confirm_with
89
90 confirmation_markup = """
91 <div style="margin-top: 10px;" class="hidden password_strength_info">
92 <p class="text-muted">
93 <span class="label label-danger">
94 %s
95 </span>
96 <span style="margin-left:5px;">%s</span>
97 </p>
98 </div>
99 """ % (
100 _("Warning"),
101 _("Your passwords don't match."),
102 )
103
104 try:
105 self.attrs["class"] = (
106 "%s password_confirmation".strip() % self.attrs["class"]
107 )
108 except KeyError:
109 self.attrs["class"] = "password_confirmation"
110
111 return mark_safe(
112 super(PasswordInput, self).render(name, value, attrs, renderer)
113 + confirmation_markup
114 )
115
[end of ietf/ietfauth/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/ietfauth/widgets.py b/ietf/ietfauth/widgets.py
--- a/ietf/ietfauth/widgets.py
+++ b/ietf/ietfauth/widgets.py
@@ -41,7 +41,7 @@
<div class="progress" style="margin-bottom: 10px;">
<div class="progress-bar progress-bar-warning password_strength_bar" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="5" style="width: 0%%"></div>
</div>
- <p class="text-muted password_strength_info hidden">
+ <p class="text-body-secondary password_strength_info hidden">
<span class="label label-danger">
%s
</span>
@@ -89,7 +89,7 @@
confirmation_markup = """
<div style="margin-top: 10px;" class="hidden password_strength_info">
- <p class="text-muted">
+ <p class="text-body-secondary">
<span class="label label-danger">
%s
</span>
| {"golden_diff": "diff --git a/ietf/ietfauth/widgets.py b/ietf/ietfauth/widgets.py\n--- a/ietf/ietfauth/widgets.py\n+++ b/ietf/ietfauth/widgets.py\n@@ -41,7 +41,7 @@\n <div class=\"progress\" style=\"margin-bottom: 10px;\">\n <div class=\"progress-bar progress-bar-warning password_strength_bar\" role=\"progressbar\" aria-valuenow=\"0\" aria-valuemin=\"0\" aria-valuemax=\"5\" style=\"width: 0%%\"></div>\n </div>\n- <p class=\"text-muted password_strength_info hidden\">\n+ <p class=\"text-body-secondary password_strength_info hidden\">\n <span class=\"label label-danger\">\n %s\n </span>\n@@ -89,7 +89,7 @@\n \n confirmation_markup = \"\"\"\n <div style=\"margin-top: 10px;\" class=\"hidden password_strength_info\">\n- <p class=\"text-muted\">\n+ <p class=\"text-body-secondary\">\n <span class=\"label label-danger\">\n %s\n </span>\n", "issue": "chore: Replace deprecated bootstrap features with alternatives\n### Description\n\nThroughout the code. As suggested by @NGPixel.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "from django.forms import PasswordInput\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _\n\n# The PasswordStrengthInput and PasswordConfirmationInput widgets come from the\n# django-password-strength project, https://pypi.org/project/django-password-strength/\n#\n# Original license:\n#\n# Copyright © 2015 A.J. May and individual contributors. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the \n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the \n# following disclaimer in the documentation and/or other materials provided with the distribution.\n# \n# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote \n# products derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, \n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE \n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nclass PasswordStrengthInput(PasswordInput):\n \"\"\"\n Form widget to show the user how strong his/her password is.\n \"\"\"\n\n def render(self, name, value, attrs=None, renderer=None):\n strength_markup = \"\"\"\n <div style=\"margin-top: 10px;\">\n <div class=\"progress\" style=\"margin-bottom: 10px;\">\n <div class=\"progress-bar progress-bar-warning password_strength_bar\" role=\"progressbar\" aria-valuenow=\"0\" aria-valuemin=\"0\" aria-valuemax=\"5\" style=\"width: 0%%\"></div>\n </div>\n <p class=\"text-muted password_strength_info hidden\">\n <span class=\"label label-danger\">\n %s\n </span>\n <span style=\"margin-left:5px;\">\n %s\n </span>\n </p>\n </div>\n \"\"\" % (\n _(\"Warning\"),\n _(\n 'This password would take <em class=\"password_strength_time\"></em> to crack.'\n ),\n )\n\n try:\n self.attrs[\"class\"] = \"%s password_strength\".strip() % self.attrs[\"class\"]\n except KeyError:\n self.attrs[\"class\"] = \"password_strength\"\n\n return mark_safe(\n super(PasswordInput, self).render(name, value, attrs, renderer)\n + strength_markup\n )\n\n class Media:\n js = (\n \"ietf/js/zxcvbn.js\",\n \"ietf/js/password_strength.js\",\n )\n\n\nclass PasswordConfirmationInput(PasswordInput):\n \"\"\"\n Form widget to confirm the users password by letting him/her type it again.\n \"\"\"\n\n def __init__(self, confirm_with=None, attrs=None, render_value=False):\n super(PasswordConfirmationInput, self).__init__(attrs, render_value)\n self.confirm_with = confirm_with\n\n def render(self, name, value, attrs=None, renderer=None):\n if self.confirm_with:\n self.attrs[\"data-confirm-with\"] = \"id_%s\" % self.confirm_with\n \n confirmation_markup = \"\"\"\n <div style=\"margin-top: 10px;\" class=\"hidden password_strength_info\">\n <p class=\"text-muted\">\n <span class=\"label label-danger\">\n %s\n </span>\n <span style=\"margin-left:5px;\">%s</span>\n </p>\n </div>\n \"\"\" % (\n _(\"Warning\"),\n _(\"Your passwords don't match.\"),\n )\n\n try:\n self.attrs[\"class\"] = (\n \"%s password_confirmation\".strip() % self.attrs[\"class\"]\n )\n except KeyError:\n self.attrs[\"class\"] = \"password_confirmation\"\n\n return mark_safe(\n super(PasswordInput, self).render(name, value, attrs, renderer)\n + confirmation_markup\n )\n", "path": "ietf/ietfauth/widgets.py"}]} | 1,818 | 239 |
gh_patches_debug_21502 | rasdani/github-patches | git_diff | NVIDIA-Merlin__NVTabular-1380 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEA] Add DistributedClient API and use global client objects in Workflow and Dataset
**Is your feature request related to a problem? Please describe.**
In order to perform distributed ETL and/or spill data between device/host/disk, NVTabular currently requires the user to provide a Dask-Distributed (or Dask-CUDA) cluster. Furthermore, the `nvt.Workflow` (and sometimes `nvt.Dataset`) need to be defined with an explicit `client=` argument in order for the distributed cluster to be used. Although I feel strongly that it would be dangerous and messy for NVTabular to automatically spin up a distributed cluster by default, I do suspect that the user experience could be much better.
**Describe the solution you'd like**
To improve the user experience of distributed ETL with NVTabular, I propose:
1. Simple `LocalCluster`/`LocalCUDACluster`-wrapper APIs be added to NVTabular so that users can enable multi-GPU processing and/or spilling without interacting with distributed/dask_cuda. I am not yet sure of the ideal API to expose in NVTabular, but perhaps something like `DistributedClient` (wrapping `distributed.Client`). This API could be used to automatically generate a local cluster (if the address of an existing cluster is not provided), and we could add a `cpu=False` kwarg to toggle between gpu and cpu mode.
2. [**DONE** in #1318] Automatically detect and **use** an existing Dask client object. NVTabular already [checks for a global dask client](https://github.com/NVIDIA-Merlin/NVTabular/blob/34d01d7e6090d6029ac40010ed79e1558f18759c/nvtabular/workflow/workflow.py#L88) in both `Workflow` and `Dataset`. However, these checks result in a UserWarning whenever a global client is detected. Instead of warning the user, I propose that NVTabular automatically **use** the client object when it is detected (which is actually the default behavior in `dask.dataframe` and `dask.array` anyway).
**Describe alternatives you've considered**
The alternative is to continue requiring the user to deploy Dask clusters/clients with `distributed`/`dask_cuda`, and require them to include an explicit `client` argument when defining `Workflow` objects.
</issue>
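To make item 1 of the proposal concrete, a usage sketch follows. The names `Distributed` and `Serial` match the helpers that the final change exports from the `nvtabular` namespace; the context-manager form, the `cpu` flag, and the toy operator chain are illustrative assumptions rather than a confirmed signature.

```python
# Hypothetical sketch: enable a local Dask(-CUDA) cluster without touching
# distributed/dask_cuda directly; Workflow/Dataset pick up the global client.
import nvtabular as nvt

cat_features = ["user_id", "item_id"] >> nvt.ops.Categorify()
workflow = nvt.Workflow(cat_features)

with nvt.Distributed(cpu=False):                  # assumed context-manager usage
    dataset = nvt.Dataset("data/*.parquet")
    workflow.fit(dataset)
    workflow.transform(dataset).to_parquet("out/")
```

Inside the `with` block no explicit `client=` argument is needed, which is the behavior item 2 asks for.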
<code>
[start of nvtabular/__init__.py]
1 #
2 # Copyright (c) 2022, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 import warnings
17
18 from merlin.graph import ColumnSelector
19 from merlin.schema import ColumnSchema, Schema
20
21 from . import graph, io, workflow # noqa
22 from ._version import get_versions
23
24 # suppress some warnings with cudf warning about column ordering with dlpack
25 # and numba warning about deprecated environment variables
26 warnings.filterwarnings("ignore", module="cudf.io.dlpack")
27 warnings.filterwarnings("ignore", module="numba.cuda.envvars")
28
29
30 WorkflowNode = workflow.WorkflowNode
31 Workflow = workflow.Workflow
32 Dataset = io.dataset.Dataset
33
34
35 # Provides an alias of ColumnSelector so that old usages of ColumnGroup to
36 # select columns at the beginning of an operator chain don't break
37 def ColumnGroup(columns):
38 warnings.warn("ColumnGroup is deprecated, use ColumnSelector instead", DeprecationWarning)
39 return ColumnSelector(columns)
40
41
42 __all__ = [
43 "Workflow",
44 "Dataset",
45 "WorkflowNode",
46 "ColumnGroup",
47 "ColumnSelector",
48 "ColumnSchema",
49 "Schema",
50 ]
51
52 # cudf warns about column ordering with dlpack methods, ignore it
53 warnings.filterwarnings("ignore", module="cudf.io.dlpack")
54
55
56 __version__ = get_versions()["version"]
57 del get_versions
58
[end of nvtabular/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvtabular/__init__.py b/nvtabular/__init__.py
--- a/nvtabular/__init__.py
+++ b/nvtabular/__init__.py
@@ -18,7 +18,7 @@
from merlin.graph import ColumnSelector
from merlin.schema import ColumnSchema, Schema
-from . import graph, io, workflow # noqa
+from . import graph, io, utils, workflow # noqa
from ._version import get_versions
# suppress some warnings with cudf warning about column ordering with dlpack
@@ -30,6 +30,8 @@
WorkflowNode = workflow.WorkflowNode
Workflow = workflow.Workflow
Dataset = io.dataset.Dataset
+Distributed = utils.Distributed
+Serial = utils.Serial
# Provides an alias of ColumnSelector so that old usages of ColumnGroup to
@@ -47,6 +49,8 @@
"ColumnSelector",
"ColumnSchema",
"Schema",
+ "Distributed",
+ "Serial",
]
# cudf warns about column ordering with dlpack methods, ignore it
| {"golden_diff": "diff --git a/nvtabular/__init__.py b/nvtabular/__init__.py\n--- a/nvtabular/__init__.py\n+++ b/nvtabular/__init__.py\n@@ -18,7 +18,7 @@\n from merlin.graph import ColumnSelector\n from merlin.schema import ColumnSchema, Schema\n \n-from . import graph, io, workflow # noqa\n+from . import graph, io, utils, workflow # noqa\n from ._version import get_versions\n \n # suppress some warnings with cudf warning about column ordering with dlpack\n@@ -30,6 +30,8 @@\n WorkflowNode = workflow.WorkflowNode\n Workflow = workflow.Workflow\n Dataset = io.dataset.Dataset\n+Distributed = utils.Distributed\n+Serial = utils.Serial\n \n \n # Provides an alias of ColumnSelector so that old usages of ColumnGroup to\n@@ -47,6 +49,8 @@\n \"ColumnSelector\",\n \"ColumnSchema\",\n \"Schema\",\n+ \"Distributed\",\n+ \"Serial\",\n ]\n \n # cudf warns about column ordering with dlpack methods, ignore it\n", "issue": "[FEA] Add DistributedClient API and use global client objects in Workflow and Dataset\n**Is your feature request related to a problem? Please describe.**\r\nIn order to perform distributed ETL and/or spill data between device/host/disk, NVTabular currently requires the user to provide a Dask-Distributed (or Dask-CUDA) cluster. Furthermore, the `nvt.Workflow` (and sometimes `nvt.Dataset`) need to be defined with an explicit `client=` argument in order for the distributed cluster to be used. Although I feel strongly that it would be dangerous and messy for NVTabular to automatically spin up a distributed cluster by default, I do suspect that the user experience could be much better. \r\n\r\n**Describe the solution you'd like**\r\nTo improve the user experience of distributed ETL with NVTabular, I propose:\r\n\r\n1. Simple `LocalCluster`/`LocalCUDACluster`-wrapper APIs be added to NVTabular so that users can enable multi-GPU processing and/or spilling without interacting with distributed/dask_cuda. I am not yet sure of the ideal API to expose in NVTabular, but perhaps something like `DistributedClient` (wrapping `distributed.Client`). This API could be used to automatically generate a local cluster (if the address of an existing cluster is not provided), and we could add a `cpu=False` kwarg to toggle between gpu and cpu mode.\r\n2. [**DONE** in #1318] Automatically detect and **use** an existing Dask client object. NVTabular already [checks for a global dask client](https://github.com/NVIDIA-Merlin/NVTabular/blob/34d01d7e6090d6029ac40010ed79e1558f18759c/nvtabular/workflow/workflow.py#L88) in both `Workflow ` and `Dataset`. However, these checks result in a UserWarning whenever a global client is detected. 
Instead of warning the user, I propose that NVTabular automatically **use** the client object when it is detected (which is actually the defult behavior in `dask.dataframe` and `dask.array` anyway).\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is to continue requiring the user to deploy Dask clusters/clients with `distributed`/`dask_cuda`, and require them to include an explicit `client` argument when defining an `Workflow` objects.\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2022, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport warnings\n\nfrom merlin.graph import ColumnSelector\nfrom merlin.schema import ColumnSchema, Schema\n\nfrom . import graph, io, workflow # noqa\nfrom ._version import get_versions\n\n# suppress some warnings with cudf warning about column ordering with dlpack\n# and numba warning about deprecated environment variables\nwarnings.filterwarnings(\"ignore\", module=\"cudf.io.dlpack\")\nwarnings.filterwarnings(\"ignore\", module=\"numba.cuda.envvars\")\n\n\nWorkflowNode = workflow.WorkflowNode\nWorkflow = workflow.Workflow\nDataset = io.dataset.Dataset\n\n\n# Provides an alias of ColumnSelector so that old usages of ColumnGroup to\n# select columns at the beginning of an operator chain don't break\ndef ColumnGroup(columns):\n warnings.warn(\"ColumnGroup is deprecated, use ColumnSelector instead\", DeprecationWarning)\n return ColumnSelector(columns)\n\n\n__all__ = [\n \"Workflow\",\n \"Dataset\",\n \"WorkflowNode\",\n \"ColumnGroup\",\n \"ColumnSelector\",\n \"ColumnSchema\",\n \"Schema\",\n]\n\n# cudf warns about column ordering with dlpack methods, ignore it\nwarnings.filterwarnings(\"ignore\", module=\"cudf.io.dlpack\")\n\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n", "path": "nvtabular/__init__.py"}]} | 1,581 | 242 |
gh_patches_debug_32431 | rasdani/github-patches | git_diff | engnadeau__pybotics-13 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Robot Model-Specific Parameters Should be in External Files
Robot model-specific parameters (e.g., MDH, joint limits, etc.) should be stored in external files (e.g., CSV, JSON, etc.). This way, new models and modifications to existing models are not tied to the codebase. `robot_model.py` should be deprecated and example models should be saved in a separate `example` folder in the repo.
</issue>
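As a rough illustration of the request, the model-specific parameters can live in a plain CSV that is loaded at runtime; the file name, delimiter, and `Robot(model)` constructor below mirror the updated example in the accompanying change and are otherwise assumptions.

```python
# Sketch: read an MDH table (one row of alpha, a, theta, d per link) from CSV
# instead of hard-coding it in robot_model.py.
import numpy as np
import pybotics as py

model = np.loadtxt('ur10-mdh.csv', delimiter=',')
robot = py.Robot(model)
```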
<code>
[start of pybotics/__init__.py]
1 # make the follow accessible from pybotics namespace
2 from . import geometry
3 from . import kinematics
4 from .robot import *
5 from . import robot_model
6
[end of pybotics/__init__.py]
[start of pybotics/robot_model.py]
1 import numpy as np
2
3 '''
4 Modified DH parameter matrices of various robots (rx, tx, rz, tz) aka (alpha, a, theta, d)
5 '''
6
7
8 def ur10():
9 model = np.array([
10 [0, 0, 0, 118.0],
11 [np.pi / 2.0, 0, np.pi, 0],
12 [0, 612.7, 0, 0],
13 [0, 571.6, 0, 163.9],
14 [-np.pi / 2.0, 0, 0, 115.7],
15 [np.pi / 2.0, 0, np.pi, 92.2]
16 ])
17 return model
18
19
20 def puma_560():
21 model = np.array([
22 [0, 0, 0, 0],
23 [-np.pi / 2.0, 0, 0, 0],
24 [0, 612.7, 0, 0],
25 [0, 571.6, 0, 163.9],
26 [-np.pi / 2.0, 0, 0, 115.7],
27 [np.pi / 2.0, 0, np.pi, 92.2]
28 ])
29 return model
30
31
32 def kuka_lbr_iiwa_7():
33 model = np.array([
34 [0, 0, 0, 340],
35 [-np.pi / 2.0, 0, 0, 0],
36 [np.pi / 2.0, 0, 0, 400],
37 [np.pi / 2.0, 0, 0, 0],
38 [-np.pi / 2.0, 0, 0, 400],
39 [-np.pi / 2.0, 0, 0, 0],
40 [np.pi / 2.0, 0, 0, 126]
41 ])
42 return model
43
[end of pybotics/robot_model.py]
[start of examples/example_robot.py]
1 import copy
2
3 import pybotics as py
4 import numpy as np
5
6 # set numpy print options
7 np.set_printoptions(precision=3)
8 np.set_printoptions(suppress=True)
9
10 # create robot
11 ideal_robot = py.Robot()
12 ideal_robot.robot_model = py.robot_model.ur10()
13
14 # create pseudo-realistic robot with kinematic errors
15 real_robot = copy.deepcopy(ideal_robot)
16 real_robot.impair_robot_model()
17
18 print('Ideal Robot Model:\n', ideal_robot.robot_model, '\n')
19 print('Pseudo-Real Robot Model:\n', real_robot.robot_model, '\n')
20
21 # demonstrate forward kinematics
22 joints = [0, 0, 0, 0, 0, 0]
23
24 print('Ideal Pose:\n', ideal_robot.fk(joints), '\n')
25 print('Pseudo-Real Pose:\n', real_robot.fk(joints), '\n')
26
[end of examples/example_robot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/example_robot.py b/examples/example_robot.py
--- a/examples/example_robot.py
+++ b/examples/example_robot.py
@@ -8,18 +8,17 @@
np.set_printoptions(suppress=True)
# create robot
-ideal_robot = py.Robot()
-ideal_robot.robot_model = py.robot_model.ur10()
+model = np.loadtxt('ur10-mdh.csv', delimiter=',')
+robot = py.Robot(model)
-# create pseudo-realistic robot with kinematic errors
-real_robot = copy.deepcopy(ideal_robot)
-real_robot.impair_robot_model()
-
-print('Ideal Robot Model:\n', ideal_robot.robot_model, '\n')
-print('Pseudo-Real Robot Model:\n', real_robot.robot_model, '\n')
+print('Robot Model:\n{}\n'.format(robot.robot_model))
# demonstrate forward kinematics
-joints = [0, 0, 0, 0, 0, 0]
+joints = [0] * robot.num_dof()
+pose = robot.fk(joints)
+
+print('Pose:\n{}\n'.format(pose))
-print('Ideal Pose:\n', ideal_robot.fk(joints), '\n')
-print('Pseudo-Real Pose:\n', real_robot.fk(joints), '\n')
+# demonstrate inverse kinematics
+new_joints = robot.ik(pose)
+print('Solved Joints:\n{}\n'.format(new_joints))
diff --git a/pybotics/__init__.py b/pybotics/__init__.py
--- a/pybotics/__init__.py
+++ b/pybotics/__init__.py
@@ -1,5 +1,4 @@
# make the follow accessible from pybotics namespace
from . import geometry
from . import kinematics
-from .robot import *
-from . import robot_model
+from .robot import Robot
diff --git a/pybotics/robot_model.py b/pybotics/robot_model.py
deleted file mode 100644
--- a/pybotics/robot_model.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import numpy as np
-
-'''
-Modified DH parameter matrices of various robots (rx, tx, rz, tz) aka (alpha, a, theta, d)
-'''
-
-
-def ur10():
- model = np.array([
- [0, 0, 0, 118.0],
- [np.pi / 2.0, 0, np.pi, 0],
- [0, 612.7, 0, 0],
- [0, 571.6, 0, 163.9],
- [-np.pi / 2.0, 0, 0, 115.7],
- [np.pi / 2.0, 0, np.pi, 92.2]
- ])
- return model
-
-
-def puma_560():
- model = np.array([
- [0, 0, 0, 0],
- [-np.pi / 2.0, 0, 0, 0],
- [0, 612.7, 0, 0],
- [0, 571.6, 0, 163.9],
- [-np.pi / 2.0, 0, 0, 115.7],
- [np.pi / 2.0, 0, np.pi, 92.2]
- ])
- return model
-
-
-def kuka_lbr_iiwa_7():
- model = np.array([
- [0, 0, 0, 340],
- [-np.pi / 2.0, 0, 0, 0],
- [np.pi / 2.0, 0, 0, 400],
- [np.pi / 2.0, 0, 0, 0],
- [-np.pi / 2.0, 0, 0, 400],
- [-np.pi / 2.0, 0, 0, 0],
- [np.pi / 2.0, 0, 0, 126]
- ])
- return model
| {"golden_diff": "diff --git a/examples/example_robot.py b/examples/example_robot.py\n--- a/examples/example_robot.py\n+++ b/examples/example_robot.py\n@@ -8,18 +8,17 @@\n np.set_printoptions(suppress=True)\n \n # create robot\n-ideal_robot = py.Robot()\n-ideal_robot.robot_model = py.robot_model.ur10()\n+model = np.loadtxt('ur10-mdh.csv', delimiter=',')\n+robot = py.Robot(model)\n \n-# create pseudo-realistic robot with kinematic errors\n-real_robot = copy.deepcopy(ideal_robot)\n-real_robot.impair_robot_model()\n-\n-print('Ideal Robot Model:\\n', ideal_robot.robot_model, '\\n')\n-print('Pseudo-Real Robot Model:\\n', real_robot.robot_model, '\\n')\n+print('Robot Model:\\n{}\\n'.format(robot.robot_model))\n \n # demonstrate forward kinematics\n-joints = [0, 0, 0, 0, 0, 0]\n+joints = [0] * robot.num_dof()\n+pose = robot.fk(joints)\n+\n+print('Pose:\\n{}\\n'.format(pose))\n \n-print('Ideal Pose:\\n', ideal_robot.fk(joints), '\\n')\n-print('Pseudo-Real Pose:\\n', real_robot.fk(joints), '\\n')\n+# demonstrate inverse kinematics\n+new_joints = robot.ik(pose)\n+print('Solved Joints:\\n{}\\n'.format(new_joints))\ndiff --git a/pybotics/__init__.py b/pybotics/__init__.py\n--- a/pybotics/__init__.py\n+++ b/pybotics/__init__.py\n@@ -1,5 +1,4 @@\n # make the follow accessible from pybotics namespace\n from . import geometry\n from . import kinematics\n-from .robot import *\n-from . import robot_model\n+from .robot import Robot\ndiff --git a/pybotics/robot_model.py b/pybotics/robot_model.py\ndeleted file mode 100644\n--- a/pybotics/robot_model.py\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-import numpy as np\n-\n-'''\n-Modified DH parameter matrices of various robots (rx, tx, rz, tz) aka (alpha, a, theta, d)\n-'''\n-\n-\n-def ur10():\n- model = np.array([\n- [0, 0, 0, 118.0],\n- [np.pi / 2.0, 0, np.pi, 0],\n- [0, 612.7, 0, 0],\n- [0, 571.6, 0, 163.9],\n- [-np.pi / 2.0, 0, 0, 115.7],\n- [np.pi / 2.0, 0, np.pi, 92.2]\n- ])\n- return model\n-\n-\n-def puma_560():\n- model = np.array([\n- [0, 0, 0, 0],\n- [-np.pi / 2.0, 0, 0, 0],\n- [0, 612.7, 0, 0],\n- [0, 571.6, 0, 163.9],\n- [-np.pi / 2.0, 0, 0, 115.7],\n- [np.pi / 2.0, 0, np.pi, 92.2]\n- ])\n- return model\n-\n-\n-def kuka_lbr_iiwa_7():\n- model = np.array([\n- [0, 0, 0, 340],\n- [-np.pi / 2.0, 0, 0, 0],\n- [np.pi / 2.0, 0, 0, 400],\n- [np.pi / 2.0, 0, 0, 0],\n- [-np.pi / 2.0, 0, 0, 400],\n- [-np.pi / 2.0, 0, 0, 0],\n- [np.pi / 2.0, 0, 0, 126]\n- ])\n- return model\n", "issue": "Robot Model-Specific Parameters Should be in External Files\nRobot Model-Specific Parameters (e.g., MDH, joint limits, etc) Should be in External Files (e.g., csv, json, etc). This way, new models and modifications to models are not tied to the codebase. `robot_model.py` should be deprecated and example models should be saved in a separate `example` folder in the repo.\n", "before_files": [{"content": "# make the follow accessible from pybotics namespace\nfrom . import geometry\nfrom . import kinematics\nfrom .robot import *\nfrom . 
import robot_model\n", "path": "pybotics/__init__.py"}, {"content": "import numpy as np\n\n'''\nModified DH parameter matrices of various robots (rx, tx, rz, tz) aka (alpha, a, theta, d)\n'''\n\n\ndef ur10():\n model = np.array([\n [0, 0, 0, 118.0],\n [np.pi / 2.0, 0, np.pi, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2.0, 0, 0, 115.7],\n [np.pi / 2.0, 0, np.pi, 92.2]\n ])\n return model\n\n\ndef puma_560():\n model = np.array([\n [0, 0, 0, 0],\n [-np.pi / 2.0, 0, 0, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2.0, 0, 0, 115.7],\n [np.pi / 2.0, 0, np.pi, 92.2]\n ])\n return model\n\n\ndef kuka_lbr_iiwa_7():\n model = np.array([\n [0, 0, 0, 340],\n [-np.pi / 2.0, 0, 0, 0],\n [np.pi / 2.0, 0, 0, 400],\n [np.pi / 2.0, 0, 0, 0],\n [-np.pi / 2.0, 0, 0, 400],\n [-np.pi / 2.0, 0, 0, 0],\n [np.pi / 2.0, 0, 0, 126]\n ])\n return model\n", "path": "pybotics/robot_model.py"}, {"content": "import copy\n\nimport pybotics as py\nimport numpy as np\n\n# set numpy print options\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\n\n# create robot\nideal_robot = py.Robot()\nideal_robot.robot_model = py.robot_model.ur10()\n\n# create pseudo-realistic robot with kinematic errors\nreal_robot = copy.deepcopy(ideal_robot)\nreal_robot.impair_robot_model()\n\nprint('Ideal Robot Model:\\n', ideal_robot.robot_model, '\\n')\nprint('Pseudo-Real Robot Model:\\n', real_robot.robot_model, '\\n')\n\n# demonstrate forward kinematics\njoints = [0, 0, 0, 0, 0, 0]\n\nprint('Ideal Pose:\\n', ideal_robot.fk(joints), '\\n')\nprint('Pseudo-Real Pose:\\n', real_robot.fk(joints), '\\n')\n", "path": "examples/example_robot.py"}]} | 1,483 | 962 |
gh_patches_debug_169 | rasdani/github-patches | git_diff | joke2k__faker-1235 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
French IBAN should be 27 characters long
* Faker version: 4.1.1
### Steps to reproduce
```
import faker
from faker import Faker
fake = Faker('fr_FR')
fr_iban = fake.iban()
fr_iban
'FR96505438725498141631455686'
len(fr_iban)
28
```
### Expected behavior
```
>>> len(fr_iban)
27
```
[As stated on wikipedia in France row](https://en.wikipedia.org/wiki/International_Bank_Account_Number#IBAN_formats_by_country)
### Actual behavior
```
>>> len(fr_iban)
28
```
</issue>
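A short arithmetic check makes the expected fix clear: an IBAN is the two-letter country code plus two check digits plus the BBAN, so a 27-character French IBAN needs a 23-character BBAN (the snippet is illustrative only).

```python
# IBAN = country code (2) + check digits (2) + BBAN
# French IBAN length is 27, so the BBAN must be 27 - 4 = 23 characters.
bban_format = '#' * 23
assert len('FR') + 2 + len(bban_format) == 27
```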
<code>
[start of faker/providers/bank/fr_FR/__init__.py]
1 from .. import Provider as BankProvider
2
3
4 class Provider(BankProvider):
5 bban_format = '########################'
6 country_code = 'FR'
7
[end of faker/providers/bank/fr_FR/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/bank/fr_FR/__init__.py b/faker/providers/bank/fr_FR/__init__.py
--- a/faker/providers/bank/fr_FR/__init__.py
+++ b/faker/providers/bank/fr_FR/__init__.py
@@ -2,5 +2,5 @@
class Provider(BankProvider):
- bban_format = '########################'
+ bban_format = '#######################'
country_code = 'FR'
| {"golden_diff": "diff --git a/faker/providers/bank/fr_FR/__init__.py b/faker/providers/bank/fr_FR/__init__.py\n--- a/faker/providers/bank/fr_FR/__init__.py\n+++ b/faker/providers/bank/fr_FR/__init__.py\n@@ -2,5 +2,5 @@\n \n \n class Provider(BankProvider):\n- bban_format = '########################'\n+ bban_format = '#######################'\n country_code = 'FR'\n", "issue": "French IBAN should be 27 char of length\n* Faker version: 4.1.1\r\n\r\n### Steps to reproduce\r\n\r\n```\r\nimport faker\r\nfrom faker import Faker\r\nfake = Faker('fr_FR')\r\nfr_iban = fake.iban()\r\nfr_iban\r\n'FR96505438725498141631455686'\r\nlen(fr_iban)\r\n28\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\n```\r\n>>> len(fr_iban)\r\n27\r\n```\r\n\r\n[As stated on wikipedia in France row](https://en.wikipedia.org/wiki/International_Bank_Account_Number#IBAN_formats_by_country)\r\n\r\n### Actual behavior\r\n\r\n```\r\n>>> len(fr_iban)\r\n28\r\n```\r\n\n", "before_files": [{"content": "from .. import Provider as BankProvider\n\n\nclass Provider(BankProvider):\n bban_format = '########################'\n country_code = 'FR'\n", "path": "faker/providers/bank/fr_FR/__init__.py"}]} | 743 | 100 |
gh_patches_debug_12956 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5392 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: Wrong import in ColossalAuto's meta_registry/binary_elementwise_ops.py
### 🐛 Describe the bug
# Problem description
The file `colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py` contains the following line:
```python
from ..constants import BCAST_FUNC_OP
```
However, the file `colossalai/auto_parallel/meta_profiler/constants.py` which this import refers to does not contain any `BCAST_FUNC_OP`. This leads to an `ImportError` when running ColossalAuto in release 0.3.3 and newer.
This constant can be found in the file `colossalai/auto_parallel/tensor_shard/constants.py`. The last commit to `colossalai/auto_parallel/meta_profiler/constants.py` (commit ID `079bf3cb`) removes the import of tensor_shard's `constants.py` from meta_profiler's `constants.py` (seemingly due to an automated refactoring).
# Solution
Since no other file in the `meta_registry` module uses constants from `tensor_shard/constants.py`, and to avoid automated removal of "unused" imports in the future, the import statement in question in the above-mentioned `binary_elementwise_ops.py` could be changed to:
```python
from colossalai.auto_parallel.tensor_shard.constants import BCAST_FUNC_OP
```
### Environment
- Python 3.8
- Torch 1.12.0
- no CUDA
</issue>
<code>
[start of colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py]
1 from typing import List, Tuple
2
3 import torch
4
5 from colossalai._analyzer._subclasses.flop_tensor import flop_mapping
6 from colossalai._analyzer.fx.node_util import compute_size_in_bytes as activation_size
7 from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem
8
9 from ..constants import BCAST_FUNC_OP
10 from ..registry import meta_register
11
12 __all__ = ["binary_elementwise_meta_info"]
13
14
15 @meta_register.register(BCAST_FUNC_OP)
16 def binary_elementwise_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:
17 """Meta information generator for binary elementwise operations
18 NOTE: Some of the binary elementwise operations will discard the input activation after computation, as they
19 don't need those tensors for back propagation, for example, if there are two tensors being sent for `torch.add`,
20 they will be discarded right after add operation is done. We create a simple API in `ShardMetaInfo` class to identify
21 this behavior, it is critical for better memory estimation.
22
23 Returns:
24 Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs
25 """
26
27 input_op_data = [arg for arg in args if arg.type != OperationDataType.OUTPUT]
28 output_op_data = next(filter(lambda arg: arg.type == OperationDataType.OUTPUT, args))
29
30 # construct forward args for flop mapping
31 fwd_in_args = [opdata.data for opdata in input_op_data]
32 fwd_out_args = [output_op_data.data]
33
34 # calculate cost
35
36 # calculate compute cost
37 # NOTE: we set bwd_compute_cost two times of fwd_compute_cost in this case
38 fwd_compute_cost = flop_mapping[torch.ops.aten.add.Tensor](fwd_in_args, fwd_out_args)
39 bwd_compute_cost = fwd_compute_cost * 2
40 compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)
41
42 # calculate memory cost
43 param_mem_cost = activation_size([arg.data for arg in input_op_data if arg.type == OperationDataType.PARAM])
44 fwd_mem_cost = MemoryCost(
45 activation=activation_size(output_op_data.data),
46 parameter=param_mem_cost,
47 )
48 bwd_mem_cost = MemoryCost(
49 activation=activation_size(fwd_in_args),
50 parameter=param_mem_cost,
51 )
52
53 # total cost
54 total_mem_cost = MemoryCost(
55 activation=fwd_mem_cost.activation + bwd_mem_cost.activation,
56 parameter=fwd_mem_cost.parameter + bwd_mem_cost.parameter,
57 )
58
59 memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
60
61 # store fwd_in, fwd_buffer, fwd_out
62 fwd_in = []
63 fwd_buffer = []
64 fwd_out = [torch.zeros_like(output_op_data.data, device="meta")]
65
66 return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out
67
[end of colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py b/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py
--- a/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py
+++ b/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py
@@ -4,9 +4,9 @@
from colossalai._analyzer._subclasses.flop_tensor import flop_mapping
from colossalai._analyzer.fx.node_util import compute_size_in_bytes as activation_size
+from colossalai.auto_parallel.tensor_shard.constants import BCAST_FUNC_OP
from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem
-from ..constants import BCAST_FUNC_OP
from ..registry import meta_register
__all__ = ["binary_elementwise_meta_info"]
| {"golden_diff": "diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py b/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py\n--- a/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py\n+++ b/colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py\n@@ -4,9 +4,9 @@\n \n from colossalai._analyzer._subclasses.flop_tensor import flop_mapping\n from colossalai._analyzer.fx.node_util import compute_size_in_bytes as activation_size\n+from colossalai.auto_parallel.tensor_shard.constants import BCAST_FUNC_OP\n from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem\n \n-from ..constants import BCAST_FUNC_OP\n from ..registry import meta_register\n \n __all__ = [\"binary_elementwise_meta_info\"]\n", "issue": "[BUG]: Wrong import in ColossalAuto's meta_registry/binary_elementwise_ops.py\n### \ud83d\udc1b Describe the bug\n\n# Problem description\r\n\r\nThe file `colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py` contains the following line: \r\n\r\n```python\r\nfrom ..constants import BCAST_FUNC_OP\r\n```\r\n\r\nHowever, the file `colossalai/auto_parallel/meta_profiler/constants.py` which this import refers to does not contain any `BCAST_FUNC_OP`. This leads to an `ImportError` when running ColossalAuto in release 0.3.3 and newer. \r\n\r\nThis constant can be found in the file `colossalai/auto_parallel/tensor_shard/constants.py`. The last commit to `colossalai/auto_parallel/meta_profiler/constants.py` (commit ID `079bf3cb`) removes the import of tensor_shard's `constants.py` from meta_profiler's `constants.py` (seemingly due to an automated refactoring).\r\n\r\n# Solution\r\n\r\nSince no other file in the `meta_registry` module uses constants from the `tensor_shard/constants.py` and to avoid automated removal of \"unused\" imports in the future, the import statement in question in above-mentioned `binary_elementwise_ops.py` could be changed to: \r\n\r\n```python\r\nfrom colossalai.auto_parallel.tensor_shard.constants import BCAST_FUNC_OP\r\n```\n\n### Environment\n\n- Python 3.8\r\n- Torch 1.12.0\r\n- no CUDA\n", "before_files": [{"content": "from typing import List, Tuple\n\nimport torch\n\nfrom colossalai._analyzer._subclasses.flop_tensor import flop_mapping\nfrom colossalai._analyzer.fx.node_util import compute_size_in_bytes as activation_size\nfrom colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem\n\nfrom ..constants import BCAST_FUNC_OP\nfrom ..registry import meta_register\n\n__all__ = [\"binary_elementwise_meta_info\"]\n\n\n@meta_register.register(BCAST_FUNC_OP)\ndef binary_elementwise_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n \"\"\"Meta information generator for binary elementwise operations\n NOTE: Some of the binary elementwise operations will discard the input activation after computation, as they\n don't need those tensors for back propagation, for example, if there are two tensors being sent for `torch.add`,\n they will be discarded right after add operation is done. 
We create a simple API in `ShardMetaInfo` class to identify\n this behavior, it is critical for better memory estimation.\n\n Returns:\n Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n \"\"\"\n\n input_op_data = [arg for arg in args if arg.type != OperationDataType.OUTPUT]\n output_op_data = next(filter(lambda arg: arg.type == OperationDataType.OUTPUT, args))\n\n # construct forward args for flop mapping\n fwd_in_args = [opdata.data for opdata in input_op_data]\n fwd_out_args = [output_op_data.data]\n\n # calculate cost\n\n # calculate compute cost\n # NOTE: we set bwd_compute_cost two times of fwd_compute_cost in this case\n fwd_compute_cost = flop_mapping[torch.ops.aten.add.Tensor](fwd_in_args, fwd_out_args)\n bwd_compute_cost = fwd_compute_cost * 2\n compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n\n # calculate memory cost\n param_mem_cost = activation_size([arg.data for arg in input_op_data if arg.type == OperationDataType.PARAM])\n fwd_mem_cost = MemoryCost(\n activation=activation_size(output_op_data.data),\n parameter=param_mem_cost,\n )\n bwd_mem_cost = MemoryCost(\n activation=activation_size(fwd_in_args),\n parameter=param_mem_cost,\n )\n\n # total cost\n total_mem_cost = MemoryCost(\n activation=fwd_mem_cost.activation + bwd_mem_cost.activation,\n parameter=fwd_mem_cost.parameter + bwd_mem_cost.parameter,\n )\n\n memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)\n\n # store fwd_in, fwd_buffer, fwd_out\n fwd_in = []\n fwd_buffer = []\n fwd_out = [torch.zeros_like(output_op_data.data, device=\"meta\")]\n\n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n", "path": "colossalai/auto_parallel/meta_profiler/meta_registry/binary_elementwise_ops.py"}]} | 1,657 | 191 |
gh_patches_debug_20586 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sorts/random_normal_distribution_quicksort.py has no tests
### Repository commit
3
### Python version (python --version)
Python 3.11.5
### Dependencies version (pip freeze)
Numpy
### Expected behavior
Tests.
### Actual behavior
No tests.
</issue>
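For illustration, a doctest is one minimal way to cover this module; the sample values below are the editor's own and exercise the `quick_sort_random` helper shown in the listing that follows. Note that the accepted patch at the end of this record resolves the report by deleting the module rather than adding tests.

```python
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """Sort ``a[left:right]`` in place around a randomly chosen pivot.

    >>> data = [3, 1, 4, 1, 5, 9, 2, 6]
    >>> quick_sort_random(data, 0, len(data))
    >>> data
    [1, 1, 2, 3, 4, 5, 6, 9]
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)
        quick_sort_random(a, pivot_index + 1, right)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```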
<code>
[start of sorts/random_pivot_quick_sort.py]
1 """
2 Picks the random index as the pivot
3 """
4 import random
5
6
7 def partition(a, left_index, right_index):
8 pivot = a[left_index]
9 i = left_index + 1
10 for j in range(left_index + 1, right_index):
11 if a[j] < pivot:
12 a[j], a[i] = a[i], a[j]
13 i += 1
14 a[left_index], a[i - 1] = a[i - 1], a[left_index]
15 return i - 1
16
17
18 def quick_sort_random(a, left, right):
19 if left < right:
20 pivot = random.randint(left, right - 1)
21 a[pivot], a[left] = (
22 a[left],
23 a[pivot],
24 ) # switches the pivot with the left most bound
25 pivot_index = partition(a, left, right)
26 quick_sort_random(
27 a, left, pivot_index
28 ) # recursive quicksort to the left of the pivot point
29 quick_sort_random(
30 a, pivot_index + 1, right
31 ) # recursive quicksort to the right of the pivot point
32
33
34 def main():
35 user_input = input("Enter numbers separated by a comma:\n").strip()
36 arr = [int(item) for item in user_input.split(",")]
37
38 quick_sort_random(arr, 0, len(arr))
39
40 print(arr)
41
42
43 if __name__ == "__main__":
44 main()
45
[end of sorts/random_pivot_quick_sort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sorts/random_pivot_quick_sort.py b/sorts/random_pivot_quick_sort.py
deleted file mode 100644
--- a/sorts/random_pivot_quick_sort.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Picks the random index as the pivot
-"""
-import random
-
-
-def partition(a, left_index, right_index):
- pivot = a[left_index]
- i = left_index + 1
- for j in range(left_index + 1, right_index):
- if a[j] < pivot:
- a[j], a[i] = a[i], a[j]
- i += 1
- a[left_index], a[i - 1] = a[i - 1], a[left_index]
- return i - 1
-
-
-def quick_sort_random(a, left, right):
- if left < right:
- pivot = random.randint(left, right - 1)
- a[pivot], a[left] = (
- a[left],
- a[pivot],
- ) # switches the pivot with the left most bound
- pivot_index = partition(a, left, right)
- quick_sort_random(
- a, left, pivot_index
- ) # recursive quicksort to the left of the pivot point
- quick_sort_random(
- a, pivot_index + 1, right
- ) # recursive quicksort to the right of the pivot point
-
-
-def main():
- user_input = input("Enter numbers separated by a comma:\n").strip()
- arr = [int(item) for item in user_input.split(",")]
-
- quick_sort_random(arr, 0, len(arr))
-
- print(arr)
-
-
-if __name__ == "__main__":
- main()
| {"golden_diff": "diff --git a/sorts/random_pivot_quick_sort.py b/sorts/random_pivot_quick_sort.py\ndeleted file mode 100644\n--- a/sorts/random_pivot_quick_sort.py\n+++ /dev/null\n@@ -1,44 +0,0 @@\n-\"\"\"\r\n-Picks the random index as the pivot\r\n-\"\"\"\r\n-import random\r\n-\r\n-\r\n-def partition(a, left_index, right_index):\r\n- pivot = a[left_index]\r\n- i = left_index + 1\r\n- for j in range(left_index + 1, right_index):\r\n- if a[j] < pivot:\r\n- a[j], a[i] = a[i], a[j]\r\n- i += 1\r\n- a[left_index], a[i - 1] = a[i - 1], a[left_index]\r\n- return i - 1\r\n-\r\n-\r\n-def quick_sort_random(a, left, right):\r\n- if left < right:\r\n- pivot = random.randint(left, right - 1)\r\n- a[pivot], a[left] = (\r\n- a[left],\r\n- a[pivot],\r\n- ) # switches the pivot with the left most bound\r\n- pivot_index = partition(a, left, right)\r\n- quick_sort_random(\r\n- a, left, pivot_index\r\n- ) # recursive quicksort to the left of the pivot point\r\n- quick_sort_random(\r\n- a, pivot_index + 1, right\r\n- ) # recursive quicksort to the right of the pivot point\r\n-\r\n-\r\n-def main():\r\n- user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\r\n- arr = [int(item) for item in user_input.split(\",\")]\r\n-\r\n- quick_sort_random(arr, 0, len(arr))\r\n-\r\n- print(arr)\r\n-\r\n-\r\n-if __name__ == \"__main__\":\r\n- main()\n", "issue": "sorts/random_normal_distribution_quicksort.py has no tests\n### Repository commit\n\n3\n\n### Python version (python --version)\n\nPython 3.11.5\n\n### Dependencies version (pip freeze)\n\nNumpy\n\n### Expected behavior\n\nTests.\n\n### Actual behavior\n\nNo tests.\n", "before_files": [{"content": "\"\"\"\r\nPicks the random index as the pivot\r\n\"\"\"\r\nimport random\r\n\r\n\r\ndef partition(a, left_index, right_index):\r\n pivot = a[left_index]\r\n i = left_index + 1\r\n for j in range(left_index + 1, right_index):\r\n if a[j] < pivot:\r\n a[j], a[i] = a[i], a[j]\r\n i += 1\r\n a[left_index], a[i - 1] = a[i - 1], a[left_index]\r\n return i - 1\r\n\r\n\r\ndef quick_sort_random(a, left, right):\r\n if left < right:\r\n pivot = random.randint(left, right - 1)\r\n a[pivot], a[left] = (\r\n a[left],\r\n a[pivot],\r\n ) # switches the pivot with the left most bound\r\n pivot_index = partition(a, left, right)\r\n quick_sort_random(\r\n a, left, pivot_index\r\n ) # recursive quicksort to the left of the pivot point\r\n quick_sort_random(\r\n a, pivot_index + 1, right\r\n ) # recursive quicksort to the right of the pivot point\r\n\r\n\r\ndef main():\r\n user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\r\n arr = [int(item) for item in user_input.split(\",\")]\r\n\r\n quick_sort_random(arr, 0, len(arr))\r\n\r\n print(arr)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "path": "sorts/random_pivot_quick_sort.py"}]} | 993 | 403 |
gh_patches_debug_22354 | rasdani/github-patches | git_diff | sublimelsp__LSP-491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LSP needs a LspJumpBackCommand
When executing LSP goto definition,
and then executing the built-in `jump_back` command,
the cursor is not returned to its previous position, as I would expect.
To fix this, we can introduce a `LspJumpBackCommand`.
We can do the same thing as TernForSublime [did](https://github.com/ternjs/tern_for_sublime/blob/91a27a39b1b0a33a9043aa685e1ee48c64a58274/tern.py#L564).
</issue>
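For illustration, the sketch below follows the TernForSublime approach referenced above: push the current selection onto Sublime Text's jump history before opening the target, so the built-in `jump_back` command can return there. It shows only the changed `handle_response` method (the surrounding class and helpers are as in the listing below) and assumes the `Default.history_list` module bundled with Sublime Text; the accepted diff appears at the end of this record.

```python
import sublime
from Default.history_list import get_jump_history_for_view  # ships with Sublime Text


def handle_response(self, response, position):
    window = sublime.active_window()
    if response:
        # remember where goto-definition was invoked from so that the
        # built-in jump_back command can return the cursor there
        get_jump_history_for_view(self.view).push_selection(self.view)
        location = response if isinstance(response, dict) else response[0]
        file_path = uri_to_filename(location.get("uri"))
        start = Point.from_lsp(location['range']['start'])
        file_location = "{}:{}:{}".format(file_path, start.row + 1, start.col + 1)
        window.open_file(file_location, sublime.ENCODED_POSITION)
    else:
        window.run_command("goto_definition")
```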
<code>
[start of plugin/definition.py]
1 import sublime
2
3 from .core.registry import client_for_view, LspTextCommand
4 from .core.protocol import Request, Point
5 from .core.documents import get_document_position, get_position, is_at_word
6 from .core.url import uri_to_filename
7 from .core.logging import debug
8 try:
9 from typing import List, Dict, Optional, Any
10 assert List and Dict and Optional and Any
11 except ImportError:
12 pass
13
14
15 class LspSymbolDefinitionCommand(LspTextCommand):
16 def __init__(self, view):
17 super().__init__(view)
18
19 def is_enabled(self, event=None):
20 if self.has_client_with_capability('definitionProvider'):
21 return is_at_word(self.view, event)
22 return False
23
24 def run(self, edit, event=None) -> None:
25 client = client_for_view(self.view)
26 if client:
27 pos = get_position(self.view, event)
28 document_position = get_document_position(self.view, pos)
29 if document_position:
30 request = Request.definition(document_position)
31 client.send_request(
32 request, lambda response: self.handle_response(response, pos))
33
34 def handle_response(self, response: 'Optional[Any]', position) -> None:
35 window = sublime.active_window()
36 if response:
37 location = response if isinstance(response, dict) else response[0]
38 file_path = uri_to_filename(location.get("uri"))
39 start = Point.from_lsp(location['range']['start'])
40 file_location = "{}:{}:{}".format(file_path, start.row + 1, start.col + 1)
41 debug("opening location", location)
42 window.open_file(file_location, sublime.ENCODED_POSITION)
43 # TODO: can add region here.
44 else:
45 window.run_command("goto_definition")
46
47 def want_event(self):
48 return True
49
[end of plugin/definition.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/definition.py b/plugin/definition.py
--- a/plugin/definition.py
+++ b/plugin/definition.py
@@ -5,6 +5,7 @@
from .core.documents import get_document_position, get_position, is_at_word
from .core.url import uri_to_filename
from .core.logging import debug
+from Default.history_list import get_jump_history_for_view # type: ignore
try:
from typing import List, Dict, Optional, Any
assert List and Dict and Optional and Any
@@ -34,6 +35,9 @@
def handle_response(self, response: 'Optional[Any]', position) -> None:
window = sublime.active_window()
if response:
+ # save to jump back history
+ get_jump_history_for_view(self.view).push_selection(self.view)
+
location = response if isinstance(response, dict) else response[0]
file_path = uri_to_filename(location.get("uri"))
start = Point.from_lsp(location['range']['start'])
| {"golden_diff": "diff --git a/plugin/definition.py b/plugin/definition.py\n--- a/plugin/definition.py\n+++ b/plugin/definition.py\n@@ -5,6 +5,7 @@\n from .core.documents import get_document_position, get_position, is_at_word\n from .core.url import uri_to_filename\n from .core.logging import debug\n+from Default.history_list import get_jump_history_for_view # type: ignore\n try:\n from typing import List, Dict, Optional, Any\n assert List and Dict and Optional and Any\n@@ -34,6 +35,9 @@\n def handle_response(self, response: 'Optional[Any]', position) -> None:\n window = sublime.active_window()\n if response:\n+ # save to jump back history\n+ get_jump_history_for_view(self.view).push_selection(self.view)\n+\n location = response if isinstance(response, dict) else response[0]\n file_path = uri_to_filename(location.get(\"uri\"))\n start = Point.from_lsp(location['range']['start'])\n", "issue": "LSP needs a LspJumpBackCommand\nWhen executing LSP goto definition, \r\nand then executing the built in `jump_back` command, \r\nthe cursor won't be placed in the previous place, as I expect.\r\n\r\nTo fix this, we can introduce a `LspJumpBackCommand`.\r\nWe can do the same thing as TernForSublime [did](https://github.com/ternjs/tern_for_sublime/blob/91a27a39b1b0a33a9043aa685e1ee48c64a58274/tern.py#L564). \n", "before_files": [{"content": "import sublime\n\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.protocol import Request, Point\nfrom .core.documents import get_document_position, get_position, is_at_word\nfrom .core.url import uri_to_filename\nfrom .core.logging import debug\ntry:\n from typing import List, Dict, Optional, Any\n assert List and Dict and Optional and Any\nexcept ImportError:\n pass\n\n\nclass LspSymbolDefinitionCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n if self.has_client_with_capability('definitionProvider'):\n return is_at_word(self.view, event)\n return False\n\n def run(self, edit, event=None) -> None:\n client = client_for_view(self.view)\n if client:\n pos = get_position(self.view, event)\n document_position = get_document_position(self.view, pos)\n if document_position:\n request = Request.definition(document_position)\n client.send_request(\n request, lambda response: self.handle_response(response, pos))\n\n def handle_response(self, response: 'Optional[Any]', position) -> None:\n window = sublime.active_window()\n if response:\n location = response if isinstance(response, dict) else response[0]\n file_path = uri_to_filename(location.get(\"uri\"))\n start = Point.from_lsp(location['range']['start'])\n file_location = \"{}:{}:{}\".format(file_path, start.row + 1, start.col + 1)\n debug(\"opening location\", location)\n window.open_file(file_location, sublime.ENCODED_POSITION)\n # TODO: can add region here.\n else:\n window.run_command(\"goto_definition\")\n\n def want_event(self):\n return True\n", "path": "plugin/definition.py"}]} | 1,140 | 217 |
gh_patches_debug_5516 | rasdani/github-patches | git_diff | kivy__kivy-4047 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pygame text provider does not render text opacity properly
Text renders as if opacity is set to 100% regardless of what's set in a Label's color attribute.
SDL2 works just fine.
Tested with master pulled today on Linux.
</issue>
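For reference, a sketch of `_render_text` with the one-line change from the accepted patch applied (the full diff appears at the end of this record); everything else is unchanged from the listing below.

```python
def _render_text(self, text, x, y):
    font = self._get_font()
    color = [c * 255 for c in self.options['color']]
    color[0], color[2] = color[2], color[0]
    try:
        text = font.render(text, True, color)
        # the one-line fix: set the rendered surface's colorkey to the
        # text color before the additive blit
        text.set_colorkey(color)
        self._pygame_surface.blit(text, (x, y), None,
                                  pygame.BLEND_RGBA_ADD)
    except pygame.error:
        pass
```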
<code>
[start of kivy/core/text/text_pygame.py]
1 '''
2 Text Pygame: Draw text with pygame
3 '''
4
5 __all__ = ('LabelPygame', )
6
7 from kivy.compat import PY2
8 from kivy.core.text import LabelBase
9 from kivy.core.image import ImageData
10
11 try:
12 import pygame
13 except:
14 raise
15
16 pygame_cache = {}
17 pygame_font_handles = {}
18 pygame_cache_order = []
19
20 # init pygame font
21 try:
22 pygame.ftfont.init()
23 except:
24 pygame.font.init()
25
26
27 class LabelPygame(LabelBase):
28
29 def _get_font_id(self):
30 if PY2:
31 try:
32 return '|'.join([unicode(self.options[x]) for x in
33 ('font_size', 'font_name_r',
34 'bold', 'italic')])
35 except UnicodeDecodeError:
36 pass
37 return '|'.join([str(self.options[x]) for x in
38 ('font_size', 'font_name_r', 'bold', 'italic')])
39
40 def _get_font(self):
41 fontid = self._get_font_id()
42 if fontid not in pygame_cache:
43 # try first the file if it's a filename
44 font_handle = fontobject = None
45 fontname = self.options['font_name_r']
46 ext = fontname.rsplit('.', 1)
47 if len(ext) == 2:
48 # try to open the font if it has an extension
49 font_handle = open(fontname, 'rb')
50 fontobject = pygame.font.Font(font_handle,
51 int(self.options['font_size']))
52
53 # fallback to search a system font
54 if fontobject is None:
55 # try to search the font
56 font = pygame.font.match_font(
57 self.options['font_name_r'].replace(' ', ''),
58 bold=self.options['bold'],
59 italic=self.options['italic'])
60
61 # fontobject
62 fontobject = pygame.font.Font(font,
63 int(self.options['font_size']))
64 pygame_cache[fontid] = fontobject
65 pygame_font_handles[fontid] = font_handle
66 pygame_cache_order.append(fontid)
67
68 # to prevent too much file open, limit the number of opened fonts to 64
69 while len(pygame_cache_order) > 64:
70 popid = pygame_cache_order.pop(0)
71 del pygame_cache[popid]
72 font_handle = pygame_font_handles.pop(popid)
73 if font_handle is not None:
74 font_handle.close()
75
76 return pygame_cache[fontid]
77
78 def get_ascent(self):
79 return self._get_font().get_ascent()
80
81 def get_descent(self):
82 return self._get_font().get_descent()
83
84 def get_extents(self, text):
85 return self._get_font().size(text)
86
87 def get_cached_extents(self):
88 return self._get_font().size
89
90 def _render_begin(self):
91 self._pygame_surface = pygame.Surface(self._size, pygame.SRCALPHA, 32)
92 self._pygame_surface.fill((0, 0, 0, 0))
93
94 def _render_text(self, text, x, y):
95 font = self._get_font()
96 color = [c * 255 for c in self.options['color']]
97 color[0], color[2] = color[2], color[0]
98 try:
99 text = font.render(text, True, color)
100 self._pygame_surface.blit(text, (x, y), None,
101 pygame.BLEND_RGBA_ADD)
102 except pygame.error:
103 pass
104
105 def _render_end(self):
106 w, h = self._size
107 data = ImageData(w, h,
108 'rgba', self._pygame_surface.get_buffer().raw)
109
110 del self._pygame_surface
111
112 return data
113
[end of kivy/core/text/text_pygame.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/text/text_pygame.py b/kivy/core/text/text_pygame.py
--- a/kivy/core/text/text_pygame.py
+++ b/kivy/core/text/text_pygame.py
@@ -97,6 +97,7 @@
color[0], color[2] = color[2], color[0]
try:
text = font.render(text, True, color)
+ text.set_colorkey(color)
self._pygame_surface.blit(text, (x, y), None,
pygame.BLEND_RGBA_ADD)
except pygame.error:
| {"golden_diff": "diff --git a/kivy/core/text/text_pygame.py b/kivy/core/text/text_pygame.py\n--- a/kivy/core/text/text_pygame.py\n+++ b/kivy/core/text/text_pygame.py\n@@ -97,6 +97,7 @@\n color[0], color[2] = color[2], color[0]\n try:\n text = font.render(text, True, color)\n+ text.set_colorkey(color)\n self._pygame_surface.blit(text, (x, y), None,\n pygame.BLEND_RGBA_ADD)\n except pygame.error:\n", "issue": "Pygame text provider does not render text opacity properly\nText renders as if opacity is set to 100% regardless of what's set in a Label's color attribute.\n\nSDL2 works just fine.\n\nTested with master pulled today on Linux.\n\nPygame text provider does not render text opacity properly\nText renders as if opacity is set to 100% regardless of what's set in a Label's color attribute.\n\nSDL2 works just fine.\n\nTested with master pulled today on Linux.\n\n", "before_files": [{"content": "'''\nText Pygame: Draw text with pygame\n'''\n\n__all__ = ('LabelPygame', )\n\nfrom kivy.compat import PY2\nfrom kivy.core.text import LabelBase\nfrom kivy.core.image import ImageData\n\ntry:\n import pygame\nexcept:\n raise\n\npygame_cache = {}\npygame_font_handles = {}\npygame_cache_order = []\n\n# init pygame font\ntry:\n pygame.ftfont.init()\nexcept:\n pygame.font.init()\n\n\nclass LabelPygame(LabelBase):\n\n def _get_font_id(self):\n if PY2:\n try:\n return '|'.join([unicode(self.options[x]) for x in\n ('font_size', 'font_name_r',\n 'bold', 'italic')])\n except UnicodeDecodeError:\n pass\n return '|'.join([str(self.options[x]) for x in\n ('font_size', 'font_name_r', 'bold', 'italic')])\n\n def _get_font(self):\n fontid = self._get_font_id()\n if fontid not in pygame_cache:\n # try first the file if it's a filename\n font_handle = fontobject = None\n fontname = self.options['font_name_r']\n ext = fontname.rsplit('.', 1)\n if len(ext) == 2:\n # try to open the font if it has an extension\n font_handle = open(fontname, 'rb')\n fontobject = pygame.font.Font(font_handle,\n int(self.options['font_size']))\n\n # fallback to search a system font\n if fontobject is None:\n # try to search the font\n font = pygame.font.match_font(\n self.options['font_name_r'].replace(' ', ''),\n bold=self.options['bold'],\n italic=self.options['italic'])\n\n # fontobject\n fontobject = pygame.font.Font(font,\n int(self.options['font_size']))\n pygame_cache[fontid] = fontobject\n pygame_font_handles[fontid] = font_handle\n pygame_cache_order.append(fontid)\n\n # to prevent too much file open, limit the number of opened fonts to 64\n while len(pygame_cache_order) > 64:\n popid = pygame_cache_order.pop(0)\n del pygame_cache[popid]\n font_handle = pygame_font_handles.pop(popid)\n if font_handle is not None:\n font_handle.close()\n\n return pygame_cache[fontid]\n\n def get_ascent(self):\n return self._get_font().get_ascent()\n\n def get_descent(self):\n return self._get_font().get_descent()\n\n def get_extents(self, text):\n return self._get_font().size(text)\n\n def get_cached_extents(self):\n return self._get_font().size\n\n def _render_begin(self):\n self._pygame_surface = pygame.Surface(self._size, pygame.SRCALPHA, 32)\n self._pygame_surface.fill((0, 0, 0, 0))\n\n def _render_text(self, text, x, y):\n font = self._get_font()\n color = [c * 255 for c in self.options['color']]\n color[0], color[2] = color[2], color[0]\n try:\n text = font.render(text, True, color)\n self._pygame_surface.blit(text, (x, y), None,\n pygame.BLEND_RGBA_ADD)\n except pygame.error:\n pass\n\n def _render_end(self):\n w, h = self._size\n data = 
ImageData(w, h,\n 'rgba', self._pygame_surface.get_buffer().raw)\n\n del self._pygame_surface\n\n return data\n", "path": "kivy/core/text/text_pygame.py"}]} | 1,669 | 125 |
gh_patches_debug_13813 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-9500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for kms-key in timestream service
### Describe the feature
Add support for kms-key in timestream service
### Extra information or context
_No response_
</issue>
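For illustration, the shape of such a filter, mirroring what the accepted patch at the end of this record adds to `c7n/resources/timestream.py`: a KMS-related filter registered on the Timestream database resource, joined through the `KmsKeyId` attribute that the Timestream API returns.

```python
from c7n.filters.kms import KmsRelatedFilter


@TimestreamDatabase.filter_registry.register('kms-key')
class KmsFilter(KmsRelatedFilter):
    # ties each timestream-database resource to its KMS key via the
    # KmsKeyId field on the ListDatabases response
    RelatedIdsExpression = 'KmsKeyId'
```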
<code>
[start of c7n/resources/timestream.py]
1 from c7n.manager import resources
2 from c7n.actions import Action
3 from c7n.query import DescribeSource, QueryResourceManager, TypeInfo
4 from c7n.utils import local_session, type_schema
5 from c7n.tags import (
6 TagDelayedAction,
7 TagActionFilter,
8 Tag as TagAction,
9 RemoveTag as RemoveTagAction
10 )
11
12
13 class DescribeTimestream(DescribeSource):
14 def augment(self, resources):
15 for r in resources:
16 client = local_session(self.manager.session_factory).client('timestream-write')
17 r['Tags'] = client.list_tags_for_resource(ResourceARN=r['Arn'])['Tags']
18 return resources
19
20
21 @resources.register('timestream-database')
22 class TimestreamDatabase(QueryResourceManager):
23 class resource_type(TypeInfo):
24 service = 'timestream-write'
25 arn_type = ''
26 name = 'DatabaseName'
27 id = arn = 'Arn'
28 enum_spec = ('list_databases', 'Databases', {})
29 permission_prefix = 'timestream'
30 permissions = ('timestream:ListDatabases', )
31 permissions_augment = ("timestream:ListTagsForResource",)
32 source_mapping = {
33 'describe': DescribeTimestream,
34 }
35
36
37 @resources.register('timestream-table')
38 class TimestreamTable(QueryResourceManager):
39 class resource_type(TypeInfo):
40 service = 'timestream-write'
41 arn_type = ''
42 name = 'TableName'
43 id = arn = 'Arn'
44 enum_spec = ('list_tables', 'Tables', {})
45 permission_prefix = 'timestream'
46 permissions = ('timestream:ListTables', )
47
48 source_mapping = {
49 'describe': DescribeTimestream,
50 }
51
52
53 @TimestreamDatabase.action_registry.register('tag')
54 @TimestreamTable.action_registry.register('tag')
55 class TimestreamTag(TagAction):
56
57 permissions = ('timestream:TagResource', )
58
59 def process_resource_set(self, client, resource_set, tags):
60 for r in resource_set:
61 client.tag_resource(ResourceARN=r['Arn'], Tags=tags)
62
63
64 @TimestreamDatabase.action_registry.register('remove-tag')
65 @TimestreamTable.action_registry.register('remove-tag')
66 class TimestreamRemoveTag(RemoveTagAction):
67
68 permissions = ('timestream:UntagResource', )
69
70 def process_resource_set(self, client, resource_set, tag_keys):
71 for r in resource_set:
72 client.untag_resource(ResourceARN=r['Arn'], TagKeys=tag_keys)
73
74
75 TimestreamDatabase.action_registry.register('mark-for-op', TagDelayedAction)
76 TimestreamTable.action_registry.register('mark-for-op', TagDelayedAction)
77
78 TimestreamDatabase.filter_registry.register('marked-for-op', TagActionFilter)
79 TimestreamTable.filter_registry.register('marked-for-op', TagActionFilter)
80
81
82 @TimestreamTable.action_registry.register('delete')
83 class TimestreamTableDelete(Action):
84 """
85 Deletes a timestream table
86 """
87
88 schema = type_schema('delete')
89 permissions = ('timestream:DeleteTable', )
90
91 def process(self, resources):
92 client = local_session(self.manager.session_factory).client('timestream-write')
93 for r in resources:
94 try:
95 client.delete_table(
96 DatabaseName=r['DatabaseName'],
97 TableName=r['TableName']
98 )
99 except client.exceptions.ResourceNotFoundException:
100 continue
101
102
103 @TimestreamDatabase.action_registry.register('delete')
104 class TimestreamDatabaseDelete(Action):
105 """
106 Deletes a timestream database
107 """
108
109 schema = type_schema('delete', force={'type': 'boolean', 'default': False})
110 permissions = (
111 'timestream:DeleteDatabase',
112 'timestream:ListTables', 'timestream:DeleteTable', )
113
114 def process(self, resources):
115 client = local_session(self.manager.session_factory).client('timestream-write')
116 for r in resources:
117 try:
118 client.delete_database(
119 DatabaseName=r['DatabaseName'],
120 )
121 except client.exceptions.ResourceNotFoundException:
122 continue
123 except client.exceptions.ValidationException:
124 if not self.data.get('force', False):
125 self.log.error(
126 f'Unable to delete database:{r["DatabaseName"]}, '
127 'tables must be deleted first')
128 continue
129 tables = client.list_tables(DatabaseName=r['DatabaseName'])['Tables']
130 TimestreamTableDelete(
131 data={'type': 'delete'},
132 manager=self.manager,
133 log_dir=self.log_dir
134 ).process(tables)
135 client.delete_database(
136 DatabaseName=r['DatabaseName'],
137 )
138
[end of c7n/resources/timestream.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/resources/timestream.py b/c7n/resources/timestream.py
--- a/c7n/resources/timestream.py
+++ b/c7n/resources/timestream.py
@@ -1,5 +1,6 @@
from c7n.manager import resources
from c7n.actions import Action
+from c7n.filters.kms import KmsRelatedFilter
from c7n.query import DescribeSource, QueryResourceManager, TypeInfo
from c7n.utils import local_session, type_schema
from c7n.tags import (
@@ -135,3 +136,8 @@
client.delete_database(
DatabaseName=r['DatabaseName'],
)
+
+
[email protected]_registry.register('kms-key')
+class KmsFilter(KmsRelatedFilter):
+ RelatedIdsExpression = 'KmsKeyId'
| {"golden_diff": "diff --git a/c7n/resources/timestream.py b/c7n/resources/timestream.py\n--- a/c7n/resources/timestream.py\n+++ b/c7n/resources/timestream.py\n@@ -1,5 +1,6 @@\n from c7n.manager import resources\n from c7n.actions import Action\n+from c7n.filters.kms import KmsRelatedFilter\n from c7n.query import DescribeSource, QueryResourceManager, TypeInfo\n from c7n.utils import local_session, type_schema\n from c7n.tags import (\n@@ -135,3 +136,8 @@\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n+\n+\[email protected]_registry.register('kms-key')\n+class KmsFilter(KmsRelatedFilter):\n+ RelatedIdsExpression = 'KmsKeyId'\n", "issue": "Add support for kms-key in timestream service\n### Describe the feature\n\nAdd support for kms-key in timestream service\n\n### Extra information or context\n\n_No response_\n", "before_files": [{"content": "from c7n.manager import resources\nfrom c7n.actions import Action\nfrom c7n.query import DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\nfrom c7n.tags import (\n TagDelayedAction,\n TagActionFilter,\n Tag as TagAction,\n RemoveTag as RemoveTagAction\n)\n\n\nclass DescribeTimestream(DescribeSource):\n def augment(self, resources):\n for r in resources:\n client = local_session(self.manager.session_factory).client('timestream-write')\n r['Tags'] = client.list_tags_for_resource(ResourceARN=r['Arn'])['Tags']\n return resources\n\n\[email protected]('timestream-database')\nclass TimestreamDatabase(QueryResourceManager):\n class resource_type(TypeInfo):\n service = 'timestream-write'\n arn_type = ''\n name = 'DatabaseName'\n id = arn = 'Arn'\n enum_spec = ('list_databases', 'Databases', {})\n permission_prefix = 'timestream'\n permissions = ('timestream:ListDatabases', )\n permissions_augment = (\"timestream:ListTagsForResource\",)\n source_mapping = {\n 'describe': DescribeTimestream,\n }\n\n\[email protected]('timestream-table')\nclass TimestreamTable(QueryResourceManager):\n class resource_type(TypeInfo):\n service = 'timestream-write'\n arn_type = ''\n name = 'TableName'\n id = arn = 'Arn'\n enum_spec = ('list_tables', 'Tables', {})\n permission_prefix = 'timestream'\n permissions = ('timestream:ListTables', )\n\n source_mapping = {\n 'describe': DescribeTimestream,\n }\n\n\[email protected]_registry.register('tag')\[email protected]_registry.register('tag')\nclass TimestreamTag(TagAction):\n\n permissions = ('timestream:TagResource', )\n\n def process_resource_set(self, client, resource_set, tags):\n for r in resource_set:\n client.tag_resource(ResourceARN=r['Arn'], Tags=tags)\n\n\[email protected]_registry.register('remove-tag')\[email protected]_registry.register('remove-tag')\nclass TimestreamRemoveTag(RemoveTagAction):\n\n permissions = ('timestream:UntagResource', )\n\n def process_resource_set(self, client, resource_set, tag_keys):\n for r in resource_set:\n client.untag_resource(ResourceARN=r['Arn'], TagKeys=tag_keys)\n\n\nTimestreamDatabase.action_registry.register('mark-for-op', TagDelayedAction)\nTimestreamTable.action_registry.register('mark-for-op', TagDelayedAction)\n\nTimestreamDatabase.filter_registry.register('marked-for-op', TagActionFilter)\nTimestreamTable.filter_registry.register('marked-for-op', TagActionFilter)\n\n\[email protected]_registry.register('delete')\nclass TimestreamTableDelete(Action):\n \"\"\"\n Deletes a timestream table\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('timestream:DeleteTable', )\n\n def process(self, resources):\n 
client = local_session(self.manager.session_factory).client('timestream-write')\n for r in resources:\n try:\n client.delete_table(\n DatabaseName=r['DatabaseName'],\n TableName=r['TableName']\n )\n except client.exceptions.ResourceNotFoundException:\n continue\n\n\[email protected]_registry.register('delete')\nclass TimestreamDatabaseDelete(Action):\n \"\"\"\n Deletes a timestream database\n \"\"\"\n\n schema = type_schema('delete', force={'type': 'boolean', 'default': False})\n permissions = (\n 'timestream:DeleteDatabase',\n 'timestream:ListTables', 'timestream:DeleteTable', )\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('timestream-write')\n for r in resources:\n try:\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n except client.exceptions.ResourceNotFoundException:\n continue\n except client.exceptions.ValidationException:\n if not self.data.get('force', False):\n self.log.error(\n f'Unable to delete database:{r[\"DatabaseName\"]}, '\n 'tables must be deleted first')\n continue\n tables = client.list_tables(DatabaseName=r['DatabaseName'])['Tables']\n TimestreamTableDelete(\n data={'type': 'delete'},\n manager=self.manager,\n log_dir=self.log_dir\n ).process(tables)\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n", "path": "c7n/resources/timestream.py"}]} | 1,871 | 184 |
gh_patches_debug_28719 | rasdani/github-patches | git_diff | ansible__ansible-lint-533 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type checking to role metadata in rule 701 MetaMainHasInfoRule
</issue>
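For illustration, the essence of the type checks that the accepted patch (at the end of this record) adds inside `MetaMainHasInfoRule.matchplay`, after the existing presence checks; `galaxy_info`, `data`, and `results` are the locals already used there.

```python
import six

# author and description must be strings, not lists or dicts
for info in ['author', 'description']:
    if not galaxy_info.get(info):
        continue
    if not isinstance(galaxy_info.get(info), six.string_types):
        results.append(({'meta/main.yml': data},
                        '%s should be a string' % info))

# platforms must be a list of dictionaries
platforms = galaxy_info.get('platforms', None)
if platforms is not None and not isinstance(platforms, list):
    results.append(({'meta/main.yml': data},
                    'Platforms should be a list of dictionaries'))
```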
<code>
[start of lib/ansiblelint/rules/MetaMainHasInfoRule.py]
1 # Copyright (c) 2016, Will Thames and contributors
2 # Copyright (c) 2018, Ansible Project
3
4 from ansiblelint import AnsibleLintRule
5
6
7 class MetaMainHasInfoRule(AnsibleLintRule):
8 id = '701'
9 shortdesc = 'meta/main.yml should contain relevant info'
10 info = [
11 'author',
12 'description',
13 'license',
14 'min_ansible_version',
15 'platforms',
16 ]
17 description = (
18 'meta/main.yml should contain: ``{}``'.format(', '.join(info))
19 )
20 severity = 'HIGH'
21 tags = ['metadata']
22 version_added = 'v4.0.0'
23
24 def matchplay(self, file, data):
25 if file['type'] != 'meta':
26 return False
27
28 galaxy_info = data.get('galaxy_info', None)
29 if not galaxy_info:
30 return [({'meta/main.yml': data},
31 "No 'galaxy_info' found")]
32
33 results = []
34 for info in self.info:
35 if not galaxy_info.get(info, None):
36 results.append(({'meta/main.yml': data},
37 'Role info should contain %s' % info))
38
39 platforms = galaxy_info.get('platforms', None)
40 if platforms:
41 for platform in platforms:
42 if not platform.get('name', None):
43 results.append(({'meta/main.yml': data},
44 'Platform should contain name'))
45
46 return results
47
[end of lib/ansiblelint/rules/MetaMainHasInfoRule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansiblelint/rules/MetaMainHasInfoRule.py b/lib/ansiblelint/rules/MetaMainHasInfoRule.py
--- a/lib/ansiblelint/rules/MetaMainHasInfoRule.py
+++ b/lib/ansiblelint/rules/MetaMainHasInfoRule.py
@@ -1,6 +1,8 @@
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018, Ansible Project
+import six
+
from ansiblelint import AnsibleLintRule
@@ -36,11 +38,29 @@
results.append(({'meta/main.yml': data},
'Role info should contain %s' % info))
+ for info in ['author', 'description']:
+ if not galaxy_info.get(info):
+ continue
+ if not isinstance(galaxy_info.get(info), six.string_types):
+ results.append(({'meta/main.yml': data},
+ '%s should be a string' % info))
+
platforms = galaxy_info.get('platforms', None)
- if platforms:
- for platform in platforms:
- if not platform.get('name', None):
- results.append(({'meta/main.yml': data},
- 'Platform should contain name'))
+ if not platforms:
+ return results
+
+ if not isinstance(platforms, list):
+ results.append(({'meta/main.yml': data},
+ 'Platforms should be a list of dictionaries'))
+ return results
+
+ for platform in platforms:
+ if not isinstance(platform, dict):
+ results.append(({'meta/main.yml': data},
+ 'Platforms should be a list of dictionaries'))
+ continue
+ if not platform.get('name', None):
+ results.append(({'meta/main.yml': data},
+ 'Platform should contain name'))
return results
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/MetaMainHasInfoRule.py b/lib/ansiblelint/rules/MetaMainHasInfoRule.py\n--- a/lib/ansiblelint/rules/MetaMainHasInfoRule.py\n+++ b/lib/ansiblelint/rules/MetaMainHasInfoRule.py\n@@ -1,6 +1,8 @@\n # Copyright (c) 2016, Will Thames and contributors\n # Copyright (c) 2018, Ansible Project\n \n+import six\n+\n from ansiblelint import AnsibleLintRule\n \n \n@@ -36,11 +38,29 @@\n results.append(({'meta/main.yml': data},\n 'Role info should contain %s' % info))\n \n+ for info in ['author', 'description']:\n+ if not galaxy_info.get(info):\n+ continue\n+ if not isinstance(galaxy_info.get(info), six.string_types):\n+ results.append(({'meta/main.yml': data},\n+ '%s should be a string' % info))\n+\n platforms = galaxy_info.get('platforms', None)\n- if platforms:\n- for platform in platforms:\n- if not platform.get('name', None):\n- results.append(({'meta/main.yml': data},\n- 'Platform should contain name'))\n+ if not platforms:\n+ return results\n+\n+ if not isinstance(platforms, list):\n+ results.append(({'meta/main.yml': data},\n+ 'Platforms should be a list of dictionaries'))\n+ return results\n+\n+ for platform in platforms:\n+ if not isinstance(platform, dict):\n+ results.append(({'meta/main.yml': data},\n+ 'Platforms should be a list of dictionaries'))\n+ continue\n+ if not platform.get('name', None):\n+ results.append(({'meta/main.yml': data},\n+ 'Platform should contain name'))\n \n return results\n", "issue": "Add type checking to role metadata in rule 701 MetaMainHasInfoRule\n\n", "before_files": [{"content": "# Copyright (c) 2016, Will Thames and contributors\n# Copyright (c) 2018, Ansible Project\n\nfrom ansiblelint import AnsibleLintRule\n\n\nclass MetaMainHasInfoRule(AnsibleLintRule):\n id = '701'\n shortdesc = 'meta/main.yml should contain relevant info'\n info = [\n 'author',\n 'description',\n 'license',\n 'min_ansible_version',\n 'platforms',\n ]\n description = (\n 'meta/main.yml should contain: ``{}``'.format(', '.join(info))\n )\n severity = 'HIGH'\n tags = ['metadata']\n version_added = 'v4.0.0'\n\n def matchplay(self, file, data):\n if file['type'] != 'meta':\n return False\n\n galaxy_info = data.get('galaxy_info', None)\n if not galaxy_info:\n return [({'meta/main.yml': data},\n \"No 'galaxy_info' found\")]\n\n results = []\n for info in self.info:\n if not galaxy_info.get(info, None):\n results.append(({'meta/main.yml': data},\n 'Role info should contain %s' % info))\n\n platforms = galaxy_info.get('platforms', None)\n if platforms:\n for platform in platforms:\n if not platform.get('name', None):\n results.append(({'meta/main.yml': data},\n 'Platform should contain name'))\n\n return results\n", "path": "lib/ansiblelint/rules/MetaMainHasInfoRule.py"}]} | 974 | 405 |
gh_patches_debug_3237 | rasdani/github-patches | git_diff | DistrictDataLabs__yellowbrick-545 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Joint Plot Viz has messed up (overlapping labels on) axes
**Describe the bug**
If you look at the x and y axis on http://www.scikit-yb.org/en/latest/api/features/scatter.html#joint-plot-visualization you will see that the labels are overlapping.
**To Reproduce**
Create a joint plot as shown in the docs
**Expected behavior**
Labels on axes should be clear.
**Desktop (please complete the following information):**
- OS: macOS
- Python Version 3.6.4
- Yellowbrick Version 0.8
</issue>
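For illustration, the documentation helper as it looks after the accepted fix (full diff at the end of this record): it stops creating a single Matplotlib `Axes` up front and lets `JointPlotVisualizer` manage its own joint and marginal axes.

```python
def jointplot(X, y, outpath, **kwargs):
    # let the visualizer create and manage its own axes; the pre-made
    # single Axes is what the accepted patch removes
    visualizer = JointPlotVisualizer(**kwargs)
    visualizer.fit(X, y)
    visualizer.transform(X)

    # Save to disk
    visualizer.poof(outpath=outpath)
    plt.savefig(outpath)
```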
<code>
[start of docs/api/features/jointplot.py]
1 import pandas as pd
2 import matplotlib.pyplot as plt
3
4 from yellowbrick.features import JointPlotVisualizer
5
6
7 def jointplot(X, y, outpath, **kwargs):
8 # Create a new figure and axes
9 fig = plt.figure()
10 ax = fig.add_subplot(111)
11
12 # Create the visualizer
13 visualizer = JointPlotVisualizer(ax=ax, **kwargs)
14 visualizer.fit(X, y)
15 visualizer.transform(X)
16
17 # Save to disk
18 visualizer.poof(outpath=outpath)
19 plt.savefig(outpath)
20
21
22 if __name__ == '__main__':
23
24 # Load the regression data set
25 data = pd.read_csv("../../../examples/data/concrete/concrete.csv")
26
27 feature = 'cement'
28 target = 'strength'
29
30 # Get the X and y data from the DataFrame
31 Xs = data[feature]
32 ys = data[target]
33
34 # Draw the joint plot visualizer
35 jointplot(Xs, ys, "images/jointplot.png", feature=feature, target=target)
36
37 # Draw the joint plot visualizer with hexadecimal scatter plot
38 jointplot(Xs, ys, "images/jointplot_hex.png", feature=feature, target=target, joint_plot='hex')
39
[end of docs/api/features/jointplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/api/features/jointplot.py b/docs/api/features/jointplot.py
--- a/docs/api/features/jointplot.py
+++ b/docs/api/features/jointplot.py
@@ -5,12 +5,8 @@
def jointplot(X, y, outpath, **kwargs):
- # Create a new figure and axes
- fig = plt.figure()
- ax = fig.add_subplot(111)
-
# Create the visualizer
- visualizer = JointPlotVisualizer(ax=ax, **kwargs)
+ visualizer = JointPlotVisualizer(**kwargs)
visualizer.fit(X, y)
visualizer.transform(X)
| {"golden_diff": "diff --git a/docs/api/features/jointplot.py b/docs/api/features/jointplot.py\n--- a/docs/api/features/jointplot.py\n+++ b/docs/api/features/jointplot.py\n@@ -5,12 +5,8 @@\n \n \n def jointplot(X, y, outpath, **kwargs):\n- # Create a new figure and axes\n- fig = plt.figure()\n- ax = fig.add_subplot(111)\n-\n # Create the visualizer\n- visualizer = JointPlotVisualizer(ax=ax, **kwargs)\n+ visualizer = JointPlotVisualizer(**kwargs)\n visualizer.fit(X, y)\n visualizer.transform(X)\n", "issue": "Joint Plot Viz has messed up (overlapping labels on) axes\n**Describe the bug**\r\n\r\nIf you look at the x and y axis on http://www.scikit-yb.org/en/latest/api/features/scatter.html#joint-plot-visualization you will see that the labels are overlapping.\r\n\r\n**To Reproduce**\r\n\r\nCreate a joint plot as shown in the docs\r\n\r\n\r\n\r\n**Expected behavior**\r\n\r\nLabels on axes should be clear.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS\r\n - Python Version 3.6.4\r\n - Yellowbrick Version 0.8\r\n\r\n\nJoint Plot Viz has messed up (overlapping labels on) axes\n**Describe the bug**\r\n\r\nIf you look at the x and y axis on http://www.scikit-yb.org/en/latest/api/features/scatter.html#joint-plot-visualization you will see that the labels are overlapping.\r\n\r\n**To Reproduce**\r\n\r\nCreate a joint plot as shown in the docs\r\n\r\n\r\n\r\n**Expected behavior**\r\n\r\nLabels on axes should be clear.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS\r\n - Python Version 3.6.4\r\n - Yellowbrick Version 0.8\r\n\r\n\n", "before_files": [{"content": "import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom yellowbrick.features import JointPlotVisualizer\n\n\ndef jointplot(X, y, outpath, **kwargs):\n # Create a new figure and axes\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Create the visualizer\n visualizer = JointPlotVisualizer(ax=ax, **kwargs)\n visualizer.fit(X, y)\n visualizer.transform(X)\n\n # Save to disk\n visualizer.poof(outpath=outpath)\n plt.savefig(outpath)\n\n\nif __name__ == '__main__':\n\n # Load the regression data set\n data = pd.read_csv(\"../../../examples/data/concrete/concrete.csv\")\n\n feature = 'cement'\n target = 'strength'\n\n # Get the X and y data from the DataFrame\n Xs = data[feature]\n ys = data[target]\n\n # Draw the joint plot visualizer\n jointplot(Xs, ys, \"images/jointplot.png\", feature=feature, target=target)\n\n # Draw the joint plot visualizer with hexadecimal scatter plot\n jointplot(Xs, ys, \"images/jointplot_hex.png\", feature=feature, target=target, joint_plot='hex')\n", "path": "docs/api/features/jointplot.py"}]} | 1,132 | 144 |
gh_patches_debug_26151 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-2771 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow update of protected branches
In GitLab 15.6, GitLab finally added API support to update protected branch settings, so ProjectProtectedBranch should be updated accordingly
https://gitlab.com/gitlab-org/gitlab/-/issues/20229/
</issue>
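For context, a minimal usage sketch of what updating a protected branch could look like once the manager supports it (the server URL, token, and project path are placeholders, and `allow_force_push` is one of the attributes documented by the GitLab protected-branches API):

```
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="glpat-placeholder")
project = gl.projects.get("group/project")

# Fetch the existing protected branch, change one setting, persist it.
branch = project.protectedbranches.get("main")
branch.allow_force_push = True
branch.save()  # assumes update support is added, issuing a PATCH request
```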
<code>
[start of gitlab/v4/objects/branches.py]
1 from typing import Any, cast, Union
2
3 from gitlab.base import RESTManager, RESTObject
4 from gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin
5 from gitlab.types import RequiredOptional
6
7 __all__ = [
8 "ProjectBranch",
9 "ProjectBranchManager",
10 "ProjectProtectedBranch",
11 "ProjectProtectedBranchManager",
12 ]
13
14
15 class ProjectBranch(ObjectDeleteMixin, RESTObject):
16 _id_attr = "name"
17
18
19 class ProjectBranchManager(NoUpdateMixin, RESTManager):
20 _path = "/projects/{project_id}/repository/branches"
21 _obj_cls = ProjectBranch
22 _from_parent_attrs = {"project_id": "id"}
23 _create_attrs = RequiredOptional(required=("branch", "ref"))
24
25 def get(
26 self, id: Union[str, int], lazy: bool = False, **kwargs: Any
27 ) -> ProjectBranch:
28 return cast(ProjectBranch, super().get(id=id, lazy=lazy, **kwargs))
29
30
31 class ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):
32 _id_attr = "name"
33
34
35 class ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):
36 _path = "/projects/{project_id}/protected_branches"
37 _obj_cls = ProjectProtectedBranch
38 _from_parent_attrs = {"project_id": "id"}
39 _create_attrs = RequiredOptional(
40 required=("name",),
41 optional=(
42 "push_access_level",
43 "merge_access_level",
44 "unprotect_access_level",
45 "allow_force_push",
46 "allowed_to_push",
47 "allowed_to_merge",
48 "allowed_to_unprotect",
49 "code_owner_approval_required",
50 ),
51 )
52
53 def get(
54 self, id: Union[str, int], lazy: bool = False, **kwargs: Any
55 ) -> ProjectProtectedBranch:
56 return cast(ProjectProtectedBranch, super().get(id=id, lazy=lazy, **kwargs))
57
[end of gitlab/v4/objects/branches.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gitlab/v4/objects/branches.py b/gitlab/v4/objects/branches.py
--- a/gitlab/v4/objects/branches.py
+++ b/gitlab/v4/objects/branches.py
@@ -1,7 +1,13 @@
from typing import Any, cast, Union
from gitlab.base import RESTManager, RESTObject
-from gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin
+from gitlab.mixins import (
+ CRUDMixin,
+ NoUpdateMixin,
+ ObjectDeleteMixin,
+ SaveMixin,
+ UpdateMethod,
+)
from gitlab.types import RequiredOptional
__all__ = [
@@ -28,11 +34,11 @@
return cast(ProjectBranch, super().get(id=id, lazy=lazy, **kwargs))
-class ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):
+class ProjectProtectedBranch(SaveMixin, ObjectDeleteMixin, RESTObject):
_id_attr = "name"
-class ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):
+class ProjectProtectedBranchManager(CRUDMixin, RESTManager):
_path = "/projects/{project_id}/protected_branches"
_obj_cls = ProjectProtectedBranch
_from_parent_attrs = {"project_id": "id"}
@@ -49,6 +55,7 @@
"code_owner_approval_required",
),
)
+ _update_method = UpdateMethod.PATCH
def get(
self, id: Union[str, int], lazy: bool = False, **kwargs: Any
| {"golden_diff": "diff --git a/gitlab/v4/objects/branches.py b/gitlab/v4/objects/branches.py\n--- a/gitlab/v4/objects/branches.py\n+++ b/gitlab/v4/objects/branches.py\n@@ -1,7 +1,13 @@\n from typing import Any, cast, Union\n \n from gitlab.base import RESTManager, RESTObject\n-from gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin\n+from gitlab.mixins import (\n+ CRUDMixin,\n+ NoUpdateMixin,\n+ ObjectDeleteMixin,\n+ SaveMixin,\n+ UpdateMethod,\n+)\n from gitlab.types import RequiredOptional\n \n __all__ = [\n@@ -28,11 +34,11 @@\n return cast(ProjectBranch, super().get(id=id, lazy=lazy, **kwargs))\n \n \n-class ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):\n+class ProjectProtectedBranch(SaveMixin, ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n \n \n-class ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):\n+class ProjectProtectedBranchManager(CRUDMixin, RESTManager):\n _path = \"/projects/{project_id}/protected_branches\"\n _obj_cls = ProjectProtectedBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n@@ -49,6 +55,7 @@\n \"code_owner_approval_required\",\n ),\n )\n+ _update_method = UpdateMethod.PATCH\n \n def get(\n self, id: Union[str, int], lazy: bool = False, **kwargs: Any\n", "issue": "Allow update of protected branches\nIn gitlab 15.6 gitlab finally added api support to update protected branch settings, so ProjectProtectedBranch should be updated accordingly\r\n\r\nhttps://gitlab.com/gitlab-org/gitlab/-/issues/20229/\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Any, cast, Union\n\nfrom gitlab.base import RESTManager, RESTObject\nfrom gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin\nfrom gitlab.types import RequiredOptional\n\n__all__ = [\n \"ProjectBranch\",\n \"ProjectBranchManager\",\n \"ProjectProtectedBranch\",\n \"ProjectProtectedBranchManager\",\n]\n\n\nclass ProjectBranch(ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n\n\nclass ProjectBranchManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/repository/branches\"\n _obj_cls = ProjectBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"branch\", \"ref\"))\n\n def get(\n self, id: Union[str, int], lazy: bool = False, **kwargs: Any\n ) -> ProjectBranch:\n return cast(ProjectBranch, super().get(id=id, lazy=lazy, **kwargs))\n\n\nclass ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n\n\nclass ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/protected_branches\"\n _obj_cls = ProjectProtectedBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(\n required=(\"name\",),\n optional=(\n \"push_access_level\",\n \"merge_access_level\",\n \"unprotect_access_level\",\n \"allow_force_push\",\n \"allowed_to_push\",\n \"allowed_to_merge\",\n \"allowed_to_unprotect\",\n \"code_owner_approval_required\",\n ),\n )\n\n def get(\n self, id: Union[str, int], lazy: bool = False, **kwargs: Any\n ) -> ProjectProtectedBranch:\n return cast(ProjectProtectedBranch, super().get(id=id, lazy=lazy, **kwargs))\n", "path": "gitlab/v4/objects/branches.py"}]} | 1,111 | 336 |
gh_patches_debug_51489 | rasdani/github-patches | git_diff | kivy__kivy-4728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error: fromstring() in core/image/img_pil.py
Platform: Linux (OpenSuse, Ubuntu)
[INFO ] [Kivy ] v1.9.1
[INFO ] [Python ] v2.7.12 (default, Jul 01 2016, 15:36:53) [GCC]
Error:
File "/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py", line 105, in save
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
File "/usr/lib64/python2.7/site-packages/PIL/Image.py", line 2063, in fromstring
"Please call frombytes() instead.")
Exception: fromstring() has been removed. Please call frombytes() instead.
In File "/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py"
Line 105:
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
use...
image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
</issue>
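A short, self-contained sketch of the replacement call (the pixel data and output file name are made up; the API is current Pillow):

```
from PIL import Image

width, height = 2, 2
pixels = bytes([255, 0, 0, 255] * (width * height))  # 2x2 opaque red, RGBA

image = Image.frombytes("RGBA", (width, height), pixels)
image = image.transpose(Image.FLIP_TOP_BOTTOM)  # same flip the loader applies when flipped=True
image.save("out.png")
```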
<code>
[start of kivy/core/image/img_pil.py]
1 '''
2 PIL: PIL image loader
3 '''
4
5 __all__ = ('ImageLoaderPIL', )
6
7 try:
8 from PIL import Image as PILImage
9 except:
10 import Image as PILImage
11
12 from kivy.logger import Logger
13 from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
14
15
16 class ImageLoaderPIL(ImageLoaderBase):
17 '''Image loader based on the PIL library.
18
19 .. versionadded:: 1.0.8
20
21 Support for GIF animation added.
22
23 Gif animation has a lot of issues(transparency/color depths... etc).
24 In order to keep it simple, what is implemented here is what is
25 natively supported by the PIL library.
26
27 As a general rule, try to use gifs that have no transparency.
28 Gif's with transparency will work but be prepared for some
29 artifacts until transparency support is improved.
30
31 '''
32
33 @staticmethod
34 def can_save():
35 return True
36
37 @staticmethod
38 def can_load_memory():
39 return True
40
41 @staticmethod
42 def extensions():
43 '''Return accepted extensions for this loader'''
44 # See http://www.pythonware.com/library/pil/handbook/index.htm
45 return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',
46 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',
47 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',
48 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',
49 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',
50 'xv')
51
52 def _img_correct(self, _img_tmp):
53 '''Convert image to the correct format and orientation.
54 '''
55 # image loader work only with rgb/rgba image
56 if _img_tmp.mode.lower() not in ('rgb', 'rgba'):
57 try:
58 imc = _img_tmp.convert('RGBA')
59 except:
60 Logger.warning(
61 'Image: Unable to convert image to rgba (was %s)' %
62 (_img_tmp.mode.lower()))
63 raise
64 _img_tmp = imc
65
66 return _img_tmp
67
68 def _img_read(self, im):
69 '''Read images from an animated file.
70 '''
71 im.seek(0)
72
73 # Read all images inside
74 try:
75 img_ol = None
76 while True:
77 img_tmp = im
78 img_tmp = self._img_correct(img_tmp)
79 if img_ol and (hasattr(im, 'dispose') and not im.dispose):
80 # paste new frame over old so as to handle
81 # transparency properly
82 img_ol.paste(img_tmp, (0, 0), img_tmp)
83 img_tmp = img_ol
84 img_ol = img_tmp
85 yield ImageData(img_tmp.size[0], img_tmp.size[1],
86 img_tmp.mode.lower(), img_tmp.tobytes())
87 im.seek(im.tell() + 1)
88 except EOFError:
89 pass
90
91 def load(self, filename):
92 try:
93 im = PILImage.open(filename)
94 except:
95 Logger.warning('Image: Unable to load image <%s>' % filename)
96 raise
97 # update internals
98 if not self._inline:
99 self.filename = filename
100 # returns an array of type ImageData len 1 if not a sequence image
101 return list(self._img_read(im))
102
103 @staticmethod
104 def save(filename, width, height, fmt, pixels, flipped=False):
105 image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
106 if flipped:
107 image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
108 image.save(filename)
109 return True
110
111
112 # register
113 ImageLoader.register(ImageLoaderPIL)
114
[end of kivy/core/image/img_pil.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py
--- a/kivy/core/image/img_pil.py
+++ b/kivy/core/image/img_pil.py
@@ -102,7 +102,8 @@
@staticmethod
def save(filename, width, height, fmt, pixels, flipped=False):
- image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
+ image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
+
if flipped:
image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
image.save(filename)
| {"golden_diff": "diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py\n--- a/kivy/core/image/img_pil.py\n+++ b/kivy/core/image/img_pil.py\n@@ -102,7 +102,8 @@\n \n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n- image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n+ image = PILImage.frombytes(fmt.upper(), (width, height), pixels)\n+\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n", "issue": "Error: fromstring() in core/image/img_pil.py\nPlatform: Linux (OpenSuse, Ubuntu)\r\n\r\n[INFO ] [Kivy ] v1.9.1\r\n[INFO ] [Python ] v2.7.12 (default, Jul 01 2016, 15:36:53) [GCC]\r\n\r\nError:\r\n File \"/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py\", line 105, in save\r\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\r\n File \"/usr/lib64/python2.7/site-packages/PIL/Image.py\", line 2063, in fromstring\r\n \"Please call frombytes() instead.\")\r\nException: fromstring() has been removed. Please call frombytes() instead.\r\n\r\n\r\nIn File \"/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py\"\r\nLine 105:\r\nimage = PILImage.fromstring(fmt.upper(), (width, height), pixels)\r\n\r\nuse...\r\n\r\nimage = PILImage.frombytes(fmt.upper(), (width, height), pixels)\n", "before_files": [{"content": "'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... 
etc).\n In order to keep it simple, what is implemented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def can_load_memory():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tobytes())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n if not self._inline:\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n", "path": "kivy/core/image/img_pil.py"}]} | 1,884 | 140 |
gh_patches_debug_752 | rasdani/github-patches | git_diff | CTPUG__wafer-657 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
icalendar 5.0 breaks the tests
With icalendar 5.0, the test_ics_view test fails with
```
  File "/home/runner/work/wafer/wafer/wafer/schedule/tests/test_views.py", line 1526, in test_ics_view
    self.assertEqual(event['dtstart'].params['value'], 'DATE-TIME')
  File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/icalendar/caselessdict.py", line 40, in __getitem__
    return super().__getitem__(key.upper())
KeyError: 'VALUE'
```
but it works fine with 4.1
There's nothing obvious in the icalendar changelog about this behaviour change, so more investigation is needed.
</issue>
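As a rough reproduction sketch (not wafer's actual test code), the failing lookup boils down to inspecting the DTSTART parameters on an event; printing them under each icalendar version shows whether a VALUE parameter is present at all:

```
from datetime import datetime, timezone
from icalendar import Event

event = Event()
event.add("dtstart", datetime(2022, 11, 1, 9, 0, tzinfo=timezone.utc))

dtstart = event["dtstart"]
# Which parameter keys exist (and how they are looked up) differs between 4.x and 5.0.
print(type(dtstart), dict(dtstart.params))
```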
<code>
[start of setup.py]
1 from glob import glob
2 import subprocess
3
4 from setuptools import find_packages, setup
5
6 REQUIRES = [
7 'Django>=2.2,<5',
8 'bleach',
9 'bleach-allowlist',
10 'diff-match-patch',
11 'django-bakery>=0.13.0',
12 'django-crispy-forms',
13 'django-markitup>=4.0.0',
14 'django-registration-redux',
15 'django-reversion',
16 'django-select2',
17 'djangorestframework',
18 'drf-extensions>=0.5.0',
19 'icalendar>=4.0,<5.0',
20 'jsonfield',
21 'markdown>=2.5',
22 'pillow',
23 'py3dns',
24 'pyLibravatar',
25 'pytz',
26 'requests',
27 ]
28
29 SOURCES = []
30
31
32 with open('README.rst', 'r') as f:
33 long_description = f.read()
34
35
36 def compile_translations():
37 try:
38 subprocess.check_call(['./manage.py', 'compilemessages'])
39 except subprocess.CalledProcessError:
40 print("WARNING: cannot compile translations.")
41 return glob('wafer/locale/*/LC_MESSAGES/django.mo')
42
43
44 setup(
45 name="wafer",
46 version="0.14.1a",
47 url='http://github.com/CTPUG/wafer',
48 license='ISC',
49 description="A wafer-thin Django library for running small conferences.",
50 long_description=long_description,
51 long_description_content_type="text/x-rst",
52 author='CTPUG',
53 author_email='[email protected]',
54 packages=find_packages(),
55 include_package_data=True,
56 install_requires=REQUIRES,
57 dependency_links=SOURCES,
58 data_files=[
59 ('locale', compile_translations()),
60 ],
61 setup_requires=[
62 # Add setuptools-git, so we get correct behaviour for
63 # include_package_data
64 'setuptools_git >= 1.0',
65 ],
66 classifiers=[
67 'Development Status :: 4 - Beta',
68 'Intended Audience :: Developers',
69 'License :: OSI Approved :: ISC License (ISCL)',
70 'Operating System :: POSIX',
71 'Programming Language :: Python :: 3',
72 'Programming Language :: Python :: 3.6',
73 'Programming Language :: Python :: 3.7',
74 'Programming Language :: Python :: 3.8',
75 'Framework :: Django',
76 'Topic :: Software Development :: Libraries :: Python Modules',
77 'Topic :: Internet :: WWW/HTTP',
78 ],
79 )
80
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
'django-select2',
'djangorestframework',
'drf-extensions>=0.5.0',
- 'icalendar>=4.0,<5.0',
+ 'icalendar>=4.0',
'jsonfield',
'markdown>=2.5',
'pillow',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n- 'icalendar>=4.0,<5.0',\n+ 'icalendar>=4.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n", "issue": "icalendar 5.0 breaks the tests\nWith icalendar 5.0, the test_ics_view test fails with\r\n```\r\nFile \"/home/runner/work/wafer/wafer/wafer/schedule/tests/test_views.py\", line 1526, in test_ics_view\r\n20\r\n self.assertEqual(event['dtstart'].params['value'], 'DATE-TIME')\r\n21\r\n File \"/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/icalendar/caselessdict.py\", line 40, in __getitem__\r\n22\r\n return super().__getitem__(key.upper())\r\n23\r\nKeyError: 'VALUE'\r\n```\r\n\r\nbut it works fine with 4.1\r\n\r\nThere's nothing obvious in the icalendar changelog about this behaviour change, so more investriagtion is needed.\r\n\n", "before_files": [{"content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<5',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0,<5.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.14.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}]} | 1,421 | 101 |
gh_patches_debug_22334 | rasdani/github-patches | git_diff | wagtail__wagtail-10913 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login template does not respect non_fields_errors display
When using a custom `WAGTAILADMIN_USER_LOGIN_FORM`, you can't set form-wide errors as they will always be displayed as `Your {{ username_field }} and password didn't match. Please try again.` from `"wagtailadmin/login.html"`
As the default LoginForm (`wagtail.admin.forms.auth.LoginForm`) subclasses `django.contrib.auth.forms.AuthenticationForm`, which already has an `'invalid_login'` error message (that is username_field-aware), we could just use that; but if we want this particular message we could just override this error message.
</issue>
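For illustration, a minimal custom form of the kind the issue describes (the maintenance flag is invented; such a form would be wired up via the `WAGTAILADMIN_USER_LOGIN_FORM` setting):

```
from django import forms
from wagtail.admin.forms.auth import LoginForm

MAINTENANCE_MODE = False  # hypothetical site-wide flag


class RestrictedLoginForm(LoginForm):
    def clean(self):
        cleaned_data = super().clean()
        if MAINTENANCE_MODE:
            # A form-wide (non-field) error: this is what the stock login
            # template currently replaces with its hardcoded message.
            raise forms.ValidationError("Logins are disabled during maintenance.")
        return cleaned_data
```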
<code>
[start of wagtail/admin/forms/auth.py]
1 from django import forms
2 from django.contrib.auth.forms import AuthenticationForm
3 from django.contrib.auth.forms import PasswordChangeForm as DjangoPasswordChangeForm
4 from django.contrib.auth.forms import PasswordResetForm as DjangoPasswordResetForm
5 from django.utils.translation import gettext_lazy
6
7
8 class LoginForm(AuthenticationForm):
9 username = forms.CharField(max_length=254, widget=forms.TextInput())
10
11 password = forms.CharField(
12 widget=forms.PasswordInput(
13 attrs={
14 "placeholder": gettext_lazy("Enter password"),
15 }
16 )
17 )
18
19 remember = forms.BooleanField(required=False)
20
21 def __init__(self, request=None, *args, **kwargs):
22 super().__init__(request=request, *args, **kwargs)
23 self.fields["username"].widget.attrs["placeholder"] = gettext_lazy(
24 "Enter your %(username_field_name)s"
25 ) % {"username_field_name": self.username_field.verbose_name}
26 self.fields["username"].widget.attrs["autofocus"] = ""
27
28 @property
29 def extra_fields(self):
30 for field_name in self.fields.keys():
31 if field_name not in ["username", "password", "remember"]:
32 yield field_name, self[field_name]
33
34
35 class PasswordResetForm(DjangoPasswordResetForm):
36 email = forms.EmailField(
37 label=gettext_lazy("Enter your email address to reset your password"),
38 max_length=254,
39 required=True,
40 )
41
42 @property
43 def extra_fields(self):
44 for field_name in self.fields.keys():
45 if field_name not in ["email"]:
46 yield field_name, self[field_name]
47
48
49 class PasswordChangeForm(DjangoPasswordChangeForm):
50 """
51 Since this is displayed as part of a larger form, this differs from the vanilla Django
52 PasswordChangeForm as follows:
53 * the old-password field is not auto-focused
54 * Fields are not marked as required
55 """
56
57 def __init__(self, *args, **kwargs):
58 super().__init__(*args, **kwargs)
59 try:
60 del self.fields["old_password"].widget.attrs["autofocus"]
61 except KeyError:
62 pass
63
64 self.fields["old_password"].required = False
65 self.fields["new_password1"].required = False
66 self.fields["new_password2"].required = False
67
[end of wagtail/admin/forms/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/admin/forms/auth.py b/wagtail/admin/forms/auth.py
--- a/wagtail/admin/forms/auth.py
+++ b/wagtail/admin/forms/auth.py
@@ -18,6 +18,13 @@
remember = forms.BooleanField(required=False)
+ error_messages = {
+ **AuthenticationForm.error_messages,
+ "invalid_login": gettext_lazy(
+ "Your %(username_field)s and password didn't match. Please try again."
+ ),
+ }
+
def __init__(self, request=None, *args, **kwargs):
super().__init__(request=request, *args, **kwargs)
self.fields["username"].widget.attrs["placeholder"] = gettext_lazy(
@@ -31,6 +38,13 @@
if field_name not in ["username", "password", "remember"]:
yield field_name, self[field_name]
+ def get_invalid_login_error(self):
+ return forms.ValidationError(
+ self.error_messages["invalid_login"],
+ code="invalid_login",
+ params={"username_field": self.username_field.verbose_name},
+ )
+
class PasswordResetForm(DjangoPasswordResetForm):
email = forms.EmailField(
| {"golden_diff": "diff --git a/wagtail/admin/forms/auth.py b/wagtail/admin/forms/auth.py\n--- a/wagtail/admin/forms/auth.py\n+++ b/wagtail/admin/forms/auth.py\n@@ -18,6 +18,13 @@\n \n remember = forms.BooleanField(required=False)\n \n+ error_messages = {\n+ **AuthenticationForm.error_messages,\n+ \"invalid_login\": gettext_lazy(\n+ \"Your %(username_field)s and password didn't match. Please try again.\"\n+ ),\n+ }\n+\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n self.fields[\"username\"].widget.attrs[\"placeholder\"] = gettext_lazy(\n@@ -31,6 +38,13 @@\n if field_name not in [\"username\", \"password\", \"remember\"]:\n yield field_name, self[field_name]\n \n+ def get_invalid_login_error(self):\n+ return forms.ValidationError(\n+ self.error_messages[\"invalid_login\"],\n+ code=\"invalid_login\",\n+ params={\"username_field\": self.username_field.verbose_name},\n+ )\n+\n \n class PasswordResetForm(DjangoPasswordResetForm):\n email = forms.EmailField(\n", "issue": "Login template does not respect non_fields_errors display\nWhen using a custom `WAGTAILADMIN_USER_LOGIN_FORM`, you can't set form-wide errors as they will always be displayed as `Your {{ username_field }} and password didn't match. Please try again.` from `\"wagtailadmin/login.html\"`\r\n\r\nAs the default LoginForm (`wagtail.admin.forms.auth.LoginForm`) subclasses `django.contrib.auth.forms.AuthenticationForm` which already has an `'invalid_login'` error message (that is usename_field-aware) we could just use that, but if we want this particular message we could just override this error message\"\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.forms import PasswordChangeForm as DjangoPasswordChangeForm\nfrom django.contrib.auth.forms import PasswordResetForm as DjangoPasswordResetForm\nfrom django.utils.translation import gettext_lazy\n\n\nclass LoginForm(AuthenticationForm):\n username = forms.CharField(max_length=254, widget=forms.TextInput())\n\n password = forms.CharField(\n widget=forms.PasswordInput(\n attrs={\n \"placeholder\": gettext_lazy(\"Enter password\"),\n }\n )\n )\n\n remember = forms.BooleanField(required=False)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n self.fields[\"username\"].widget.attrs[\"placeholder\"] = gettext_lazy(\n \"Enter your %(username_field_name)s\"\n ) % {\"username_field_name\": self.username_field.verbose_name}\n self.fields[\"username\"].widget.attrs[\"autofocus\"] = \"\"\n\n @property\n def extra_fields(self):\n for field_name in self.fields.keys():\n if field_name not in [\"username\", \"password\", \"remember\"]:\n yield field_name, self[field_name]\n\n\nclass PasswordResetForm(DjangoPasswordResetForm):\n email = forms.EmailField(\n label=gettext_lazy(\"Enter your email address to reset your password\"),\n max_length=254,\n required=True,\n )\n\n @property\n def extra_fields(self):\n for field_name in self.fields.keys():\n if field_name not in [\"email\"]:\n yield field_name, self[field_name]\n\n\nclass PasswordChangeForm(DjangoPasswordChangeForm):\n \"\"\"\n Since this is displayed as part of a larger form, this differs from the vanilla Django\n PasswordChangeForm as follows:\n * the old-password field is not auto-focused\n * Fields are not marked as required\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n try:\n del 
self.fields[\"old_password\"].widget.attrs[\"autofocus\"]\n except KeyError:\n pass\n\n self.fields[\"old_password\"].required = False\n self.fields[\"new_password1\"].required = False\n self.fields[\"new_password2\"].required = False\n", "path": "wagtail/admin/forms/auth.py"}]} | 1,268 | 261 |
gh_patches_debug_36983 | rasdani/github-patches | git_diff | liberapay__liberapay.com-1717 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`Cache-Control: immutable`
It's [a new experimental HTTP feature](https://bitsup.blogspot.fr/2016/05/cache-control-immutable.html) that we should probably start using. It's low priority though.
</issue>
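A self-contained sketch of the intended header (the helper and dict below are stand-ins, not Liberapay code); `immutable` is only safe when the URL pins the exact content, as the `?etag=` querystring used for assets does:

```
def caching_header(etag_in_querystring):
    # Long-lived immutable caching only when the URL embeds the content hash.
    if etag_in_querystring:
        return b'public, max-age=31536000, immutable'
    return b'public, max-age=3600'

headers = {b'Cache-Control': caching_header(etag_in_querystring=True)}
print(headers)
```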
<code>
[start of liberapay/utils/http_caching.py]
1 """
2 Handles HTTP caching.
3 """
4
5 import atexit
6 from hashlib import md5
7 import os
8 from tempfile import mkstemp
9
10 from aspen.request_processor.dispatcher import DispatchResult, DispatchStatus
11 from pando import Response
12
13 from liberapay.utils import b64encode_s, find_files
14
15
16 ETAGS = {}
17
18
19 def compile_assets(website):
20 cleanup = []
21 for spt in find_files(website.www_root+'/assets/', '*.spt'):
22 filepath = spt[:-4] # /path/to/www/assets/foo.css
23 if not os.path.exists(filepath):
24 cleanup.append(filepath)
25 dispatch_result = DispatchResult(DispatchStatus.okay, spt, None, None, None)
26 state = dict(dispatch_result=dispatch_result, response=Response())
27 state['state'] = state
28 resource = website.request_processor.resources.get(spt)
29 content = resource.render(state, dispatch_result, None).body
30 if not isinstance(content, bytes):
31 content = content.encode('utf8')
32 tmpfd, tmpfpath = mkstemp(dir='.')
33 os.write(tmpfd, content)
34 os.close(tmpfd)
35 os.rename(tmpfpath, filepath)
36 if website.env.clean_assets:
37 atexit.register(lambda: rm_f(*cleanup))
38
39
40 def rm_f(*paths):
41 for path in paths:
42 try:
43 os.unlink(path)
44 except Exception:
45 pass
46
47
48 def clean_assets(www_root):
49 rm_f(*[spt[:-4] for spt in find_files(www_root+'/assets/', '*.spt')])
50
51
52 def asset_etag(path):
53 if path.endswith('.spt'):
54 return ''
55 if path in ETAGS:
56 return ETAGS[path]
57 with open(path, 'rb') as f:
58 h = b64encode_s(md5(f.read()).digest())
59 ETAGS[path] = h
60 return h
61
62
63 # algorithm functions
64
65 def get_etag_for_file(dispatch_result, website, state):
66 if dispatch_result.status != DispatchStatus.okay:
67 return {'etag': None}
68 try:
69 return {'etag': asset_etag(dispatch_result.match)}
70 except Exception as e:
71 website.tell_sentry(e, state)
72 return {'etag': None}
73
74
75 def try_to_serve_304(dispatch_result, request, response, etag):
76 """Try to serve a 304 for static resources.
77 """
78 if not etag:
79 # This is a request for a dynamic resource.
80 return
81
82 qs_etag = request.qs.get('etag')
83 if qs_etag and qs_etag != etag:
84 # Don't serve one version of a file as if it were another.
85 raise response.error(410)
86
87 headers_etag = request.headers.get(b'If-None-Match', b'').decode('ascii', 'replace')
88 if not headers_etag:
89 # This client doesn't want a 304.
90 return
91
92 if headers_etag != etag:
93 # Cache miss, the client sent an old or invalid etag.
94 return
95
96 # Huzzah!
97 # =======
98 # We can serve a 304! :D
99
100 raise response.success(304)
101
102
103 def add_caching_to_response(response, request=None, etag=None):
104 """Set caching headers.
105 """
106 if not etag:
107 # This is a dynamic resource, disable caching by default
108 if b'Cache-Control' not in response.headers:
109 response.headers[b'Cache-Control'] = b'no-cache'
110 return
111
112 assert request is not None # sanity check
113
114 if response.code not in (200, 304):
115 return
116
117 # https://developers.google.com/speed/docs/best-practices/caching
118 response.headers[b'Etag'] = etag.encode('ascii')
119
120 if request.qs.get('etag'):
121 # We can cache "indefinitely" when the querystring contains the etag.
122 response.headers[b'Cache-Control'] = b'public, max-age=31536000'
123 else:
124 # Otherwise we cache for 1 hour
125 response.headers[b'Cache-Control'] = b'public, max-age=3600'
126
[end of liberapay/utils/http_caching.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/utils/http_caching.py b/liberapay/utils/http_caching.py
--- a/liberapay/utils/http_caching.py
+++ b/liberapay/utils/http_caching.py
@@ -79,47 +79,41 @@
# This is a request for a dynamic resource.
return
+ # Compare the etag in the request's querystring to the one we have.
qs_etag = request.qs.get('etag')
if qs_etag and qs_etag != etag:
# Don't serve one version of a file as if it were another.
raise response.error(410)
+ # Compare the etag in the request's headers to the one we have.
headers_etag = request.headers.get(b'If-None-Match', b'').decode('ascii', 'replace')
- if not headers_etag:
- # This client doesn't want a 304.
- return
-
- if headers_etag != etag:
- # Cache miss, the client sent an old or invalid etag.
- return
-
- # Huzzah!
- # =======
- # We can serve a 304! :D
+ if headers_etag and headers_etag == etag:
+ # Success! We can serve a 304.
+ raise response.success(304)
- raise response.success(304)
-
-def add_caching_to_response(response, request=None, etag=None):
+def add_caching_to_response(state, website, response, request=None, etag=None):
"""Set caching headers.
"""
- if not etag:
- # This is a dynamic resource, disable caching by default
- if b'Cache-Control' not in response.headers:
- response.headers[b'Cache-Control'] = b'no-cache'
- return
-
- assert request is not None # sanity check
-
if response.code not in (200, 304):
return
-
- # https://developers.google.com/speed/docs/best-practices/caching
- response.headers[b'Etag'] = etag.encode('ascii')
-
- if request.qs.get('etag'):
- # We can cache "indefinitely" when the querystring contains the etag.
- response.headers[b'Cache-Control'] = b'public, max-age=31536000'
+ if b'Cache-Control' in response.headers:
+ # The caching policy has already been defined somewhere else
+ return
+ if etag:
+ try:
+ assert not response.headers.cookie
+ except Exception as e:
+ website.tell_sentry(e, state)
+ response.headers.cookie.clear()
+ # https://developers.google.com/speed/docs/best-practices/caching
+ response.headers[b'Etag'] = etag.encode('ascii')
+ if request.qs.get('etag'):
+ # We can cache "indefinitely" when the querystring contains the etag.
+ response.headers[b'Cache-Control'] = b'public, max-age=31536000, immutable'
+ else:
+ # Otherwise we cache for 1 hour
+ response.headers[b'Cache-Control'] = b'public, max-age=3600'
else:
- # Otherwise we cache for 1 hour
- response.headers[b'Cache-Control'] = b'public, max-age=3600'
+ # This is a dynamic resource, disable caching by default
+ response.headers[b'Cache-Control'] = b'no-cache'
| {"golden_diff": "diff --git a/liberapay/utils/http_caching.py b/liberapay/utils/http_caching.py\n--- a/liberapay/utils/http_caching.py\n+++ b/liberapay/utils/http_caching.py\n@@ -79,47 +79,41 @@\n # This is a request for a dynamic resource.\n return\n \n+ # Compare the etag in the request's querystring to the one we have.\n qs_etag = request.qs.get('etag')\n if qs_etag and qs_etag != etag:\n # Don't serve one version of a file as if it were another.\n raise response.error(410)\n \n+ # Compare the etag in the request's headers to the one we have.\n headers_etag = request.headers.get(b'If-None-Match', b'').decode('ascii', 'replace')\n- if not headers_etag:\n- # This client doesn't want a 304.\n- return\n-\n- if headers_etag != etag:\n- # Cache miss, the client sent an old or invalid etag.\n- return\n-\n- # Huzzah!\n- # =======\n- # We can serve a 304! :D\n+ if headers_etag and headers_etag == etag:\n+ # Success! We can serve a 304.\n+ raise response.success(304)\n \n- raise response.success(304)\n \n-\n-def add_caching_to_response(response, request=None, etag=None):\n+def add_caching_to_response(state, website, response, request=None, etag=None):\n \"\"\"Set caching headers.\n \"\"\"\n- if not etag:\n- # This is a dynamic resource, disable caching by default\n- if b'Cache-Control' not in response.headers:\n- response.headers[b'Cache-Control'] = b'no-cache'\n- return\n-\n- assert request is not None # sanity check\n-\n if response.code not in (200, 304):\n return\n-\n- # https://developers.google.com/speed/docs/best-practices/caching\n- response.headers[b'Etag'] = etag.encode('ascii')\n-\n- if request.qs.get('etag'):\n- # We can cache \"indefinitely\" when the querystring contains the etag.\n- response.headers[b'Cache-Control'] = b'public, max-age=31536000'\n+ if b'Cache-Control' in response.headers:\n+ # The caching policy has already been defined somewhere else\n+ return\n+ if etag:\n+ try:\n+ assert not response.headers.cookie\n+ except Exception as e:\n+ website.tell_sentry(e, state)\n+ response.headers.cookie.clear()\n+ # https://developers.google.com/speed/docs/best-practices/caching\n+ response.headers[b'Etag'] = etag.encode('ascii')\n+ if request.qs.get('etag'):\n+ # We can cache \"indefinitely\" when the querystring contains the etag.\n+ response.headers[b'Cache-Control'] = b'public, max-age=31536000, immutable'\n+ else:\n+ # Otherwise we cache for 1 hour\n+ response.headers[b'Cache-Control'] = b'public, max-age=3600'\n else:\n- # Otherwise we cache for 1 hour\n- response.headers[b'Cache-Control'] = b'public, max-age=3600'\n+ # This is a dynamic resource, disable caching by default\n+ response.headers[b'Cache-Control'] = b'no-cache'\n", "issue": "`Cache-Control: immutable`\nIt's [a new experimental HTTP feature](https://bitsup.blogspot.fr/2016/05/cache-control-immutable.html) that we should probably start using. 
It's low priority though.\n\n", "before_files": [{"content": "\"\"\"\nHandles HTTP caching.\n\"\"\"\n\nimport atexit\nfrom hashlib import md5\nimport os\nfrom tempfile import mkstemp\n\nfrom aspen.request_processor.dispatcher import DispatchResult, DispatchStatus\nfrom pando import Response\n\nfrom liberapay.utils import b64encode_s, find_files\n\n\nETAGS = {}\n\n\ndef compile_assets(website):\n cleanup = []\n for spt in find_files(website.www_root+'/assets/', '*.spt'):\n filepath = spt[:-4] # /path/to/www/assets/foo.css\n if not os.path.exists(filepath):\n cleanup.append(filepath)\n dispatch_result = DispatchResult(DispatchStatus.okay, spt, None, None, None)\n state = dict(dispatch_result=dispatch_result, response=Response())\n state['state'] = state\n resource = website.request_processor.resources.get(spt)\n content = resource.render(state, dispatch_result, None).body\n if not isinstance(content, bytes):\n content = content.encode('utf8')\n tmpfd, tmpfpath = mkstemp(dir='.')\n os.write(tmpfd, content)\n os.close(tmpfd)\n os.rename(tmpfpath, filepath)\n if website.env.clean_assets:\n atexit.register(lambda: rm_f(*cleanup))\n\n\ndef rm_f(*paths):\n for path in paths:\n try:\n os.unlink(path)\n except Exception:\n pass\n\n\ndef clean_assets(www_root):\n rm_f(*[spt[:-4] for spt in find_files(www_root+'/assets/', '*.spt')])\n\n\ndef asset_etag(path):\n if path.endswith('.spt'):\n return ''\n if path in ETAGS:\n return ETAGS[path]\n with open(path, 'rb') as f:\n h = b64encode_s(md5(f.read()).digest())\n ETAGS[path] = h\n return h\n\n\n# algorithm functions\n\ndef get_etag_for_file(dispatch_result, website, state):\n if dispatch_result.status != DispatchStatus.okay:\n return {'etag': None}\n try:\n return {'etag': asset_etag(dispatch_result.match)}\n except Exception as e:\n website.tell_sentry(e, state)\n return {'etag': None}\n\n\ndef try_to_serve_304(dispatch_result, request, response, etag):\n \"\"\"Try to serve a 304 for static resources.\n \"\"\"\n if not etag:\n # This is a request for a dynamic resource.\n return\n\n qs_etag = request.qs.get('etag')\n if qs_etag and qs_etag != etag:\n # Don't serve one version of a file as if it were another.\n raise response.error(410)\n\n headers_etag = request.headers.get(b'If-None-Match', b'').decode('ascii', 'replace')\n if not headers_etag:\n # This client doesn't want a 304.\n return\n\n if headers_etag != etag:\n # Cache miss, the client sent an old or invalid etag.\n return\n\n # Huzzah!\n # =======\n # We can serve a 304! :D\n\n raise response.success(304)\n\n\ndef add_caching_to_response(response, request=None, etag=None):\n \"\"\"Set caching headers.\n \"\"\"\n if not etag:\n # This is a dynamic resource, disable caching by default\n if b'Cache-Control' not in response.headers:\n response.headers[b'Cache-Control'] = b'no-cache'\n return\n\n assert request is not None # sanity check\n\n if response.code not in (200, 304):\n return\n\n # https://developers.google.com/speed/docs/best-practices/caching\n response.headers[b'Etag'] = etag.encode('ascii')\n\n if request.qs.get('etag'):\n # We can cache \"indefinitely\" when the querystring contains the etag.\n response.headers[b'Cache-Control'] = b'public, max-age=31536000'\n else:\n # Otherwise we cache for 1 hour\n response.headers[b'Cache-Control'] = b'public, max-age=3600'\n", "path": "liberapay/utils/http_caching.py"}]} | 1,789 | 811 |
gh_patches_debug_26075 | rasdani/github-patches | git_diff | openmc-dev__openmc-2825 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_DECAY_ENERGIES not cleared when changing chain in openmc.config
<!--
If you are a user of OpenMC and are running into trouble with the code or are
seeking general user support, we highly recommend posting on the OpenMC
discourse forum first. GitHub issues should be used specifically for bug reports
and feature requests.
https://openmc.discourse.group/
-->
## Bug Description
This causes incorrect sets of decay energies to be used. The PR which solves this has a more in-depth description of the problem.
## Steps to Reproduce
Run two depletion calcs, each with a different chain. Then, try to postprocess the decay heats from each chain within the same python script by changing `openmc.config['chain_file']`. The decay heats will use energies from the first one we loaded, but not the second. This is because the decay heats are cached in `openmc/data/decay.py` and we're not clearing that dictionary upon changing the chain.
</issue>
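A self-contained illustration of the caching pattern at fault (made-up numbers, no real openmc calls): a module-level cache keyed only by nuclide keeps returning values from the first chain unless it is cleared when the chain changes.

```
_DECAY_ENERGY = {}  # stands in for the module-level cache in openmc/data/decay.py

def decay_energy(nuclide, chain):
    if nuclide not in _DECAY_ENERGY:      # the key ignores which chain is loaded
        _DECAY_ENERGY[nuclide] = chain[nuclide]
    return _DECAY_ENERGY[nuclide]

chain_a = {"Cs137": 0.187}  # made-up MeV-per-decay values
chain_b = {"Cs137": 0.250}

print(decay_energy("Cs137", chain_a))  # 0.187
print(decay_energy("Cs137", chain_b))  # still 0.187 -- stale cache
_DECAY_ENERGY.clear()                  # what the fix does when the chain changes
print(decay_energy("Cs137", chain_b))  # 0.250
```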
<code>
[start of openmc/config.py]
1 from collections.abc import MutableMapping
2 import os
3 from pathlib import Path
4 import warnings
5
6 from openmc.data import DataLibrary
7 from openmc.data.decay import _DECAY_PHOTON_ENERGY
8
9 __all__ = ["config"]
10
11
12 class _Config(MutableMapping):
13 def __init__(self, data=()):
14 self._mapping = {}
15 self.update(data)
16
17 def __getitem__(self, key):
18 return self._mapping[key]
19
20 def __delitem__(self, key):
21 del self._mapping[key]
22 if key == 'cross_sections':
23 del os.environ['OPENMC_CROSS_SECTIONS']
24 elif key == 'mg_cross_sections':
25 del os.environ['OPENMC_MG_CROSS_SECTIONS']
26 elif key == 'chain_file':
27 del os.environ['OPENMC_CHAIN_FILE']
28 # Reset photon source data since it relies on chain file
29 _DECAY_PHOTON_ENERGY.clear()
30
31 def __setitem__(self, key, value):
32 if key == 'cross_sections':
33 # Force environment variable to match
34 self._set_path(key, value)
35 os.environ['OPENMC_CROSS_SECTIONS'] = str(value)
36 elif key == 'mg_cross_sections':
37 self._set_path(key, value)
38 os.environ['OPENMC_MG_CROSS_SECTIONS'] = str(value)
39 elif key == 'chain_file':
40 self._set_path(key, value)
41 os.environ['OPENMC_CHAIN_FILE'] = str(value)
42 # Reset photon source data since it relies on chain file
43 _DECAY_PHOTON_ENERGY.clear()
44 else:
45 raise KeyError(f'Unrecognized config key: {key}. Acceptable keys '
46 'are "cross_sections", "mg_cross_sections" and '
47 '"chain_file"')
48
49 def __iter__(self):
50 return iter(self._mapping)
51
52 def __len__(self):
53 return len(self._mapping)
54
55 def __repr__(self):
56 return repr(self._mapping)
57
58 def _set_path(self, key, value):
59 self._mapping[key] = p = Path(value)
60 if not p.exists():
61 warnings.warn(f"'{value}' does not exist.")
62
63
64 def _default_config():
65 """Return default configuration"""
66 config = _Config()
67
68 # Set cross sections using environment variable
69 if "OPENMC_CROSS_SECTIONS" in os.environ:
70 config['cross_sections'] = os.environ["OPENMC_CROSS_SECTIONS"]
71 if "OPENMC_MG_CROSS_SECTIONS" in os.environ:
72 config['mg_cross_sections'] = os.environ["OPENMC_MG_CROSS_SECTIONS"]
73
74 # Set depletion chain
75 chain_file = os.environ.get("OPENMC_CHAIN_FILE")
76 if (chain_file is None and
77 config.get('cross_sections') is not None and
78 config['cross_sections'].exists()
79 ):
80 # Check for depletion chain in cross_sections.xml
81 data = DataLibrary.from_xml(config['cross_sections'])
82 for lib in reversed(data.libraries):
83 if lib['type'] == 'depletion_chain':
84 chain_file = lib['path']
85 break
86 if chain_file is not None:
87 config['chain_file'] = chain_file
88
89 return config
90
91
92 config = _default_config()
93
[end of openmc/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openmc/config.py b/openmc/config.py
--- a/openmc/config.py
+++ b/openmc/config.py
@@ -4,7 +4,7 @@
import warnings
from openmc.data import DataLibrary
-from openmc.data.decay import _DECAY_PHOTON_ENERGY
+from openmc.data.decay import _DECAY_ENERGY, _DECAY_PHOTON_ENERGY
__all__ = ["config"]
@@ -41,6 +41,7 @@
os.environ['OPENMC_CHAIN_FILE'] = str(value)
# Reset photon source data since it relies on chain file
_DECAY_PHOTON_ENERGY.clear()
+ _DECAY_ENERGY.clear()
else:
raise KeyError(f'Unrecognized config key: {key}. Acceptable keys '
'are "cross_sections", "mg_cross_sections" and '
@@ -76,7 +77,7 @@
if (chain_file is None and
config.get('cross_sections') is not None and
config['cross_sections'].exists()
- ):
+ ):
# Check for depletion chain in cross_sections.xml
data = DataLibrary.from_xml(config['cross_sections'])
for lib in reversed(data.libraries):
| {"golden_diff": "diff --git a/openmc/config.py b/openmc/config.py\n--- a/openmc/config.py\n+++ b/openmc/config.py\n@@ -4,7 +4,7 @@\n import warnings\n \n from openmc.data import DataLibrary\n-from openmc.data.decay import _DECAY_PHOTON_ENERGY\n+from openmc.data.decay import _DECAY_ENERGY, _DECAY_PHOTON_ENERGY\n \n __all__ = [\"config\"]\n \n@@ -41,6 +41,7 @@\n os.environ['OPENMC_CHAIN_FILE'] = str(value)\n # Reset photon source data since it relies on chain file\n _DECAY_PHOTON_ENERGY.clear()\n+ _DECAY_ENERGY.clear()\n else:\n raise KeyError(f'Unrecognized config key: {key}. Acceptable keys '\n 'are \"cross_sections\", \"mg_cross_sections\" and '\n@@ -76,7 +77,7 @@\n if (chain_file is None and\n config.get('cross_sections') is not None and\n config['cross_sections'].exists()\n- ):\n+ ):\n # Check for depletion chain in cross_sections.xml\n data = DataLibrary.from_xml(config['cross_sections'])\n for lib in reversed(data.libraries):\n", "issue": "_DECAY_ENERGIES not cleared when changing chain in openmc.config\n<!--\r\nIf you are a user of OpenMC and are running into trouble with the code or are\r\nseeking general user support, we highly recommend posting on the OpenMC\r\ndiscourse forum first. GitHub issues should be used specifically for bug reports\r\nand feature requests.\r\n\r\nhttps://openmc.discourse.group/\r\n\r\n-->\r\n\r\n## Bug Description\r\nThis causes incorrect sets of decay energies to be used. The PR which solves this has a more in-depth description of the problem.\r\n\r\n\r\n## Steps to Reproduce\r\nRun two depletion calcs, each with a different chain. Then, try to postprocess the decay heats from each chain within the same python script by changing `openmc.config['chain_file']`. The decay heats will use energies from the first one we loaded, but not the second. This is because the decay heats are cached in `openmc/data/decay.py` and we're not clearing that dictionary upon changing the chain.\r\n\n", "before_files": [{"content": "from collections.abc import MutableMapping\nimport os\nfrom pathlib import Path\nimport warnings\n\nfrom openmc.data import DataLibrary\nfrom openmc.data.decay import _DECAY_PHOTON_ENERGY\n\n__all__ = [\"config\"]\n\n\nclass _Config(MutableMapping):\n def __init__(self, data=()):\n self._mapping = {}\n self.update(data)\n\n def __getitem__(self, key):\n return self._mapping[key]\n\n def __delitem__(self, key):\n del self._mapping[key]\n if key == 'cross_sections':\n del os.environ['OPENMC_CROSS_SECTIONS']\n elif key == 'mg_cross_sections':\n del os.environ['OPENMC_MG_CROSS_SECTIONS']\n elif key == 'chain_file':\n del os.environ['OPENMC_CHAIN_FILE']\n # Reset photon source data since it relies on chain file\n _DECAY_PHOTON_ENERGY.clear()\n\n def __setitem__(self, key, value):\n if key == 'cross_sections':\n # Force environment variable to match\n self._set_path(key, value)\n os.environ['OPENMC_CROSS_SECTIONS'] = str(value)\n elif key == 'mg_cross_sections':\n self._set_path(key, value)\n os.environ['OPENMC_MG_CROSS_SECTIONS'] = str(value)\n elif key == 'chain_file':\n self._set_path(key, value)\n os.environ['OPENMC_CHAIN_FILE'] = str(value)\n # Reset photon source data since it relies on chain file\n _DECAY_PHOTON_ENERGY.clear()\n else:\n raise KeyError(f'Unrecognized config key: {key}. 
Acceptable keys '\n 'are \"cross_sections\", \"mg_cross_sections\" and '\n '\"chain_file\"')\n\n def __iter__(self):\n return iter(self._mapping)\n\n def __len__(self):\n return len(self._mapping)\n\n def __repr__(self):\n return repr(self._mapping)\n\n def _set_path(self, key, value):\n self._mapping[key] = p = Path(value)\n if not p.exists():\n warnings.warn(f\"'{value}' does not exist.\")\n\n\ndef _default_config():\n \"\"\"Return default configuration\"\"\"\n config = _Config()\n\n # Set cross sections using environment variable\n if \"OPENMC_CROSS_SECTIONS\" in os.environ:\n config['cross_sections'] = os.environ[\"OPENMC_CROSS_SECTIONS\"]\n if \"OPENMC_MG_CROSS_SECTIONS\" in os.environ:\n config['mg_cross_sections'] = os.environ[\"OPENMC_MG_CROSS_SECTIONS\"]\n\n # Set depletion chain\n chain_file = os.environ.get(\"OPENMC_CHAIN_FILE\")\n if (chain_file is None and\n config.get('cross_sections') is not None and\n config['cross_sections'].exists()\n ):\n # Check for depletion chain in cross_sections.xml\n data = DataLibrary.from_xml(config['cross_sections'])\n for lib in reversed(data.libraries):\n if lib['type'] == 'depletion_chain':\n chain_file = lib['path']\n break\n if chain_file is not None:\n config['chain_file'] = chain_file\n\n return config\n\n\nconfig = _default_config()\n", "path": "openmc/config.py"}]} | 1,622 | 268 |
gh_patches_debug_3701 | rasdani/github-patches | git_diff | huggingface__transformers-10531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in deberta_v2/__init__.py
https://github.com/huggingface/transformers/blob/c503a1c15ec1b11e69a3eaaf06edfa87c05a2849/src/transformers/models/deberta_v2/__init__.py#L31
Should be '' DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST ''.
</issue>
<code>
[start of src/transformers/models/deberta_v2/__init__.py]
1 # flake8: noqa
2 # There's no way to ignore "F401 '...' imported but unused" warnings in this
3 # module, but to preserve other warnings. So, don't check this module at all.
4
5 # Copyright 2020 The HuggingFace Team. All rights reserved.
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18
19 from typing import TYPE_CHECKING
20
21 from ...file_utils import _BaseLazyModule, is_torch_available
22
23
24 _import_structure = {
25 "configuration_deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
26 "tokenization_deberta_v2": ["DebertaV2Tokenizer"],
27 }
28
29 if is_torch_available():
30 _import_structure["modeling_deberta_v2"] = [
31 "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
32 "DebertaV2ForSequenceClassification",
33 "DebertaV2Model",
34 "DebertaV2ForMaskedLM",
35 "DebertaV2PreTrainedModel",
36 "DebertaV2ForTokenClassification",
37 "DebertaV2ForQuestionAnswering",
38 ]
39
40
41 if TYPE_CHECKING:
42 from .configuration_deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config
43 from .tokenization_deberta_v2 import DebertaV2Tokenizer
44
45 if is_torch_available():
46 from .modeling_deberta_v2 import (
47 DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
48 DebertaV2ForMaskedLM,
49 DebertaV2ForQuestionAnswering,
50 DebertaV2ForSequenceClassification,
51 DebertaV2ForTokenClassification,
52 DebertaV2Model,
53 DebertaV2PreTrainedModel,
54 )
55
56 else:
57 import importlib
58 import os
59 import sys
60
61 class _LazyModule(_BaseLazyModule):
62 """
63 Module class that surfaces all objects but only performs associated imports when the objects are requested.
64 """
65
66 __file__ = globals()["__file__"]
67 __path__ = [os.path.dirname(__file__)]
68
69 def _get_module(self, module_name: str):
70 return importlib.import_module("." + module_name, self.__name__)
71
72 sys.modules[__name__] = _LazyModule(__name__, _import_structure)
73
[end of src/transformers/models/deberta_v2/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/transformers/models/deberta_v2/__init__.py b/src/transformers/models/deberta_v2/__init__.py
--- a/src/transformers/models/deberta_v2/__init__.py
+++ b/src/transformers/models/deberta_v2/__init__.py
@@ -28,7 +28,7 @@
if is_torch_available():
_import_structure["modeling_deberta_v2"] = [
- "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaV2ForSequenceClassification",
"DebertaV2Model",
"DebertaV2ForMaskedLM",
| {"golden_diff": "diff --git a/src/transformers/models/deberta_v2/__init__.py b/src/transformers/models/deberta_v2/__init__.py\n--- a/src/transformers/models/deberta_v2/__init__.py\n+++ b/src/transformers/models/deberta_v2/__init__.py\n@@ -28,7 +28,7 @@\n \n if is_torch_available():\n _import_structure[\"modeling_deberta_v2\"] = [\n- \"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST\",\n+ \"DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST\",\n \"DebertaV2ForSequenceClassification\",\n \"DebertaV2Model\",\n \"DebertaV2ForMaskedLM\",\n", "issue": "Typo in deberta_v2/__init__.py\nhttps://github.com/huggingface/transformers/blob/c503a1c15ec1b11e69a3eaaf06edfa87c05a2849/src/transformers/models/deberta_v2/__init__.py#L31\r\nShould be '' DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST ''.\n", "before_files": [{"content": "# flake8: noqa\n# There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n# module, but to preserve other warnings. So, don't check this module at all.\n\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import TYPE_CHECKING\n\nfrom ...file_utils import _BaseLazyModule, is_torch_available\n\n\n_import_structure = {\n \"configuration_deberta_v2\": [\"DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP\", \"DebertaV2Config\"],\n \"tokenization_deberta_v2\": [\"DebertaV2Tokenizer\"],\n}\n\nif is_torch_available():\n _import_structure[\"modeling_deberta_v2\"] = [\n \"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST\",\n \"DebertaV2ForSequenceClassification\",\n \"DebertaV2Model\",\n \"DebertaV2ForMaskedLM\",\n \"DebertaV2PreTrainedModel\",\n \"DebertaV2ForTokenClassification\",\n \"DebertaV2ForQuestionAnswering\",\n ]\n\n\nif TYPE_CHECKING:\n from .configuration_deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config\n from .tokenization_deberta_v2 import DebertaV2Tokenizer\n\n if is_torch_available():\n from .modeling_deberta_v2 import (\n DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,\n DebertaV2ForMaskedLM,\n DebertaV2ForQuestionAnswering,\n DebertaV2ForSequenceClassification,\n DebertaV2ForTokenClassification,\n DebertaV2Model,\n DebertaV2PreTrainedModel,\n )\n\nelse:\n import importlib\n import os\n import sys\n\n class _LazyModule(_BaseLazyModule):\n \"\"\"\n Module class that surfaces all objects but only performs associated imports when the objects are requested.\n \"\"\"\n\n __file__ = globals()[\"__file__\"]\n __path__ = [os.path.dirname(__file__)]\n\n def _get_module(self, module_name: str):\n return importlib.import_module(\".\" + module_name, self.__name__)\n\n sys.modules[__name__] = _LazyModule(__name__, _import_structure)\n", "path": "src/transformers/models/deberta_v2/__init__.py"}]} | 1,424 | 163 |
gh_patches_debug_12268 | rasdani/github-patches | git_diff | ManimCommunity__manim-1363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Suggestion to improve user experience w.r.t InCodeTexTemplate example
## Enhancement proposal
At present on macOS (with MacTex latest or even older installations) if you run InCodeTexTemplate from example_scenes/customtex.py it would fail for quite a lot of people with the following error

As you can see that I have version 2.11. The error message suggests having version 2.4 or greater.
Unfortunately here the issue is not because of the version rather it is because of `ghostscript` installation which is required by `dvisvgm`. Interesting I do have the `ghostscript` installed on my machine yet `dvisvgm` is not able to see.
In order for `dvisvgm` to see it, I have to make sure that a certain environment variable is set. See image below:

After this, the `example_scenes/customtex.py` would work fine.
I have described the resolution (which could be classified as an installation issue) in case others are facing it. The issue with Ghostscript and mactex is not unique to manim hence should not be considered a bug related to manim. That said, the error message is a bit misleading and handling errors properly is always a challenging problem in s/w engineering.
That said, my suggestion here is to modify the `example_scenes/customtex.py` to not advertise the `pdf` output. Here is the snippet from the example:
```
class InCodeTexTemplate(Scene):
"""This example scene demonstrates how to modify the tex template
for a particular scene from the code for the scene itself.
"""
def construct(self):
# Create a new template
myTemplate = TexTemplate()
# Add packages to the template
myTemplate.add_to_preamble(r"\usepackage{esvect}")
# Set the compiler and output format (default: latex and .dvi)
# possible tex compilers: "latex", "pdflatex", "xelatex", "lualatex", "luatex"
# possible output formats: ".dvi", ".pdf", and ".xdv"
myTemplate.tex_compiler = "pdflatex"
myTemplate.output_format = ".pdf"
# To use this template in a Tex() or MathTex() object
# use the keyword argument tex_template
text = MathTex(r"\vv{vb}", tex_template=myTemplate)
self.play(Write(text))
self.wait(1)
```
Here is my rationale for not advertising the `pdf` output -
a) The example is first and foremost about the ability to add imports for additional packages in the preamble of tex document
b) Some of the imports indeed require a custom compiler so it is okay to suggest that you could change the compiler
c) Forgive me for making a bit biased opinion here as I may be ignoring some use case but I am not able to see the use of generating the `pdf` as the output as ultimately the goal is to get the `svg` anyways.
Usage of `pdf` will invite issues with `ghostscript` described above leading to a bad first user experience w.r.t usage of the feature of `TexTemplate`. I think it is a great feature to add preamble dynamically!
I have been using latex for many years and even I got stumbled by this Ghostscript issue and had to manually print the output of `dvisvgm` to figure it therefore I have a reason to believe that the beginners would have a bad experience because of this.
IMHO, modifying the example would be a better thing to do given the audience and primary functionality of manim.
</issue>
<code>
[start of example_scenes/customtex.py]
1 from manim import *
2
3
4 class TexTemplateFromCLI(Scene):
5 """This scene uses a custom TexTemplate file.
6 The path of the TexTemplate _must_ be passed with the command line
7 argument `--tex_template <path to template>`.
8 For this scene, you can use the custom_template.tex file next to it.
9 This scene will fail to render if a tex_template.tex that doesn't
10 import esvect is passed, and will throw a LaTeX error in that case.
11 """
12
13 def construct(self):
14 text = MathTex(r"\vv{vb}")
15 self.play(Write(text))
16 self.wait(1)
17
18
19 class InCodeTexTemplate(Scene):
20 """This example scene demonstrates how to modify the tex template
21 for a particular scene from the code for the scene itself.
22 """
23
24 def construct(self):
25 # Create a new template
26 myTemplate = TexTemplate()
27
28 # Add packages to the template
29 myTemplate.add_to_preamble(r"\usepackage{esvect}")
30
31 # Set the compiler and output format (default: latex and .dvi)
32 # possible tex compilers: "latex", "pdflatex", "xelatex", "lualatex", "luatex"
33 # possible output formats: ".dvi", ".pdf", and ".xdv"
34 myTemplate.tex_compiler = "pdflatex"
35 myTemplate.output_format = ".pdf"
36
37 # To use this template in a Tex() or MathTex() object
38 # use the keyword argument tex_template
39 text = MathTex(r"\vv{vb}", tex_template=myTemplate)
40 self.play(Write(text))
41 self.wait(1)
42
[end of example_scenes/customtex.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/example_scenes/customtex.py b/example_scenes/customtex.py
--- a/example_scenes/customtex.py
+++ b/example_scenes/customtex.py
@@ -31,8 +31,8 @@
# Set the compiler and output format (default: latex and .dvi)
# possible tex compilers: "latex", "pdflatex", "xelatex", "lualatex", "luatex"
# possible output formats: ".dvi", ".pdf", and ".xdv"
- myTemplate.tex_compiler = "pdflatex"
- myTemplate.output_format = ".pdf"
+ myTemplate.tex_compiler = "latex"
+ myTemplate.output_format = ".dvi"
# To use this template in a Tex() or MathTex() object
# use the keyword argument tex_template
| {"golden_diff": "diff --git a/example_scenes/customtex.py b/example_scenes/customtex.py\n--- a/example_scenes/customtex.py\n+++ b/example_scenes/customtex.py\n@@ -31,8 +31,8 @@\n # Set the compiler and output format (default: latex and .dvi)\n # possible tex compilers: \"latex\", \"pdflatex\", \"xelatex\", \"lualatex\", \"luatex\"\n # possible output formats: \".dvi\", \".pdf\", and \".xdv\"\n- myTemplate.tex_compiler = \"pdflatex\"\n- myTemplate.output_format = \".pdf\"\n+ myTemplate.tex_compiler = \"latex\"\n+ myTemplate.output_format = \".dvi\"\n \n # To use this template in a Tex() or MathTex() object\n # use the keyword argument tex_template\n", "issue": "Suggestion to improve user experience w.r.t InCodeTexTemplate example\n## Enhancement proposal\r\n\r\nAt present on macOS (with MacTex latest or even older installations) if you run InCodeTexTemplate from example_scenes/customtex.py it would fail for quite a lot of people with the following error\r\n\r\n\r\n\r\nAs you can see that I have version 2.11. The error message suggests having version 2.4 or greater.\r\n\r\nUnfortunately here the issue is not because of the version rather it is because of `ghostscript` installation which is required by `dvisvgm`. Interesting I do have the `ghostscript` installed on my machine yet `dvisvgm` is not able to see.\r\n\r\nIn order for `dvisvgm` to see it, I have to make sure that a certain environment variable is set. See image below:\r\n\r\n\r\n\r\nAfter this, the `example_scenes/customtex.py` would work fine.\r\n\r\nI have described the resolution (which could be classified as an installation issue) in case others are facing it. The issue with Ghostscript and mactex is not unique to manim hence should not be considered a bug related to manim. That said, the error message is a bit misleading and handling errors properly is always a challenging problem in s/w engineering.\r\n\r\nThat said, my suggestion here is to modify the `example_scenes/customtex.py` to not advertise the `pdf` output. 
Here is the snippet from the example:\r\n\r\n```\r\nclass InCodeTexTemplate(Scene):\r\n \"\"\"This example scene demonstrates how to modify the tex template\r\n for a particular scene from the code for the scene itself.\r\n \"\"\"\r\n\r\n def construct(self):\r\n # Create a new template\r\n myTemplate = TexTemplate()\r\n\r\n # Add packages to the template\r\n myTemplate.add_to_preamble(r\"\\usepackage{esvect}\")\r\n\r\n # Set the compiler and output format (default: latex and .dvi)\r\n # possible tex compilers: \"latex\", \"pdflatex\", \"xelatex\", \"lualatex\", \"luatex\"\r\n # possible output formats: \".dvi\", \".pdf\", and \".xdv\"\r\n myTemplate.tex_compiler = \"pdflatex\"\r\n myTemplate.output_format = \".pdf\"\r\n\r\n # To use this template in a Tex() or MathTex() object\r\n # use the keyword argument tex_template\r\n text = MathTex(r\"\\vv{vb}\", tex_template=myTemplate)\r\n self.play(Write(text))\r\n self.wait(1)\r\n```\r\n\r\nHere is my rationale for not advertising the `pdf` output -\r\n\r\na) The example is first and foremost about the ability to add imports for additional packages in the preamble of tex document\r\n\r\nb) Some of the imports indeed require a custom compiler so it is okay to suggest that you could change the compiler\r\n\r\nc) Forgive me for making a bit biased opinion here as I may be ignoring some use case but I am not able to see the use of generating the `pdf` as the output as ultimately the goal is to get the `svg` anyways.\r\n\r\nUsage of `pdf` will invite issues with `ghostscript` described above leading to a bad first user experience w.r.t usage of the feature of `TexTemplate`. I think it is a great feature to add preamble dynamically!\r\n\r\nI have been using latex for many years and even I got stumbled by this Ghostscript issue and had to manually print the output of `dvisvgm` to figure it therefore I have a reason to believe that the beginners would have a bad experience because of this.\r\n\r\nIMHO, modifying the example would be a better thing to do given the audience and primary functionality of manim.\r\n\r\n\n", "before_files": [{"content": "from manim import *\n\n\nclass TexTemplateFromCLI(Scene):\n \"\"\"This scene uses a custom TexTemplate file.\n The path of the TexTemplate _must_ be passed with the command line\n argument `--tex_template <path to template>`.\n For this scene, you can use the custom_template.tex file next to it.\n This scene will fail to render if a tex_template.tex that doesn't\n import esvect is passed, and will throw a LaTeX error in that case.\n \"\"\"\n\n def construct(self):\n text = MathTex(r\"\\vv{vb}\")\n self.play(Write(text))\n self.wait(1)\n\n\nclass InCodeTexTemplate(Scene):\n \"\"\"This example scene demonstrates how to modify the tex template\n for a particular scene from the code for the scene itself.\n \"\"\"\n\n def construct(self):\n # Create a new template\n myTemplate = TexTemplate()\n\n # Add packages to the template\n myTemplate.add_to_preamble(r\"\\usepackage{esvect}\")\n\n # Set the compiler and output format (default: latex and .dvi)\n # possible tex compilers: \"latex\", \"pdflatex\", \"xelatex\", \"lualatex\", \"luatex\"\n # possible output formats: \".dvi\", \".pdf\", and \".xdv\"\n myTemplate.tex_compiler = \"pdflatex\"\n myTemplate.output_format = \".pdf\"\n\n # To use this template in a Tex() or MathTex() object\n # use the keyword argument tex_template\n text = MathTex(r\"\\vv{vb}\", tex_template=myTemplate)\n self.play(Write(text))\n self.wait(1)\n", "path": "example_scenes/customtex.py"}]} 
| 1,859 | 185 |
gh_patches_debug_17781 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1428 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SD: legislators have no email addresses
Example legislator: http://sdlegislature.gov/Legislators/Legislators/MemberDetail.aspx?Session=2017&Member=1125&Cleaned=True
Looking at him in openstates, his phone number is there but his email address is missing: https://openstates.org/api/v1/legislators/SDL000286/
The data is there, the scraper just needs to be updated to capture the email property.
Thanks!
</issue>
<code>
[start of openstates/sd/legislators.py]
1 import re
2
3 from billy.scrape import NoDataForPeriod
4 from billy.scrape.legislators import LegislatorScraper, Legislator
5
6 import lxml.html
7
8
9 class SDLegislatorScraper(LegislatorScraper):
10 jurisdiction = 'sd'
11 latest_only = True
12
13 def scrape(self, chamber, term):
14 url = 'http://www.sdlegislature.gov/Legislators/default.aspx' \
15 '?CurrentSession=True'
16
17 if chamber == 'upper':
18 search = 'Senate Members'
19 else:
20 search = 'House Members'
21
22 page = self.get(url).text
23 page = lxml.html.fromstring(page)
24 page.make_links_absolute(url)
25
26 for link in page.xpath("//h4[text()='{}']/../div/a".format(search)):
27 name = link.text.strip()
28
29 self.scrape_legislator(name, chamber, term,
30 '{}&Cleaned=True'.format(
31 link.attrib['href']))
32
33 def scrape_legislator(self, name, chamber, term, url):
34 page = self.get(url).text
35 page = lxml.html.fromstring(page)
36 page.make_links_absolute(url)
37
38 party = page.xpath("string(//span[contains(@id, 'Party')])")
39 party = party.strip()
40
41 if party == 'Democrat':
42 party = 'Democratic'
43
44 district = page.xpath("string(//span[contains(@id, 'District')])")
45 district = district.strip().lstrip('0')
46
47 occupation = page.xpath(
48 "string(//span[contains(@id, 'Occupation')])")
49 occupation = occupation.strip()
50
51 (photo_url, ) = page.xpath('//img[contains(@id, "_imgMember")]/@src')
52
53 office_phone = page.xpath(
54 "string(//span[contains(@id, 'CapitolPhone')])").strip()
55
56 email = None
57
58 email_link = page.xpath('//a[@id="lnkMail"]')
59
60 if email_link:
61 email = email_link[0].attrib['href'].split(":")[1]
62
63 legislator = Legislator(term, chamber, district, name,
64 party=party,
65 occupation=occupation,
66 photo_url=photo_url,
67 url=url)
68 kwargs = {}
69 if office_phone.strip() != "":
70 kwargs['phone'] = office_phone
71
72 if email and email.strip() != "":
73 # South Dakota protects their email addresses from scraping using
74 # some JS code that runs on page load
75 # Until that code is run, all their email addresses are listed as
76 # *@example.com; so, fix this
77 kwargs['email'] = re.sub(r'@example\.com$', '@sdlegislature.gov', email)
78
79 if kwargs:
80 legislator.add_office('capitol', 'Capitol Office', **kwargs)
81
82 home_address = [
83 x.strip() for x in
84 page.xpath('//td/span[contains(@id, "HomeAddress")]/text()')
85 if x.strip()
86 ]
87 if home_address:
88 home_address = "\n".join(home_address)
89 home_phone = page.xpath(
90 "string(//span[contains(@id, 'HomePhone')])").strip()
91 legislator.add_office(
92 'district',
93 'District Office',
94 address=home_address,
95 phone=home_phone or None
96 )
97
98 legislator.add_source(url)
99
100 comm_url = page.xpath("//a[. = 'Committees']")[0].attrib['href']
101 self.scrape_committees(legislator, comm_url)
102
103 self.save_legislator(legislator)
104
105 def scrape_committees(self, leg, url):
106 page = self.get(url).text
107 page = lxml.html.fromstring(page)
108 leg.add_source(url)
109
110 term = leg['roles'][0]['term']
111
112 for link in page.xpath("//a[contains(@href, 'CommitteeMem')]"):
113 comm = link.text.strip()
114
115 role = link.xpath('../following-sibling::td')[0]\
116 .text_content().lower()
117
118 if comm.startswith('Joint'):
119 chamber = 'joint'
120 else:
121 chamber = leg['roles'][0]['chamber']
122
123 leg.add_role('committee member', term=term, chamber=chamber,
124 committee=comm, position=role)
125
[end of openstates/sd/legislators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/sd/legislators.py b/openstates/sd/legislators.py
--- a/openstates/sd/legislators.py
+++ b/openstates/sd/legislators.py
@@ -69,12 +69,14 @@
if office_phone.strip() != "":
kwargs['phone'] = office_phone
- if email and email.strip() != "":
- # South Dakota protects their email addresses from scraping using
- # some JS code that runs on page load
- # Until that code is run, all their email addresses are listed as
- # *@example.com; so, fix this
- kwargs['email'] = re.sub(r'@example\.com$', '@sdlegislature.gov', email)
+ # SD is hiding their email addresses entirely in JS now, so
+ # search through <script> blocks looking for them
+ for script in page.xpath('//script'):
+ if script.text:
+ match = re.search(r'([\w.]+@sdlegislature\.gov)', script.text)
+ if match:
+ kwargs['email'] = match.group(0)
+ break
if kwargs:
legislator.add_office('capitol', 'Capitol Office', **kwargs)
| {"golden_diff": "diff --git a/openstates/sd/legislators.py b/openstates/sd/legislators.py\n--- a/openstates/sd/legislators.py\n+++ b/openstates/sd/legislators.py\n@@ -69,12 +69,14 @@\n if office_phone.strip() != \"\":\n kwargs['phone'] = office_phone\n \n- if email and email.strip() != \"\":\n- # South Dakota protects their email addresses from scraping using\n- # some JS code that runs on page load\n- # Until that code is run, all their email addresses are listed as\n- # *@example.com; so, fix this\n- kwargs['email'] = re.sub(r'@example\\.com$', '@sdlegislature.gov', email)\n+ # SD is hiding their email addresses entirely in JS now, so\n+ # search through <script> blocks looking for them\n+ for script in page.xpath('//script'):\n+ if script.text:\n+ match = re.search(r'([\\w.]+@sdlegislature\\.gov)', script.text)\n+ if match:\n+ kwargs['email'] = match.group(0)\n+ break\n \n if kwargs:\n legislator.add_office('capitol', 'Capitol Office', **kwargs)\n", "issue": "SD: legislators have no email addresses\nExample legislator: http://sdlegislature.gov/Legislators/Legislators/MemberDetail.aspx?Session=2017&Member=1125&Cleaned=True\r\n\r\nLooking at him in openstates, his phone number is there but his email address is missing: https://openstates.org/api/v1/legislators/SDL000286/\r\n\r\nThe data is there, the scraper just needs to be updated to capture the email property.\r\n\r\nThanks!\n", "before_files": [{"content": "import re\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\n\nimport lxml.html\n\n\nclass SDLegislatorScraper(LegislatorScraper):\n jurisdiction = 'sd'\n latest_only = True\n\n def scrape(self, chamber, term):\n url = 'http://www.sdlegislature.gov/Legislators/default.aspx' \\\n '?CurrentSession=True'\n\n if chamber == 'upper':\n search = 'Senate Members'\n else:\n search = 'House Members'\n\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//h4[text()='{}']/../div/a\".format(search)):\n name = link.text.strip()\n\n self.scrape_legislator(name, chamber, term,\n '{}&Cleaned=True'.format(\n link.attrib['href']))\n\n def scrape_legislator(self, name, chamber, term, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n party = page.xpath(\"string(//span[contains(@id, 'Party')])\")\n party = party.strip()\n\n if party == 'Democrat':\n party = 'Democratic'\n\n district = page.xpath(\"string(//span[contains(@id, 'District')])\")\n district = district.strip().lstrip('0')\n\n occupation = page.xpath(\n \"string(//span[contains(@id, 'Occupation')])\")\n occupation = occupation.strip()\n\n (photo_url, ) = page.xpath('//img[contains(@id, \"_imgMember\")]/@src')\n\n office_phone = page.xpath(\n \"string(//span[contains(@id, 'CapitolPhone')])\").strip()\n\n email = None\n\n email_link = page.xpath('//a[@id=\"lnkMail\"]')\n\n if email_link:\n email = email_link[0].attrib['href'].split(\":\")[1]\n\n legislator = Legislator(term, chamber, district, name,\n party=party,\n occupation=occupation,\n photo_url=photo_url,\n url=url)\n kwargs = {}\n if office_phone.strip() != \"\":\n kwargs['phone'] = office_phone\n\n if email and email.strip() != \"\":\n # South Dakota protects their email addresses from scraping using\n # some JS code that runs on page load\n # Until that code is run, all their email addresses are listed as\n # *@example.com; so, fix this\n kwargs['email'] = re.sub(r'@example\\.com$', 
'@sdlegislature.gov', email)\n\n if kwargs:\n legislator.add_office('capitol', 'Capitol Office', **kwargs)\n\n home_address = [\n x.strip() for x in\n page.xpath('//td/span[contains(@id, \"HomeAddress\")]/text()')\n if x.strip()\n ]\n if home_address:\n home_address = \"\\n\".join(home_address)\n home_phone = page.xpath(\n \"string(//span[contains(@id, 'HomePhone')])\").strip()\n legislator.add_office(\n 'district',\n 'District Office',\n address=home_address,\n phone=home_phone or None\n )\n\n legislator.add_source(url)\n\n comm_url = page.xpath(\"//a[. = 'Committees']\")[0].attrib['href']\n self.scrape_committees(legislator, comm_url)\n\n self.save_legislator(legislator)\n\n def scrape_committees(self, leg, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n leg.add_source(url)\n\n term = leg['roles'][0]['term']\n\n for link in page.xpath(\"//a[contains(@href, 'CommitteeMem')]\"):\n comm = link.text.strip()\n\n role = link.xpath('../following-sibling::td')[0]\\\n .text_content().lower()\n\n if comm.startswith('Joint'):\n chamber = 'joint'\n else:\n chamber = leg['roles'][0]['chamber']\n\n leg.add_role('committee member', term=term, chamber=chamber,\n committee=comm, position=role)\n", "path": "openstates/sd/legislators.py"}]} | 1,856 | 279 |
gh_patches_debug_6207 | rasdani/github-patches | git_diff | getsentry__sentry-23499 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError /api/0/organizations/{organization_slug}/sentry-apps/
## Important Details
How are you running Sentry?
<!-- Please pick one of the following -->
On-Premise w/ Docker, Sentry 21.1.0486d790, based on https://github.com/Rungutan/sentry-performance-monitoring
## Description
Opening of these sub items in "Organization" settings fails:
* Audit Log
* Integrations
* Developer settings
This seems to be the resulting issue in the "Internal" Sentry project:
```
AttributeError: 'NoneType' object has no attribute 'get_allowed_origins'
File "sentry/api/base.py", line 124, in handle_exception
response = super(Endpoint, self).handle_exception(exc)
File "rest_framework/views.py", line 449, in handle_exception
self.raise_uncaught_exception(exc)
File "sentry/api/base.py", line 237, in dispatch
response = handler(request, *args, **kwargs)
File "sentry/api/bases/sentryapps.py", line 59, in wrapped
return func(self, *args, **kwargs)
File "sentry/api/endpoints/organization_sentry_apps.py", line 19, in get
on_results=lambda x: serialize(x, request.user, access=request.access),
File "sentry/api/base.py", line 330, in paginate
results = on_results(cursor_result.results)
File "sentry/api/endpoints/organization_sentry_apps.py", line 19, in <lambda>
on_results=lambda x: serialize(x, request.user, access=request.access),
File "sentry/api/serializers/base.py", line 45, in serialize
return [serializer(o, attrs=attrs.get(o, {}), user=user, **kwargs) for o in objects]
File "sentry/api/serializers/base.py", line 45, in <listcomp>
return [serializer(o, attrs=attrs.get(o, {}), user=user, **kwargs) for o in objects]
File "sentry/api/serializers/base.py", line 60, in __call__
return self.serialize(obj, attrs, user, **kwargs)
File "sentry/api/serializers/models/sentry_app.py", line 31, in serialize
"allowedOrigins": obj.application.get_allowed_origins(),
```
Last SQL before exception
```
SELECT "sentry_organization"."id", "sentry_organization"."name", "sentry_organization"."slug", "sentry_organization"."status", "sentry_organization"."date_added", "sentry_organization"."default_role", "sentry_organization"."flags" FROM "sentry_organization" WHERE "sentry_organization"."id" = %s
```
### What you expected to happen
Menus should work.
### Possible Solution
[If you have an idea on how this could be solved include that detail here.]
</issue>
<code>
[start of src/sentry/api/endpoints/organization_sentry_apps.py]
1 from sentry.api.bases import OrganizationEndpoint, add_integration_platform_metric_tag
2 from sentry.api.paginator import OffsetPaginator
3 from sentry.api.serializers import serialize
4 from sentry.models import SentryApp
5
6
7 class OrganizationSentryAppsEndpoint(OrganizationEndpoint):
8 @add_integration_platform_metric_tag
9 def get(self, request, organization):
10 queryset = SentryApp.objects.filter(owner=organization)
11
12 return self.paginate(
13 request=request,
14 queryset=queryset,
15 order_by="-date_added",
16 paginator_cls=OffsetPaginator,
17 on_results=lambda x: serialize(x, request.user, access=request.access),
18 )
19
[end of src/sentry/api/endpoints/organization_sentry_apps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/api/endpoints/organization_sentry_apps.py b/src/sentry/api/endpoints/organization_sentry_apps.py
--- a/src/sentry/api/endpoints/organization_sentry_apps.py
+++ b/src/sentry/api/endpoints/organization_sentry_apps.py
@@ -7,7 +7,7 @@
class OrganizationSentryAppsEndpoint(OrganizationEndpoint):
@add_integration_platform_metric_tag
def get(self, request, organization):
- queryset = SentryApp.objects.filter(owner=organization)
+ queryset = SentryApp.objects.filter(owner=organization, application__isnull=False)
return self.paginate(
request=request,
| {"golden_diff": "diff --git a/src/sentry/api/endpoints/organization_sentry_apps.py b/src/sentry/api/endpoints/organization_sentry_apps.py\n--- a/src/sentry/api/endpoints/organization_sentry_apps.py\n+++ b/src/sentry/api/endpoints/organization_sentry_apps.py\n@@ -7,7 +7,7 @@\n class OrganizationSentryAppsEndpoint(OrganizationEndpoint):\n @add_integration_platform_metric_tag\n def get(self, request, organization):\n- queryset = SentryApp.objects.filter(owner=organization)\n+ queryset = SentryApp.objects.filter(owner=organization, application__isnull=False)\n \n return self.paginate(\n request=request,\n", "issue": "AttributeError /api/0/organizations/{organization_slug}/sentry-apps/\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n<!-- Please pick one of the following -->\r\nOn-Premise w/ Docker, Sentry 21.1.0486d790, based on https://github.com/Rungutan/sentry-performance-monitoring\r\n\r\n## Description\r\n\r\nOpening of these sub items in \"Organization\" settings fails:\r\n* Audit Log\r\n* Integrations\r\n* Developer settings\r\n\r\nThis seems to be the resulting issue in the \"Internal\" Sentry project:\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'get_allowed_origins'\r\n File \"sentry/api/base.py\", line 124, in handle_exception\r\n response = super(Endpoint, self).handle_exception(exc)\r\n File \"rest_framework/views.py\", line 449, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"sentry/api/base.py\", line 237, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n File \"sentry/api/bases/sentryapps.py\", line 59, in wrapped\r\n return func(self, *args, **kwargs)\r\n File \"sentry/api/endpoints/organization_sentry_apps.py\", line 19, in get\r\n on_results=lambda x: serialize(x, request.user, access=request.access),\r\n File \"sentry/api/base.py\", line 330, in paginate\r\n results = on_results(cursor_result.results)\r\n File \"sentry/api/endpoints/organization_sentry_apps.py\", line 19, in <lambda>\r\n on_results=lambda x: serialize(x, request.user, access=request.access),\r\n File \"sentry/api/serializers/base.py\", line 45, in serialize\r\n return [serializer(o, attrs=attrs.get(o, {}), user=user, **kwargs) for o in objects]\r\n File \"sentry/api/serializers/base.py\", line 45, in <listcomp>\r\n return [serializer(o, attrs=attrs.get(o, {}), user=user, **kwargs) for o in objects]\r\n File \"sentry/api/serializers/base.py\", line 60, in __call__\r\n return self.serialize(obj, attrs, user, **kwargs)\r\n File \"sentry/api/serializers/models/sentry_app.py\", line 31, in serialize\r\n \"allowedOrigins\": obj.application.get_allowed_origins(),\r\n```\r\n\r\nLast SQL before exception\r\n\r\n```\r\nSELECT \"sentry_organization\".\"id\", \"sentry_organization\".\"name\", \"sentry_organization\".\"slug\", \"sentry_organization\".\"status\", \"sentry_organization\".\"date_added\", \"sentry_organization\".\"default_role\", \"sentry_organization\".\"flags\" FROM \"sentry_organization\" WHERE \"sentry_organization\".\"id\" = %s\r\n```\r\n\r\n### What you expected to happen\r\nMenus should work.\r\n\r\n### Possible Solution\r\n\r\n[If you have an idea on how this could be solved include that detail here.]\r\n\n", "before_files": [{"content": "from sentry.api.bases import OrganizationEndpoint, add_integration_platform_metric_tag\nfrom sentry.api.paginator import OffsetPaginator\nfrom sentry.api.serializers import serialize\nfrom sentry.models import SentryApp\n\n\nclass OrganizationSentryAppsEndpoint(OrganizationEndpoint):\n 
@add_integration_platform_metric_tag\n def get(self, request, organization):\n queryset = SentryApp.objects.filter(owner=organization)\n\n return self.paginate(\n request=request,\n queryset=queryset,\n order_by=\"-date_added\",\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user, access=request.access),\n )\n", "path": "src/sentry/api/endpoints/organization_sentry_apps.py"}]} | 1,348 | 139 |
gh_patches_debug_16973 | rasdani/github-patches | git_diff | coala__coala-1081 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
coala-ci doesn't use fail_aqcuire_settings
</issue>
<code>
[start of coalib/coala_main.py]
1 from itertools import chain
2 from pyprint.ConsolePrinter import ConsolePrinter
3 import os
4
5 from coalib.output.printers.LogPrinter import LogPrinter
6 from coalib.processes.Processing import execute_section
7 from coalib.results.HiddenResult import HiddenResult
8 from coalib.settings.ConfigurationGathering import gather_configuration
9 from coalib.misc.Exceptions import get_exitcode
10 from coalib.bears.BEAR_KIND import BEAR_KIND
11 from coalib.collecting.Collectors import collect_bears
12 from coalib.output.Tagging import tag_results, delete_tagged_results
13
14
15 do_nothing = lambda *args: True
16
17
18 def run_coala(log_printer=None,
19 print_results=do_nothing,
20 acquire_settings=do_nothing,
21 print_section_beginning=do_nothing,
22 nothing_done=do_nothing,
23 show_bears=do_nothing):
24 """
25 This is a main method that should be usable for almost all purposes and
26 reduces executing coala to one function call.
27
28 :param log_printer: A LogPrinter object to use for logging.
29 :param print_results: A callback that takes a LogPrinter, a
30 section, a list of results to be printed,
31 the file dict and the mutable file diff
32 dict.
33 :param acquire_settings: The method to use for requesting settings.
34 It will get a parameter which is a
35 dictionary with the settings name as key
36 and a list containing a description in [0]
37 and the names of the bears who need this
38 setting in all following indexes.
39 :param print_section_beginning: A callback that will be called with a
40 section name string whenever analysis of a
41 new section is started.
42 :param nothing_done: A callback that will be called without
43 parameters if nothing was done.
44 :param show_bears: A callback that will be called with first
45 a list of local bears, second a list of
46 global bears to output them. A third bool
47 parameter may be used to indicate if a
48 compressed output (True) or a normal output
49 (False) is desired, the former being used
50 for showing all available bears to the user.
51 :return: A dictionary containing a list of results
52 for all analyzed sections as key.
53 """
54 log_printer = log_printer or LogPrinter(ConsolePrinter())
55
56 exitcode = 0
57 results = None
58 try:
59 yielded_results = False
60 did_nothing = True
61 (sections,
62 local_bears,
63 global_bears,
64 targets) = gather_configuration(acquire_settings, log_printer)
65
66 tag = str(sections['default'].get('tag', None))
67 dtag = str(sections['default'].get('dtag', None))
68
69 show_all_bears = bool(sections['default'].get('show_all_bears', False))
70 show_bears_ = bool(sections["default"].get("show_bears", "False"))
71 if show_all_bears:
72 show_bears_ = True
73 for section in sections:
74 bear_dirs = sections[section].bear_dirs()
75 local_bears[section] = collect_bears(bear_dirs,
76 ["**"],
77 [BEAR_KIND.LOCAL],
78 log_printer)
79 global_bears[section] = collect_bears(bear_dirs,
80 ["**"],
81 [BEAR_KIND.GLOBAL],
82 log_printer)
83
84 if dtag != "None":
85 delete_tagged_results(
86 dtag,
87 os.path.abspath(str(sections["default"].get("config"))))
88
89 if show_bears_:
90 show_bears(local_bears,
91 global_bears,
92 show_all_bears)
93 did_nothing = False
94 else:
95 results = {}
96 for section_name in sections:
97 section = sections[section_name]
98 if not section.is_enabled(targets):
99 continue
100
101 print_section_beginning(section)
102 section_result = execute_section(
103 section=section,
104 global_bear_list=global_bears[section_name],
105 local_bear_list=local_bears[section_name],
106 print_results=print_results,
107 log_printer=log_printer)
108 yielded_results = yielded_results or section_result[0]
109
110 results_for_section = []
111 for value in chain(section_result[1].values(),
112 section_result[2].values()):
113 if value is None:
114 continue
115
116 for result in value:
117 if not isinstance(result, HiddenResult):
118 results_for_section.append(result)
119
120 results[section_name] = results_for_section
121 did_nothing = False
122
123 if tag != "None":
124 tag_results(
125 tag,
126 os.path.abspath(str(sections["default"].get("config"))),
127 results)
128
129 if did_nothing:
130 nothing_done(log_printer)
131
132 if yielded_results:
133 exitcode = 1
134 except BaseException as exception: # pylint: disable=broad-except
135 exitcode = exitcode or get_exitcode(exception, log_printer)
136
137 return results, exitcode
138
[end of coalib/coala_main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/coala_main.py b/coalib/coala_main.py
--- a/coalib/coala_main.py
+++ b/coalib/coala_main.py
@@ -9,6 +9,7 @@
from coalib.misc.Exceptions import get_exitcode
from coalib.bears.BEAR_KIND import BEAR_KIND
from coalib.collecting.Collectors import collect_bears
+from coalib.output.Interactions import fail_acquire_settings
from coalib.output.Tagging import tag_results, delete_tagged_results
@@ -17,7 +18,7 @@
def run_coala(log_printer=None,
print_results=do_nothing,
- acquire_settings=do_nothing,
+ acquire_settings=fail_acquire_settings,
print_section_beginning=do_nothing,
nothing_done=do_nothing,
show_bears=do_nothing):
| {"golden_diff": "diff --git a/coalib/coala_main.py b/coalib/coala_main.py\n--- a/coalib/coala_main.py\n+++ b/coalib/coala_main.py\n@@ -9,6 +9,7 @@\n from coalib.misc.Exceptions import get_exitcode\n from coalib.bears.BEAR_KIND import BEAR_KIND\n from coalib.collecting.Collectors import collect_bears\n+from coalib.output.Interactions import fail_acquire_settings\n from coalib.output.Tagging import tag_results, delete_tagged_results\n \n \n@@ -17,7 +18,7 @@\n \n def run_coala(log_printer=None,\n print_results=do_nothing,\n- acquire_settings=do_nothing,\n+ acquire_settings=fail_acquire_settings,\n print_section_beginning=do_nothing,\n nothing_done=do_nothing,\n show_bears=do_nothing):\n", "issue": "coala-ci doesn't use fail_aqcuire_settings\n\n", "before_files": [{"content": "from itertools import chain\nfrom pyprint.ConsolePrinter import ConsolePrinter\nimport os\n\nfrom coalib.output.printers.LogPrinter import LogPrinter\nfrom coalib.processes.Processing import execute_section\nfrom coalib.results.HiddenResult import HiddenResult\nfrom coalib.settings.ConfigurationGathering import gather_configuration\nfrom coalib.misc.Exceptions import get_exitcode\nfrom coalib.bears.BEAR_KIND import BEAR_KIND\nfrom coalib.collecting.Collectors import collect_bears\nfrom coalib.output.Tagging import tag_results, delete_tagged_results\n\n\ndo_nothing = lambda *args: True\n\n\ndef run_coala(log_printer=None,\n print_results=do_nothing,\n acquire_settings=do_nothing,\n print_section_beginning=do_nothing,\n nothing_done=do_nothing,\n show_bears=do_nothing):\n \"\"\"\n This is a main method that should be usable for almost all purposes and\n reduces executing coala to one function call.\n\n :param log_printer: A LogPrinter object to use for logging.\n :param print_results: A callback that takes a LogPrinter, a\n section, a list of results to be printed,\n the file dict and the mutable file diff\n dict.\n :param acquire_settings: The method to use for requesting settings.\n It will get a parameter which is a\n dictionary with the settings name as key\n and a list containing a description in [0]\n and the names of the bears who need this\n setting in all following indexes.\n :param print_section_beginning: A callback that will be called with a\n section name string whenever analysis of a\n new section is started.\n :param nothing_done: A callback that will be called without\n parameters if nothing was done.\n :param show_bears: A callback that will be called with first\n a list of local bears, second a list of\n global bears to output them. 
A third bool\n parameter may be used to indicate if a\n compressed output (True) or a normal output\n (False) is desired, the former being used\n for showing all available bears to the user.\n :return: A dictionary containing a list of results\n for all analyzed sections as key.\n \"\"\"\n log_printer = log_printer or LogPrinter(ConsolePrinter())\n\n exitcode = 0\n results = None\n try:\n yielded_results = False\n did_nothing = True\n (sections,\n local_bears,\n global_bears,\n targets) = gather_configuration(acquire_settings, log_printer)\n\n tag = str(sections['default'].get('tag', None))\n dtag = str(sections['default'].get('dtag', None))\n\n show_all_bears = bool(sections['default'].get('show_all_bears', False))\n show_bears_ = bool(sections[\"default\"].get(\"show_bears\", \"False\"))\n if show_all_bears:\n show_bears_ = True\n for section in sections:\n bear_dirs = sections[section].bear_dirs()\n local_bears[section] = collect_bears(bear_dirs,\n [\"**\"],\n [BEAR_KIND.LOCAL],\n log_printer)\n global_bears[section] = collect_bears(bear_dirs,\n [\"**\"],\n [BEAR_KIND.GLOBAL],\n log_printer)\n\n if dtag != \"None\":\n delete_tagged_results(\n dtag,\n os.path.abspath(str(sections[\"default\"].get(\"config\"))))\n\n if show_bears_:\n show_bears(local_bears,\n global_bears,\n show_all_bears)\n did_nothing = False\n else:\n results = {}\n for section_name in sections:\n section = sections[section_name]\n if not section.is_enabled(targets):\n continue\n\n print_section_beginning(section)\n section_result = execute_section(\n section=section,\n global_bear_list=global_bears[section_name],\n local_bear_list=local_bears[section_name],\n print_results=print_results,\n log_printer=log_printer)\n yielded_results = yielded_results or section_result[0]\n\n results_for_section = []\n for value in chain(section_result[1].values(),\n section_result[2].values()):\n if value is None:\n continue\n\n for result in value:\n if not isinstance(result, HiddenResult):\n results_for_section.append(result)\n\n results[section_name] = results_for_section\n did_nothing = False\n\n if tag != \"None\":\n tag_results(\n tag,\n os.path.abspath(str(sections[\"default\"].get(\"config\"))),\n results)\n\n if did_nothing:\n nothing_done(log_printer)\n\n if yielded_results:\n exitcode = 1\n except BaseException as exception: # pylint: disable=broad-except\n exitcode = exitcode or get_exitcode(exception, log_printer)\n\n return results, exitcode\n", "path": "coalib/coala_main.py"}]} | 1,921 | 187 |
gh_patches_debug_7932 | rasdani/github-patches | git_diff | chainer__chainer-240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type check to Parameter function
Related to #123
</issue>
<code>
[start of chainer/functions/parameter.py]
1 import numpy
2
3 from chainer import function
4
5
6 class Parameter(function.Function):
7
8 """Function that outputs its weight array.
9
10 This is a parameterized function that takes no input and returns a variable
11 holding a shallow copy of the parameter array.
12
13 Args:
14 array: Initial parameter array.
15
16 """
17 parameter_names = 'W',
18 gradient_names = 'gW',
19
20 def __init__(self, array):
21 self.W = array
22 self.gW = numpy.empty_like(array)
23
24 def forward(self, x):
25 return self.W,
26
27 def backward(self, x, gy):
28 self.gW += gy[0]
29 return ()
30
[end of chainer/functions/parameter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/parameter.py b/chainer/functions/parameter.py
--- a/chainer/functions/parameter.py
+++ b/chainer/functions/parameter.py
@@ -1,6 +1,7 @@
import numpy
from chainer import function
+from chainer.utils import type_check
class Parameter(function.Function):
@@ -21,6 +22,9 @@
self.W = array
self.gW = numpy.empty_like(array)
+ def check_type_forward(self, in_types):
+ type_check.expect(in_types.size() == 0)
+
def forward(self, x):
return self.W,
| {"golden_diff": "diff --git a/chainer/functions/parameter.py b/chainer/functions/parameter.py\n--- a/chainer/functions/parameter.py\n+++ b/chainer/functions/parameter.py\n@@ -1,6 +1,7 @@\n import numpy\n \n from chainer import function\n+from chainer.utils import type_check\n \n \n class Parameter(function.Function):\n@@ -21,6 +22,9 @@\n self.W = array\n self.gW = numpy.empty_like(array)\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(in_types.size() == 0)\n+\n def forward(self, x):\n return self.W,\n", "issue": "Add type check to Parameter function\nRelated to #123 \n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import function\n\n\nclass Parameter(function.Function):\n\n \"\"\"Function that outputs its weight array.\n\n This is a parameterized function that takes no input and returns a variable\n holding a shallow copy of the parameter array.\n\n Args:\n array: Initial parameter array.\n\n \"\"\"\n parameter_names = 'W',\n gradient_names = 'gW',\n\n def __init__(self, array):\n self.W = array\n self.gW = numpy.empty_like(array)\n\n def forward(self, x):\n return self.W,\n\n def backward(self, x, gy):\n self.gW += gy[0]\n return ()\n", "path": "chainer/functions/parameter.py"}]} | 747 | 137 |
gh_patches_debug_17412 | rasdani/github-patches | git_diff | ESMCI__cime-4411 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
case.build --clean-build will remove the CASEDIR contents if EXEDIR points to the CASEDIR
I'm reporting this from @dmleung. He setup some cases where EXEDIR was set to be the same as CASEDIR (so NOT a bld subdirectory of the CASEDIR). If he then runs "./case.build --clean-build" it then erases the contents of the CASEDIR. Outside of that it actually functions fine, but if you clean your build it will cause problems. Note, that he did this with older cime versions and it seemed to work OK, but with cime6.0.45 it will remove the case directory contents.
The important thing to note is that this is something you should NOT do! So we can either close this as a WONTFIX as a degenerate case that you shouldn't do. Or a simple thing to do would be to add a simple check that EXEDIR should not be the same as CASEDIR, or there should be an error reported.
</issue>
<code>
[start of CIME/XML/env_build.py]
1 """
2 Interface to the env_build.xml file. This class inherits from EnvBase
3 """
4 from CIME.XML.standard_module_setup import *
5
6 from CIME import utils
7 from CIME.XML.env_base import EnvBase
8
9 logger = logging.getLogger(__name__)
10
11
12 class EnvBuild(EnvBase):
13 # pylint: disable=unused-argument
14 def __init__(
15 self, case_root=None, infile="env_build.xml", components=None, read_only=False
16 ):
17 """
18 initialize an object interface to file env_build.xml in the case directory
19 """
20 schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd")
21 EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only)
22
[end of CIME/XML/env_build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CIME/XML/env_build.py b/CIME/XML/env_build.py
--- a/CIME/XML/env_build.py
+++ b/CIME/XML/env_build.py
@@ -18,4 +18,19 @@
initialize an object interface to file env_build.xml in the case directory
"""
schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd")
+ self._caseroot = case_root
EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only)
+
+ def set_value(self, vid, value, subgroup=None, ignore_type=False):
+ """
+ Set the value of an entry-id field to value
+ Returns the value or None if not found
+ subgroup is ignored in the general routine and applied in specific methods
+ """
+ # Do not allow any of these to be the same as CASEROOT
+ if vid in ("EXEROOT", "OBJDIR", "LIBROOT"):
+ utils.expect(value != self._caseroot, f"Cannot set {vid} to CASEROOT")
+
+ return super(EnvBuild, self).set_value(
+ vid, value, subgroup=subgroup, ignore_type=ignore_type
+ )
| {"golden_diff": "diff --git a/CIME/XML/env_build.py b/CIME/XML/env_build.py\n--- a/CIME/XML/env_build.py\n+++ b/CIME/XML/env_build.py\n@@ -18,4 +18,19 @@\n initialize an object interface to file env_build.xml in the case directory\n \"\"\"\n schema = os.path.join(utils.get_schema_path(), \"env_entry_id.xsd\")\n+ self._caseroot = case_root\n EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only)\n+\n+ def set_value(self, vid, value, subgroup=None, ignore_type=False):\n+ \"\"\"\n+ Set the value of an entry-id field to value\n+ Returns the value or None if not found\n+ subgroup is ignored in the general routine and applied in specific methods\n+ \"\"\"\n+ # Do not allow any of these to be the same as CASEROOT\n+ if vid in (\"EXEROOT\", \"OBJDIR\", \"LIBROOT\"):\n+ utils.expect(value != self._caseroot, f\"Cannot set {vid} to CASEROOT\")\n+\n+ return super(EnvBuild, self).set_value(\n+ vid, value, subgroup=subgroup, ignore_type=ignore_type\n+ )\n", "issue": "case.build --clean-build will remove the CASEDIR contents if EXEDIR points to the CASEDIR\nI'm reporting this from @dmleung. He setup some cases where EXEDIR was set to be the same as CASEDIR (so NOT a bld subdirectory of the CASEDIR). If he then runs \"./case.build --clean-build\" it then erases the contents of the CASEDIR. Outside of that it actually functions fine, but if you clean your build it will cause problems. Note, that he did this with older cime versions and it seemed to work OK, but with cime6.0.45 it will remove the case directory contents.\r\n\r\nThe important thing to note is that this is something you should NOT do! So we can either close this as a WONTFIX as a degenerate case that you shouldn't do. Or a simple thing to do would be to add a simple check that EXEDIR should not be the same as CASEDIR, or there should be an error reported.\n", "before_files": [{"content": "\"\"\"\nInterface to the env_build.xml file. This class inherits from EnvBase\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME import utils\nfrom CIME.XML.env_base import EnvBase\n\nlogger = logging.getLogger(__name__)\n\n\nclass EnvBuild(EnvBase):\n # pylint: disable=unused-argument\n def __init__(\n self, case_root=None, infile=\"env_build.xml\", components=None, read_only=False\n ):\n \"\"\"\n initialize an object interface to file env_build.xml in the case directory\n \"\"\"\n schema = os.path.join(utils.get_schema_path(), \"env_entry_id.xsd\")\n EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only)\n", "path": "CIME/XML/env_build.py"}]} | 940 | 275 |
gh_patches_debug_9678 | rasdani/github-patches | git_diff | conan-io__conan-9087 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[question] using `boost/system` recipe with system-wide config
<!-- What is your question? Please be as specific as possible! -->
- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
Hi! My question is about `conan` recipes for system packages (like [gtk/system](https://conan.io/center/gtk) or [xorg/system](https://conan.io/center/xorg) from CCI). In our ecosystem we want to use `boost/system` (a custom one) together with `boost/1.7?` from CCI, and we need transparent switching between them at the root of the dependency tree. We use the `CMakeDeps` generator, so before the build it generates (among other things) `Boost-config.cmake`, which is very hard to reproduce in `boost/system`.
- Before `1.36` we used the following smart crutch, which exploits a bug (?) in the generator. Our recipe declares the package `boost/system` with filename (in latest terms `cmake_file_name`) `SystemBoost` and with name (in latest terms `cmake_target_name`) `Boost`. When somebody (e.g. `A`) wants to depend on boost, `CMakeDeps` generates an **unused** `SystemBoost-config.cmake` and calls `find_package(Boost ... )` in `A-config.cmake`. That `find_package` uses the system-wide `FindBoost.cmake` or the config from the `boost-cmake` project and works perfectly.
- In `1.36` this bug was fixed ([the method](https://github.com/conan-io/conan/blob/develop/conan/tools/cmake/cmakedeps.py#L698) was introduced), so the crutch no longer works.
- Could you elaborate on this situation?

Here are my thoughts about it:
- I like the idea of using the system-wide config in the specific case of system boost.
- Generating conan's `Boost-config.cmake` to emulate a system one seems very hard. In CCI the only way to do this is using `PkgConfig`, but Boost has no official `.pc` file AFAIK.
- Maybe `CMakeDeps` could pick up a system-wide boost config somehow (copying or symlinking it instead of generating its own) in this specific case?
- Maybe conan could legitimize the crutch described above?
Thanks in advance!
</issue>
<code>
[start of conan/tools/cmake/cmakedeps/cmakedeps.py]
1 import os
2
3 from conan.tools.cmake.cmakedeps.templates.config import ConfigTemplate
4 from conan.tools.cmake.cmakedeps.templates.config_version import ConfigVersionTemplate
5 from conan.tools.cmake.cmakedeps.templates.macros import MacrosTemplate
6 from conan.tools.cmake.cmakedeps.templates.target_configuration import TargetConfigurationTemplate
7 from conan.tools.cmake.cmakedeps.templates.target_data import ConfigDataTemplate
8 from conan.tools.cmake.cmakedeps.templates.targets import TargetsTemplate
9 from conans.errors import ConanException
10 from conans.util.files import save
11
12
13 class CMakeDeps(object):
14
15 def __init__(self, conanfile):
16 self._conanfile = conanfile
17 self.arch = self._conanfile.settings.get_safe("arch")
18 self.configuration = str(self._conanfile.settings.build_type)
19
20 self.configurations = [v for v in conanfile.settings.build_type.values_range if v != "None"]
21 # Activate the build config files for the specified libraries
22 self.build_context_activated = []
23 # By default, the build modules are generated for host context only
24 self.build_context_build_modules = []
25 # If specified, the files/targets/variables for the build context will be renamed appending
26 # a suffix. It is necessary in case of same require and build_require and will cause an error
27 self.build_context_suffix = {}
28
29 def generate(self):
30 # Current directory is the generators_folder
31 generator_files = self.content
32 for generator_file, content in generator_files.items():
33 save(generator_file, content)
34
35 @property
36 def content(self):
37 macros = MacrosTemplate()
38 ret = {macros.filename: macros.render()}
39
40 host_req = self._conanfile.dependencies.host_requires
41 build_req = self._conanfile.dependencies.direct_build_requires
42
43 # Check if the same package is at host and build and the same time
44 activated_br = {r.ref.name for r in build_req.values()
45 if r.ref.name in self.build_context_activated}
46 common_names = {r.ref.name for r in host_req.values()}.intersection(activated_br)
47 for common_name in common_names:
48 suffix = self.build_context_suffix.get(common_name)
49 if not suffix:
50 raise ConanException("The package '{}' exists both as 'require' and as "
51 "'build require'. You need to specify a suffix using the "
52 "'build_context_suffix' attribute at the CMakeDeps "
53 "generator.".format(common_name))
54
55 # Iterate all the transitive requires
56 for require, dep in list(host_req.items()) + list(build_req.items()):
57 # Require is not used at the moment, but its information could be used,
58 # and will be used in Conan 2.0
59 # Filter the build_requires not activated with cmakedeps.build_context_activated
60 if dep.is_build_context and dep.ref.name not in self.build_context_activated:
61 continue
62
63 config_version = ConfigVersionTemplate(self, require, dep)
64 ret[config_version.filename] = config_version.render()
65
66 data_target = ConfigDataTemplate(self, require, dep)
67 ret[data_target.filename] = data_target.render()
68
69 target_configuration = TargetConfigurationTemplate(self, require, dep)
70 ret[target_configuration.filename] = target_configuration.render()
71
72 targets = TargetsTemplate(self, require, dep)
73 ret[targets.filename] = targets.render()
74
75 config = ConfigTemplate(self, require, dep)
76 # Check if the XXConfig.cmake exists to keep the first generated configuration
77 # to only include the build_modules from the first conan install. The rest of the
78 # file is common for the different configurations.
79 if not os.path.exists(config.filename):
80 ret[config.filename] = config.render()
81 return ret
82
[end of conan/tools/cmake/cmakedeps/cmakedeps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conan/tools/cmake/cmakedeps/cmakedeps.py b/conan/tools/cmake/cmakedeps/cmakedeps.py
--- a/conan/tools/cmake/cmakedeps/cmakedeps.py
+++ b/conan/tools/cmake/cmakedeps/cmakedeps.py
@@ -60,6 +60,10 @@
if dep.is_build_context and dep.ref.name not in self.build_context_activated:
continue
+ if dep.new_cpp_info.get_property("skip_deps_file", "CMakeDeps"):
+ # Skip the generation of config files for this node, it will be located externally
+ continue
+
config_version = ConfigVersionTemplate(self, require, dep)
ret[config_version.filename] = config_version.render()
| {"golden_diff": "diff --git a/conan/tools/cmake/cmakedeps/cmakedeps.py b/conan/tools/cmake/cmakedeps/cmakedeps.py\n--- a/conan/tools/cmake/cmakedeps/cmakedeps.py\n+++ b/conan/tools/cmake/cmakedeps/cmakedeps.py\n@@ -60,6 +60,10 @@\n if dep.is_build_context and dep.ref.name not in self.build_context_activated:\n continue\n \n+ if dep.new_cpp_info.get_property(\"skip_deps_file\", \"CMakeDeps\"):\n+ # Skip the generation of config files for this node, it will be located externally\n+ continue\n+\n config_version = ConfigVersionTemplate(self, require, dep)\n ret[config_version.filename] = config_version.render()\n", "issue": "[question] using `boost/system` recipe with system-wide config\n<!-- What is your question? Please be as specific as possible! -->\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n\r\nHi! My question is about `conan` recipes for system packages (like [gtk/system](https://conan.io/center/gtk) or [xorg/system](https://conan.io/center/xorg) from CCI). In our ecosystem we want to use `boost/system` (custom one) together with `boost/1.7?` from CCI (we need a transparent switching between them in the root of dependency tree). We use `CMakeDeps` generator. So before build it generates (among other things) `Boost-config.cmake`, which is very hard to reproduce in `boost/system` . \r\n\r\n- Before `1.36` we use the following smart crutch which exploits a bug (?) in generator. Our recipe declare package `boost/system` with filename (in latest terms `cmake_file_name`) `SystemBoost` and with name (in latest terms `cmake_target_name`) `Boost`. When somebody (e.g. `A`) wants to depend on boost, `CMakeDeps` generates **unused** `SystemBoost-config.cmake` and call `find_package(Boost ... ) ` in `A-config.cmake`. This `find_package` uses system-wide `FindBoost.cmake` or config from `boost-cmake` project and works perfectly. \r\n- In `1.36` this bug was fixed. ([the method](https://github.com/conan-io/conan/blob/develop/conan/tools/cmake/cmakedeps.py#L698) was introduced). So the crutch is not working now.\r\n- Could you elaborate this situation?\r\n\r\nThere are my thoughts about it:\r\n- I like the idea to use system-wide config in the specific case of system boost.\r\n- Generating conan's `Boost-config.cmake` to emulate a system one seems very hard. In CCI the only way to do this is using `PkgConfig`, but Boost has no official `.pc` file AFAIK.\r\n- Maybe `CMakeDeps` can take a system-wide boost config somehow (copying or symlinking it instead of generate its own) in this specific case?\r\n- Maybe conan can legitimate the crutch described above? 
\r\n\r\nThanks in advance!\n", "before_files": [{"content": "import os\n\nfrom conan.tools.cmake.cmakedeps.templates.config import ConfigTemplate\nfrom conan.tools.cmake.cmakedeps.templates.config_version import ConfigVersionTemplate\nfrom conan.tools.cmake.cmakedeps.templates.macros import MacrosTemplate\nfrom conan.tools.cmake.cmakedeps.templates.target_configuration import TargetConfigurationTemplate\nfrom conan.tools.cmake.cmakedeps.templates.target_data import ConfigDataTemplate\nfrom conan.tools.cmake.cmakedeps.templates.targets import TargetsTemplate\nfrom conans.errors import ConanException\nfrom conans.util.files import save\n\n\nclass CMakeDeps(object):\n\n def __init__(self, conanfile):\n self._conanfile = conanfile\n self.arch = self._conanfile.settings.get_safe(\"arch\")\n self.configuration = str(self._conanfile.settings.build_type)\n\n self.configurations = [v for v in conanfile.settings.build_type.values_range if v != \"None\"]\n # Activate the build config files for the specified libraries\n self.build_context_activated = []\n # By default, the build modules are generated for host context only\n self.build_context_build_modules = []\n # If specified, the files/targets/variables for the build context will be renamed appending\n # a suffix. It is necessary in case of same require and build_require and will cause an error\n self.build_context_suffix = {}\n\n def generate(self):\n # Current directory is the generators_folder\n generator_files = self.content\n for generator_file, content in generator_files.items():\n save(generator_file, content)\n\n @property\n def content(self):\n macros = MacrosTemplate()\n ret = {macros.filename: macros.render()}\n\n host_req = self._conanfile.dependencies.host_requires\n build_req = self._conanfile.dependencies.direct_build_requires\n\n # Check if the same package is at host and build and the same time\n activated_br = {r.ref.name for r in build_req.values()\n if r.ref.name in self.build_context_activated}\n common_names = {r.ref.name for r in host_req.values()}.intersection(activated_br)\n for common_name in common_names:\n suffix = self.build_context_suffix.get(common_name)\n if not suffix:\n raise ConanException(\"The package '{}' exists both as 'require' and as \"\n \"'build require'. You need to specify a suffix using the \"\n \"'build_context_suffix' attribute at the CMakeDeps \"\n \"generator.\".format(common_name))\n\n # Iterate all the transitive requires\n for require, dep in list(host_req.items()) + list(build_req.items()):\n # Require is not used at the moment, but its information could be used,\n # and will be used in Conan 2.0\n # Filter the build_requires not activated with cmakedeps.build_context_activated\n if dep.is_build_context and dep.ref.name not in self.build_context_activated:\n continue\n\n config_version = ConfigVersionTemplate(self, require, dep)\n ret[config_version.filename] = config_version.render()\n\n data_target = ConfigDataTemplate(self, require, dep)\n ret[data_target.filename] = data_target.render()\n\n target_configuration = TargetConfigurationTemplate(self, require, dep)\n ret[target_configuration.filename] = target_configuration.render()\n\n targets = TargetsTemplate(self, require, dep)\n ret[targets.filename] = targets.render()\n\n config = ConfigTemplate(self, require, dep)\n # Check if the XXConfig.cmake exists to keep the first generated configuration\n # to only include the build_modules from the first conan install. 
The rest of the\n # file is common for the different configurations.\n if not os.path.exists(config.filename):\n ret[config.filename] = config.render()\n return ret\n", "path": "conan/tools/cmake/cmakedeps/cmakedeps.py"}]} | 2,039 | 164 |
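The conan fix above lets a recipe opt out of CMakeDeps file generation through a `skip_deps_file` property, so a system recipe can defer to an externally provided config. Below is a conan-free sketch of that skip logic; the `Dep` dataclass and `files_to_generate` helper are illustrative stand-ins, not conan's real objects.

```python
from dataclasses import dataclass, field


@dataclass
class Dep:
    name: str
    properties: dict = field(default_factory=dict)

    def get_property(self, key):
        return self.properties.get(key)


def files_to_generate(deps):
    """Yield config file names, skipping dependencies that opt out (as in the patch)."""
    for dep in deps:
        if dep.get_property("skip_deps_file"):
            # This package's config is expected to be located externally (e.g. system-wide).
            continue
        yield f"{dep.name}-config.cmake"


if __name__ == "__main__":
    deps = [Dep("zlib"), Dep("boost", {"skip_deps_file": True})]
    print(list(files_to_generate(deps)))  # ['zlib-config.cmake']
```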
gh_patches_debug_35541 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include a git submodule update when doing pip install
**Is your feature request related to a problem? Please describe.**
Currently, the installation step requires a user to run `git submodule update --init --recursive` before doing `pip install`, which can lead to a broken installation if the user forgets that step.
**Describe the solution you'd like**
Ensure the [git submodule line is working correctly in the setup.py file](https://github.com/CBICA/GaNDLF/blob/f4d0f02848c894d43259c2bb0b70612db612eb53/setup.py#L95).
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5 from setuptools import setup, find_packages
6
7 with open("README.md") as readme_file:
8 readme = readme_file.read()
9
10 # read version.py
11 import sys, re
12
13 try:
14 filepath = "GANDLF/version.py"
15 version_file = open(filepath)
16 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
17
18 except Exception as error:
19 __version__ = "0.0.1"
20 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
21
22 requirements = [
23 "black",
24 "numpy==1.21.0",
25 "scipy",
26 "SimpleITK==2.1.0",
27 "torch>=1.7",
28 "torchvision",
29 "tqdm",
30 "torchio==0.18.57",
31 "pandas",
32 "pylint",
33 "scikit-learn==0.23.1",
34 "pickle5==0.0.11",
35 "setuptools",
36 "seaborn",
37 "pyyaml",
38 "openslide-python",
39 "scikit-image",
40 "matplotlib",
41 "requests>=2.25.0",
42 "pyvips",
43 "pytest",
44 "coverage",
45 "psutil",
46 "medcam",
47 "opencv-python",
48 "torchmetrics",
49 "OpenPatchMiner==0.1.6",
50 "pydicom",
51 ]
52
53 setup(
54 name="GANDLF",
55 version=__version__,
56 author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
57 author_email="[email protected]",
58 python_requires=">=3.6",
59 packages=find_packages(),
60 scripts=[
61 "gandlf_run",
62 "gandlf_constructCSV",
63 "gandlf_collectStats",
64 "gandlf_patchMiner",
65 "gandlf_preprocess",
66 "gandlf_anonymizer",
67 ],
68 classifiers=[
69 "Development Status :: 3 - Alpha",
70 "Intended Audience :: Science/Research",
71 "License :: OSI Approved :: BSD License",
72 "Natural Language :: English",
73 "Operating System :: OS Independent",
74 "Programming Language :: Python :: 3.6",
75 "Programming Language :: Python :: 3.7",
76 "Programming Language :: Python :: 3.8",
77 "Programming Language :: Python :: 3.9",
78 "Topic :: Scientific/Engineering :: Medical Science Apps",
79 ],
80 description=(
81 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
82 ),
83 install_requires=requirements,
84 license="BSD-3-Clause License",
85 long_description=readme,
86 long_description_content_type="text/markdown",
87 include_package_data=True,
88 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
89 zip_safe=False,
90 )
91
92 import os
93
94 ## submodule update
95 os.system("git submodule update --init --recursive")
96
97 ## windows vips installation
98 if os.name == "nt": # proceed for windows
99 from pathlib import Path
100
101 # download and extract if main dll is absent
102 if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
103 print("Downloading and extracting VIPS for Windows")
104 url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
105 zip_to_extract = "./vips.zip"
106 import urllib.request, zipfile
107
108 urllib.request.urlretrieve(url, zip_to_extract)
109 z = zipfile.ZipFile(zip_to_extract)
110 z.extractall("./vips")
111 z.close()
112 os.remove(zip_to_extract)
113
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,11 +2,40 @@
"""The setup script."""
+
+import os
from setuptools import setup, find_packages
+from setuptools.command.install import install
+from setuptools.command.develop import develop
+from setuptools.command.egg_info import egg_info
with open("README.md") as readme_file:
readme = readme_file.read()
+
+def git_submodule_update():
+ ## submodule update
+ os.system("git submodule update --init --recursive")
+
+
+class CustomInstallCommand(install):
+ def run(self):
+ install.run(self)
+ git_submodule_update()
+
+
+class CustomDevelopCommand(develop):
+ def run(self):
+ develop.run(self)
+ git_submodule_update()
+
+
+class CustomEggInfoCommand(egg_info):
+ def run(self):
+ egg_info.run(self)
+ git_submodule_update()
+
+
# read version.py
import sys, re
@@ -42,6 +71,7 @@
"pyvips",
"pytest",
"coverage",
+ "pytest-cov",
"psutil",
"medcam",
"opencv-python",
@@ -53,10 +83,15 @@
setup(
name="GANDLF",
version=__version__,
- author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
+ author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
author_email="[email protected]",
python_requires=">=3.6",
packages=find_packages(),
+ cmdclass={ # this ensures git_submodule_update is called during install
+ "install": CustomInstallCommand,
+ "develop": CustomDevelopCommand,
+ "egg_info": CustomEggInfoCommand,
+ },
scripts=[
"gandlf_run",
"gandlf_constructCSV",
@@ -89,11 +124,6 @@
zip_safe=False,
)
-import os
-
-## submodule update
-os.system("git submodule update --init --recursive")
-
## windows vips installation
if os.name == "nt": # proceed for windows
from pathlib import Path
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,11 +2,40 @@\n \n \"\"\"The setup script.\"\"\"\n \n+\n+import os\n from setuptools import setup, find_packages\n+from setuptools.command.install import install\n+from setuptools.command.develop import develop\n+from setuptools.command.egg_info import egg_info\n \n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\n \n+\n+def git_submodule_update():\n+ ## submodule update\n+ os.system(\"git submodule update --init --recursive\")\n+\n+\n+class CustomInstallCommand(install):\n+ def run(self):\n+ install.run(self)\n+ git_submodule_update()\n+\n+\n+class CustomDevelopCommand(develop):\n+ def run(self):\n+ develop.run(self)\n+ git_submodule_update()\n+\n+\n+class CustomEggInfoCommand(egg_info):\n+ def run(self):\n+ egg_info.run(self)\n+ git_submodule_update()\n+\n+\n # read version.py\n import sys, re\n \n@@ -42,6 +71,7 @@\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n+ \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n@@ -53,10 +83,15 @@\n setup(\n name=\"GANDLF\",\n version=__version__,\n- author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n+ author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n packages=find_packages(),\n+ cmdclass={ # this ensures git_submodule_update is called during install\n+ \"install\": CustomInstallCommand,\n+ \"develop\": CustomDevelopCommand,\n+ \"egg_info\": CustomEggInfoCommand,\n+ },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n@@ -89,11 +124,6 @@\n zip_safe=False,\n )\n \n-import os\n-\n-## submodule update\n-os.system(\"git submodule update --init --recursive\")\n-\n ## windows vips installation\n if os.name == \"nt\": # proceed for windows\n from pathlib import Path\n", "issue": "Include a git submodule update when doing pip install\n**Is your feature request related to a problem? 
Please describe.**\r\nCurrently, the installation step requires a user to run `git submodule update --init --recursive` before doing `pip install`, which can lead to broken installation if the user forgets the step.\r\n\r\n**Describe the solution you'd like**\r\nEnsure the [git submodule line is working correctly in the setup.py file](https://github.com/CBICA/GaNDLF/blob/f4d0f02848c894d43259c2bb0b70612db612eb53/setup.py#L95).\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.21.0\",\n \"scipy\",\n \"SimpleITK==2.1.0\",\n \"torch>=1.7\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn==0.23.1\",\n \"pickle5==0.0.11\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"openslide-python\",\n \"scikit-image\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics\",\n \"OpenPatchMiner==0.1.6\",\n \"pydicom\",\n]\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n packages=find_packages(),\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\nimport os\n\n## submodule update\nos.system(\"git submodule update --init --recursive\")\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = 
\"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}]} | 1,834 | 662 |
gh_patches_debug_35199 | rasdani/github-patches | git_diff | litestar-org__litestar-2581 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: CLI Schema Export fails
### Description
Exporting the Litestar schema via the CLI fails, presumably because `json.dumps` can't serialize certain field types such as `UUID` or `datetime`.
> [!IMPORTANT]
> This is only when `create_examples` is `True`
### URL to code causing the issue
_No response_
### MCVE
```python
from datetime import datetime
from litestar import Litestar, get
from litestar.openapi import OpenAPIConfig
@get()
async def something(date: datetime) -> None:
return None
app = Litestar([something], openapi_config=OpenAPIConfig('example', '0.0.1', True))
```
or
```python
from __future__ import annotations
from datetime import datetime
from uuid import UUID
from litestar import Litestar, get
from litestar.openapi import OpenAPIConfig
from pydantic import BaseModel, Field
class FileSchema(BaseModel):
id: UUID
updated: datetime
@get()
async def something() -> FileSchema | None:
return None
app = Litestar([something], openapi_config=OpenAPIConfig("example", "0.0.1", True))
```
### Steps to reproduce
```bash
1. Create mcve.py
2. `litestar schema openapi --output schema.json`
3. See error
```
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.x
### Platform
- [ ] Linux
- [ ] Mac
- [ ] Windows
- [X] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2575">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2575/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2575/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
</issue>
<code>
[start of litestar/cli/commands/schema.py]
1 from json import dumps
2 from pathlib import Path
3 from typing import TYPE_CHECKING
4
5 from yaml import dump as dump_yaml
6
7 from litestar import Litestar
8 from litestar._openapi.typescript_converter.converter import (
9 convert_openapi_to_typescript,
10 )
11 from litestar.cli._utils import JSBEAUTIFIER_INSTALLED, RICH_CLICK_INSTALLED, LitestarCLIException, LitestarGroup
12
13 if TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover
14 from click import Path as ClickPath
15 from click import group, option
16 else:
17 from rich_click import Path as ClickPath
18 from rich_click import group, option
19
20 if JSBEAUTIFIER_INSTALLED: # pragma: no cover
21 from jsbeautifier import Beautifier
22
23 beautifier = Beautifier()
24
25
26 __all__ = ("generate_openapi_schema", "generate_typescript_specs", "schema_group")
27
28
29 @group(cls=LitestarGroup, name="schema")
30 def schema_group() -> None:
31 """Manage server-side OpenAPI schemas."""
32
33
34 @schema_group.command("openapi") # type: ignore
35 @option(
36 "--output",
37 help="output file path",
38 type=ClickPath(dir_okay=False, path_type=Path),
39 default=Path("openapi_schema.json"),
40 show_default=True,
41 )
42 def generate_openapi_schema(app: Litestar, output: Path) -> None:
43 """Generate an OpenAPI Schema."""
44 if output.suffix in (".yml", ".yaml"):
45 content = dump_yaml(app.openapi_schema.to_schema(), default_flow_style=False)
46 else:
47 content = dumps(app.openapi_schema.to_schema(), indent=4)
48
49 try:
50 output.write_text(content)
51 except OSError as e: # pragma: no cover
52 raise LitestarCLIException(f"failed to write schema to path {output}") from e
53
54
55 @schema_group.command("typescript") # type: ignore
56 @option(
57 "--output",
58 help="output file path",
59 type=ClickPath(dir_okay=False, path_type=Path),
60 default=Path("api-specs.ts"),
61 show_default=True,
62 )
63 @option("--namespace", help="namespace to use for the typescript specs", type=str, default="API")
64 def generate_typescript_specs(app: Litestar, output: Path, namespace: str) -> None:
65 """Generate TypeScript specs from the OpenAPI schema."""
66 try:
67 specs = convert_openapi_to_typescript(app.openapi_schema, namespace)
68 # beautifier will be defined if JSBEAUTIFIER_INSTALLED is True
69 specs_output = (
70 beautifier.beautify(specs.write()) if JSBEAUTIFIER_INSTALLED else specs.write() # pyright: ignore
71 )
72 output.write_text(specs_output)
73 except OSError as e: # pragma: no cover
74 raise LitestarCLIException(f"failed to write schema to path {output}") from e
75
[end of litestar/cli/commands/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/cli/commands/schema.py b/litestar/cli/commands/schema.py
--- a/litestar/cli/commands/schema.py
+++ b/litestar/cli/commands/schema.py
@@ -1,7 +1,7 @@
-from json import dumps
from pathlib import Path
from typing import TYPE_CHECKING
+import msgspec
from yaml import dump as dump_yaml
from litestar import Litestar
@@ -9,6 +9,7 @@
convert_openapi_to_typescript,
)
from litestar.cli._utils import JSBEAUTIFIER_INSTALLED, RICH_CLICK_INSTALLED, LitestarCLIException, LitestarGroup
+from litestar.serialization import encode_json, get_serializer
if TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover
from click import Path as ClickPath
@@ -31,6 +32,27 @@
"""Manage server-side OpenAPI schemas."""
+def _generate_openapi_schema(app: Litestar, output: Path) -> None:
+ """Generate an OpenAPI Schema."""
+ serializer = get_serializer(app.type_encoders)
+ if output.suffix in (".yml", ".yaml"):
+ content = dump_yaml(
+ msgspec.to_builtins(app.openapi_schema.to_schema(), enc_hook=serializer),
+ default_flow_style=False,
+ encoding="utf-8",
+ )
+ else:
+ content = msgspec.json.format(
+ encode_json(app.openapi_schema.to_schema(), serializer=serializer),
+ indent=4,
+ )
+
+ try:
+ output.write_bytes(content)
+ except OSError as e: # pragma: no cover
+ raise LitestarCLIException(f"failed to write schema to path {output}") from e
+
+
@schema_group.command("openapi") # type: ignore
@option(
"--output",
@@ -41,15 +63,7 @@
)
def generate_openapi_schema(app: Litestar, output: Path) -> None:
"""Generate an OpenAPI Schema."""
- if output.suffix in (".yml", ".yaml"):
- content = dump_yaml(app.openapi_schema.to_schema(), default_flow_style=False)
- else:
- content = dumps(app.openapi_schema.to_schema(), indent=4)
-
- try:
- output.write_text(content)
- except OSError as e: # pragma: no cover
- raise LitestarCLIException(f"failed to write schema to path {output}") from e
+ _generate_openapi_schema(app, output)
@schema_group.command("typescript") # type: ignore
| {"golden_diff": "diff --git a/litestar/cli/commands/schema.py b/litestar/cli/commands/schema.py\n--- a/litestar/cli/commands/schema.py\n+++ b/litestar/cli/commands/schema.py\n@@ -1,7 +1,7 @@\n-from json import dumps\n from pathlib import Path\n from typing import TYPE_CHECKING\n \n+import msgspec\n from yaml import dump as dump_yaml\n \n from litestar import Litestar\n@@ -9,6 +9,7 @@\n convert_openapi_to_typescript,\n )\n from litestar.cli._utils import JSBEAUTIFIER_INSTALLED, RICH_CLICK_INSTALLED, LitestarCLIException, LitestarGroup\n+from litestar.serialization import encode_json, get_serializer\n \n if TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n from click import Path as ClickPath\n@@ -31,6 +32,27 @@\n \"\"\"Manage server-side OpenAPI schemas.\"\"\"\n \n \n+def _generate_openapi_schema(app: Litestar, output: Path) -> None:\n+ \"\"\"Generate an OpenAPI Schema.\"\"\"\n+ serializer = get_serializer(app.type_encoders)\n+ if output.suffix in (\".yml\", \".yaml\"):\n+ content = dump_yaml(\n+ msgspec.to_builtins(app.openapi_schema.to_schema(), enc_hook=serializer),\n+ default_flow_style=False,\n+ encoding=\"utf-8\",\n+ )\n+ else:\n+ content = msgspec.json.format(\n+ encode_json(app.openapi_schema.to_schema(), serializer=serializer),\n+ indent=4,\n+ )\n+\n+ try:\n+ output.write_bytes(content)\n+ except OSError as e: # pragma: no cover\n+ raise LitestarCLIException(f\"failed to write schema to path {output}\") from e\n+\n+\n @schema_group.command(\"openapi\") # type: ignore\n @option(\n \"--output\",\n@@ -41,15 +63,7 @@\n )\n def generate_openapi_schema(app: Litestar, output: Path) -> None:\n \"\"\"Generate an OpenAPI Schema.\"\"\"\n- if output.suffix in (\".yml\", \".yaml\"):\n- content = dump_yaml(app.openapi_schema.to_schema(), default_flow_style=False)\n- else:\n- content = dumps(app.openapi_schema.to_schema(), indent=4)\n-\n- try:\n- output.write_text(content)\n- except OSError as e: # pragma: no cover\n- raise LitestarCLIException(f\"failed to write schema to path {output}\") from e\n+ _generate_openapi_schema(app, output)\n \n \n @schema_group.command(\"typescript\") # type: ignore\n", "issue": "Bug: CLI Schema Export fails\n### Description\r\n\r\nExporting the Litestar schema via CLI fails presumably because the `json.dumps` can't serialize certain field types such as `UUID` or `datetime`.\r\n\r\n> [!IMPORTANT] \r\n> This is only when `create_examples` is `True`\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\nfrom datetime import datetime\r\nfrom litestar import Litestar, get\r\nfrom litestar.openapi import OpenAPIConfig\r\n\r\n@get()\r\nasync def something(date: datetime) -> None:\r\n return None\r\n\r\napp = Litestar([something], openapi_config=OpenAPIConfig('example', '0.0.1', True))\r\n```\r\n\r\nor\r\n\r\n```python\r\nfrom __future__ import annotations\r\nfrom datetime import datetime\r\nfrom uuid import UUID\r\nfrom litestar import Litestar, get\r\nfrom litestar.openapi import OpenAPIConfig\r\nfrom pydantic import BaseModel, Field\r\n\r\nclass FileSchema(BaseModel):\r\n id: UUID\r\n updated: datetime\r\n\r\n@get()\r\nasync def something() -> FileSchema | None:\r\n return None\r\n\r\napp = Litestar([something], openapi_config=OpenAPIConfig(\"example\", \"0.0.1\", True))\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n```bash\r\n1. Create mcve.py\r\n2. `litestar schema openapi --output schema.json`\r\n3. 
See error\r\n```\r\n\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\n2.x\r\n\r\n### Platform\r\n\r\n- [ ] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [X] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2575\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2575/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2575/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from json import dumps\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom yaml import dump as dump_yaml\n\nfrom litestar import Litestar\nfrom litestar._openapi.typescript_converter.converter import (\n convert_openapi_to_typescript,\n)\nfrom litestar.cli._utils import JSBEAUTIFIER_INSTALLED, RICH_CLICK_INSTALLED, LitestarCLIException, LitestarGroup\n\nif TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n from click import Path as ClickPath\n from click import group, option\nelse:\n from rich_click import Path as ClickPath\n from rich_click import group, option\n\nif JSBEAUTIFIER_INSTALLED: # pragma: no cover\n from jsbeautifier import Beautifier\n\n beautifier = Beautifier()\n\n\n__all__ = (\"generate_openapi_schema\", \"generate_typescript_specs\", \"schema_group\")\n\n\n@group(cls=LitestarGroup, name=\"schema\")\ndef schema_group() -> None:\n \"\"\"Manage server-side OpenAPI schemas.\"\"\"\n\n\n@schema_group.command(\"openapi\") # type: ignore\n@option(\n \"--output\",\n help=\"output file path\",\n type=ClickPath(dir_okay=False, path_type=Path),\n default=Path(\"openapi_schema.json\"),\n show_default=True,\n)\ndef generate_openapi_schema(app: Litestar, output: Path) -> None:\n \"\"\"Generate an OpenAPI Schema.\"\"\"\n if output.suffix in (\".yml\", \".yaml\"):\n content = dump_yaml(app.openapi_schema.to_schema(), default_flow_style=False)\n else:\n content = dumps(app.openapi_schema.to_schema(), indent=4)\n\n try:\n output.write_text(content)\n except OSError as e: # pragma: no cover\n raise LitestarCLIException(f\"failed to write schema to path {output}\") from e\n\n\n@schema_group.command(\"typescript\") # type: ignore\n@option(\n \"--output\",\n help=\"output file path\",\n type=ClickPath(dir_okay=False, path_type=Path),\n default=Path(\"api-specs.ts\"),\n show_default=True,\n)\n@option(\"--namespace\", help=\"namespace to use for the typescript specs\", type=str, default=\"API\")\ndef generate_typescript_specs(app: Litestar, output: Path, namespace: str) -> None:\n \"\"\"Generate TypeScript specs from the OpenAPI schema.\"\"\"\n try:\n specs = 
convert_openapi_to_typescript(app.openapi_schema, namespace)\n # beautifier will be defined if JSBEAUTIFIER_INSTALLED is True\n specs_output = (\n beautifier.beautify(specs.write()) if JSBEAUTIFIER_INSTALLED else specs.write() # pyright: ignore\n )\n output.write_text(specs_output)\n except OSError as e: # pragma: no cover\n raise LitestarCLIException(f\"failed to write schema to path {output}\") from e\n", "path": "litestar/cli/commands/schema.py"}]} | 1,948 | 569 |
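The underlying problem in this record is that the standard library's `json.dumps` cannot encode `UUID` or `datetime` values that show up in generated schema examples. The snippet below is a stdlib-only demonstration of the failure and of a crude `default=str` fallback; it is not how Litestar fixed it (the merged patch switches the CLI to `msgspec` with the app's serializer).

```python
import json
from datetime import datetime, timezone
from uuid import uuid4

example = {"id": uuid4(), "updated": datetime.now(timezone.utc)}

try:
    json.dumps(example)
except TypeError as err:
    print("plain json.dumps fails:", err)

# Crude fallback: stringify anything the encoder does not understand.
print(json.dumps(example, default=str, indent=2))
```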
gh_patches_debug_17160 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-18 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow to specify host and port in dev server
</issue>
<code>
[start of strawberry/cli/__init__.py]
1 import click
2 import sys
3
4 import os
5 from starlette.applications import Starlette
6 from starlette.responses import HTMLResponse
7 import importlib
8
9 import uvicorn
10
11 import hupper
12
13 from strawberry.contrib.starlette import GraphQLApp
14
15
16 @click.group()
17 def run():
18 pass
19
20
21 @run.command("server")
22 @click.argument("module", type=str)
23 def server(module):
24 sys.path.append(os.getcwd())
25
26 reloader = hupper.start_reloader("strawberry.cli.run")
27
28 schema_module = importlib.import_module(module)
29
30 reloader.watch_files([schema_module.__file__])
31
32 app = Starlette(debug=True)
33 app.add_route("/graphql", GraphQLApp(schema_module.schema))
34 uvicorn.run(app, host="0.0.0.0", port=8000, log_level="error")
35
[end of strawberry/cli/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/cli/__init__.py b/strawberry/cli/__init__.py
--- a/strawberry/cli/__init__.py
+++ b/strawberry/cli/__init__.py
@@ -20,10 +20,12 @@
@run.command("server")
@click.argument("module", type=str)
-def server(module):
[email protected]("-h", "--host", default="0.0.0.0", type=str)
[email protected]("-p", "--port", default=8000, type=int)
+def server(module, host, port):
sys.path.append(os.getcwd())
- reloader = hupper.start_reloader("strawberry.cli.run")
+ reloader = hupper.start_reloader("strawberry.cli.run", verbose=False)
schema_module = importlib.import_module(module)
@@ -31,4 +33,7 @@
app = Starlette(debug=True)
app.add_route("/graphql", GraphQLApp(schema_module.schema))
- uvicorn.run(app, host="0.0.0.0", port=8000, log_level="error")
+
+ print(f"Running strawberry on http://{host}:{port}/graphql 🍓")
+
+ uvicorn.run(app, host=host, port=port, log_level="error")
| {"golden_diff": "diff --git a/strawberry/cli/__init__.py b/strawberry/cli/__init__.py\n--- a/strawberry/cli/__init__.py\n+++ b/strawberry/cli/__init__.py\n@@ -20,10 +20,12 @@\n \n @run.command(\"server\")\n @click.argument(\"module\", type=str)\n-def server(module):\[email protected](\"-h\", \"--host\", default=\"0.0.0.0\", type=str)\[email protected](\"-p\", \"--port\", default=8000, type=int)\n+def server(module, host, port):\n sys.path.append(os.getcwd())\n \n- reloader = hupper.start_reloader(\"strawberry.cli.run\")\n+ reloader = hupper.start_reloader(\"strawberry.cli.run\", verbose=False)\n \n schema_module = importlib.import_module(module)\n \n@@ -31,4 +33,7 @@\n \n app = Starlette(debug=True)\n app.add_route(\"/graphql\", GraphQLApp(schema_module.schema))\n- uvicorn.run(app, host=\"0.0.0.0\", port=8000, log_level=\"error\")\n+\n+ print(f\"Running strawberry on http://{host}:{port}/graphql \ud83c\udf53\")\n+\n+ uvicorn.run(app, host=host, port=port, log_level=\"error\")\n", "issue": "Allow to specify host and port in dev server\n\n", "before_files": [{"content": "import click\nimport sys\n\nimport os\nfrom starlette.applications import Starlette\nfrom starlette.responses import HTMLResponse\nimport importlib\n\nimport uvicorn\n\nimport hupper\n\nfrom strawberry.contrib.starlette import GraphQLApp\n\n\[email protected]()\ndef run():\n pass\n\n\[email protected](\"server\")\[email protected](\"module\", type=str)\ndef server(module):\n sys.path.append(os.getcwd())\n\n reloader = hupper.start_reloader(\"strawberry.cli.run\")\n\n schema_module = importlib.import_module(module)\n\n reloader.watch_files([schema_module.__file__])\n\n app = Starlette(debug=True)\n app.add_route(\"/graphql\", GraphQLApp(schema_module.schema))\n uvicorn.run(app, host=\"0.0.0.0\", port=8000, log_level=\"error\")\n", "path": "strawberry/cli/__init__.py"}]} | 791 | 296 |
gh_patches_debug_37398 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4787 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow passing date range for perform_iati_checks
Currently, [`perform_iati_checks`](https://github.com/akvo/akvo-rsr/blob/master/akvo/rsr/management/commands/perform_iati_checks.py) only has the `--all` option. It should be possible to pass a date range of projects to execute the script on.
Executing it on all projects is unnecessary as most are probably inactive.
This is related to #4779, as some projects will have to be rechecked and we don't want to check all of them.
</issue>
<code>
[start of akvo/rsr/management/commands/perform_iati_checks.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo Reporting is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from akvo.rsr.models.project import Project
8
9 from django.core.management.base import BaseCommand
10
11
12 class Command(BaseCommand):
13 help = "Perform all IATI checks for projects."
14
15 def add_arguments(self, parser):
16 parser.add_argument(
17 '--all',
18 action='store_true',
19 default=False,
20 help='Run IATI checks for all the projects in the DB.',
21 )
22
23 def handle(self, *args, **options):
24 projects = Project.objects.all() if options['all'] else Project.objects.filter(run_iati_checks=True)
25 self.stdout.write('Performing IATI checks for {} ...'.format(projects.count()))
26 for project in projects:
27 self.stdout.write('Performing IATI checks for project {0}...'.format(project.pk))
28 project.update_iati_checks()
29
[end of akvo/rsr/management/commands/perform_iati_checks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/management/commands/perform_iati_checks.py b/akvo/rsr/management/commands/perform_iati_checks.py
--- a/akvo/rsr/management/commands/perform_iati_checks.py
+++ b/akvo/rsr/management/commands/perform_iati_checks.py
@@ -3,25 +3,60 @@
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
+import argparse
+import datetime
from akvo.rsr.models.project import Project
from django.core.management.base import BaseCommand
+def date_arg_type(string):
+ return datetime.datetime.strptime(string, '%Y-%m-%d').date()
+
+
class Command(BaseCommand):
help = "Perform all IATI checks for projects."
- def add_arguments(self, parser):
+ def add_arguments(self, parser: argparse.ArgumentParser):
parser.add_argument(
'--all',
action='store_true',
default=False,
help='Run IATI checks for all the projects in the DB.',
+
+ )
+ parser.add_argument(
+ '--date-start',
+ type=date_arg_type,
+ help='Limit to projects created on and after this day',
+ )
+ parser.add_argument(
+ '--date-end',
+ type=date_arg_type,
+ help='Limit to projects created on or before this day',
)
def handle(self, *args, **options):
- projects = Project.objects.all() if options['all'] else Project.objects.filter(run_iati_checks=True)
+ all_option = options["all"]
+ date_start = options["date_start"]
+ date_end = options["date_end"]
+
+ # Filter projects with options
+ projects = Project.objects.all()
+ if not (all_option or date_start or date_end):
+ self.stdout.write("No options provided: only checking projects with run_iati_checks=True")
+ projects = projects.filter(run_iati_checks=True)
+ elif all_option:
+ self.stdout.write("Checking ALL projects. This might take a while...")
+ else:
+ if date_start:
+ self.stdout.write("Filtering projects on and after %s" % date_start)
+ projects = projects.filter(created_at__gte=date_start)
+ if date_end:
+ self.stdout.write("Filtering projects on and before %s" % date_end)
+ projects = projects.filter(created_at__lte=date_end)
+
self.stdout.write('Performing IATI checks for {} ...'.format(projects.count()))
for project in projects:
self.stdout.write('Performing IATI checks for project {0}...'.format(project.pk))
| {"golden_diff": "diff --git a/akvo/rsr/management/commands/perform_iati_checks.py b/akvo/rsr/management/commands/perform_iati_checks.py\n--- a/akvo/rsr/management/commands/perform_iati_checks.py\n+++ b/akvo/rsr/management/commands/perform_iati_checks.py\n@@ -3,25 +3,60 @@\n # Akvo Reporting is covered by the GNU Affero General Public License.\n # See more details in the license.txt file located at the root folder of the Akvo RSR module.\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n+import argparse\n+import datetime\n \n from akvo.rsr.models.project import Project\n \n from django.core.management.base import BaseCommand\n \n \n+def date_arg_type(string):\n+ return datetime.datetime.strptime(string, '%Y-%m-%d').date()\n+\n+\n class Command(BaseCommand):\n help = \"Perform all IATI checks for projects.\"\n \n- def add_arguments(self, parser):\n+ def add_arguments(self, parser: argparse.ArgumentParser):\n parser.add_argument(\n '--all',\n action='store_true',\n default=False,\n help='Run IATI checks for all the projects in the DB.',\n+\n+ )\n+ parser.add_argument(\n+ '--date-start',\n+ type=date_arg_type,\n+ help='Limit to projects created on and after this day',\n+ )\n+ parser.add_argument(\n+ '--date-end',\n+ type=date_arg_type,\n+ help='Limit to projects created on or before this day',\n )\n \n def handle(self, *args, **options):\n- projects = Project.objects.all() if options['all'] else Project.objects.filter(run_iati_checks=True)\n+ all_option = options[\"all\"]\n+ date_start = options[\"date_start\"]\n+ date_end = options[\"date_end\"]\n+\n+ # Filter projects with options\n+ projects = Project.objects.all()\n+ if not (all_option or date_start or date_end):\n+ self.stdout.write(\"No options provided: only checking projects with run_iati_checks=True\")\n+ projects = projects.filter(run_iati_checks=True)\n+ elif all_option:\n+ self.stdout.write(\"Checking ALL projects. This might take a while...\")\n+ else:\n+ if date_start:\n+ self.stdout.write(\"Filtering projects on and after %s\" % date_start)\n+ projects = projects.filter(created_at__gte=date_start)\n+ if date_end:\n+ self.stdout.write(\"Filtering projects on and before %s\" % date_end)\n+ projects = projects.filter(created_at__lte=date_end)\n+\n self.stdout.write('Performing IATI checks for {} ...'.format(projects.count()))\n for project in projects:\n self.stdout.write('Performing IATI checks for project {0}...'.format(project.pk))\n", "issue": "Allow passing date range for perform_iati_checks\nCurrently, [`perform_iati_checks`](https://github.com/akvo/akvo-rsr/blob/master/akvo/rsr/management/commands/perform_iati_checks.py) only has the `--all` option. 
It should be possible to pass a date range of projects to execute the script on.\r\n\r\nExecuting it on all projects is unnecessary as most are probably inactive.\r\n\r\nThis is related to #4779 as some project will have to be rechecked and we don't want to check all projects.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rsr.models.project import Project\n\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n help = \"Perform all IATI checks for projects.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--all',\n action='store_true',\n default=False,\n help='Run IATI checks for all the projects in the DB.',\n )\n\n def handle(self, *args, **options):\n projects = Project.objects.all() if options['all'] else Project.objects.filter(run_iati_checks=True)\n self.stdout.write('Performing IATI checks for {} ...'.format(projects.count()))\n for project in projects:\n self.stdout.write('Performing IATI checks for project {0}...'.format(project.pk))\n project.update_iati_checks()\n", "path": "akvo/rsr/management/commands/perform_iati_checks.py"}]} | 961 | 625 |
gh_patches_debug_12644 | rasdani/github-patches | git_diff | bridgecrewio__checkov-281 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CloudFormation: S3 default encryption check should look for ServerSideEncryptionByDefault
The CloudFormation check for CKV_AWS_19 only looks for `ServerSideEncryptionRule`, not `ServerSideEncryptionByDefault`:
https://github.com/bridgecrewio/checkov/blob/864d12ad0dfc6ca898dbacf7dc16fdc20f702733/checkov/cloudformation/checks/resource/aws/S3Encryption.py
The syntax used in the S3 template does not appear to be used anywhere else in GitHub:
https://github.com/bridgecrewio/checkov/blob/864d12ad0dfc6ca898dbacf7dc16fdc20f702733/tests/cloudformation/checks/resource/aws/S3Templates/ExampleS3.yaml#L13
All of the working CloudFormation templates which I've seen look like this:
https://github.com/cloudconformity/documentation-api/blob/cba56c057d42c19db606f7d72ca197ee52d97848/examples/cloudformation-snippets/S3Bucket.yml#L8-L11
Checkov version: 1.0.312
</issue>
<code>
[start of checkov/cloudformation/checks/resource/aws/S3Encryption.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class S3Encryption(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure the S3 bucket has server-side-encryption enabled"
8 id = "CKV_AWS_19"
9 supported_resources = ['AWS::S3::Bucket']
10 categories = [CheckCategories.LOGGING]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if conf.get('Properties') and conf['Properties'].get('ServerSideEncryptionRule'):
15 return CheckResult.PASSED
16 return CheckResult.FAILED
17
18
19 check = S3Encryption()
20
[end of checkov/cloudformation/checks/resource/aws/S3Encryption.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/S3Encryption.py b/checkov/cloudformation/checks/resource/aws/S3Encryption.py
--- a/checkov/cloudformation/checks/resource/aws/S3Encryption.py
+++ b/checkov/cloudformation/checks/resource/aws/S3Encryption.py
@@ -11,8 +11,11 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
- if conf.get('Properties') and conf['Properties'].get('ServerSideEncryptionRule'):
- return CheckResult.PASSED
+ if conf.get('Properties'):
+ if conf['Properties'].get('ServerSideEncryptionRule') or \
+ (conf['Properties'].get('BucketEncryption') and
+ len(conf['Properties']['BucketEncryption'].get('ServerSideEncryptionConfiguration', [])) > 0):
+ return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/S3Encryption.py b/checkov/cloudformation/checks/resource/aws/S3Encryption.py\n--- a/checkov/cloudformation/checks/resource/aws/S3Encryption.py\n+++ b/checkov/cloudformation/checks/resource/aws/S3Encryption.py\n@@ -11,8 +11,11 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n- if conf.get('Properties') and conf['Properties'].get('ServerSideEncryptionRule'):\n- return CheckResult.PASSED\n+ if conf.get('Properties'):\n+ if conf['Properties'].get('ServerSideEncryptionRule') or \\\n+ (conf['Properties'].get('BucketEncryption') and\n+ len(conf['Properties']['BucketEncryption'].get('ServerSideEncryptionConfiguration', [])) > 0):\n+ return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "CloudFormation: S3 default encryption check should look for ServerSideEncryptionByDefault\nThe CloudFormation check for CKV_AWS_19 only looks for `ServerSideEncryptionRule`, not `ServerSideEncryptionByDefault`:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/864d12ad0dfc6ca898dbacf7dc16fdc20f702733/checkov/cloudformation/checks/resource/aws/S3Encryption.py\r\n\r\nThe syntax used in the S3 template does not appear to be used anywhere else in GitHub:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/864d12ad0dfc6ca898dbacf7dc16fdc20f702733/tests/cloudformation/checks/resource/aws/S3Templates/ExampleS3.yaml#L13 \r\n\r\nAll of the working CloudFormation templates which I've seen look like this:\r\n\r\nhttps://github.com/cloudconformity/documentation-api/blob/cba56c057d42c19db606f7d72ca197ee52d97848/examples/cloudformation-snippets/S3Bucket.yml#L8-L11\r\n\r\nCheckov version: 1.0.312\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass S3Encryption(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure the S3 bucket has server-side-encryption enabled\"\n id = \"CKV_AWS_19\"\n supported_resources = ['AWS::S3::Bucket']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get('Properties') and conf['Properties'].get('ServerSideEncryptionRule'):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = S3Encryption()\n", "path": "checkov/cloudformation/checks/resource/aws/S3Encryption.py"}]} | 1,026 | 212 |
gh_patches_debug_15364 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3933 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OCI Security Group Control Problem
**Describe the issue**
AbsSecurtiGroupUnrestictedIngress The function needs to control "destination_port_range" instead of "source_port_range"
[Codo Block](https://github.com/bridgecrewio/checkov/blob/b13dd31e10b84b7e4ce87d1d80c9d7d54b404742/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py#L33-L36)
**Examples**
**Version (please complete the following information):**
- Checkov Version 2.2.86
</issue>
<code>
[start of checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 from checkov.common.util.type_forcers import force_int
4
5
6 class AbsSecurityGroupUnrestrictedIngress(BaseResourceCheck):
7 def __init__(self, check_id: str, port: int) -> None:
8 name = f"Ensure no security groups rules allow ingress from 0.0.0.0/0 to port {port}"
9 supported_resources = ['oci_core_network_security_group_security_rule']
10 categories = [CheckCategories.NETWORKING]
11 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)
12 self.port = port
13
14 def scan_resource_conf(self, conf):
15 direction = conf.get('direction')
16 source = conf.get('source')
17 protocol = conf.get('protocol')
18 tcp_options = conf.get('tcp_options')
19 self.evaluated_keys = ["direction"]
20 if direction and direction[0] != 'INGRESS':
21 return CheckResult.UNKNOWN
22 self.evaluated_keys.append("source")
23 if source and source[0] != "0.0.0.0/0":
24 return CheckResult.PASSED
25 elif (tcp_options is None and (protocol[0] == 'all' or protocol[0] == '6')) \
26 or tcp_options and self.scan_protocol_conf(tcp_options) is False:
27 self.evaluated_keys.append("protocol")
28 return CheckResult.FAILED
29 return CheckResult.PASSED
30
31 def scan_protocol_conf(self, protocol_name):
32 """ scan tcp_options configuration"""
33 if 'source_port_range' not in protocol_name[0]:
34 return False
35 max_port = force_int(protocol_name[0]['source_port_range'][0]['max'][0])
36 min_port = force_int(protocol_name[0]['source_port_range'][0]['min'][0])
37 if max_port and min_port and min_port <= self.port <= max_port:
38 return False
39 return True
40
[end of checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py b/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py
--- a/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py
+++ b/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py
@@ -30,10 +30,10 @@
def scan_protocol_conf(self, protocol_name):
""" scan tcp_options configuration"""
- if 'source_port_range' not in protocol_name[0]:
+ if 'destination_port_range' not in protocol_name[0]:
return False
- max_port = force_int(protocol_name[0]['source_port_range'][0]['max'][0])
- min_port = force_int(protocol_name[0]['source_port_range'][0]['min'][0])
+ max_port = force_int(protocol_name[0]['destination_port_range'][0]['max'][0])
+ min_port = force_int(protocol_name[0]['destination_port_range'][0]['min'][0])
if max_port and min_port and min_port <= self.port <= max_port:
return False
return True
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py b/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py\n--- a/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py\n+++ b/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py\n@@ -30,10 +30,10 @@\n \n def scan_protocol_conf(self, protocol_name):\n \"\"\" scan tcp_options configuration\"\"\"\n- if 'source_port_range' not in protocol_name[0]:\n+ if 'destination_port_range' not in protocol_name[0]:\n return False\n- max_port = force_int(protocol_name[0]['source_port_range'][0]['max'][0])\n- min_port = force_int(protocol_name[0]['source_port_range'][0]['min'][0])\n+ max_port = force_int(protocol_name[0]['destination_port_range'][0]['max'][0])\n+ min_port = force_int(protocol_name[0]['destination_port_range'][0]['min'][0])\n if max_port and min_port and min_port <= self.port <= max_port:\n return False\n return True\n", "issue": "OCI Security Group Control Problem\n**Describe the issue**\r\nAbsSecurtiGroupUnrestictedIngress The function needs to control \"destination_port_range\" instead of \"source_port_range\"\r\n\r\n[Codo Block](https://github.com/bridgecrewio/checkov/blob/b13dd31e10b84b7e4ce87d1d80c9d7d54b404742/checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py#L33-L36)\r\n\r\n**Examples**\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.86\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_int\n\n\nclass AbsSecurityGroupUnrestrictedIngress(BaseResourceCheck):\n def __init__(self, check_id: str, port: int) -> None:\n name = f\"Ensure no security groups rules allow ingress from 0.0.0.0/0 to port {port}\"\n supported_resources = ['oci_core_network_security_group_security_rule']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def scan_resource_conf(self, conf):\n direction = conf.get('direction')\n source = conf.get('source')\n protocol = conf.get('protocol')\n tcp_options = conf.get('tcp_options')\n self.evaluated_keys = [\"direction\"]\n if direction and direction[0] != 'INGRESS':\n return CheckResult.UNKNOWN\n self.evaluated_keys.append(\"source\")\n if source and source[0] != \"0.0.0.0/0\":\n return CheckResult.PASSED\n elif (tcp_options is None and (protocol[0] == 'all' or protocol[0] == '6')) \\\n or tcp_options and self.scan_protocol_conf(tcp_options) is False:\n self.evaluated_keys.append(\"protocol\")\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n def scan_protocol_conf(self, protocol_name):\n \"\"\" scan tcp_options configuration\"\"\"\n if 'source_port_range' not in protocol_name[0]:\n return False\n max_port = force_int(protocol_name[0]['source_port_range'][0]['max'][0])\n min_port = force_int(protocol_name[0]['source_port_range'][0]['min'][0])\n if max_port and min_port and min_port <= self.port <= max_port:\n return False\n return True\n", "path": "checkov/terraform/checks/resource/oci/AbsSecurityGroupUnrestrictedIngress.py"}]} | 1,211 | 264 |
gh_patches_debug_70 | rasdani/github-patches | git_diff | d2l-ai__d2l-en-2254 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ModuleNotFoundError when running the official pytorch colab notebook

I can replicate the error at multiple official pytorch colab notebooks, e.g.
https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce
ModuleNotFoundError when running the official pytorch colab notebook

I can replicate the error at multiple official pytorch colab notebooks, e.g.
https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import d2l
3
4 requirements = [
5 'jupyter',
6 'numpy',
7 'matplotlib',
8 'requests',
9 'pandas',
10 'gym'
11 ]
12
13 setup(
14 name='d2l',
15 version=d2l.__version__,
16 python_requires='>=3.5',
17 author='D2L Developers',
18 author_email='[email protected]',
19 url='https://d2l.ai',
20 description='Dive into Deep Learning',
21 license='MIT-0',
22 packages=find_packages(),
23 zip_safe=True,
24 install_requires=requirements,
25 )
26
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,6 +2,7 @@
import d2l
requirements = [
+ 'ipython>=7.23',
'jupyter',
'numpy',
'matplotlib',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,6 +2,7 @@\n import d2l\n \n requirements = [\n+ 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n", "issue": "ModuleNotFoundError when running the official pytorch colab notebook\n\r\n\r\nI can replicate the error at multiple official pytorch colab notebooks, e.g. \r\n\r\nhttps://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce\r\n\r\n\r\n\nModuleNotFoundError when running the official pytorch colab notebook\n\r\n\r\nI can replicate the error at multiple official pytorch colab notebooks, e.g. \r\n\r\nhttps://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce\r\n\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]} | 997 | 63 |
gh_patches_debug_3462 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Shelf page json view has a server error
This is a quick fix, needs a unittest.
</issue>
<code>
[start of bookwyrm/models/shelf.py]
1 ''' puttin' books on shelves '''
2 import re
3 from django.db import models
4
5 from bookwyrm import activitypub
6 from .base_model import ActivitypubMixin, BookWyrmModel
7 from .base_model import OrderedCollectionMixin
8 from . import fields
9
10
11 class Shelf(OrderedCollectionMixin, BookWyrmModel):
12 ''' a list of books owned by a user '''
13 name = fields.CharField(max_length=100)
14 identifier = models.CharField(max_length=100)
15 user = fields.ForeignKey(
16 'User', on_delete=models.PROTECT, activitypub_field='owner')
17 editable = models.BooleanField(default=True)
18 privacy = fields.CharField(
19 max_length=255,
20 default='public',
21 choices=fields.PrivacyLevels.choices
22 )
23 books = models.ManyToManyField(
24 'Edition',
25 symmetrical=False,
26 through='ShelfBook',
27 through_fields=('shelf', 'book')
28 )
29
30 def save(self, *args, **kwargs):
31 ''' set the identifier '''
32 saved = super().save(*args, **kwargs)
33 if not self.identifier:
34 slug = re.sub(r'[^\w]', '', self.name).lower()
35 self.identifier = '%s-%d' % (slug, self.id)
36 return super().save(*args, **kwargs)
37 return saved
38
39 @property
40 def collection_queryset(self):
41 ''' list of books for this shelf, overrides OrderedCollectionMixin '''
42 return self.books
43
44 def get_remote_id(self):
45 ''' shelf identifier instead of id '''
46 base_path = self.user.remote_id
47 return '%s/shelf/%s' % (base_path, self.identifier)
48
49 class Meta:
50 ''' user/shelf unqiueness '''
51 unique_together = ('user', 'identifier')
52
53
54 class ShelfBook(ActivitypubMixin, BookWyrmModel):
55 ''' many to many join table for books and shelves '''
56 book = fields.ForeignKey(
57 'Edition', on_delete=models.PROTECT, activitypub_field='object')
58 shelf = fields.ForeignKey(
59 'Shelf', on_delete=models.PROTECT, activitypub_field='target')
60 added_by = fields.ForeignKey(
61 'User',
62 blank=True,
63 null=True,
64 on_delete=models.PROTECT,
65 activitypub_field='actor'
66 )
67
68 activity_serializer = activitypub.AddBook
69
70 def to_add_activity(self, user):
71 ''' AP for shelving a book'''
72 return activitypub.Add(
73 id='%s#add' % self.remote_id,
74 actor=user.remote_id,
75 object=self.book.to_activity(),
76 target=self.shelf.remote_id,
77 ).serialize()
78
79 def to_remove_activity(self, user):
80 ''' AP for un-shelving a book'''
81 return activitypub.Remove(
82 id='%s#remove' % self.remote_id,
83 actor=user.remote_id,
84 object=self.book.to_activity(),
85 target=self.shelf.to_activity()
86 ).serialize()
87
88
89 class Meta:
90 ''' an opinionated constraint!
91 you can't put a book on shelf twice '''
92 unique_together = ('book', 'shelf')
93
[end of bookwyrm/models/shelf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/models/shelf.py b/bookwyrm/models/shelf.py
--- a/bookwyrm/models/shelf.py
+++ b/bookwyrm/models/shelf.py
@@ -39,7 +39,7 @@
@property
def collection_queryset(self):
''' list of books for this shelf, overrides OrderedCollectionMixin '''
- return self.books
+ return self.books.all()
def get_remote_id(self):
''' shelf identifier instead of id '''
| {"golden_diff": "diff --git a/bookwyrm/models/shelf.py b/bookwyrm/models/shelf.py\n--- a/bookwyrm/models/shelf.py\n+++ b/bookwyrm/models/shelf.py\n@@ -39,7 +39,7 @@\n @property\n def collection_queryset(self):\n ''' list of books for this shelf, overrides OrderedCollectionMixin '''\n- return self.books\n+ return self.books.all()\n \n def get_remote_id(self):\n ''' shelf identifier instead of id '''\n", "issue": "Shelf page json view has a server error\nThis is a quick fix, needs a unittest.\n", "before_files": [{"content": "''' puttin' books on shelves '''\nimport re\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom .base_model import ActivitypubMixin, BookWyrmModel\nfrom .base_model import OrderedCollectionMixin\nfrom . import fields\n\n\nclass Shelf(OrderedCollectionMixin, BookWyrmModel):\n ''' a list of books owned by a user '''\n name = fields.CharField(max_length=100)\n identifier = models.CharField(max_length=100)\n user = fields.ForeignKey(\n 'User', on_delete=models.PROTECT, activitypub_field='owner')\n editable = models.BooleanField(default=True)\n privacy = fields.CharField(\n max_length=255,\n default='public',\n choices=fields.PrivacyLevels.choices\n )\n books = models.ManyToManyField(\n 'Edition',\n symmetrical=False,\n through='ShelfBook',\n through_fields=('shelf', 'book')\n )\n\n def save(self, *args, **kwargs):\n ''' set the identifier '''\n saved = super().save(*args, **kwargs)\n if not self.identifier:\n slug = re.sub(r'[^\\w]', '', self.name).lower()\n self.identifier = '%s-%d' % (slug, self.id)\n return super().save(*args, **kwargs)\n return saved\n\n @property\n def collection_queryset(self):\n ''' list of books for this shelf, overrides OrderedCollectionMixin '''\n return self.books\n\n def get_remote_id(self):\n ''' shelf identifier instead of id '''\n base_path = self.user.remote_id\n return '%s/shelf/%s' % (base_path, self.identifier)\n\n class Meta:\n ''' user/shelf unqiueness '''\n unique_together = ('user', 'identifier')\n\n\nclass ShelfBook(ActivitypubMixin, BookWyrmModel):\n ''' many to many join table for books and shelves '''\n book = fields.ForeignKey(\n 'Edition', on_delete=models.PROTECT, activitypub_field='object')\n shelf = fields.ForeignKey(\n 'Shelf', on_delete=models.PROTECT, activitypub_field='target')\n added_by = fields.ForeignKey(\n 'User',\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n activitypub_field='actor'\n )\n\n activity_serializer = activitypub.AddBook\n\n def to_add_activity(self, user):\n ''' AP for shelving a book'''\n return activitypub.Add(\n id='%s#add' % self.remote_id,\n actor=user.remote_id,\n object=self.book.to_activity(),\n target=self.shelf.remote_id,\n ).serialize()\n\n def to_remove_activity(self, user):\n ''' AP for un-shelving a book'''\n return activitypub.Remove(\n id='%s#remove' % self.remote_id,\n actor=user.remote_id,\n object=self.book.to_activity(),\n target=self.shelf.to_activity()\n ).serialize()\n\n\n class Meta:\n ''' an opinionated constraint!\n you can't put a book on shelf twice '''\n unique_together = ('book', 'shelf')\n", "path": "bookwyrm/models/shelf.py"}]} | 1,393 | 107 |
gh_patches_debug_19760 | rasdani/github-patches | git_diff | streamlink__streamlink-1618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BritTV in need of password ?
- [x] I have read the contribution guidelines.
Brit TV won't open for me any more, could it be it needs One's credentials ?
How would that look ? --brittv-username=xxxx --brittv-password=xxxx ?
(Tested of course).
```
C:\Users\Ordval>call streamlink.exe "http://brittv.co.uk/watch/?channel=1" best
[cli][info] Found matching plugin brittv for URL http://brittv.co.uk/watch/?channel=1
[cli][info] Waiting for streams, retrying every 1.0 second(s)
```
```
C:\Users\Ordval>call streamlink.exe -l debug --http-header User-Agent=Chrome/54.0.2840.59 "https://brittv.co.uk/watch/?channel=1" best
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.5.2
[cli][debug] Streamlink: 0.11.0
[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)
[cli][info] Found matching plugin brittv for URL https://brittv.co.uk/watch/?channel=1
[plugin.brittv][debug] Found js key: aHR0cDovL2IxYTJmODdjLmFyY2guYmx1d2FyZS50di9kYWcxLm0zdTg/c2lkPWV6ZCZjaWQ9YmJjMSZhdXRoPWMzUnBiV1U5TVRVeU5EUXlNVEEzTnlaMllXeHBaR2wwZVQwekptRm5iajB3Sm1OdmJtWTlkVEFtYzJsbmJqMDBZVEk1WVdFeE4yRmtNamswWWpWaFpqWXlaR1JpWkRnek5qZGtObUl6Wmc9PQ==
error: No playable streams found on this URL: https://brittv.co.uk/watch/?channel=1
```
Opens on website. The temporary:
`call streamlink.exe hls://"http://b1a2f87c.arch.bluware.tv/dag1.m3u8?sid=ezd&cid=bbc1&auth=c3RpbWU9MTUyNDQwMzQ5OCZ2YWxpZGl0eT0zJmFnbj0wJmNvbmY9dTAmc2lnbj00ZWY4NTExMjNmODBlOGU5MmVjZWE1ZWUzZDc4YzQ0Mg==" best `
will play.
</issue>
<code>
[start of src/streamlink/plugins/brittv.py]
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import http
5 from streamlink.plugin.api import useragents
6 from streamlink.compat import urljoin
7 from streamlink.stream import HLSStream
8
9
10 class BritTV(Plugin):
11 url_re = re.compile(r"https?://(?:www\.)?brittv\.co.uk/watch/")
12 js_re = re.compile(r"""/js/brittv\.player\.js\.php\?key=([^'"]+)['"]""")
13 player_re = re.compile(r"file: '(http://[^']+)'")
14
15 @classmethod
16 def can_handle_url(cls, url):
17 return cls.url_re.match(url) is not None
18
19 def _get_streams(self):
20 res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
21 m = self.js_re.search(res.text)
22 if m:
23 self.logger.debug("Found js key: {0}", m.group(1))
24 js_url = m.group(0)
25 res = http.get(urljoin(self.url, js_url))
26
27 for url in self.player_re.findall(res.text):
28 if "adblock" not in url:
29 yield "live", HLSStream(self.session, url)
30
31
32 __plugin__ = BritTV
33
[end of src/streamlink/plugins/brittv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/brittv.py b/src/streamlink/plugins/brittv.py
--- a/src/streamlink/plugins/brittv.py
+++ b/src/streamlink/plugins/brittv.py
@@ -10,7 +10,7 @@
class BritTV(Plugin):
url_re = re.compile(r"https?://(?:www\.)?brittv\.co.uk/watch/")
js_re = re.compile(r"""/js/brittv\.player\.js\.php\?key=([^'"]+)['"]""")
- player_re = re.compile(r"file: '(http://[^']+)'")
+ player_re = re.compile(r'''src\s*:\s*(?P<quote>['"])(https?://.+?)(?P=quote)''')
@classmethod
def can_handle_url(cls, url):
@@ -24,6 +24,7 @@
js_url = m.group(0)
res = http.get(urljoin(self.url, js_url))
+ self.logger.debug("Looking for stream URL...")
for url in self.player_re.findall(res.text):
if "adblock" not in url:
yield "live", HLSStream(self.session, url)
| {"golden_diff": "diff --git a/src/streamlink/plugins/brittv.py b/src/streamlink/plugins/brittv.py\n--- a/src/streamlink/plugins/brittv.py\n+++ b/src/streamlink/plugins/brittv.py\n@@ -10,7 +10,7 @@\n class BritTV(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.)?brittv\\.co.uk/watch/\")\n js_re = re.compile(r\"\"\"/js/brittv\\.player\\.js\\.php\\?key=([^'\"]+)['\"]\"\"\")\n- player_re = re.compile(r\"file: '(http://[^']+)'\")\n+ player_re = re.compile(r'''src\\s*:\\s*(?P<quote>['\"])(https?://.+?)(?P=quote)''')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -24,6 +24,7 @@\n js_url = m.group(0)\n res = http.get(urljoin(self.url, js_url))\n \n+ self.logger.debug(\"Looking for stream URL...\")\n for url in self.player_re.findall(res.text):\n if \"adblock\" not in url:\n yield \"live\", HLSStream(self.session, url)\n", "issue": "BritTV in need of password ? \n - [x] I have read the contribution guidelines.\r\n \r\nBrit TV won't open for me any more, could it be it needs One's credentials ? \r\nHow would that look ? --brittv-username=xxxx --brittv-password=xxxx ?\r\n(Tested of course).\r\n```\r\nC:\\Users\\Ordval>call streamlink.exe \"http://brittv.co.uk/watch/?channel=1\" best\r\n[cli][info] Found matching plugin brittv for URL http://brittv.co.uk/watch/?channel=1\r\n[cli][info] Waiting for streams, retrying every 1.0 second(s)\r\n```\r\n\r\n```\r\nC:\\Users\\Ordval>call streamlink.exe -l debug --http-header User-Agent=Chrome/54.0.2840.59 \"https://brittv.co.uk/watch/?channel=1\" best\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.5.2\r\n[cli][debug] Streamlink: 0.11.0\r\n[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)\r\n[cli][info] Found matching plugin brittv for URL https://brittv.co.uk/watch/?channel=1\r\n[plugin.brittv][debug] Found js key: aHR0cDovL2IxYTJmODdjLmFyY2guYmx1d2FyZS50di9kYWcxLm0zdTg/c2lkPWV6ZCZjaWQ9YmJjMSZhdXRoPWMzUnBiV1U5TVRVeU5EUXlNVEEzTnlaMllXeHBaR2wwZVQwekptRm5iajB3Sm1OdmJtWTlkVEFtYzJsbmJqMDBZVEk1WVdFeE4yRmtNamswWWpWaFpqWXlaR1JpWkRnek5qZGtObUl6Wmc9PQ==\r\nerror: No playable streams found on this URL: https://brittv.co.uk/watch/?channel=1 \r\n```\r\nOpens on website. The temporary:\r\n`call streamlink.exe hls://\"http://b1a2f87c.arch.bluware.tv/dag1.m3u8?sid=ezd&cid=bbc1&auth=c3RpbWU9MTUyNDQwMzQ5OCZ2YWxpZGl0eT0zJmFnbj0wJmNvbmY9dTAmc2lnbj00ZWY4NTExMjNmODBlOGU5MmVjZWE1ZWUzZDc4YzQ0Mg==\" best `\r\nwill play.\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.compat import urljoin\nfrom streamlink.stream import HLSStream\n\n\nclass BritTV(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.)?brittv\\.co.uk/watch/\")\n js_re = re.compile(r\"\"\"/js/brittv\\.player\\.js\\.php\\?key=([^'\"]+)['\"]\"\"\")\n player_re = re.compile(r\"file: '(http://[^']+)'\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url, headers={\"User-Agent\": useragents.CHROME})\n m = self.js_re.search(res.text)\n if m:\n self.logger.debug(\"Found js key: {0}\", m.group(1))\n js_url = m.group(0)\n res = http.get(urljoin(self.url, js_url))\n\n for url in self.player_re.findall(res.text):\n if \"adblock\" not in url:\n yield \"live\", HLSStream(self.session, url)\n\n\n__plugin__ = BritTV\n", "path": "src/streamlink/plugins/brittv.py"}]} | 1,511 | 261 |
gh_patches_debug_35445 | rasdani/github-patches | git_diff | weecology__retriever-408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
download_only w/path fails to use path argument when checking for file
When `download_only` checks to see if the file already exists before copying it, it ignores the path argument. This means that:
```
retriever download MoM2003 -p testdata
```
will keep overwriting the file in `testdata` if it exists, and it will not copy the file to `testdata` if the file exists in `.`.
Fixes this is probably just a little logic improvement in the `final_cleanup` function of `download_only`.
</issue>
<code>
[start of engines/download_only.py]
1 import os
2 import platform
3 import shutil
4 import inspect
5 from retriever.lib.engine import filename_from_url
6 from retriever.lib.models import Engine, no_cleanup
7 from retriever import DATA_DIR, HOME_DIR
8
9 class DummyConnection:
10 def cursor(self):
11 pass
12 def commit(self):
13 pass
14 def rollback(self):
15 pass
16 def close(self):
17 pass
18
19 class DummyCursor(DummyConnection):
20 pass
21
22
23 class engine(Engine):
24 """Engine instance for writing data to a CSV file."""
25 name = "Download Only"
26 abbreviation = "download"
27 required_opts = [("path",
28 "File path to copy data files",
29 "./"),
30 ("subdir",
31 "Keep the subdirectories for archived files",
32 False)
33 ]
34
35 def table_exists(self, dbname, tablename):
36 try:
37 tablename = self.table_name(name=tablename, dbname=dbname)
38 return os.path.exists(tablename)
39 except:
40 return False
41
42 def get_connection(self):
43 """Gets the db connection."""
44 self.get_input()
45 return DummyConnection()
46
47 def final_cleanup(self):
48 if hasattr(self, "all_files"):
49 for file_name in self.all_files:
50 file_path, file_name_nopath = os.path.split(file_name)
51 subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''
52 dest_path = os.path.join(self.opts['path'], subdir)
53 if os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):
54 print ("%s is already in the working directory" % file_name_nopath)
55 print("Keeping existing copy.")
56 else:
57 print("Copying %s from %s" % (file_name_nopath, file_path))
58 if os.path.isdir(dest_path):
59 try:
60 shutil.copy(file_name, dest_path)
61 except:
62 print("Couldn't copy file to %s" % dest_path)
63 else:
64 try:
65 print("Creating directory %s" % dest_path)
66 os.makedirs(dest_path)
67 shutil.copy(file_name, dest_path)
68 except:
69 print("Couldn't create directory %s" % dest_path)
70 self.all_files = set()
71
72 def auto_create_table(self, table, url=None, filename=None, pk=None):
73 if url and not filename:
74 filename = filename_from_url(url)
75
76 if url and not self.find_file(filename):
77 # If the file doesn't exist, download it
78 self.download_file(url, filename)
79
80 def insert_data_from_url(self, url):
81 filename = filename_from_url(url)
82 find = self.find_file(filename)
83 if not find:
84 self.create_raw_data_dir()
85 self.download_file(url, filename)
86
87 def find_file(self, filename):
88 result = Engine.find_file(self, filename)
89 if not hasattr(self, "all_files"): self.all_files = set()
90 if result: self.all_files.add(result)
91 return result
92
93 def register_files(self, filenames):
94 """Identify a list of files to be moved by the download
95
96 When downloading archives with multiple files the engine needs to be
97 informed of all of the file names so that it can move them.
98
99 """
100 full_filenames = {self.find_file(filename) for filename in filenames
101 if self.find_file(filename)}
102 self.all_files = self.all_files.union(full_filenames)
103
104
105 # replace all other methods with a function that does nothing
106 def dummy_method(self, *args, **kwargs):
107 pass
108 methods = inspect.getmembers(engine, predicate=inspect.ismethod)
109 keep_methods = {'table_exists',
110 'get_connection',
111 'final_cleanup',
112 'auto_create_table',
113 'insert_data_from_url',
114 }
115 remove_methods = ['insert_data_from_file']
116 for name, method in methods:
117 if (not name in keep_methods
118 and not 'download' in name
119 and not 'file' in name
120 and not 'dir' in name):
121
122 setattr(engine, name, dummy_method)
123 for name in remove_methods:
124 setattr(engine, name, dummy_method)
125
[end of engines/download_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/engines/download_only.py b/engines/download_only.py
--- a/engines/download_only.py
+++ b/engines/download_only.py
@@ -48,14 +48,17 @@
if hasattr(self, "all_files"):
for file_name in self.all_files:
file_path, file_name_nopath = os.path.split(file_name)
+ full_path = os.path.join(self.opts['path'], file_name_nopath)
subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''
dest_path = os.path.join(self.opts['path'], subdir)
if os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):
print ("%s is already in the working directory" % file_name_nopath)
print("Keeping existing copy.")
+ elif os.path.exists(full_path):
+ print ("File already exists at specified location")
else:
- print("Copying %s from %s" % (file_name_nopath, file_path))
if os.path.isdir(dest_path):
+ print("Copying %s from %s" % (file_name_nopath, file_path))
try:
shutil.copy(file_name, dest_path)
except:
@@ -64,6 +67,7 @@
try:
print("Creating directory %s" % dest_path)
os.makedirs(dest_path)
+ print("Copying %s from %s" % (file_name_nopath, file_path))
shutil.copy(file_name, dest_path)
except:
print("Couldn't create directory %s" % dest_path)
@@ -122,3 +126,4 @@
setattr(engine, name, dummy_method)
for name in remove_methods:
setattr(engine, name, dummy_method)
+
| {"golden_diff": "diff --git a/engines/download_only.py b/engines/download_only.py\n--- a/engines/download_only.py\n+++ b/engines/download_only.py\n@@ -48,14 +48,17 @@\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n+ full_path = os.path.join(self.opts['path'], file_name_nopath)\n subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''\n dest_path = os.path.join(self.opts['path'], subdir)\n if os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n+ elif os.path.exists(full_path):\n+ print (\"File already exists at specified location\")\n else:\n- print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n if os.path.isdir(dest_path):\n+ print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n try:\n shutil.copy(file_name, dest_path)\n except:\n@@ -64,6 +67,7 @@\n try:\n print(\"Creating directory %s\" % dest_path)\n os.makedirs(dest_path)\n+ print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't create directory %s\" % dest_path)\n@@ -122,3 +126,4 @@\n setattr(engine, name, dummy_method)\n for name in remove_methods:\n setattr(engine, name, dummy_method)\n+\n", "issue": "download_only w/path fails to use path argument when checking for file\nWhen `download_only` checks to see if the file already exists before copying it, it ignores the path argument. This means that:\n\n```\nretriever download MoM2003 -p testdata\n```\n\nwill keep overwriting the file in `testdata` if it exists, and it will not copy the file to `testdata` if the file exists in `.`.\n\nFixes this is probably just a little logic improvement in the `final_cleanup` function of `download_only`.\n\n", "before_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n (\"subdir\",\n \"Keep the subdirectories for archived files\",\n False)\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''\n dest_path = os.path.join(self.opts['path'], subdir)\n if os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n if os.path.isdir(dest_path):\n try:\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't 
copy file to %s\" % dest_path)\n else:\n try:\n print(\"Creating directory %s\" % dest_path)\n os.makedirs(dest_path)\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't create directory %s\" % dest_path)\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames\n if self.find_file(filename)}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}]} | 1,783 | 387 |
gh_patches_debug_40892 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider texas_roadhouse is broken
During the global build at 2021-05-26-14-42-23, spider **texas_roadhouse** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/texas_roadhouse.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/texas_roadhouse.geojson))
</issue>
<code>
[start of locations/spiders/texas_roadhouse.py]
1 import json
2
3 import scrapy
4
5 from locations.items import GeojsonPointItem
6 from locations.hours import OpeningHours
7
8
9 class TexasRoadhouseSpider(scrapy.Spider):
10 name = "texas_roadhouse"
11 item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" }
12 allowed_domains = ["www.texasroadhouse.com"]
13 start_urls = (
14 'https://www.texasroadhouse.com/locations',
15 )
16
17 def parse_hours(self, store_hours):
18 opening_hours = OpeningHours()
19
20 for weekday in store_hours:
21 # convert day from full Monday to Mo, etc
22 day = weekday.get('day')[:2]
23 open_time = weekday.get('hours').get('open')
24 close_time = weekday.get('hours').get('close')
25 opening_hours.add_range(day=day,
26 open_time=open_time,
27 close_time=close_time,
28 time_format='%I:%M%p')
29
30 return opening_hours.as_opening_hours()
31
32 def parse(self, response):
33 script_content = response.xpath('//script[contains(text(),"__locations__")]/text()').extract_first()
34 # effectively strip off leading "window.__locations__ = " where
35 # the rest is a json blob
36 script_data = script_content.split(" = ", 1)[-1]
37 script_data = script_data.rstrip(";")
38 stores = json.loads(script_data)
39
40 for store in stores:
41 properties = {
42 'lat': store['gps_lat'],
43 'lon': store['gps_lon'],
44 'ref': store['url'],
45 'name': store['name'],
46 'addr_full': store['address1'],
47 'city': store['city'],
48 'state': store['state'],
49 'postcode': store['zip'],
50 'country': store['country'],
51 'phone': store['phone'],
52 'website': response.urljoin(store['url']),
53 'opening_hours': self.parse_hours(store['schedule']),
54 'extras': {
55 'amenity:toilets': True,
56 },
57 }
58
59 yield GeojsonPointItem(**properties)
60
[end of locations/spiders/texas_roadhouse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py
--- a/locations/spiders/texas_roadhouse.py
+++ b/locations/spiders/texas_roadhouse.py
@@ -1,7 +1,5 @@
import json
-
import scrapy
-
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
@@ -11,7 +9,7 @@
item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" }
allowed_domains = ["www.texasroadhouse.com"]
start_urls = (
- 'https://www.texasroadhouse.com/locations',
+ 'https://www.texasroadhouse.com/sitemap.xml',
)
def parse_hours(self, store_hours):
@@ -20,8 +18,8 @@
for weekday in store_hours:
# convert day from full Monday to Mo, etc
day = weekday.get('day')[:2]
- open_time = weekday.get('hours').get('open')
- close_time = weekday.get('hours').get('close')
+ open_time = weekday.get('hours').get('openTime')
+ close_time = weekday.get('hours').get('closeTime')
opening_hours.add_range(day=day,
open_time=open_time,
close_time=close_time,
@@ -30,30 +28,31 @@
return opening_hours.as_opening_hours()
def parse(self, response):
- script_content = response.xpath('//script[contains(text(),"__locations__")]/text()').extract_first()
- # effectively strip off leading "window.__locations__ = " where
- # the rest is a json blob
- script_data = script_content.split(" = ", 1)[-1]
- script_data = script_data.rstrip(";")
- stores = json.loads(script_data)
-
- for store in stores:
- properties = {
- 'lat': store['gps_lat'],
- 'lon': store['gps_lon'],
- 'ref': store['url'],
- 'name': store['name'],
- 'addr_full': store['address1'],
- 'city': store['city'],
- 'state': store['state'],
- 'postcode': store['zip'],
- 'country': store['country'],
- 'phone': store['phone'],
- 'website': response.urljoin(store['url']),
- 'opening_hours': self.parse_hours(store['schedule']),
- 'extras': {
- 'amenity:toilets': True,
- },
- }
-
- yield GeojsonPointItem(**properties)
+ response.selector.remove_namespaces()
+ city_urls = response.xpath('//url/loc/text()').extract()
+ for path in city_urls:
+ if path.startswith('https://www.texasroadhouse.com/locations/'):
+ yield scrapy.Request(
+ path.strip(),
+ callback=self.parse_store,
+ )
+
+ def parse_store(self, response):
+ data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])
+
+ properties = {
+ 'lat': data['latitude'],
+ 'lon': data['longitude'],
+ 'ref': data['url'],
+ 'name': data['name'],
+ 'addr_full': data['address1'],
+ 'city': data['city'],
+ 'state': data['state'],
+ 'postcode': data['postalCode'],
+ 'country': data['countryCode'],
+ 'phone': data['telephone'],
+ 'website': response.urljoin(data['url']),
+ 'opening_hours': self.parse_hours(data['schedule']),
+ }
+
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py\n--- a/locations/spiders/texas_roadhouse.py\n+++ b/locations/spiders/texas_roadhouse.py\n@@ -1,7 +1,5 @@\n import json\n-\n import scrapy\n-\n from locations.items import GeojsonPointItem\n from locations.hours import OpeningHours\n \n@@ -11,7 +9,7 @@\n item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n- 'https://www.texasroadhouse.com/locations',\n+ 'https://www.texasroadhouse.com/sitemap.xml',\n )\n \n def parse_hours(self, store_hours):\n@@ -20,8 +18,8 @@\n for weekday in store_hours:\n # convert day from full Monday to Mo, etc\n day = weekday.get('day')[:2]\n- open_time = weekday.get('hours').get('open')\n- close_time = weekday.get('hours').get('close')\n+ open_time = weekday.get('hours').get('openTime')\n+ close_time = weekday.get('hours').get('closeTime')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n@@ -30,30 +28,31 @@\n return opening_hours.as_opening_hours()\n \n def parse(self, response):\n- script_content = response.xpath('//script[contains(text(),\"__locations__\")]/text()').extract_first()\n- # effectively strip off leading \"window.__locations__ = \" where\n- # the rest is a json blob\n- script_data = script_content.split(\" = \", 1)[-1]\n- script_data = script_data.rstrip(\";\")\n- stores = json.loads(script_data)\n-\n- for store in stores:\n- properties = {\n- 'lat': store['gps_lat'],\n- 'lon': store['gps_lon'],\n- 'ref': store['url'],\n- 'name': store['name'],\n- 'addr_full': store['address1'],\n- 'city': store['city'],\n- 'state': store['state'],\n- 'postcode': store['zip'],\n- 'country': store['country'],\n- 'phone': store['phone'],\n- 'website': response.urljoin(store['url']),\n- 'opening_hours': self.parse_hours(store['schedule']),\n- 'extras': {\n- 'amenity:toilets': True,\n- },\n- }\n-\n- yield GeojsonPointItem(**properties)\n+ response.selector.remove_namespaces()\n+ city_urls = response.xpath('//url/loc/text()').extract()\n+ for path in city_urls:\n+ if path.startswith('https://www.texasroadhouse.com/locations/'):\n+ yield scrapy.Request(\n+ path.strip(),\n+ callback=self.parse_store,\n+ )\n+\n+ def parse_store(self, response):\n+ data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])\n+\n+ properties = {\n+ 'lat': data['latitude'],\n+ 'lon': data['longitude'],\n+ 'ref': data['url'],\n+ 'name': data['name'],\n+ 'addr_full': data['address1'],\n+ 'city': data['city'],\n+ 'state': data['state'],\n+ 'postcode': data['postalCode'],\n+ 'country': data['countryCode'],\n+ 'phone': data['telephone'],\n+ 'website': response.urljoin(data['url']),\n+ 'opening_hours': self.parse_hours(data['schedule']),\n+ }\n+\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider texas_roadhouse is broken\nDuring the global build at 2021-05-26-14-42-23, spider **texas_roadhouse** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/texas_roadhouse.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/texas_roadhouse.geojson))\n", "before_files": [{"content": "import json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import 
OpeningHours\n\n\nclass TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/locations',\n )\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n # convert day from full Monday to Mo, etc\n day = weekday.get('day')[:2]\n open_time = weekday.get('hours').get('open')\n close_time = weekday.get('hours').get('close')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n script_content = response.xpath('//script[contains(text(),\"__locations__\")]/text()').extract_first()\n # effectively strip off leading \"window.__locations__ = \" where\n # the rest is a json blob\n script_data = script_content.split(\" = \", 1)[-1]\n script_data = script_data.rstrip(\";\")\n stores = json.loads(script_data)\n\n for store in stores:\n properties = {\n 'lat': store['gps_lat'],\n 'lon': store['gps_lon'],\n 'ref': store['url'],\n 'name': store['name'],\n 'addr_full': store['address1'],\n 'city': store['city'],\n 'state': store['state'],\n 'postcode': store['zip'],\n 'country': store['country'],\n 'phone': store['phone'],\n 'website': response.urljoin(store['url']),\n 'opening_hours': self.parse_hours(store['schedule']),\n 'extras': {\n 'amenity:toilets': True,\n },\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/texas_roadhouse.py"}]} | 1,306 | 831 |
gh_patches_debug_32460 | rasdani/github-patches | git_diff | google__timesketch-1735 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sigma tags as labels / tags for events
Sigma rules do come with something like:
```
tags:
- attack.initial_access
- attack.t1190
```
It could be useful to apply those tags to events that are found with the Sigma analyzer that does tag / label events. Instead (or in addition) of just the filename, the tags from the Sigma rule could be applied.
</issue>
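A minimal sketch of the requested behaviour, assuming the event and rule interfaces used by the analyzer code below (`event.add_tags`, `event.commit`, and a parsed rule dict exposing `file_name` and `tags`):

```python
def tag_event_with_rule(event, rule):
    """Tag a matched event with the rule name and the rule's own Sigma tags."""
    tags = ['sigma_{0:s}'.format(rule.get('file_name'))]
    # Sigma tags such as ['attack.initial_access', 'attack.t1190']
    tags.extend(rule.get('tags') or [])
    event.add_tags(tags)
    event.commit()
```

Whether the rule tags should be forwarded unchanged or given a prefix is an open design choice; the sketch simply passes them through as-is.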
<code>
[start of timesketch/lib/analyzers/sigma_tagger.py]
1 """Index analyzer plugin for sigma."""
2 from __future__ import unicode_literals
3
4 import logging
5 import time
6 import elasticsearch
7
8 from timesketch.lib.analyzers import utils
9
10 from timesketch.lib.analyzers import interface
11 from timesketch.lib.analyzers import manager
12 import timesketch.lib.sigma_util as ts_sigma_lib
13
14
15 logger = logging.getLogger('timesketch.analyzers.sigma_tagger')
16
17
18 class SigmaPlugin(interface.BaseAnalyzer):
19 """Analyzer for Sigma."""
20
21 NAME = 'sigma'
22 DISPLAY_NAME = 'Sigma'
23 DESCRIPTION = 'Run pre-defined Sigma rules and tag matching events'
24
25 def run_sigma_rule(self, query, tag_name):
26 """Runs a sigma rule and applies the appropriate tags.
27
28 Args:
29 query: elastic search query for events to tag.
30 tag_name: tag to apply to matching events.
31
32 Returns:
33 int: number of events tagged.
34 """
35 return_fields = []
36 tagged_events_counter = 0
37 events = self.event_stream(
38 query_string=query, return_fields=return_fields)
39 for event in events:
40 event.add_tags(['sigma_{0:s}'.format(tag_name)])
41 event.commit()
42 tagged_events_counter += 1
43 return tagged_events_counter
44
45 def run(self):
46 """Entry point for the analyzer.
47
48 Returns:
49 String with summary of the analyzer result.
50 """
51
52 tags_applied = {}
53 sigma_rule_counter = 0
54 sigma_rules = ts_sigma_lib.get_all_sigma_rules()
55
56 if sigma_rules is None:
57 logger.error('No Sigma rules found. Check SIGMA_RULES_FOLDERS')
58
59 problem_strings = []
60 output_strings = []
61
62 for rule in sigma_rules:
63 tags_applied[rule.get('file_name')] = 0
64 try:
65 sigma_rule_counter += 1
66 tagged_events_counter = self.run_sigma_rule(
67 rule.get('es_query'), rule.get('file_name'))
68 tags_applied[rule.get('file_name')] += tagged_events_counter
69 except elasticsearch.TransportError as e:
70 logger.error(
71 'Timeout executing search for {0:s}: '
72 '{1!s} waiting for 10 seconds'.format(
73 rule.get('file_name'), e), exc_info=True)
74 # this is caused by to many ES queries in short time range
75 # thus waiting for 10 seconds before sending the next one.
76 time.sleep(10)
77 # This except block is by purpose very broad as one bad rule could
78 # otherwise stop the whole analyzer run
79 # it might be an option to write the problematic rules to the output
80 except: # pylint: disable=bare-except
81 logger.error(
82 'Problem with rule in file {0:s}: '.format(
83 rule.get('file_name')), exc_info=True)
84 problem_strings.append('* {0:s}'.format(
85 rule.get('file_name')))
86 continue
87
88 total_tagged_events = sum(tags_applied.values())
89 output_strings.append('Applied {0:d} tags'.format(total_tagged_events))
90 for tag_name, tagged_events_counter in tags_applied.items():
91 output_strings.append('* {0:s}: {1:d}'.format(
92 tag_name, tagged_events_counter))
93
94 if sigma_rule_counter > 0:
95 view = self.sketch.add_view(
96 view_name='Sigma Rule matches', analyzer_name=self.NAME,
97 query_string='tag:"sigma*"')
98 agg_params = {
99 'field': 'tag',
100 'limit': 20,
101 'index': [self.timeline_id],
102 }
103 agg_obj = self.sketch.add_aggregation(
104 name='Top 20 Sigma tags', agg_name='field_bucket',
105 agg_params=agg_params, view_id=view.id, chart_type='hbarchart',
106 description='Created by the Sigma analyzer')
107
108 story = self.sketch.add_story('Sigma Rule hits')
109 story.add_text(
110 utils.SIGMA_STORY_HEADER, skip_if_exists=True)
111
112 story.add_text(
113 '## Sigma Analyzer.\n\nThe Sigma '
114 'analyzer takes Events and matches them with Sigma rules.'
115 'In this timeline the analyzer discovered {0:d} '
116 'Sigma tags.\n\nThis is a summary of '
117 'it\'s findings.'.format(sigma_rule_counter))
118 story.add_text(
119 'The top 20 most commonly discovered tags were:')
120 story.add_aggregation(agg_obj)
121 story.add_text(
122 'And an overview of all the discovered search terms:')
123 story.add_view(view)
124
125 output_strings.append('Problematic rules:')
126 output_strings.extend(problem_strings)
127
128 return '\n'.join(output_strings)
129
130
131 class RulesSigmaPlugin(SigmaPlugin):
132 """Sigma plugin to run rules."""
133
134 NAME = 'sigma'
135
136 manager.AnalysisManager.register_analyzer(RulesSigmaPlugin)
137
[end of timesketch/lib/analyzers/sigma_tagger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/timesketch/lib/analyzers/sigma_tagger.py b/timesketch/lib/analyzers/sigma_tagger.py
--- a/timesketch/lib/analyzers/sigma_tagger.py
+++ b/timesketch/lib/analyzers/sigma_tagger.py
@@ -22,12 +22,13 @@
DISPLAY_NAME = 'Sigma'
DESCRIPTION = 'Run pre-defined Sigma rules and tag matching events'
- def run_sigma_rule(self, query, tag_name):
+ def run_sigma_rule(self, query, rule_name, tag_list = None):
"""Runs a sigma rule and applies the appropriate tags.
Args:
query: elastic search query for events to tag.
- tag_name: tag to apply to matching events.
+ rule_name: rule_name to apply to matching events.
+ tag_list: a list of additional tags to be added to the event(s)
Returns:
int: number of events tagged.
@@ -37,7 +38,8 @@
events = self.event_stream(
query_string=query, return_fields=return_fields)
for event in events:
- event.add_tags(['sigma_{0:s}'.format(tag_name)])
+ event.add_tags(['sigma_{0:s}'.format(rule_name)])
+ event.add_tags(tag_list)
event.commit()
tagged_events_counter += 1
return tagged_events_counter
@@ -64,7 +66,8 @@
try:
sigma_rule_counter += 1
tagged_events_counter = self.run_sigma_rule(
- rule.get('es_query'), rule.get('file_name'))
+ rule.get('es_query'), rule.get('file_name'),
+ tag_list=rule.get('tags'))
tags_applied[rule.get('file_name')] += tagged_events_counter
except elasticsearch.TransportError as e:
logger.error(
| {"golden_diff": "diff --git a/timesketch/lib/analyzers/sigma_tagger.py b/timesketch/lib/analyzers/sigma_tagger.py\n--- a/timesketch/lib/analyzers/sigma_tagger.py\n+++ b/timesketch/lib/analyzers/sigma_tagger.py\n@@ -22,12 +22,13 @@\n DISPLAY_NAME = 'Sigma'\n DESCRIPTION = 'Run pre-defined Sigma rules and tag matching events'\n \n- def run_sigma_rule(self, query, tag_name):\n+ def run_sigma_rule(self, query, rule_name, tag_list = None):\n \"\"\"Runs a sigma rule and applies the appropriate tags.\n \n Args:\n query: elastic search query for events to tag.\n- tag_name: tag to apply to matching events.\n+ rule_name: rule_name to apply to matching events.\n+ tag_list: a list of additional tags to be added to the event(s)\n \n Returns:\n int: number of events tagged.\n@@ -37,7 +38,8 @@\n events = self.event_stream(\n query_string=query, return_fields=return_fields)\n for event in events:\n- event.add_tags(['sigma_{0:s}'.format(tag_name)])\n+ event.add_tags(['sigma_{0:s}'.format(rule_name)])\n+ event.add_tags(tag_list)\n event.commit()\n tagged_events_counter += 1\n return tagged_events_counter\n@@ -64,7 +66,8 @@\n try:\n sigma_rule_counter += 1\n tagged_events_counter = self.run_sigma_rule(\n- rule.get('es_query'), rule.get('file_name'))\n+ rule.get('es_query'), rule.get('file_name'),\n+ tag_list=rule.get('tags'))\n tags_applied[rule.get('file_name')] += tagged_events_counter\n except elasticsearch.TransportError as e:\n logger.error(\n", "issue": "Sigma tags as labels / tags for events\nSigma rules do come with something like:\r\n\r\n```\r\ntags:\r\n - attack.initial_access\r\n - attack.t1190\r\n ```\r\n\r\nIt could be useful to apply those tags to events that are found with the Sigma analyzer that does tag / label events. Instead (or in addition) of just the filename, the tags from the Sigma rule could be applied.\n", "before_files": [{"content": "\"\"\"Index analyzer plugin for sigma.\"\"\"\nfrom __future__ import unicode_literals\n\nimport logging\nimport time\nimport elasticsearch\n\nfrom timesketch.lib.analyzers import utils\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nimport timesketch.lib.sigma_util as ts_sigma_lib\n\n\nlogger = logging.getLogger('timesketch.analyzers.sigma_tagger')\n\n\nclass SigmaPlugin(interface.BaseAnalyzer):\n \"\"\"Analyzer for Sigma.\"\"\"\n\n NAME = 'sigma'\n DISPLAY_NAME = 'Sigma'\n DESCRIPTION = 'Run pre-defined Sigma rules and tag matching events'\n\n def run_sigma_rule(self, query, tag_name):\n \"\"\"Runs a sigma rule and applies the appropriate tags.\n\n Args:\n query: elastic search query for events to tag.\n tag_name: tag to apply to matching events.\n\n Returns:\n int: number of events tagged.\n \"\"\"\n return_fields = []\n tagged_events_counter = 0\n events = self.event_stream(\n query_string=query, return_fields=return_fields)\n for event in events:\n event.add_tags(['sigma_{0:s}'.format(tag_name)])\n event.commit()\n tagged_events_counter += 1\n return tagged_events_counter\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result.\n \"\"\"\n\n tags_applied = {}\n sigma_rule_counter = 0\n sigma_rules = ts_sigma_lib.get_all_sigma_rules()\n\n if sigma_rules is None:\n logger.error('No Sigma rules found. 
Check SIGMA_RULES_FOLDERS')\n\n problem_strings = []\n output_strings = []\n\n for rule in sigma_rules:\n tags_applied[rule.get('file_name')] = 0\n try:\n sigma_rule_counter += 1\n tagged_events_counter = self.run_sigma_rule(\n rule.get('es_query'), rule.get('file_name'))\n tags_applied[rule.get('file_name')] += tagged_events_counter\n except elasticsearch.TransportError as e:\n logger.error(\n 'Timeout executing search for {0:s}: '\n '{1!s} waiting for 10 seconds'.format(\n rule.get('file_name'), e), exc_info=True)\n # this is caused by to many ES queries in short time range\n # thus waiting for 10 seconds before sending the next one.\n time.sleep(10)\n # This except block is by purpose very broad as one bad rule could\n # otherwise stop the whole analyzer run\n # it might be an option to write the problematic rules to the output\n except: # pylint: disable=bare-except\n logger.error(\n 'Problem with rule in file {0:s}: '.format(\n rule.get('file_name')), exc_info=True)\n problem_strings.append('* {0:s}'.format(\n rule.get('file_name')))\n continue\n\n total_tagged_events = sum(tags_applied.values())\n output_strings.append('Applied {0:d} tags'.format(total_tagged_events))\n for tag_name, tagged_events_counter in tags_applied.items():\n output_strings.append('* {0:s}: {1:d}'.format(\n tag_name, tagged_events_counter))\n\n if sigma_rule_counter > 0:\n view = self.sketch.add_view(\n view_name='Sigma Rule matches', analyzer_name=self.NAME,\n query_string='tag:\"sigma*\"')\n agg_params = {\n 'field': 'tag',\n 'limit': 20,\n 'index': [self.timeline_id],\n }\n agg_obj = self.sketch.add_aggregation(\n name='Top 20 Sigma tags', agg_name='field_bucket',\n agg_params=agg_params, view_id=view.id, chart_type='hbarchart',\n description='Created by the Sigma analyzer')\n\n story = self.sketch.add_story('Sigma Rule hits')\n story.add_text(\n utils.SIGMA_STORY_HEADER, skip_if_exists=True)\n\n story.add_text(\n '## Sigma Analyzer.\\n\\nThe Sigma '\n 'analyzer takes Events and matches them with Sigma rules.'\n 'In this timeline the analyzer discovered {0:d} '\n 'Sigma tags.\\n\\nThis is a summary of '\n 'it\\'s findings.'.format(sigma_rule_counter))\n story.add_text(\n 'The top 20 most commonly discovered tags were:')\n story.add_aggregation(agg_obj)\n story.add_text(\n 'And an overview of all the discovered search terms:')\n story.add_view(view)\n\n output_strings.append('Problematic rules:')\n output_strings.extend(problem_strings)\n\n return '\\n'.join(output_strings)\n\n\nclass RulesSigmaPlugin(SigmaPlugin):\n \"\"\"Sigma plugin to run rules.\"\"\"\n\n NAME = 'sigma'\n\nmanager.AnalysisManager.register_analyzer(RulesSigmaPlugin)\n", "path": "timesketch/lib/analyzers/sigma_tagger.py"}]} | 1,973 | 406 |
gh_patches_debug_26973 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-1613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`plasma-calculator` does not launch on a Windows 10 system
Original comment...https://github.com/PlasmaPy/PlasmaPy/pull/1610#discussion_r912220976
The `plasma-calculator` does not launch when using a Windows 10 system...
> Also, I get an error when trying to run the calculator on my windows machine...
>
> 
>
> My suspicion is whatever functionality is generating the file path is ignorant of a windows file separator, since the path and file name are mashed into one without a separator.
</issue>
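A separator-safe sketch of the path handling the issue points at, assuming the notebook sits next to the module as in the code below; building the path with `pathlib` and handing `subprocess` an argument list avoids re-splitting a Windows path that contains backslashes:

```python
import pathlib
import subprocess

module_path = pathlib.Path(__file__).parent.absolute()
notebook_path = module_path / "plasma_calculator.ipynb"

# An argument list needs no shlex-style re-parsing of the path string.
command = ["voila", str(notebook_path), "--port=8866"]
subprocess.call(command)  # launches voila when it is installed
```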
<code>
[start of plasmapy/utils/calculator/__init__.py]
1 """
2 Script and utilities to launch the plasma calculator
3 """
4 __all__ = ["main"]
5
6 import argparse
7 import os
8 import shlex
9 import subprocess
10
11 _description = """
12 Plasma calculator is a tool that opens a page in a web browser for
13 interactive calculation of plasma parameters.
14
15 This tool is currently in the prototype stage and is expected to change in
16 the future. Please raise an issue at the following link to provide suggestions
17 and feedback: https://github.com/PlasmaPy/PlasmaPy/issues/new
18 """
19
20
21 def main():
22 """
23 Stub function for command line tool that launches the plasma calculator notebook.
24 """
25 parser = argparse.ArgumentParser(description=_description)
26 parser.add_argument(
27 "--port", type=int, default=8866, help="Port to run the notebook"
28 )
29 parser.add_argument(
30 "--dark", action="store_true", help="Turn on dark mode, reduces eye strain"
31 )
32 parser.add_argument(
33 "--no-browser", action="store_true", help="Do not open the browser"
34 )
35
36 # module_path = plasmapy.__path__[0]
37 module_path = os.path.dirname(os.path.abspath(__file__))
38 computed_calculator_path = os.path.join(module_path, "plasma_calculator.ipynb")
39
40 args = parser.parse_args()
41 theme = "dark" if args.dark else "light"
42 no_browser = "--no-browser" if args.no_browser else ""
43
44 command = f"voila {no_browser} --port={args.port} --theme={theme} {computed_calculator_path} \
45 --VoilaConfiguration.file_whitelist favicon.ico"
46 try:
47 subprocess.call(shlex.split(command))
48 except KeyboardInterrupt:
49 print("Stopping calculator! Bye")
50
[end of plasmapy/utils/calculator/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/utils/calculator/__init__.py b/plasmapy/utils/calculator/__init__.py
--- a/plasmapy/utils/calculator/__init__.py
+++ b/plasmapy/utils/calculator/__init__.py
@@ -4,7 +4,7 @@
__all__ = ["main"]
import argparse
-import os
+import pathlib
import shlex
import subprocess
@@ -33,17 +33,25 @@
"--no-browser", action="store_true", help="Do not open the browser"
)
- # module_path = plasmapy.__path__[0]
- module_path = os.path.dirname(os.path.abspath(__file__))
- computed_calculator_path = os.path.join(module_path, "plasma_calculator.ipynb")
+ module_path = pathlib.Path(__file__).parent.absolute()
+ notebook_path = module_path / "plasma_calculator.ipynb"
+ favicon_path = module_path / "favicon.ico"
args = parser.parse_args()
theme = "dark" if args.dark else "light"
no_browser = "--no-browser" if args.no_browser else ""
- command = f"voila {no_browser} --port={args.port} --theme={theme} {computed_calculator_path} \
- --VoilaConfiguration.file_whitelist favicon.ico"
+ command = [
+ "voila",
+ notebook_path,
+ f"--port={args.port}",
+ f"--theme={theme}",
+ f"--VoilaConfiguration.file_whitelist={favicon_path}",
+ ]
+ if no_browser:
+ command.append(no_browser)
+
try:
- subprocess.call(shlex.split(command))
+ subprocess.call(command)
except KeyboardInterrupt:
print("Stopping calculator! Bye")
| {"golden_diff": "diff --git a/plasmapy/utils/calculator/__init__.py b/plasmapy/utils/calculator/__init__.py\n--- a/plasmapy/utils/calculator/__init__.py\n+++ b/plasmapy/utils/calculator/__init__.py\n@@ -4,7 +4,7 @@\n __all__ = [\"main\"]\r\n \r\n import argparse\r\n-import os\r\n+import pathlib\r\n import shlex\r\n import subprocess\r\n \r\n@@ -33,17 +33,25 @@\n \"--no-browser\", action=\"store_true\", help=\"Do not open the browser\"\r\n )\r\n \r\n- # module_path = plasmapy.__path__[0]\r\n- module_path = os.path.dirname(os.path.abspath(__file__))\r\n- computed_calculator_path = os.path.join(module_path, \"plasma_calculator.ipynb\")\r\n+ module_path = pathlib.Path(__file__).parent.absolute()\r\n+ notebook_path = module_path / \"plasma_calculator.ipynb\"\r\n+ favicon_path = module_path / \"favicon.ico\"\r\n \r\n args = parser.parse_args()\r\n theme = \"dark\" if args.dark else \"light\"\r\n no_browser = \"--no-browser\" if args.no_browser else \"\"\r\n \r\n- command = f\"voila {no_browser} --port={args.port} --theme={theme} {computed_calculator_path} \\\r\n- --VoilaConfiguration.file_whitelist favicon.ico\"\r\n+ command = [\r\n+ \"voila\",\r\n+ notebook_path,\r\n+ f\"--port={args.port}\",\r\n+ f\"--theme={theme}\",\r\n+ f\"--VoilaConfiguration.file_whitelist={favicon_path}\",\r\n+ ]\r\n+ if no_browser:\r\n+ command.append(no_browser)\r\n+\r\n try:\r\n- subprocess.call(shlex.split(command))\r\n+ subprocess.call(command)\r\n except KeyboardInterrupt:\r\n print(\"Stopping calculator! Bye\")\n", "issue": "`plasma-calculator` does not launch on a Windows 10 system\nOriginal comment...https://github.com/PlasmaPy/PlasmaPy/pull/1610#discussion_r912220976\r\n\r\nThe `plasma-calculator` does not launch when using a Windows 10 system...\r\n\r\n> Also, I get an error when trying to run the calculator on my windows machine...\r\n> \r\n> \r\n>\r\n> My suspicion is whatever functionality is generating the file path is ignorant of a windows file separator, since the path and file name are mashed into one without a separator.\n", "before_files": [{"content": "\"\"\"\r\nScript and utilities to launch the plasma calculator\r\n\"\"\"\r\n__all__ = [\"main\"]\r\n\r\nimport argparse\r\nimport os\r\nimport shlex\r\nimport subprocess\r\n\r\n_description = \"\"\"\r\nPlasma calculator is a tool that opens a page in a web browser for\r\ninteractive calculation of plasma parameters.\r\n\r\nThis tool is currently in the prototype stage and is expected to change in\r\nthe future. 
Please raise an issue at the following link to provide suggestions\r\nand feedback: https://github.com/PlasmaPy/PlasmaPy/issues/new\r\n\"\"\"\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Stub function for command line tool that launches the plasma calculator notebook.\r\n \"\"\"\r\n parser = argparse.ArgumentParser(description=_description)\r\n parser.add_argument(\r\n \"--port\", type=int, default=8866, help=\"Port to run the notebook\"\r\n )\r\n parser.add_argument(\r\n \"--dark\", action=\"store_true\", help=\"Turn on dark mode, reduces eye strain\"\r\n )\r\n parser.add_argument(\r\n \"--no-browser\", action=\"store_true\", help=\"Do not open the browser\"\r\n )\r\n\r\n # module_path = plasmapy.__path__[0]\r\n module_path = os.path.dirname(os.path.abspath(__file__))\r\n computed_calculator_path = os.path.join(module_path, \"plasma_calculator.ipynb\")\r\n\r\n args = parser.parse_args()\r\n theme = \"dark\" if args.dark else \"light\"\r\n no_browser = \"--no-browser\" if args.no_browser else \"\"\r\n\r\n command = f\"voila {no_browser} --port={args.port} --theme={theme} {computed_calculator_path} \\\r\n --VoilaConfiguration.file_whitelist favicon.ico\"\r\n try:\r\n subprocess.call(shlex.split(command))\r\n except KeyboardInterrupt:\r\n print(\"Stopping calculator! Bye\")\r\n", "path": "plasmapy/utils/calculator/__init__.py"}]} | 1,200 | 398 |
gh_patches_debug_64119 | rasdani/github-patches | git_diff | Pylons__pyramid-3271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bump Sphinx to >=1.7.2
Would anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex.
Refs:
* #667
* #2572
* https://github.com/rtfd/readthedocs.org/issues/4015
</issue>
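The change itself is a one-line pin in `setup.py`; a sketch of how the docs extra could read with the floor the issue asks for (the maintainers may of course settle on a slightly different minimum):

```python
docs_extras = [
    'Sphinx >= 1.7.2, != 1.7.3',
    'docutils',
    'repoze.sphinx.autointerface',
    'pylons_sphinx_latesturl',
    'pylons-sphinx-themes',
    'sphinxcontrib-autoprogram',
]
```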
<code>
[start of setup.py]
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14 from setuptools import setup, find_packages
15
16 def readfile(name):
17 with open(name) as f:
18 return f.read()
19
20 README = readfile('README.rst')
21 CHANGES = readfile('CHANGES.rst')
22
23 install_requires = [
24 'setuptools',
25 'WebOb >= 1.7.0', # Response.has_body
26 'zope.interface >= 3.8.0', # has zope.interface.registry
27 'zope.deprecation >= 3.5.0', # py3 compat
28 'venusian >= 1.0', # ``ignore``
29 'translationstring >= 0.4', # py3 compat
30 'PasteDeploy >= 1.5.0', # py3 compat
31 'plaster',
32 'plaster_pastedeploy',
33 'hupper',
34 ]
35
36 tests_require = [
37 'WebTest >= 1.3.1', # py3 compat
38 'zope.component >= 4.0', # py3 compat
39 ]
40
41
42 docs_extras = [
43 'Sphinx >= 1.3.5, != 1.7.3',
44 'docutils',
45 'repoze.sphinx.autointerface',
46 'pylons_sphinx_latesturl',
47 'pylons-sphinx-themes',
48 'sphinxcontrib-autoprogram',
49 ]
50
51 testing_extras = tests_require + [
52 'nose',
53 'coverage',
54 'virtualenv', # for scaffolding tests
55 ]
56
57 setup(name='pyramid',
58 version='1.10.dev0',
59 description='The Pyramid Web Framework, a Pylons project',
60 long_description=README + '\n\n' + CHANGES,
61 classifiers=[
62 "Development Status :: 6 - Mature",
63 "Intended Audience :: Developers",
64 "Programming Language :: Python",
65 "Programming Language :: Python :: 2.7",
66 "Programming Language :: Python :: 3",
67 "Programming Language :: Python :: 3.4",
68 "Programming Language :: Python :: 3.5",
69 "Programming Language :: Python :: 3.6",
70 "Programming Language :: Python :: Implementation :: CPython",
71 "Programming Language :: Python :: Implementation :: PyPy",
72 "Framework :: Pyramid",
73 "Topic :: Internet :: WWW/HTTP",
74 "Topic :: Internet :: WWW/HTTP :: WSGI",
75 "License :: Repoze Public License",
76 ],
77 keywords='web wsgi pylons pyramid',
78 author="Chris McDonough, Agendaless Consulting",
79 author_email="[email protected]",
80 url="https://trypyramid.com",
81 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
82 packages=find_packages(),
83 include_package_data=True,
84 zip_safe=False,
85 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
86 install_requires=install_requires,
87 extras_require={
88 ':python_version<"3.2"': ['repoze.lru >= 0.4'],
89 'testing': testing_extras,
90 'docs': docs_extras,
91 },
92 tests_require=tests_require,
93 test_suite="pyramid.tests",
94 entry_points="""\
95 [pyramid.scaffold]
96 starter=pyramid.scaffolds:StarterProjectTemplate
97 zodb=pyramid.scaffolds:ZODBProjectTemplate
98 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
99 [pyramid.pshell_runner]
100 python=pyramid.scripts.pshell:python_shell_runner
101 [console_scripts]
102 pcreate = pyramid.scripts.pcreate:main
103 pserve = pyramid.scripts.pserve:main
104 pshell = pyramid.scripts.pshell:main
105 proutes = pyramid.scripts.proutes:main
106 pviews = pyramid.scripts.pviews:main
107 ptweens = pyramid.scripts.ptweens:main
108 prequest = pyramid.scripts.prequest:main
109 pdistreport = pyramid.scripts.pdistreport:main
110 [paste.server_runner]
111 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
112 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
113 """
114 )
115
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@
docs_extras = [
- 'Sphinx >= 1.3.5, != 1.7.3',
+ 'Sphinx >= 1.7.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n \n \n docs_extras = [\n- 'Sphinx >= 1.3.5, != 1.7.3',\n+ 'Sphinx >= 1.7.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Bump Sphinx to >=1.7.2\nWould anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex.\r\n\r\nRefs:\r\n* #667\r\n* #2572\r\n* https://github.com/rtfd/readthedocs.org/issues/4015\r\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\nfrom setuptools import setup, find_packages\n\ndef readfile(name):\n with open(name) as f:\n return f.read()\n\nREADME = readfile('README.rst')\nCHANGES = readfile('CHANGES.rst')\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.7.0', # Response.has_body\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n 'plaster',\n 'plaster_pastedeploy',\n 'hupper',\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'zope.component >= 4.0', # py3 compat\n ]\n\n\ndocs_extras = [\n 'Sphinx >= 1.3.5, != 1.7.3',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-autoprogram',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.10.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n install_requires=install_requires,\n extras_require={\n ':python_version<\"3.2\"': ['repoze.lru >= 0.4'],\n 'testing': testing_extras,\n 'docs': 
docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n", "path": "setup.py"}]} | 1,925 | 99 |
gh_patches_debug_31946 | rasdani/github-patches | git_diff | ansible__ansible-2952 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hardcoded location of the chroot executable
On Gentoo, chroot is in /usr/bin and not /usr/sbin as ansible assumes.
</issue>
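A distribution-agnostic sketch is to resolve the binary from `PATH` instead of hard-coding `/usr/sbin/chroot`; on modern Python that is `shutil.which`, while the Python 2-era code base would reach for `distutils.spawn.find_executable` instead:

```python
import shutil

chroot_cmd = shutil.which("chroot")
if chroot_cmd is None:
    raise RuntimeError("chroot command not found in PATH")
print(chroot_cmd)  # e.g. /usr/sbin/chroot or /usr/bin/chroot depending on the distro
```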
<code>
[start of lib/ansible/runner/connection_plugins/chroot.py]
1 # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
2 # (c) 2013, Maykel Moya <[email protected]>
3 #
4 # This file is part of Ansible
5 #
6 # Ansible is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # Ansible is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
18
19 import traceback
20 import os
21 import pipes
22 import shutil
23 import subprocess
24 import select
25 import fcntl
26 from ansible import errors
27 from ansible import utils
28 from ansible.callbacks import vvv
29
30 class Connection(object):
31 ''' Local chroot based connections '''
32
33 def __init__(self, runner, host, port, *args, **kwargs):
34 self.chroot = host
35
36 if os.geteuid() != 0:
37 raise errors.AnsibleError("chroot connection requires running as root")
38
39 # we're running as root on the local system so do some
40 # trivial checks for ensuring 'host' is actually a chroot'able dir
41 if not os.path.isdir(self.chroot):
42 raise errors.AnsibleError("%s is not a directory" % self.chroot)
43
44 chrootsh = os.path.join(self.chroot, 'bin/sh')
45 if not utils.is_executable(chrootsh):
46 raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
47
48 self.runner = runner
49 self.host = host
50 # port is unused, since this is local
51 self.port = port
52
53 def connect(self, port=None):
54 ''' connect to the chroot; nothing to do here '''
55
56 vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
57
58 return self
59
60 def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
61 ''' run a command on the chroot '''
62
63 # We enter chroot as root so sudo stuff can be ignored
64
65 chroot_cmd = '/usr/sbin/chroot'
66
67 if executable:
68 local_cmd = [chroot_cmd, self.chroot, executable, '-c', cmd]
69 else:
70 local_cmd = '%s "%s" %s' % (chroot_cmd, self.chroot, cmd)
71
72 vvv("EXEC %s" % (local_cmd), host=self.chroot)
73 p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
74 cwd=self.runner.basedir,
75 stdin=subprocess.PIPE,
76 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
77
78 stdout, stderr = p.communicate()
79 return (p.returncode, '', stdout, stderr)
80
81 def put_file(self, in_path, out_path):
82 ''' transfer a file from local to chroot '''
83
84 if not out_path.startswith(os.path.sep):
85 out_path = os.path.join(os.path.sep, out_path)
86 normpath = os.path.normpath(out_path)
87 out_path = os.path.join(self.chroot, normpath[1:])
88
89 vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
90 if not os.path.exists(in_path):
91 raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
92 try:
93 shutil.copyfile(in_path, out_path)
94 except shutil.Error:
95 traceback.print_exc()
96 raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
97 except IOError:
98 traceback.print_exc()
99 raise errors.AnsibleError("failed to transfer file to %s" % out_path)
100
101 def fetch_file(self, in_path, out_path):
102 ''' fetch a file from chroot to local '''
103
104 if not in_path.startswith(os.path.sep):
105 in_path = os.path.join(os.path.sep, in_path)
106 normpath = os.path.normpath(in_path)
107 in_path = os.path.join(self.chroot, normpath[1:])
108
109 vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
110 if not os.path.exists(in_path):
111 raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
112 try:
113 shutil.copyfile(in_path, out_path)
114 except shutil.Error:
115 traceback.print_exc()
116 raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
117 except IOError:
118 traceback.print_exc()
119 raise errors.AnsibleError("failed to transfer file to %s" % out_path)
120
121 def close(self):
122 ''' terminate the connection; nothing to do here '''
123 pass
124
[end of lib/ansible/runner/connection_plugins/chroot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/lib/ansible/runner/connection_plugins/chroot.py
--- a/lib/ansible/runner/connection_plugins/chroot.py
+++ b/lib/ansible/runner/connection_plugins/chroot.py
@@ -16,13 +16,11 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+import distutils.spawn
import traceback
import os
-import pipes
import shutil
import subprocess
-import select
-import fcntl
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv
@@ -45,6 +43,10 @@
if not utils.is_executable(chrootsh):
raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+ self.chroot_cmd = distutils.spawn.find_executable('chroot')
+ if not self.chroot_cmd:
+ raise errors.AnsibleError("chroot command not found in PATH")
+
self.runner = runner
self.host = host
# port is unused, since this is local
@@ -62,12 +64,10 @@
# We enter chroot as root so sudo stuff can be ignored
- chroot_cmd = '/usr/sbin/chroot'
-
if executable:
- local_cmd = [chroot_cmd, self.chroot, executable, '-c', cmd]
+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
else:
- local_cmd = '%s "%s" %s' % (chroot_cmd, self.chroot, cmd)
+ local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd)
vvv("EXEC %s" % (local_cmd), host=self.chroot)
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
| {"golden_diff": "diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/lib/ansible/runner/connection_plugins/chroot.py\n--- a/lib/ansible/runner/connection_plugins/chroot.py\n+++ b/lib/ansible/runner/connection_plugins/chroot.py\n@@ -16,13 +16,11 @@\n # You should have received a copy of the GNU General Public License\n # along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n \n+import distutils.spawn\n import traceback\n import os\n-import pipes\n import shutil\n import subprocess\n-import select\n-import fcntl\n from ansible import errors\n from ansible import utils\n from ansible.callbacks import vvv\n@@ -45,6 +43,10 @@\n if not utils.is_executable(chrootsh):\n raise errors.AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n \n+ self.chroot_cmd = distutils.spawn.find_executable('chroot')\n+ if not self.chroot_cmd:\n+ raise errors.AnsibleError(\"chroot command not found in PATH\")\n+\n self.runner = runner\n self.host = host\n # port is unused, since this is local\n@@ -62,12 +64,10 @@\n \n # We enter chroot as root so sudo stuff can be ignored\n \n- chroot_cmd = '/usr/sbin/chroot'\n-\n if executable:\n- local_cmd = [chroot_cmd, self.chroot, executable, '-c', cmd]\n+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]\n else:\n- local_cmd = '%s \"%s\" %s' % (chroot_cmd, self.chroot, cmd)\n+ local_cmd = '%s \"%s\" %s' % (self.chroot_cmd, self.chroot, cmd)\n \n vvv(\"EXEC %s\" % (local_cmd), host=self.chroot)\n p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),\n", "issue": "Hardcoded location of the chroot executable\nOn Gentoo, chroot is in /usr/bin and not /usr/sbin as ansible assumes.\n\n", "before_files": [{"content": "# Based on local.py (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Maykel Moya <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport traceback\nimport os\nimport pipes\nimport shutil\nimport subprocess\nimport select\nimport fcntl\nfrom ansible import errors\nfrom ansible import utils\nfrom ansible.callbacks import vvv\n\nclass Connection(object):\n ''' Local chroot based connections '''\n\n def __init__(self, runner, host, port, *args, **kwargs):\n self.chroot = host\n\n if os.geteuid() != 0:\n raise errors.AnsibleError(\"chroot connection requires running as root\")\n\n # we're running as root on the local system so do some\n # trivial checks for ensuring 'host' is actually a chroot'able dir\n if not os.path.isdir(self.chroot):\n raise errors.AnsibleError(\"%s is not a directory\" % self.chroot)\n\n chrootsh = os.path.join(self.chroot, 'bin/sh')\n if not utils.is_executable(chrootsh):\n raise errors.AnsibleError(\"%s does not look like a chrootable dir (/bin/sh missing)\" % self.chroot)\n\n self.runner = runner\n self.host = host\n # port is unused, since this is local\n self.port = port\n\n def connect(self, port=None):\n ''' connect to the chroot; nothing to do here '''\n\n vvv(\"THIS IS A LOCAL CHROOT DIR\", host=self.chroot)\n\n return self\n\n def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):\n ''' run a command on the chroot '''\n\n # We enter chroot as root so sudo stuff can be ignored\n\n chroot_cmd = '/usr/sbin/chroot'\n\n if executable:\n local_cmd = [chroot_cmd, self.chroot, executable, '-c', cmd]\n else:\n local_cmd = '%s \"%s\" %s' % (chroot_cmd, self.chroot, cmd)\n\n vvv(\"EXEC %s\" % (local_cmd), host=self.chroot)\n p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),\n cwd=self.runner.basedir,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n stdout, stderr = p.communicate()\n return (p.returncode, '', stdout, stderr)\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to chroot '''\n\n if not out_path.startswith(os.path.sep):\n out_path = os.path.join(os.path.sep, out_path)\n normpath = os.path.normpath(out_path)\n out_path = os.path.join(self.chroot, normpath[1:])\n\n vvv(\"PUT %s TO %s\" % (in_path, out_path), host=self.chroot)\n if not os.path.exists(in_path):\n raise errors.AnsibleFileNotFound(\"file or module does not exist: %s\" % in_path)\n try:\n shutil.copyfile(in_path, out_path)\n except shutil.Error:\n traceback.print_exc()\n raise errors.AnsibleError(\"failed to copy: %s and %s are the same\" % (in_path, out_path))\n except IOError:\n traceback.print_exc()\n raise errors.AnsibleError(\"failed to transfer file to %s\" % out_path)\n\n def fetch_file(self, in_path, out_path):\n ''' fetch a file from chroot to local '''\n\n if not in_path.startswith(os.path.sep):\n in_path = os.path.join(os.path.sep, in_path)\n normpath = os.path.normpath(in_path)\n in_path = os.path.join(self.chroot, normpath[1:])\n\n vvv(\"FETCH %s TO %s\" % (in_path, out_path), host=self.chroot)\n if not os.path.exists(in_path):\n raise errors.AnsibleFileNotFound(\"file or module does not exist: %s\" % in_path)\n try:\n shutil.copyfile(in_path, out_path)\n except shutil.Error:\n traceback.print_exc()\n raise errors.AnsibleError(\"failed to copy: %s and %s are the same\" % (in_path, out_path))\n except IOError:\n traceback.print_exc()\n raise errors.AnsibleError(\"failed to transfer file to %s\" % out_path)\n\n def close(self):\n ''' terminate the connection; nothing to do here '''\n pass\n", "path": "lib/ansible/runner/connection_plugins/chroot.py"}]} | 1,984 | 
446 |
gh_patches_debug_41742 | rasdani/github-patches | git_diff | ephios-dev__ephios-597 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove CustomMultipleChoicePreference
As of https://github.com/agateblue/django-dynamic-preferences/pull/235 we can drop our own fix of the MultiplChoicePreference class (as soon as this is part of a release)
</issue>
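Once the upstream release is available, the registry entries could subclass the stock `ModelMultipleChoicePreference` directly and the local workaround class could be deleted. A fragment of how one entry might then look (a Django module fragment, so it assumes a configured project and the `event_type_preference_registry` defined alongside it, as in the file below):

```python
from django.contrib.auth.models import Group
from django.utils.translation import gettext_lazy as _
from django_select2.forms import Select2MultipleWidget
from dynamic_preferences.types import ModelMultipleChoicePreference


@event_type_preference_registry.register
class VisibleForPreference(ModelMultipleChoicePreference):
    name = "visible_for"
    verbose_name = _("Events of this type should by default be visible for")
    model = Group
    default = Group.objects.all()
    field_kwargs = {"widget": Select2MultipleWidget}
```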
<code>
[start of ephios/extra/preferences.py]
1 import json
2
3 from django import forms
4 from dynamic_preferences.types import (
5 BasePreferenceType,
6 BaseSerializer,
7 ModelMultipleChoicePreference,
8 )
9
10 from ephios.extra.json import CustomJSONDecoder, CustomJSONEncoder
11
12
13 class CustomModelMultipleChoicePreference(ModelMultipleChoicePreference):
14 def _setup_signals(self):
15 pass
16
17
18 class JSONSerializer(BaseSerializer):
19 @classmethod
20 def clean_to_db_value(cls, value):
21 return json.dumps(value, cls=CustomJSONEncoder, ensure_ascii=False)
22
23 @classmethod
24 def to_python(cls, value, **kwargs):
25 return json.loads(value, cls=CustomJSONDecoder)
26
27
28 class JSONPreference(BasePreferenceType):
29 serializer = JSONSerializer
30 field_class = forms.CharField
31 widget = forms.Textarea
32
[end of ephios/extra/preferences.py]
[start of ephios/core/dynamic_preferences_registry.py]
1 from django.contrib.auth.models import Group
2 from django.utils.safestring import mark_safe
3 from django.utils.translation import gettext_lazy as _
4 from django_select2.forms import Select2MultipleWidget
5 from dynamic_preferences.preferences import Section
6 from dynamic_preferences.registries import (
7 PerInstancePreferenceRegistry,
8 global_preferences_registry,
9 )
10 from dynamic_preferences.types import MultipleChoicePreference, StringPreference
11 from dynamic_preferences.users.registries import user_preferences_registry
12
13 import ephios
14 from ephios.core import plugins
15 from ephios.core.models import QualificationCategory, UserProfile
16 from ephios.core.services.notifications.backends import CORE_NOTIFICATION_BACKENDS
17 from ephios.core.services.notifications.types import CORE_NOTIFICATION_TYPES
18 from ephios.extra.preferences import CustomModelMultipleChoicePreference, JSONPreference
19
20
21 class EventTypeRegistry(PerInstancePreferenceRegistry):
22 pass
23
24
25 event_type_preference_registry = EventTypeRegistry()
26
27 notifications_user_section = Section("notifications")
28 responsible_notifications_user_section = Section("responsible_notifications")
29 general_global_section = Section("general")
30
31
32 @global_preferences_registry.register
33 class OrganizationName(StringPreference):
34 name = "organization_name"
35 verbose_name = _("Organization name")
36 default = ""
37 section = general_global_section
38 required = False
39
40
41 @global_preferences_registry.register
42 class RelevantQualificationCategories(CustomModelMultipleChoicePreference):
43 name = "relevant_qualification_categories"
44 section = general_global_section
45 model = QualificationCategory
46 default = QualificationCategory.objects.none()
47 verbose_name = _("Relevant qualification categories (for user list and disposition view)")
48 field_kwargs = {"widget": Select2MultipleWidget}
49
50
51 @global_preferences_registry.register
52 class EnabledPlugins(MultipleChoicePreference):
53 name = "enabled_plugins"
54 verbose_name = _("Enabled plugins")
55 default = [
56 ephios.plugins.basesignup.apps.PluginApp.__module__,
57 ephios.plugins.pages.apps.PluginApp.__module__,
58 ]
59 section = general_global_section
60 required = False
61
62 @staticmethod
63 def get_choices():
64 return [
65 (plugin.module, mark_safe(f"<strong>{plugin.name}</strong>: {plugin.description}"))
66 for plugin in plugins.get_all_plugins()
67 if getattr(plugin, "visible", True)
68 ]
69
70
71 @user_preferences_registry.register
72 class NotificationPreference(JSONPreference):
73 name = "notifications"
74 verbose_name = _("Notification preferences")
75 section = notifications_user_section
76 default = dict(
77 zip(
78 [not_type.slug for not_type in CORE_NOTIFICATION_TYPES],
79 [[backend.slug for backend in CORE_NOTIFICATION_BACKENDS]]
80 * len(CORE_NOTIFICATION_TYPES),
81 )
82 )
83
84
85 @event_type_preference_registry.register
86 class VisibleForPreference(CustomModelMultipleChoicePreference):
87 name = "visible_for"
88 verbose_name = _("Events of this type should by default be visible for")
89 model = Group
90 default = Group.objects.all()
91 field_kwargs = {"widget": Select2MultipleWidget}
92
93
94 @event_type_preference_registry.register
95 class ResponsibleUsersPreference(CustomModelMultipleChoicePreference):
96 name = "responsible_users"
97 verbose_name = _("Users that are responsible for this event type by default")
98 model = UserProfile
99 default = UserProfile.objects.none()
100 field_kwargs = {"widget": Select2MultipleWidget}
101
102
103 @event_type_preference_registry.register
104 class ResponsibleGroupsPreference(CustomModelMultipleChoicePreference):
105 name = "responsible_groups"
106 verbose_name = _("Groups that are responsible for this event type by default")
107 model = Group
108 default = Group.objects.none()
109 field_kwargs = {"widget": Select2MultipleWidget}
110
[end of ephios/core/dynamic_preferences_registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/core/dynamic_preferences_registry.py b/ephios/core/dynamic_preferences_registry.py
--- a/ephios/core/dynamic_preferences_registry.py
+++ b/ephios/core/dynamic_preferences_registry.py
@@ -7,7 +7,11 @@
PerInstancePreferenceRegistry,
global_preferences_registry,
)
-from dynamic_preferences.types import MultipleChoicePreference, StringPreference
+from dynamic_preferences.types import (
+ ModelMultipleChoicePreference,
+ MultipleChoicePreference,
+ StringPreference,
+)
from dynamic_preferences.users.registries import user_preferences_registry
import ephios
@@ -15,7 +19,7 @@
from ephios.core.models import QualificationCategory, UserProfile
from ephios.core.services.notifications.backends import CORE_NOTIFICATION_BACKENDS
from ephios.core.services.notifications.types import CORE_NOTIFICATION_TYPES
-from ephios.extra.preferences import CustomModelMultipleChoicePreference, JSONPreference
+from ephios.extra.preferences import JSONPreference
class EventTypeRegistry(PerInstancePreferenceRegistry):
@@ -39,7 +43,7 @@
@global_preferences_registry.register
-class RelevantQualificationCategories(CustomModelMultipleChoicePreference):
+class RelevantQualificationCategories(ModelMultipleChoicePreference):
name = "relevant_qualification_categories"
section = general_global_section
model = QualificationCategory
@@ -83,7 +87,7 @@
@event_type_preference_registry.register
-class VisibleForPreference(CustomModelMultipleChoicePreference):
+class VisibleForPreference(ModelMultipleChoicePreference):
name = "visible_for"
verbose_name = _("Events of this type should by default be visible for")
model = Group
@@ -92,7 +96,7 @@
@event_type_preference_registry.register
-class ResponsibleUsersPreference(CustomModelMultipleChoicePreference):
+class ResponsibleUsersPreference(ModelMultipleChoicePreference):
name = "responsible_users"
verbose_name = _("Users that are responsible for this event type by default")
model = UserProfile
@@ -101,7 +105,7 @@
@event_type_preference_registry.register
-class ResponsibleGroupsPreference(CustomModelMultipleChoicePreference):
+class ResponsibleGroupsPreference(ModelMultipleChoicePreference):
name = "responsible_groups"
verbose_name = _("Groups that are responsible for this event type by default")
model = Group
diff --git a/ephios/extra/preferences.py b/ephios/extra/preferences.py
--- a/ephios/extra/preferences.py
+++ b/ephios/extra/preferences.py
@@ -1,20 +1,11 @@
import json
from django import forms
-from dynamic_preferences.types import (
- BasePreferenceType,
- BaseSerializer,
- ModelMultipleChoicePreference,
-)
+from dynamic_preferences.types import BasePreferenceType, BaseSerializer
from ephios.extra.json import CustomJSONDecoder, CustomJSONEncoder
-class CustomModelMultipleChoicePreference(ModelMultipleChoicePreference):
- def _setup_signals(self):
- pass
-
-
class JSONSerializer(BaseSerializer):
@classmethod
def clean_to_db_value(cls, value):
| {"golden_diff": "diff --git a/ephios/core/dynamic_preferences_registry.py b/ephios/core/dynamic_preferences_registry.py\n--- a/ephios/core/dynamic_preferences_registry.py\n+++ b/ephios/core/dynamic_preferences_registry.py\n@@ -7,7 +7,11 @@\n PerInstancePreferenceRegistry,\n global_preferences_registry,\n )\n-from dynamic_preferences.types import MultipleChoicePreference, StringPreference\n+from dynamic_preferences.types import (\n+ ModelMultipleChoicePreference,\n+ MultipleChoicePreference,\n+ StringPreference,\n+)\n from dynamic_preferences.users.registries import user_preferences_registry\n \n import ephios\n@@ -15,7 +19,7 @@\n from ephios.core.models import QualificationCategory, UserProfile\n from ephios.core.services.notifications.backends import CORE_NOTIFICATION_BACKENDS\n from ephios.core.services.notifications.types import CORE_NOTIFICATION_TYPES\n-from ephios.extra.preferences import CustomModelMultipleChoicePreference, JSONPreference\n+from ephios.extra.preferences import JSONPreference\n \n \n class EventTypeRegistry(PerInstancePreferenceRegistry):\n@@ -39,7 +43,7 @@\n \n \n @global_preferences_registry.register\n-class RelevantQualificationCategories(CustomModelMultipleChoicePreference):\n+class RelevantQualificationCategories(ModelMultipleChoicePreference):\n name = \"relevant_qualification_categories\"\n section = general_global_section\n model = QualificationCategory\n@@ -83,7 +87,7 @@\n \n \n @event_type_preference_registry.register\n-class VisibleForPreference(CustomModelMultipleChoicePreference):\n+class VisibleForPreference(ModelMultipleChoicePreference):\n name = \"visible_for\"\n verbose_name = _(\"Events of this type should by default be visible for\")\n model = Group\n@@ -92,7 +96,7 @@\n \n \n @event_type_preference_registry.register\n-class ResponsibleUsersPreference(CustomModelMultipleChoicePreference):\n+class ResponsibleUsersPreference(ModelMultipleChoicePreference):\n name = \"responsible_users\"\n verbose_name = _(\"Users that are responsible for this event type by default\")\n model = UserProfile\n@@ -101,7 +105,7 @@\n \n \n @event_type_preference_registry.register\n-class ResponsibleGroupsPreference(CustomModelMultipleChoicePreference):\n+class ResponsibleGroupsPreference(ModelMultipleChoicePreference):\n name = \"responsible_groups\"\n verbose_name = _(\"Groups that are responsible for this event type by default\")\n model = Group\ndiff --git a/ephios/extra/preferences.py b/ephios/extra/preferences.py\n--- a/ephios/extra/preferences.py\n+++ b/ephios/extra/preferences.py\n@@ -1,20 +1,11 @@\n import json\n \n from django import forms\n-from dynamic_preferences.types import (\n- BasePreferenceType,\n- BaseSerializer,\n- ModelMultipleChoicePreference,\n-)\n+from dynamic_preferences.types import BasePreferenceType, BaseSerializer\n \n from ephios.extra.json import CustomJSONDecoder, CustomJSONEncoder\n \n \n-class CustomModelMultipleChoicePreference(ModelMultipleChoicePreference):\n- def _setup_signals(self):\n- pass\n-\n-\n class JSONSerializer(BaseSerializer):\n @classmethod\n def clean_to_db_value(cls, value):\n", "issue": "Remove CustomMultipleChoicePreference\nAs of https://github.com/agateblue/django-dynamic-preferences/pull/235 we can drop our own fix of the MultiplChoicePreference class (as soon as this is part of a release)\nRemove CustomMultipleChoicePreference\nAs of https://github.com/agateblue/django-dynamic-preferences/pull/235 we can drop our own fix of the MultiplChoicePreference class (as soon as this is part of a release)\n", "before_files": 
[{"content": "import json\n\nfrom django import forms\nfrom dynamic_preferences.types import (\n BasePreferenceType,\n BaseSerializer,\n ModelMultipleChoicePreference,\n)\n\nfrom ephios.extra.json import CustomJSONDecoder, CustomJSONEncoder\n\n\nclass CustomModelMultipleChoicePreference(ModelMultipleChoicePreference):\n def _setup_signals(self):\n pass\n\n\nclass JSONSerializer(BaseSerializer):\n @classmethod\n def clean_to_db_value(cls, value):\n return json.dumps(value, cls=CustomJSONEncoder, ensure_ascii=False)\n\n @classmethod\n def to_python(cls, value, **kwargs):\n return json.loads(value, cls=CustomJSONDecoder)\n\n\nclass JSONPreference(BasePreferenceType):\n serializer = JSONSerializer\n field_class = forms.CharField\n widget = forms.Textarea\n", "path": "ephios/extra/preferences.py"}, {"content": "from django.contrib.auth.models import Group\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\nfrom django_select2.forms import Select2MultipleWidget\nfrom dynamic_preferences.preferences import Section\nfrom dynamic_preferences.registries import (\n PerInstancePreferenceRegistry,\n global_preferences_registry,\n)\nfrom dynamic_preferences.types import MultipleChoicePreference, StringPreference\nfrom dynamic_preferences.users.registries import user_preferences_registry\n\nimport ephios\nfrom ephios.core import plugins\nfrom ephios.core.models import QualificationCategory, UserProfile\nfrom ephios.core.services.notifications.backends import CORE_NOTIFICATION_BACKENDS\nfrom ephios.core.services.notifications.types import CORE_NOTIFICATION_TYPES\nfrom ephios.extra.preferences import CustomModelMultipleChoicePreference, JSONPreference\n\n\nclass EventTypeRegistry(PerInstancePreferenceRegistry):\n pass\n\n\nevent_type_preference_registry = EventTypeRegistry()\n\nnotifications_user_section = Section(\"notifications\")\nresponsible_notifications_user_section = Section(\"responsible_notifications\")\ngeneral_global_section = Section(\"general\")\n\n\n@global_preferences_registry.register\nclass OrganizationName(StringPreference):\n name = \"organization_name\"\n verbose_name = _(\"Organization name\")\n default = \"\"\n section = general_global_section\n required = False\n\n\n@global_preferences_registry.register\nclass RelevantQualificationCategories(CustomModelMultipleChoicePreference):\n name = \"relevant_qualification_categories\"\n section = general_global_section\n model = QualificationCategory\n default = QualificationCategory.objects.none()\n verbose_name = _(\"Relevant qualification categories (for user list and disposition view)\")\n field_kwargs = {\"widget\": Select2MultipleWidget}\n\n\n@global_preferences_registry.register\nclass EnabledPlugins(MultipleChoicePreference):\n name = \"enabled_plugins\"\n verbose_name = _(\"Enabled plugins\")\n default = [\n ephios.plugins.basesignup.apps.PluginApp.__module__,\n ephios.plugins.pages.apps.PluginApp.__module__,\n ]\n section = general_global_section\n required = False\n\n @staticmethod\n def get_choices():\n return [\n (plugin.module, mark_safe(f\"<strong>{plugin.name}</strong>: {plugin.description}\"))\n for plugin in plugins.get_all_plugins()\n if getattr(plugin, \"visible\", True)\n ]\n\n\n@user_preferences_registry.register\nclass NotificationPreference(JSONPreference):\n name = \"notifications\"\n verbose_name = _(\"Notification preferences\")\n section = notifications_user_section\n default = dict(\n zip(\n [not_type.slug for not_type in CORE_NOTIFICATION_TYPES],\n [[backend.slug 
for backend in CORE_NOTIFICATION_BACKENDS]]\n * len(CORE_NOTIFICATION_TYPES),\n )\n )\n\n\n@event_type_preference_registry.register\nclass VisibleForPreference(CustomModelMultipleChoicePreference):\n name = \"visible_for\"\n verbose_name = _(\"Events of this type should by default be visible for\")\n model = Group\n default = Group.objects.all()\n field_kwargs = {\"widget\": Select2MultipleWidget}\n\n\n@event_type_preference_registry.register\nclass ResponsibleUsersPreference(CustomModelMultipleChoicePreference):\n name = \"responsible_users\"\n verbose_name = _(\"Users that are responsible for this event type by default\")\n model = UserProfile\n default = UserProfile.objects.none()\n field_kwargs = {\"widget\": Select2MultipleWidget}\n\n\n@event_type_preference_registry.register\nclass ResponsibleGroupsPreference(CustomModelMultipleChoicePreference):\n name = \"responsible_groups\"\n verbose_name = _(\"Groups that are responsible for this event type by default\")\n model = Group\n default = Group.objects.none()\n field_kwargs = {\"widget\": Select2MultipleWidget}\n", "path": "ephios/core/dynamic_preferences_registry.py"}]} | 1,860 | 653 |
gh_patches_debug_34936 | rasdani/github-patches | git_diff | pytorch__ignite-385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve MetricsLambda implementation
A user can get the following output in `state.metrics`:
```
{'accuracy': 0.37127896829244644,
'error': 0.6287210317075536,
'error[0]': -0.6287210317075536,
'error[0][0]': 0.37127896829244644,
```
when launching the code below:
```python
accuracy_metric = Accuracy()
error_metric = (accuracy_metric - 1.0) * (-1.0)
metrics = {
"accuracy": accuracy_metric,
"error": error_metric,
}
validator = create_supervised_evaluator(model, metrics=metrics)
validator.run(val_loader, max_epochs=1)
print(validator.state.metrics)
```
This is due to
https://github.com/pytorch/ignite/blob/d9820451da779e0d0c393804db381e5483240b1c/ignite/metrics/metrics_lambda.py#L50-L54
and
https://github.com/pytorch/ignite/blob/d9820451da779e0d0c393804db381e5483240b1c/ignite/metrics/metric.py#L68
IMO, the user is not interested in this internal info used to compute `error_metric`. We could add some special characters so that the result of `compute` is ignored when it is inserted into `engine.state.metrics[name]`.
cc @zasdfgbnm
</issue>
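
A rough sketch of the behaviour the issue asks for is shown below: keep the dependency metrics updated every iteration, but never publish their intermediate results under derived names such as `error[0]`. It assumes ignite's `Events` enum, the `Metric.started` / `Metric.iteration_completed` handlers, and `Engine.has_event_handler`; treat it as an illustration of the idea rather than a verified fix.

```python
from ignite.engine import Events
from ignite.metrics.metric import Metric


def attach_dependencies_quietly(metrics_lambda, engine):
    # Register only the update-related handlers of each dependency metric,
    # and never its `completed` handler, so no 'error[0]'-style keys end up
    # in engine.state.metrics.
    for arg in metrics_lambda.args:
        if not isinstance(arg, Metric):
            continue
        if not engine.has_event_handler(arg.started, Events.EPOCH_STARTED):
            engine.add_event_handler(Events.EPOCH_STARTED, arg.started)
        if not engine.has_event_handler(arg.iteration_completed, Events.ITERATION_COMPLETED):
            engine.add_event_handler(Events.ITERATION_COMPLETED, arg.iteration_completed)
```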
<code>
[start of ignite/metrics/metrics_lambda.py]
1 from ignite.metrics.metric import Metric
2
3
4 class MetricsLambda(Metric):
5 """
6 Apply a function to other metrics to obtain a new metric.
7 The result of the new metric is defined to be the result
8 of applying the function to the result of argument metrics.
9
10 When update, this metric does not recursively update the metrics
11 it depends on. When reset, all its dependency metrics would be
12 resetted. When attach, all its dependencies would be automatically
13 attached.
14
15 Arguments:
16 f (callable): the function that defines the computation
17 args (sequence): Sequence of other metrics or something
18 else that will be fed to ``f`` as arguments.
19
20 Examples:
21 >>> precision = Precision(average=False)
22 >>> recall = Recall(average=False)
23 >>> def Fbeta(r, p, beta):
24 >>> return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()
25 >>> F1 = MetricsLambda(Fbeta, recall, precision, 1)
26 >>> F2 = MetricsLambda(Fbeta, recall, precision, 2)
27 >>> F3 = MetricsLambda(Fbeta, recall, precision, 3)
28 >>> F4 = MetricsLambda(Fbeta, recall, precision, 4)
29 """
30 def __init__(self, f, *args):
31 self.function = f
32 self.args = args
33 super(MetricsLambda, self).__init__()
34
35 def reset(self):
36 for i in self.args:
37 if isinstance(i, Metric):
38 i.reset()
39
40 def update(self, output):
41 # NB: this method does not recursively update dependency metrics,
42 # which might cause duplicate update issue. To update this metric,
43 # users should manually update its dependencies.
44 pass
45
46 def compute(self):
47 materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args]
48 return self.function(*materialized)
49
50 def attach(self, engine, name):
51 # recursively attach all its dependencies
52 for index, metric in enumerate(self.args):
53 if isinstance(metric, Metric):
54 metric.attach(engine, name + '[{}]'.format(index))
55 super(MetricsLambda, self).attach(engine, name)
56
[end of ignite/metrics/metrics_lambda.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/metrics/metrics_lambda.py b/ignite/metrics/metrics_lambda.py
--- a/ignite/metrics/metrics_lambda.py
+++ b/ignite/metrics/metrics_lambda.py
@@ -1,4 +1,5 @@
from ignite.metrics.metric import Metric
+from ignite.engine import Events
class MetricsLambda(Metric):
@@ -12,20 +13,25 @@
resetted. When attach, all its dependencies would be automatically
attached.
- Arguments:
+ Args:
f (callable): the function that defines the computation
args (sequence): Sequence of other metrics or something
else that will be fed to ``f`` as arguments.
- Examples:
- >>> precision = Precision(average=False)
- >>> recall = Recall(average=False)
- >>> def Fbeta(r, p, beta):
- >>> return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()
- >>> F1 = MetricsLambda(Fbeta, recall, precision, 1)
- >>> F2 = MetricsLambda(Fbeta, recall, precision, 2)
- >>> F3 = MetricsLambda(Fbeta, recall, precision, 3)
- >>> F4 = MetricsLambda(Fbeta, recall, precision, 4)
+ Example:
+
+ .. code-block:: python
+
+ precision = Precision(average=False)
+ recall = Recall(average=False)
+
+ def Fbeta(r, p, beta):
+ return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()
+
+ F1 = MetricsLambda(Fbeta, recall, precision, 1)
+ F2 = MetricsLambda(Fbeta, recall, precision, 2)
+ F3 = MetricsLambda(Fbeta, recall, precision, 3)
+ F4 = MetricsLambda(Fbeta, recall, precision, 4)
"""
def __init__(self, f, *args):
self.function = f
@@ -51,5 +57,8 @@
# recursively attach all its dependencies
for index, metric in enumerate(self.args):
if isinstance(metric, Metric):
- metric.attach(engine, name + '[{}]'.format(index))
+ if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED):
+ engine.add_event_handler(Events.EPOCH_STARTED, metric.started)
+ if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED):
+ engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed)
super(MetricsLambda, self).attach(engine, name)
| {"golden_diff": "diff --git a/ignite/metrics/metrics_lambda.py b/ignite/metrics/metrics_lambda.py\n--- a/ignite/metrics/metrics_lambda.py\n+++ b/ignite/metrics/metrics_lambda.py\n@@ -1,4 +1,5 @@\n from ignite.metrics.metric import Metric\n+from ignite.engine import Events\n \n \n class MetricsLambda(Metric):\n@@ -12,20 +13,25 @@\n resetted. When attach, all its dependencies would be automatically\n attached.\n \n- Arguments:\n+ Args:\n f (callable): the function that defines the computation\n args (sequence): Sequence of other metrics or something\n else that will be fed to ``f`` as arguments.\n \n- Examples:\n- >>> precision = Precision(average=False)\n- >>> recall = Recall(average=False)\n- >>> def Fbeta(r, p, beta):\n- >>> return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()\n- >>> F1 = MetricsLambda(Fbeta, recall, precision, 1)\n- >>> F2 = MetricsLambda(Fbeta, recall, precision, 2)\n- >>> F3 = MetricsLambda(Fbeta, recall, precision, 3)\n- >>> F4 = MetricsLambda(Fbeta, recall, precision, 4)\n+ Example:\n+\n+ .. code-block:: python\n+\n+ precision = Precision(average=False)\n+ recall = Recall(average=False)\n+\n+ def Fbeta(r, p, beta):\n+ return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()\n+\n+ F1 = MetricsLambda(Fbeta, recall, precision, 1)\n+ F2 = MetricsLambda(Fbeta, recall, precision, 2)\n+ F3 = MetricsLambda(Fbeta, recall, precision, 3)\n+ F4 = MetricsLambda(Fbeta, recall, precision, 4)\n \"\"\"\n def __init__(self, f, *args):\n self.function = f\n@@ -51,5 +57,8 @@\n # recursively attach all its dependencies\n for index, metric in enumerate(self.args):\n if isinstance(metric, Metric):\n- metric.attach(engine, name + '[{}]'.format(index))\n+ if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED):\n+ engine.add_event_handler(Events.EPOCH_STARTED, metric.started)\n+ if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED):\n+ engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed)\n super(MetricsLambda, self).attach(engine, name)\n", "issue": "Improve MetricLambda implementation\nUser can get the following output in the `state.metrics`:\r\n```\r\n{'accuracy': 0.37127896829244644,\r\n 'error': 0.6287210317075536,\r\n 'error[0]': -0.6287210317075536,\r\n 'error[0][0]': 0.37127896829244644,\r\n```\r\nwhen launch the below code:\r\n```python\r\naccuracy_metric = Accuracy()\r\nerror_metric = (accuracy_metric - 1.0) * (-1.0)\r\n\r\nmetrics = {\r\n \"accuracy\": accuracy_metric,\r\n \"error\": error_metric,\r\n}\r\n\r\nvalidator = create_supervised_evaluator(model, metrics=metrics)\r\nvalidator.run(val_loader, max_epochs=1)\r\nprint(validator.state.metrics)\r\n```\r\n\r\nThis is due to \r\n\r\nhttps://github.com/pytorch/ignite/blob/d9820451da779e0d0c393804db381e5483240b1c/ignite/metrics/metrics_lambda.py#L50-L54\r\n\r\nand \r\n\r\nhttps://github.com/pytorch/ignite/blob/d9820451da779e0d0c393804db381e5483240b1c/ignite/metrics/metric.py#L68\r\n\r\nIMO, user is not interested of this internal info used to compute `error_metric`. 
We can add some special characters to ignore the result of compute when insert to `engine.state.metrics[name]`.\r\n\r\ncc @zasdfgbnm \n", "before_files": [{"content": "from ignite.metrics.metric import Metric\n\n\nclass MetricsLambda(Metric):\n \"\"\"\n Apply a function to other metrics to obtain a new metric.\n The result of the new metric is defined to be the result\n of applying the function to the result of argument metrics.\n\n When update, this metric does not recursively update the metrics\n it depends on. When reset, all its dependency metrics would be\n resetted. When attach, all its dependencies would be automatically\n attached.\n\n Arguments:\n f (callable): the function that defines the computation\n args (sequence): Sequence of other metrics or something\n else that will be fed to ``f`` as arguments.\n\n Examples:\n >>> precision = Precision(average=False)\n >>> recall = Recall(average=False)\n >>> def Fbeta(r, p, beta):\n >>> return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r)).item()\n >>> F1 = MetricsLambda(Fbeta, recall, precision, 1)\n >>> F2 = MetricsLambda(Fbeta, recall, precision, 2)\n >>> F3 = MetricsLambda(Fbeta, recall, precision, 3)\n >>> F4 = MetricsLambda(Fbeta, recall, precision, 4)\n \"\"\"\n def __init__(self, f, *args):\n self.function = f\n self.args = args\n super(MetricsLambda, self).__init__()\n\n def reset(self):\n for i in self.args:\n if isinstance(i, Metric):\n i.reset()\n\n def update(self, output):\n # NB: this method does not recursively update dependency metrics,\n # which might cause duplicate update issue. To update this metric,\n # users should manually update its dependencies.\n pass\n\n def compute(self):\n materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args]\n return self.function(*materialized)\n\n def attach(self, engine, name):\n # recursively attach all its dependencies\n for index, metric in enumerate(self.args):\n if isinstance(metric, Metric):\n metric.attach(engine, name + '[{}]'.format(index))\n super(MetricsLambda, self).attach(engine, name)\n", "path": "ignite/metrics/metrics_lambda.py"}]} | 1,504 | 596 |
gh_patches_debug_4702 | rasdani/github-patches | git_diff | coala__coala-bears-1986 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade memento_client to 0.6.1
Currently `memento_client` in our repo is pinned at version 0.5.3, which still contains bugs. Upgrading to `0.6.1` would fix many of the bugs found in `0.5.3`.
</issue>
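
The requested change is only a dependency-pin bump; a minimal sketch against the bear's `REQUIREMENTS` declaration (version string taken from the issue) would be:

```python
from dependency_management.requirements.PipRequirement import PipRequirement

# was: PipRequirement('memento_client', '0.5.3')
REQUIREMENTS = {PipRequirement('memento_client', '0.6.1')}
```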
<code>
[start of bears/general/MementoBear.py]
1 import requests
2
3 from bears.general.URLBear import URLBear
4
5 from coalib.bears.LocalBear import LocalBear
6 from coalib.results.Result import Result
7 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
8
9 from dependency_management.requirements.PipRequirement import PipRequirement
10
11 from memento_client import MementoClient
12
13
14 class MementoBear(LocalBear):
15 DEFAULT_TIMEOUT = 15
16 LANGUAGES = {'All'}
17 REQUIREMENTS = {PipRequirement('memento_client', '0.5.3')}
18 AUTHORS = {'The coala developers'}
19 AUTHORS_EMAILS = {'[email protected]'}
20 LICENSE = 'AGPL-3.0'
21 CAN_DETECT = {'Documentation'}
22 BEAR_DEPS = {URLBear}
23
24 @staticmethod
25 def check_archive(mc, link):
26 """
27 Check the link is it archived or not.
28
29 :param mc: A `memento_client.MementoClient` instance.
30 :param link: The link (str) that will be checked.
31 :return: Boolean, `True` means the link has been archived.
32 """
33 try:
34 mc.get_memento_info(link)['mementos']
35 except KeyError:
36 return False
37 return True
38
39 @staticmethod
40 def get_redirect_urls(link):
41 urls = []
42
43 resp = requests.head(link, allow_redirects=True)
44 for redirect in resp.history:
45 urls.append(redirect.url)
46
47 return urls
48
49 def run(self, filename, file, dependency_results=dict(),
50 follow_redirects: bool=True):
51 """
52 Find links in any text file and check if they are archived.
53
54 Link is considered valid if the link has been archived by any services
55 in memento_client.
56
57 This bear can automatically fix redirects.
58
59 Warning: This bear will make HEAD requests to all URLs mentioned in
60 your codebase, which can potentially be destructive. As an example,
61 this bear would naively just visit the URL from a line that goes like
62 `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
63 all your data.
64
65 :param dependency_results: Results given by URLBear.
66 :param follow_redirects: Set to true to check all redirect urls.
67 """
68 self._mc = MementoClient()
69
70 for result in dependency_results.get(URLBear.name, []):
71 line_number, link, code, context = result.contents
72
73 if not (code and 200 <= code < 400):
74 continue
75
76 status = MementoBear.check_archive(self._mc, link)
77 if not status:
78 yield Result.from_values(
79 self,
80 ('This link is not archived yet, visit '
81 'https://web.archive.org/save/%s to get it archived.'
82 % link),
83 file=filename,
84 line=line_number,
85 severity=RESULT_SEVERITY.INFO
86 )
87
88 if follow_redirects and 300 <= code < 400: # HTTP status 30x
89 redirect_urls = MementoBear.get_redirect_urls(link)
90
91 for url in redirect_urls:
92 status = MementoBear.check_archive(self._mc, url)
93 if not status:
94 yield Result.from_values(
95 self,
96 ('This link redirects to %s and not archived yet, '
97 'visit https://web.archive.org/save/%s to get it '
98 'archived.'
99 % (url, url)),
100 file=filename,
101 line=line_number,
102 severity=RESULT_SEVERITY.INFO
103 )
104
[end of bears/general/MementoBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bears/general/MementoBear.py b/bears/general/MementoBear.py
--- a/bears/general/MementoBear.py
+++ b/bears/general/MementoBear.py
@@ -14,7 +14,7 @@
class MementoBear(LocalBear):
DEFAULT_TIMEOUT = 15
LANGUAGES = {'All'}
- REQUIREMENTS = {PipRequirement('memento_client', '0.5.3')}
+ REQUIREMENTS = {PipRequirement('memento_client', '0.6.1')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
| {"golden_diff": "diff --git a/bears/general/MementoBear.py b/bears/general/MementoBear.py\n--- a/bears/general/MementoBear.py\n+++ b/bears/general/MementoBear.py\n@@ -14,7 +14,7 @@\n class MementoBear(LocalBear):\n DEFAULT_TIMEOUT = 15\n LANGUAGES = {'All'}\n- REQUIREMENTS = {PipRequirement('memento_client', '0.5.3')}\n+ REQUIREMENTS = {PipRequirement('memento_client', '0.6.1')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n", "issue": "Upgrade memento_client to 0.6.1\nCurrently `memento_client` in our repo is still at version 0.5.3 which still contains bugs. Upgrading to `0.6.1` would fix many bugs that are found in `0.5.3`\n", "before_files": [{"content": "import requests\n\nfrom bears.general.URLBear import URLBear\n\nfrom coalib.bears.LocalBear import LocalBear\nfrom coalib.results.Result import Result\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\n\nfrom dependency_management.requirements.PipRequirement import PipRequirement\n\nfrom memento_client import MementoClient\n\n\nclass MementoBear(LocalBear):\n DEFAULT_TIMEOUT = 15\n LANGUAGES = {'All'}\n REQUIREMENTS = {PipRequirement('memento_client', '0.5.3')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Documentation'}\n BEAR_DEPS = {URLBear}\n\n @staticmethod\n def check_archive(mc, link):\n \"\"\"\n Check the link is it archived or not.\n\n :param mc: A `memento_client.MementoClient` instance.\n :param link: The link (str) that will be checked.\n :return: Boolean, `True` means the link has been archived.\n \"\"\"\n try:\n mc.get_memento_info(link)['mementos']\n except KeyError:\n return False\n return True\n\n @staticmethod\n def get_redirect_urls(link):\n urls = []\n\n resp = requests.head(link, allow_redirects=True)\n for redirect in resp.history:\n urls.append(redirect.url)\n\n return urls\n\n def run(self, filename, file, dependency_results=dict(),\n follow_redirects: bool=True):\n \"\"\"\n Find links in any text file and check if they are archived.\n\n Link is considered valid if the link has been archived by any services\n in memento_client.\n\n This bear can automatically fix redirects.\n\n Warning: This bear will make HEAD requests to all URLs mentioned in\n your codebase, which can potentially be destructive. 
As an example,\n this bear would naively just visit the URL from a line that goes like\n `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out\n all your data.\n\n :param dependency_results: Results given by URLBear.\n :param follow_redirects: Set to true to check all redirect urls.\n \"\"\"\n self._mc = MementoClient()\n\n for result in dependency_results.get(URLBear.name, []):\n line_number, link, code, context = result.contents\n\n if not (code and 200 <= code < 400):\n continue\n\n status = MementoBear.check_archive(self._mc, link)\n if not status:\n yield Result.from_values(\n self,\n ('This link is not archived yet, visit '\n 'https://web.archive.org/save/%s to get it archived.'\n % link),\n file=filename,\n line=line_number,\n severity=RESULT_SEVERITY.INFO\n )\n\n if follow_redirects and 300 <= code < 400: # HTTP status 30x\n redirect_urls = MementoBear.get_redirect_urls(link)\n\n for url in redirect_urls:\n status = MementoBear.check_archive(self._mc, url)\n if not status:\n yield Result.from_values(\n self,\n ('This link redirects to %s and not archived yet, '\n 'visit https://web.archive.org/save/%s to get it '\n 'archived.'\n % (url, url)),\n file=filename,\n line=line_number,\n severity=RESULT_SEVERITY.INFO\n )\n", "path": "bears/general/MementoBear.py"}]} | 1,575 | 153 |
gh_patches_debug_8542 | rasdani/github-patches | git_diff | bokeh__bokeh-9203 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEATURE] Print full stacktrace on error
See discussion on [discussion forum](https://discourse.bokeh.org/t/debugging-recommendations/3934)
**Is your feature request related to a problem? Please describe.**
My underlying application is getting very big, and when I run into problems, debugging takes me a lot of time.
For example, I currently just get the line:
2019-08-26 13:57:29,620 error handling message Message ‘EVENT’ (revision 1) content: ‘{“event_name”:“button_click”,“event_values”:{“model_id”:“1027”}}’: ValueError(‘Wrong number of items passed 2, placement implies 1’)
**Describe the solution you'd like**
My desired solution is to see the stack trace, which includes the file and line where the error originated.
@bryevdv pointed me to the protocol_handler.py script
**Describe alternatives you've considered**
The alternative is to set the log level to debug, but I only want error handling improved, not a full debug log. Also, I want the error to show up directly, because I may not know how to reproduce it on a running server.
PR created
**Additional context**
I am not worried about the log getting too long. Errors should in general not occur; they should be fixed, or at least explicitly caught in the cases where they are acceptable.
</issue>
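
As an illustration of the requested behaviour, the standard-library logging module can attach the full traceback to an ERROR record via `exc_info=True`; the `handle` function below is a hypothetical stand-in for the real message dispatch, not Bokeh's actual code.

```python
import logging

log = logging.getLogger(__name__)


def handle(message, connection, handler):
    try:
        return handler(message, connection)
    except Exception as e:
        # exc_info=True appends the full traceback (file names and line
        # numbers included) to the ERROR record, instead of hiding it
        # behind the DEBUG level.
        log.error("error handling message %r: %r", message, e, exc_info=True)
        raise
```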
<code>
[start of bokeh/server/protocol_handler.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 ''' Encapsulate handling of all Bokeh Protocol messages a Bokeh server may
8 receive.
9
10 '''
11
12 #-----------------------------------------------------------------------------
13 # Boilerplate
14 #-----------------------------------------------------------------------------
15 from __future__ import absolute_import, division, print_function, unicode_literals
16
17 import logging
18 log = logging.getLogger(__name__)
19
20 #-----------------------------------------------------------------------------
21 # Imports
22 #-----------------------------------------------------------------------------
23
24 # Standard library imports
25
26 # External imports
27 from tornado import gen
28
29 # Bokeh imports
30 from .session import ServerSession
31 from ..protocol.exceptions import ProtocolError
32
33 #-----------------------------------------------------------------------------
34 # Globals and constants
35 #-----------------------------------------------------------------------------
36
37 __all__ = (
38 'ProtocolHandler',
39 )
40
41 #-----------------------------------------------------------------------------
42 # General API
43 #-----------------------------------------------------------------------------
44
45 class ProtocolHandler(object):
46 ''' A Bokeh server may be expected to receive any of the following protocol
47 messages:
48
49 * ``EVENT``
50 * ``PATCH-DOC``
51 * ``PULL-DOC-REQ``
52 * ``PUSH-DOC``
53 * ``SERVER-INFO-REQ``
54
55 The job of ``ProtocolHandler`` is to direct incoming messages to the right
56 specialized handler for each message type. When the server receives a new
57 message on a connection it will call ``handler`` with the message and the
58 connection that the message arrived on. Most messages are ultimately
59 handled by the ``ServerSession`` class, but some simpler messages types
60 such as ``SERVER-INFO-REQ`` may be handled directly by ``ProtocolHandler``.
61
62 Any unexpected messages will result in a ``ProtocolError``.
63
64 '''
65
66 def __init__(self):
67 self._handlers = dict()
68
69 self._handlers['PULL-DOC-REQ'] = ServerSession.pull
70 self._handlers['PUSH-DOC'] = ServerSession.push
71 self._handlers['PATCH-DOC'] = ServerSession.patch
72 self._handlers['SERVER-INFO-REQ'] = self._server_info_req
73 self._handlers['EVENT'] = ServerSession.event
74
75 @gen.coroutine
76 def handle(self, message, connection):
77 ''' Delegate a received message to the appropriate handler.
78
79 Args:
80 message (Message) :
81 The message that was receive that needs to be handled
82
83 connection (ServerConnection) :
84 The connection that received this message
85
86 Raises:
87 ProtocolError
88
89 '''
90
91 handler = self._handlers.get((message.msgtype, message.revision))
92
93 if handler is None:
94 handler = self._handlers.get(message.msgtype)
95
96 if handler is None:
97 raise ProtocolError("%s not expected on server" % message)
98
99 try:
100 work = yield handler(message, connection)
101 except Exception as e:
102 log.error("error handling message %r: %r", message, e)
103 log.debug(" message header %r content %r", message.header, message.content, exc_info=1)
104 work = connection.error(message, repr(e))
105 raise gen.Return(work)
106
107 @gen.coroutine
108 def _server_info_req(self, message, connection):
109 raise gen.Return(connection.protocol.create('SERVER-INFO-REPLY', message.header['msgid']))
110
111 #-----------------------------------------------------------------------------
112 # Dev API
113 #-----------------------------------------------------------------------------
114
115 #-----------------------------------------------------------------------------
116 # Private API
117 #-----------------------------------------------------------------------------
118
119 #-----------------------------------------------------------------------------
120 # Code
121 #-----------------------------------------------------------------------------
122
[end of bokeh/server/protocol_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/server/protocol_handler.py b/bokeh/server/protocol_handler.py
--- a/bokeh/server/protocol_handler.py
+++ b/bokeh/server/protocol_handler.py
@@ -99,8 +99,8 @@
try:
work = yield handler(message, connection)
except Exception as e:
- log.error("error handling message %r: %r", message, e)
- log.debug(" message header %r content %r", message.header, message.content, exc_info=1)
+ log.error("error handling message\n message: %r \n error: %r",
+ message, e, exc_info=True)
work = connection.error(message, repr(e))
raise gen.Return(work)
| {"golden_diff": "diff --git a/bokeh/server/protocol_handler.py b/bokeh/server/protocol_handler.py\n--- a/bokeh/server/protocol_handler.py\n+++ b/bokeh/server/protocol_handler.py\n@@ -99,8 +99,8 @@\n try:\n work = yield handler(message, connection)\n except Exception as e:\n- log.error(\"error handling message %r: %r\", message, e)\n- log.debug(\" message header %r content %r\", message.header, message.content, exc_info=1)\n+ log.error(\"error handling message\\n message: %r \\n error: %r\",\n+ message, e, exc_info=True)\n work = connection.error(message, repr(e))\n raise gen.Return(work)\n", "issue": "[FEATURE] Print full stacktrace on error\nSee discussion on [discussion forum](https://discourse.bokeh.org/t/debugging-recommendations/3934)\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nMy underlying application is getting very big and when running into problems the debugging takes me a lot of time. \r\n\r\nFor example I now just get the line:\r\n2019-08-26 13:57:29,620 error handling message Message \u2018EVENT\u2019 (revision 1) content: \u2018{\u201cevent_name\u201d:\u201cbutton_click\u201d,\u201cevent_values\u201d:{\u201cmodel_id\u201d:\u201c1027\u201d}}\u2019: ValueError(\u2018Wrong number of items passed 2, placement implies 1\u2019)\r\n\r\n**Describe the solution you'd like**\r\nMy desired solution is to see the stacktrace which includes the file and line of the source of the error.\r\n\r\n@bryevdv pointed me to the protocol_handler.py script\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is to set debug level, but I only want error handling improved, not have a full log. Also, I want the error to show directly, because I may not know how to reproduce it on a running server.\r\n\r\nPR created\r\n\r\n**Additional context**\r\nI am not worried about the log getting too long. 
Errors should in general not occur, they should be fixed or at least be caught in case they are acceptable\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Encapsulate handling of all Bokeh Protocol messages a Bokeh server may\nreceive.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\n\n# External imports\nfrom tornado import gen\n\n# Bokeh imports\nfrom .session import ServerSession\nfrom ..protocol.exceptions import ProtocolError\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'ProtocolHandler',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\nclass ProtocolHandler(object):\n ''' A Bokeh server may be expected to receive any of the following protocol\n messages:\n\n * ``EVENT``\n * ``PATCH-DOC``\n * ``PULL-DOC-REQ``\n * ``PUSH-DOC``\n * ``SERVER-INFO-REQ``\n\n The job of ``ProtocolHandler`` is to direct incoming messages to the right\n specialized handler for each message type. When the server receives a new\n message on a connection it will call ``handler`` with the message and the\n connection that the message arrived on. 
Most messages are ultimately\n handled by the ``ServerSession`` class, but some simpler messages types\n such as ``SERVER-INFO-REQ`` may be handled directly by ``ProtocolHandler``.\n\n Any unexpected messages will result in a ``ProtocolError``.\n\n '''\n\n def __init__(self):\n self._handlers = dict()\n\n self._handlers['PULL-DOC-REQ'] = ServerSession.pull\n self._handlers['PUSH-DOC'] = ServerSession.push\n self._handlers['PATCH-DOC'] = ServerSession.patch\n self._handlers['SERVER-INFO-REQ'] = self._server_info_req\n self._handlers['EVENT'] = ServerSession.event\n\n @gen.coroutine\n def handle(self, message, connection):\n ''' Delegate a received message to the appropriate handler.\n\n Args:\n message (Message) :\n The message that was receive that needs to be handled\n\n connection (ServerConnection) :\n The connection that received this message\n\n Raises:\n ProtocolError\n\n '''\n\n handler = self._handlers.get((message.msgtype, message.revision))\n\n if handler is None:\n handler = self._handlers.get(message.msgtype)\n\n if handler is None:\n raise ProtocolError(\"%s not expected on server\" % message)\n\n try:\n work = yield handler(message, connection)\n except Exception as e:\n log.error(\"error handling message %r: %r\", message, e)\n log.debug(\" message header %r content %r\", message.header, message.content, exc_info=1)\n work = connection.error(message, repr(e))\n raise gen.Return(work)\n\n @gen.coroutine\n def _server_info_req(self, message, connection):\n raise gen.Return(connection.protocol.create('SERVER-INFO-REPLY', message.header['msgid']))\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/server/protocol_handler.py"}]} | 1,846 | 164 |
gh_patches_debug_20036 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unexpected form parameter when creating new poster
As reported by sentry:
```
Internal Server Error: /admin/posters/poster/
FieldError at /admin/posters/poster/
Cannot resolve keyword 'category' into field.
```
As reported: `reason = 'Jeg søkte etter 2 og fant 500 i stedet'` (Norwegian for "I searched for 2 and found 500 instead").
Presumably this is due to a form POSTing a field param that's not accounted for. Should be a quick and easy fix.
</issue>
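
For context, Django raises this `FieldError` when `ModelAdmin.search_fields` names something that is not a field on the model (or a valid related lookup). The sketch below shows the shape of a fix; the related-lookup names are assumptions based on the admin class in this record, not verified against the `Poster` model.

```python
from django.contrib import admin

from apps.posters.models import Poster


@admin.register(Poster)
class PosterAdmin(admin.ModelAdmin):
    # Every entry must be a real field or a related lookup; 'category' does
    # not exist on Poster, which is what triggers the FieldError when the
    # admin search form is submitted.
    search_fields = ('title', 'event__title', 'ordered_by__first_name', 'ordered_by__last_name')
```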
<code>
[start of apps/posters/admin.py]
1 from django.contrib import admin
2 from django.utils.translation import ugettext as _
3 from reversion.admin import VersionAdmin
4
5 from apps.posters.models import Poster
6
7
8 class PosterAdmin(VersionAdmin):
9 model = Poster
10 list_display = ('event', 'title', 'assigned_to', 'display_from',
11 'ordered_date', 'ordered_by', 'ordered_committee')
12 fieldsets = (
13 (_('Event info'), {'fields': ('event', 'title', 'price', 'description', 'comments')}),
14 (_('Order info'), {'fields': ('amount',)}),
15 (_('proKom'), {'fields': ('display_from', 'assigned_to', 'ordered_by', 'ordered_committee', 'finished')}),
16 )
17 search_fields = ('title', 'category', 'company', 'when')
18
19
20 admin.site.register(Poster, PosterAdmin)
21 # username, expiration_date, registered, note
22
[end of apps/posters/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/posters/admin.py b/apps/posters/admin.py
--- a/apps/posters/admin.py
+++ b/apps/posters/admin.py
@@ -2,9 +2,10 @@
from django.utils.translation import ugettext as _
from reversion.admin import VersionAdmin
-from apps.posters.models import Poster
+from .models import Poster
[email protected](Poster)
class PosterAdmin(VersionAdmin):
model = Poster
list_display = ('event', 'title', 'assigned_to', 'display_from',
@@ -14,8 +15,5 @@
(_('Order info'), {'fields': ('amount',)}),
(_('proKom'), {'fields': ('display_from', 'assigned_to', 'ordered_by', 'ordered_committee', 'finished')}),
)
- search_fields = ('title', 'category', 'company', 'when')
-
-
-admin.site.register(Poster, PosterAdmin)
-# username, expiration_date, registered, note
+ search_fields = ('title', 'event__title', 'assigned_to__first_name', 'assigned_to__last_name',
+ 'ordered_by__first_name', 'ordered_by__last_name')
| {"golden_diff": "diff --git a/apps/posters/admin.py b/apps/posters/admin.py\n--- a/apps/posters/admin.py\n+++ b/apps/posters/admin.py\n@@ -2,9 +2,10 @@\n from django.utils.translation import ugettext as _\n from reversion.admin import VersionAdmin\n \n-from apps.posters.models import Poster\n+from .models import Poster\n \n \[email protected](Poster)\n class PosterAdmin(VersionAdmin):\n model = Poster\n list_display = ('event', 'title', 'assigned_to', 'display_from',\n@@ -14,8 +15,5 @@\n (_('Order info'), {'fields': ('amount',)}),\n (_('proKom'), {'fields': ('display_from', 'assigned_to', 'ordered_by', 'ordered_committee', 'finished')}),\n )\n- search_fields = ('title', 'category', 'company', 'when')\n-\n-\n-admin.site.register(Poster, PosterAdmin)\n-# username, expiration_date, registered, note\n+ search_fields = ('title', 'event__title', 'assigned_to__first_name', 'assigned_to__last_name',\n+ 'ordered_by__first_name', 'ordered_by__last_name')\n", "issue": "Unexpected form parameter when creating new poster \nAs reported by sentry:\n\n```\nInternal Server Error: /admin/posters/poster/\n\nFieldError at /admin/posters/poster/\nCannot resolve keyword 'category' into field.\n```\n\nAs reported: `reason = 'Jeg s\u00f8kte etter 2 og fant 500 i stedet'`\n\nPresume this is due to a form POSTing a field param that's not accounted for. Should be a quick and easy fix. \n", "before_files": [{"content": "from django.contrib import admin\nfrom django.utils.translation import ugettext as _\nfrom reversion.admin import VersionAdmin\n\nfrom apps.posters.models import Poster\n\n\nclass PosterAdmin(VersionAdmin):\n model = Poster\n list_display = ('event', 'title', 'assigned_to', 'display_from',\n 'ordered_date', 'ordered_by', 'ordered_committee')\n fieldsets = (\n (_('Event info'), {'fields': ('event', 'title', 'price', 'description', 'comments')}),\n (_('Order info'), {'fields': ('amount',)}),\n (_('proKom'), {'fields': ('display_from', 'assigned_to', 'ordered_by', 'ordered_committee', 'finished')}),\n )\n search_fields = ('title', 'category', 'company', 'when')\n\n\nadmin.site.register(Poster, PosterAdmin)\n# username, expiration_date, registered, note\n", "path": "apps/posters/admin.py"}]} | 864 | 254 |
gh_patches_debug_12106 | rasdani/github-patches | git_diff | apache__airflow-24142 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate MySQL example DAGs to new design
There is a new design of system tests that was introduced by the [AIP-47](https://cwiki.apache.org/confluence/display/AIRFLOW/AIP-47+New+design+of+Airflow+System+Tests).
All current example dags need to be migrated and converted into system tests, so they can be run in the CI process automatically before releases.
This is an aggregated issue for all example DAGs related to the `MySQL` provider. It was created to track the progress of their migration.
List of paths to example DAGs:
- [x] airflow/providers/mysql/example_dags/example_mysql.py
</issue>
<code>
[start of airflow/providers/mysql/example_dags/__init__.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
[end of airflow/providers/mysql/example_dags/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/providers/mysql/example_dags/__init__.py b/airflow/providers/mysql/example_dags/__init__.py
deleted file mode 100644
--- a/airflow/providers/mysql/example_dags/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
| {"golden_diff": "diff --git a/airflow/providers/mysql/example_dags/__init__.py b/airflow/providers/mysql/example_dags/__init__.py\ndeleted file mode 100644\n--- a/airflow/providers/mysql/example_dags/__init__.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n", "issue": "Migrate MySQL example DAGs to new design\nThere is a new design of system tests that was introduced by the [AIP-47](https://cwiki.apache.org/confluence/display/AIRFLOW/AIP-47+New+design+of+Airflow+System+Tests).\n\nAll current example dags need to be migrated and converted into system tests, so they can be run in the CI process automatically before releases.\n\nThis is an aggregated issue for all example DAGs related to `MySQL` provider. It is created to track progress of their migration.\n\nList of paths to example DAGs:\n- [x] airflow/providers/mysql/example_dags/example_mysql.py\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n", "path": "airflow/providers/mysql/example_dags/__init__.py"}]} | 886 | 265 |
gh_patches_debug_21299 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-349 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pass experiment tags to MLFlowLogger
**Is your feature request related to a problem? Please describe.**
When using MLFlowLogger, I'm unable to easily set experiment tags, like username or run name.
**Describe the solution you'd like**
Add a `tags=None` parameter to `MLFlowLogger`; the tags will be passed on to the `create_run` method.
**Describe alternatives you've considered**
Manually hack the logger: get the experiment from it and set the tag there.
If you don't see any drawbacks, I can make a PR
</issue>
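A minimal sketch of the requested call, assuming the new `tags` keyword is added and forwarded to `create_run` (the keyword and the tag values are illustrative, not an existing API; the import path follows the module layout shown below):

```python
from pytorch_lightning.logging.mlflow_logger import MLFlowLogger

# Hypothetical usage once `tags` is supported; the keyword does not exist yet.
mlf_logger = MLFlowLogger(
    experiment_name="my_experiment",
    tracking_uri="file:./mlruns",
    tags={"mlflow.user": "alice", "mlflow.runName": "baseline"},  # forwarded to create_run
)
```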
<code>
[start of pytorch_lightning/logging/mlflow_logger.py]
1 from time import time
2 from logging import getLogger
3
4 import mlflow
5
6 from .base import LightningLoggerBase, rank_zero_only
7
8 logger = getLogger(__name__)
9
10
11 class MLFlowLogger(LightningLoggerBase):
12 def __init__(self, experiment_name, tracking_uri=None):
13 super().__init__()
14 self.client = mlflow.tracking.MlflowClient(tracking_uri)
15 self.experiment_name = experiment_name
16 self._run_id = None
17
18 @property
19 def run_id(self):
20 if self._run_id is not None:
21 return self._run_id
22
23 experiment = self.client.get_experiment_by_name(self.experiment_name)
24 if experiment is None:
25 logger.warning(
26 f"Experiment with name f{self.experiment_name} not found. Creating it."
27 )
28 self.client.create_experiment(self.experiment_name)
29 experiment = self.client.get_experiment_by_name(self.experiment_name)
30
31 run = self.client.create_run(experiment.experiment_id)
32 self._run_id = run.info.run_id
33 return self._run_id
34
35 @rank_zero_only
36 def log_hyperparams(self, params):
37 for k, v in vars(params).items():
38 self.client.log_param(self.run_id, k, v)
39
40 @rank_zero_only
41 def log_metrics(self, metrics, step_num=None):
42 timestamp_ms = int(time() * 1000)
43 for k, v in metrics.items():
44 if isinstance(v, str):
45 logger.warning(
46 f"Discarding metric with string value {k}={v}"
47 )
48 continue
49 self.client.log_metric(self.run_id, k, v, timestamp_ms, step_num)
50
51 def save(self):
52 pass
53
54 @rank_zero_only
55 def finalize(self, status="FINISHED"):
56 if status == 'success':
57 status = 'FINISHED'
58 self.client.set_terminated(self.run_id, status)
59
[end of pytorch_lightning/logging/mlflow_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/logging/mlflow_logger.py b/pytorch_lightning/logging/mlflow_logger.py
--- a/pytorch_lightning/logging/mlflow_logger.py
+++ b/pytorch_lightning/logging/mlflow_logger.py
@@ -9,11 +9,12 @@
class MLFlowLogger(LightningLoggerBase):
- def __init__(self, experiment_name, tracking_uri=None):
+ def __init__(self, experiment_name, tracking_uri=None, tags=None):
super().__init__()
self.client = mlflow.tracking.MlflowClient(tracking_uri)
self.experiment_name = experiment_name
self._run_id = None
+ self.tags = tags
@property
def run_id(self):
@@ -28,7 +29,7 @@
self.client.create_experiment(self.experiment_name)
experiment = self.client.get_experiment_by_name(self.experiment_name)
- run = self.client.create_run(experiment.experiment_id)
+ run = self.client.create_run(experiment.experiment_id, tags=self.tags)
self._run_id = run.info.run_id
return self._run_id
| {"golden_diff": "diff --git a/pytorch_lightning/logging/mlflow_logger.py b/pytorch_lightning/logging/mlflow_logger.py\n--- a/pytorch_lightning/logging/mlflow_logger.py\n+++ b/pytorch_lightning/logging/mlflow_logger.py\n@@ -9,11 +9,12 @@\n \n \n class MLFlowLogger(LightningLoggerBase):\n- def __init__(self, experiment_name, tracking_uri=None):\n+ def __init__(self, experiment_name, tracking_uri=None, tags=None):\n super().__init__()\n self.client = mlflow.tracking.MlflowClient(tracking_uri)\n self.experiment_name = experiment_name\n self._run_id = None\n+ self.tags = tags\n \n @property\n def run_id(self):\n@@ -28,7 +29,7 @@\n self.client.create_experiment(self.experiment_name)\n experiment = self.client.get_experiment_by_name(self.experiment_name)\n \n- run = self.client.create_run(experiment.experiment_id)\n+ run = self.client.create_run(experiment.experiment_id, tags=self.tags)\n self._run_id = run.info.run_id\n return self._run_id\n", "issue": "Pass experiment tags to MLFlowLogger\n**Is your feature request related to a problem? Please describe.**\r\nWhen using MLFlowLogger, I'm unable to easily set experiment tags, like username or run name.\r\n\r\n**Describe the solution you'd like**\r\nAdd parameter `tags=None` which is passed to `MLFlowLogger`. Tags will be passed to `create_run` method\r\n\r\n**Describe alternatives you've considered**\r\nManually hack logger, get experiment from it and set tag there\r\n\r\nIf you don't see any drawbacks, I can make a PR\r\n\n", "before_files": [{"content": "from time import time\nfrom logging import getLogger\n\nimport mlflow\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\nlogger = getLogger(__name__)\n\n\nclass MLFlowLogger(LightningLoggerBase):\n def __init__(self, experiment_name, tracking_uri=None):\n super().__init__()\n self.client = mlflow.tracking.MlflowClient(tracking_uri)\n self.experiment_name = experiment_name\n self._run_id = None\n\n @property\n def run_id(self):\n if self._run_id is not None:\n return self._run_id\n\n experiment = self.client.get_experiment_by_name(self.experiment_name)\n if experiment is None:\n logger.warning(\n f\"Experiment with name f{self.experiment_name} not found. Creating it.\"\n )\n self.client.create_experiment(self.experiment_name)\n experiment = self.client.get_experiment_by_name(self.experiment_name)\n\n run = self.client.create_run(experiment.experiment_id)\n self._run_id = run.info.run_id\n return self._run_id\n\n @rank_zero_only\n def log_hyperparams(self, params):\n for k, v in vars(params).items():\n self.client.log_param(self.run_id, k, v)\n\n @rank_zero_only\n def log_metrics(self, metrics, step_num=None):\n timestamp_ms = int(time() * 1000)\n for k, v in metrics.items():\n if isinstance(v, str):\n logger.warning(\n f\"Discarding metric with string value {k}={v}\"\n )\n continue\n self.client.log_metric(self.run_id, k, v, timestamp_ms, step_num)\n\n def save(self):\n pass\n\n @rank_zero_only\n def finalize(self, status=\"FINISHED\"):\n if status == 'success':\n status = 'FINISHED'\n self.client.set_terminated(self.run_id, status)\n", "path": "pytorch_lightning/logging/mlflow_logger.py"}]} | 1,179 | 247 |
gh_patches_debug_13962 | rasdani/github-patches | git_diff | conan-io__conan-center-index-5416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] all: "Access is denied" in os.rename() on Windows
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **almost all packages affected**
* Operating System+version: **Windows 10**
* Compiler+version: **MSVC 16**
* Conan version: **conan 1.35.2**
* Python version: **Python 3.8.7**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os_build=Windows
os=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
compiler.runtime=MD
build_type=Release
```
### Steps to reproduce (Include if Applicable)
This is a known issue; a solution was provided by https://github.com/conan-io/conan/pull/6774.
However, most recipes still use `os.rename()` rather than `tools.rename()`.
### Log
```
b2/4.2.0: Configuring sources in C:\Users\xxx\.conan\data\b2\4.2.0\_\_\source
ERROR: b2/4.2.0: Error in source() method, line 58
os.rename(extracted_dir, "source")
PermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'
```
</issue>
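For illustration, the fix referenced above amounts to swapping the raw `os.rename` call for Conan's retry-aware helper. This is a sketch of the pattern, not the actual b2 recipe:

```python
from conans import tools

extracted_dir = "build-4.2.0"

# os.rename(extracted_dir, "source")   # fails intermittently on Windows ("Access is denied")
tools.rename(extracted_dir, "source")  # retries on Windows, per conan-io/conan#6774
```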
<code>
[start of recipes/zulu-openjdk/all/conanfile.py]
1 from conans import ConanFile, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os, glob
4
5
6 class ZuluOpenJDK(ConanFile):
7 name = "zulu-openjdk"
8 url = "https://github.com/conan-io/conan-center-index/"
9 description = "A OpenJDK distribution"
10 homepage = "https://www.azul.com"
11 license = "https://www.azul.com/products/zulu-and-zulu-enterprise/zulu-terms-of-use/"
12 topics = ("java", "jdk", "openjdk")
13 settings = "os", "arch"
14
15 @property
16 def _source_subfolder(self):
17 return "source_subfolder"
18
19 @property
20 def _jni_folder(self):
21 folder = {"Linux": "linux", "Macos": "darwin", "Windows": "win32"}.get(str(self.settings.os))
22 return os.path.join("include", folder)
23
24 def configure(self):
25 if self.settings.arch != "x86_64":
26 raise ConanInvalidConfiguration("Unsupported Architecture. This package currently only supports x86_64.")
27 if self.settings.os not in ["Windows", "Macos", "Linux"]:
28 raise ConanInvalidConfiguration("Unsupported os. This package currently only support Linux/Macos/Windows")
29
30 def source(self):
31 url = self.conan_data["sources"][self.version]["url"][str(self.settings.os)]
32 checksum = self.conan_data["sources"][self.version]["sha256"][str(self.settings.os)]
33 tools.get(url, sha256=checksum)
34 os.rename(glob.glob("zulu*")[0], self._source_subfolder)
35
36 def build(self):
37 pass # nothing to do, but this shall trigger no warnings ;-)
38
39 def package(self):
40 self.copy(pattern="*", dst="bin", src=os.path.join(self._source_subfolder, "bin"), excludes=("msvcp140.dll", "vcruntime140.dll"))
41 self.copy(pattern="*", dst="include", src=os.path.join(self._source_subfolder, "include"))
42 self.copy(pattern="*", dst="lib", src=os.path.join(self._source_subfolder, "lib"))
43 self.copy(pattern="*", dst="res", src=os.path.join(self._source_subfolder, "conf"))
44 # conf folder is required for security settings, to avoid
45 # java.lang.SecurityException: Can't read cryptographic policy directory: unlimited
46 # https://github.com/conan-io/conan-center-index/pull/4491#issuecomment-774555069
47 self.copy(pattern="*", dst="conf", src=os.path.join(self._source_subfolder, "conf"))
48 self.copy(pattern="*", dst="licenses", src=os.path.join(self._source_subfolder, "legal"))
49 self.copy(pattern="*", dst=os.path.join("lib", "jmods"), src=os.path.join(self._source_subfolder, "jmods"))
50
51 def package_info(self):
52 self.cpp_info.includedirs.append(self._jni_folder)
53 self.cpp_info.libdirs = []
54
55 java_home = self.package_folder
56 bin_path = os.path.join(java_home, "bin")
57
58 self.output.info("Creating JAVA_HOME environment variable with : {0}".format(java_home))
59 self.env_info.JAVA_HOME = java_home
60
61 self.output.info("Appending PATH environment variable with : {0}".format(bin_path))
62 self.env_info.PATH.append(bin_path)
63
[end of recipes/zulu-openjdk/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/zulu-openjdk/all/conanfile.py b/recipes/zulu-openjdk/all/conanfile.py
--- a/recipes/zulu-openjdk/all/conanfile.py
+++ b/recipes/zulu-openjdk/all/conanfile.py
@@ -28,10 +28,8 @@
raise ConanInvalidConfiguration("Unsupported os. This package currently only support Linux/Macos/Windows")
def source(self):
- url = self.conan_data["sources"][self.version]["url"][str(self.settings.os)]
- checksum = self.conan_data["sources"][self.version]["sha256"][str(self.settings.os)]
- tools.get(url, sha256=checksum)
- os.rename(glob.glob("zulu*")[0], self._source_subfolder)
+ tools.get(**self.conan_data["sources"][self.version][str(self.settings.os)],
+ destination=self._source_subfolder, strip_root=True)
def build(self):
pass # nothing to do, but this shall trigger no warnings ;-)
| {"golden_diff": "diff --git a/recipes/zulu-openjdk/all/conanfile.py b/recipes/zulu-openjdk/all/conanfile.py\n--- a/recipes/zulu-openjdk/all/conanfile.py\n+++ b/recipes/zulu-openjdk/all/conanfile.py\n@@ -28,10 +28,8 @@\n raise ConanInvalidConfiguration(\"Unsupported os. This package currently only support Linux/Macos/Windows\")\n \n def source(self):\n- url = self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)]\n- checksum = self.conan_data[\"sources\"][self.version][\"sha256\"][str(self.settings.os)]\n- tools.get(url, sha256=checksum)\n- os.rename(glob.glob(\"zulu*\")[0], self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version][str(self.settings.os)],\n+ destination=self._source_subfolder, strip_root=True)\n \n def build(self):\n pass # nothing to do, but this shall trigger no warnings ;-)\n", "issue": "[package] all: \"Access is denied\" in os.rename() on Windows\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **almost all packages affected**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **MSVC 16**\r\n * Conan version: **conan 1.35.2**\r\n * Python version: **Python 3.8.7**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos_build=Windows\r\nos=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\ncompiler.runtime=MD\r\nbuild_type=Release\r\n```\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nThis is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774\r\nHowever most recipes still use `os.rename()` and not `tools.rename()`. \r\n\r\n### Log\r\n```\r\nb2/4.2.0: Configuring sources in C:\\Users\\xxx\\.conan\\data\\b2\\4.2.0\\_\\_\\source\r\nERROR: b2/4.2.0: Error in source() method, line 58\r\nos.rename(extracted_dir, \"source\")\r\nPermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'\r\n```\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os, glob\n\n\nclass ZuluOpenJDK(ConanFile):\n name = \"zulu-openjdk\"\n url = \"https://github.com/conan-io/conan-center-index/\"\n description = \"A OpenJDK distribution\"\n homepage = \"https://www.azul.com\"\n license = \"https://www.azul.com/products/zulu-and-zulu-enterprise/zulu-terms-of-use/\"\n topics = (\"java\", \"jdk\", \"openjdk\")\n settings = \"os\", \"arch\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _jni_folder(self):\n folder = {\"Linux\": \"linux\", \"Macos\": \"darwin\", \"Windows\": \"win32\"}.get(str(self.settings.os))\n return os.path.join(\"include\", folder)\n\n def configure(self):\n if self.settings.arch != \"x86_64\":\n raise ConanInvalidConfiguration(\"Unsupported Architecture. This package currently only supports x86_64.\")\n if self.settings.os not in [\"Windows\", \"Macos\", \"Linux\"]:\n raise ConanInvalidConfiguration(\"Unsupported os. 
This package currently only support Linux/Macos/Windows\")\n\n def source(self):\n url = self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)]\n checksum = self.conan_data[\"sources\"][self.version][\"sha256\"][str(self.settings.os)]\n tools.get(url, sha256=checksum)\n os.rename(glob.glob(\"zulu*\")[0], self._source_subfolder)\n\n def build(self):\n pass # nothing to do, but this shall trigger no warnings ;-)\n\n def package(self):\n self.copy(pattern=\"*\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"bin\"), excludes=(\"msvcp140.dll\", \"vcruntime140.dll\"))\n self.copy(pattern=\"*\", dst=\"include\", src=os.path.join(self._source_subfolder, \"include\"))\n self.copy(pattern=\"*\", dst=\"lib\", src=os.path.join(self._source_subfolder, \"lib\"))\n self.copy(pattern=\"*\", dst=\"res\", src=os.path.join(self._source_subfolder, \"conf\"))\n # conf folder is required for security settings, to avoid\n # java.lang.SecurityException: Can't read cryptographic policy directory: unlimited\n # https://github.com/conan-io/conan-center-index/pull/4491#issuecomment-774555069\n self.copy(pattern=\"*\", dst=\"conf\", src=os.path.join(self._source_subfolder, \"conf\"))\n self.copy(pattern=\"*\", dst=\"licenses\", src=os.path.join(self._source_subfolder, \"legal\"))\n self.copy(pattern=\"*\", dst=os.path.join(\"lib\", \"jmods\"), src=os.path.join(self._source_subfolder, \"jmods\"))\n\n def package_info(self):\n self.cpp_info.includedirs.append(self._jni_folder)\n self.cpp_info.libdirs = []\n\n java_home = self.package_folder\n bin_path = os.path.join(java_home, \"bin\")\n\n self.output.info(\"Creating JAVA_HOME environment variable with : {0}\".format(java_home))\n self.env_info.JAVA_HOME = java_home\n\n self.output.info(\"Appending PATH environment variable with : {0}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/zulu-openjdk/all/conanfile.py"}]} | 1,748 | 228 |
gh_patches_debug_5474 | rasdani/github-patches | git_diff | scrapy__scrapy-4646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow run pyw scripts
scrapy/commands/runspider.py checks for a Python script source, but it fails to allow .pyw files.
The check at row 14 is:
` if fext != '.py':`
but it should be:
` if fext != '.py' and fext != '.pyw':`
</issue>
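A self-contained sketch of the behaviour being asked for; the helper name is made up for illustration, while the real change belongs in `_import_file` shown below:

```python
import os

def _looks_like_python_source(filepath: str) -> bool:
    # Accept both plain scripts (.py) and windowed scripts (.pyw), per the request above.
    _, fext = os.path.splitext(filepath)
    return fext in ('.py', '.pyw')

assert _looks_like_python_source("my_spider.pyw")
assert not _looks_like_python_source("my_spider.txt")
```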
<code>
[start of scrapy/commands/runspider.py]
1 import sys
2 import os
3 from importlib import import_module
4
5 from scrapy.utils.spider import iter_spider_classes
6 from scrapy.exceptions import UsageError
7 from scrapy.commands import BaseRunSpiderCommand
8
9
10 def _import_file(filepath):
11 abspath = os.path.abspath(filepath)
12 dirname, file = os.path.split(abspath)
13 fname, fext = os.path.splitext(file)
14 if fext != '.py':
15 raise ValueError(f"Not a Python source file: {abspath}")
16 if dirname:
17 sys.path = [dirname] + sys.path
18 try:
19 module = import_module(fname)
20 finally:
21 if dirname:
22 sys.path.pop(0)
23 return module
24
25
26 class Command(BaseRunSpiderCommand):
27
28 requires_project = False
29 default_settings = {'SPIDER_LOADER_WARN_ONLY': True}
30
31 def syntax(self):
32 return "[options] <spider_file>"
33
34 def short_desc(self):
35 return "Run a self-contained spider (without creating a project)"
36
37 def long_desc(self):
38 return "Run the spider defined in the given file"
39
40 def run(self, args, opts):
41 if len(args) != 1:
42 raise UsageError()
43 filename = args[0]
44 if not os.path.exists(filename):
45 raise UsageError(f"File not found: {filename}\n")
46 try:
47 module = _import_file(filename)
48 except (ImportError, ValueError) as e:
49 raise UsageError(f"Unable to load {filename!r}: {e}\n")
50 spclasses = list(iter_spider_classes(module))
51 if not spclasses:
52 raise UsageError(f"No spider found in file: {filename}\n")
53 spidercls = spclasses.pop()
54
55 self.crawler_process.crawl(spidercls, **opts.spargs)
56 self.crawler_process.start()
57
58 if self.crawler_process.bootstrap_failed:
59 self.exitcode = 1
60
[end of scrapy/commands/runspider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/commands/runspider.py b/scrapy/commands/runspider.py
--- a/scrapy/commands/runspider.py
+++ b/scrapy/commands/runspider.py
@@ -11,7 +11,7 @@
abspath = os.path.abspath(filepath)
dirname, file = os.path.split(abspath)
fname, fext = os.path.splitext(file)
- if fext != '.py':
+ if fext not in ('.py', '.pyw'):
raise ValueError(f"Not a Python source file: {abspath}")
if dirname:
sys.path = [dirname] + sys.path
| {"golden_diff": "diff --git a/scrapy/commands/runspider.py b/scrapy/commands/runspider.py\n--- a/scrapy/commands/runspider.py\n+++ b/scrapy/commands/runspider.py\n@@ -11,7 +11,7 @@\n abspath = os.path.abspath(filepath)\n dirname, file = os.path.split(abspath)\n fname, fext = os.path.splitext(file)\n- if fext != '.py':\n+ if fext not in ('.py', '.pyw'):\n raise ValueError(f\"Not a Python source file: {abspath}\")\n if dirname:\n sys.path = [dirname] + sys.path\n", "issue": "Allow run pyw scripts\nscrapy/commands/runspider.py check for Python script source but it fails to allow .pyw files.\r\nCheck at row 14 is:\r\n` if fext != '.py':`\r\nbut it should be:\r\n` if fext != '.py' and fext != '.pyw':`\n", "before_files": [{"content": "import sys\nimport os\nfrom importlib import import_module\n\nfrom scrapy.utils.spider import iter_spider_classes\nfrom scrapy.exceptions import UsageError\nfrom scrapy.commands import BaseRunSpiderCommand\n\n\ndef _import_file(filepath):\n abspath = os.path.abspath(filepath)\n dirname, file = os.path.split(abspath)\n fname, fext = os.path.splitext(file)\n if fext != '.py':\n raise ValueError(f\"Not a Python source file: {abspath}\")\n if dirname:\n sys.path = [dirname] + sys.path\n try:\n module = import_module(fname)\n finally:\n if dirname:\n sys.path.pop(0)\n return module\n\n\nclass Command(BaseRunSpiderCommand):\n\n requires_project = False\n default_settings = {'SPIDER_LOADER_WARN_ONLY': True}\n\n def syntax(self):\n return \"[options] <spider_file>\"\n\n def short_desc(self):\n return \"Run a self-contained spider (without creating a project)\"\n\n def long_desc(self):\n return \"Run the spider defined in the given file\"\n\n def run(self, args, opts):\n if len(args) != 1:\n raise UsageError()\n filename = args[0]\n if not os.path.exists(filename):\n raise UsageError(f\"File not found: {filename}\\n\")\n try:\n module = _import_file(filename)\n except (ImportError, ValueError) as e:\n raise UsageError(f\"Unable to load {filename!r}: {e}\\n\")\n spclasses = list(iter_spider_classes(module))\n if not spclasses:\n raise UsageError(f\"No spider found in file: {filename}\\n\")\n spidercls = spclasses.pop()\n\n self.crawler_process.crawl(spidercls, **opts.spargs)\n self.crawler_process.start()\n\n if self.crawler_process.bootstrap_failed:\n self.exitcode = 1\n", "path": "scrapy/commands/runspider.py"}]} | 1,130 | 141 |
gh_patches_debug_39235 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python client issues
- trailing `/` on `api_url` causes failure
- `create_rgd_client` silently failing if `api_url` is bad
@mvandenburgh, would you please look into these
</issue>
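A rough sketch of the first guard being asked for, kept outside the real client (the helper name is hypothetical):

```python
def normalize_api_url(api_url: str) -> str:
    # Tolerate a trailing slash so "https://host/api/" behaves like "https://host/api".
    return api_url.rstrip('/')

assert normalize_api_url("https://example.com/api/") == "https://example.com/api"
assert normalize_api_url("https://example.com/api") == "https://example.com/api"
```

For the second symptom, the natural direction is to let unexpected HTTP errors propagate (for example via `resp.raise_for_status()`) instead of silently returning `None`, though the exact placement is a design choice for the maintainers.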
<code>
[start of django-rgd/client/rgd_client/client.py]
1 import getpass
2 import logging
3 import os
4 from typing import List, Optional, Type
5
6 import requests
7
8 from .plugin import CorePlugin
9 from .session import RgdClientSession, clone_session
10 from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API
11
12 logger = logging.getLogger(__name__)
13
14
15 class RgdClient:
16 def __init__(
17 self,
18 api_url: str = DEFAULT_RGD_API,
19 username: Optional[str] = None,
20 password: Optional[str] = None,
21 save: Optional[bool] = True,
22 ) -> None:
23 """
24 Initialize the base RGD Client.
25
26 Args:
27 api_url: The base url of the RGD API instance.
28 username: The username to authenticate to the instance with, if any.
29 password: The password associated with the provided username. If None, a prompt will be provided.
30 save: Whether or not to save the logged-in user's API key to disk for future use.
31
32 Returns:
33 A base RgdClient instance.
34 """
35 # Look for an API key in the environment. If it's not there, check username/password
36 api_key = _read_api_key(api_url=api_url, username=username, password=password)
37 if api_key is None:
38 if username is not None and password is None:
39 password = getpass.getpass()
40
41 # Get an API key for this user and save it to disk
42 if username and password:
43 api_key = _get_api_key(api_url, username, password, save)
44 if api_key is None:
45 logger.error(
46 'Failed to retrieve API key; are your username and password correct?'
47 )
48
49 self.session = RgdClientSession(base_url=api_url, auth_token=api_key)
50 self.rgd = CorePlugin(clone_session(self.session))
51
52 def clear_token(self):
53 """Delete a locally-stored API key."""
54 (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)
55
56
57 def _get_api_key(api_url: str, username: str, password: str, save: bool) -> Optional[str]:
58 """Get an RGD API Key for the given user from the server, and save it if requested."""
59 resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})
60 token = resp.json().get('token')
61 if token is None:
62 return None
63 if save:
64 API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)
65 with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:
66 fd.write(token)
67 return token
68
69
70 def _read_api_key(api_url: str, username: str = None, password: str = None) -> Optional[str]:
71 """
72 Retrieve an RGD API Key from the users environment.
73
74 This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.
75 If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.
76 """
77 token = os.getenv('RGD_API_TOKEN', None)
78 if token is not None:
79 return token
80
81 try:
82 # read the first line of the text file at ~/.rgd/token
83 with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
84 api_key = fd.readline().strip()
85 except FileNotFoundError:
86 return None
87
88 # Make sure API key works by hitting a protected endpoint
89 resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})
90
91 # If it doesn't, try to get a new one and save it to ~/.rgd/token, as the current one is corrupted
92 if resp.status_code == 401:
93 logger.error('API key is invalid.')
94 # If username + password were provided, try to get a new API key with them
95 if username is not None and password is not None:
96 logger.info('Attempting to fetch a new API key...')
97 api_key = _get_api_key(api_url, username, password, save=True)
98 if api_key is not None:
99 logger.info('Succeeded.')
100 return api_key
101 else:
102 logger.error('Provide your username and password next time to fetch a new one.')
103 return None
104
105 return api_key
106
107
108 def create_rgd_client(
109 api_url: str = DEFAULT_RGD_API,
110 username: Optional[str] = None,
111 password: Optional[str] = None,
112 save: Optional[bool] = True,
113 extra_plugins: Optional[List[Type]] = None,
114 ):
115 # Avoid circular import
116 from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances
117
118 # Create initial client
119 client = RgdClient(api_url, username, password, save)
120
121 # Perform plugin initialization
122 plugin_classes = _plugin_classes(extra_plugins=extra_plugins)
123 plugin_instances = _plugin_instances(client, plugin_classes)
124 _inject_plugin_deps(plugin_instances)
125
126 return client
127
[end of django-rgd/client/rgd_client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django-rgd/client/rgd_client/client.py b/django-rgd/client/rgd_client/client.py
--- a/django-rgd/client/rgd_client/client.py
+++ b/django-rgd/client/rgd_client/client.py
@@ -74,16 +74,16 @@
This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.
If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.
"""
- token = os.getenv('RGD_API_TOKEN', None)
- if token is not None:
- return token
-
- try:
- # read the first line of the text file at ~/.rgd/token
- with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
- api_key = fd.readline().strip()
- except FileNotFoundError:
- return None
+ api_key = os.getenv('RGD_API_TOKEN', None)
+ save = False
+ if api_key is None:
+ try:
+ # read the first line of the text file at ~/.rgd/token
+ with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
+ api_key = fd.readline().strip()
+ save = True # save any new api key to disk
+ except FileNotFoundError:
+ return None
# Make sure API key works by hitting a protected endpoint
resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})
@@ -92,7 +92,10 @@
if resp.status_code == 401:
logger.error('API key is invalid.')
# If username + password were provided, try to get a new API key with them
- if username is not None and password is not None:
+ # Note we only do this if `save` is `True`, i.e. if the user originally attempted to
+ # instantiate the client with an API key located on disk. If they instead provided an env
+ # var, do not assume that they want a key saved and do not attempt to fetch a new one.
+ if save and username is not None and password is not None:
logger.info('Attempting to fetch a new API key...')
api_key = _get_api_key(api_url, username, password, save=True)
if api_key is not None:
@@ -102,6 +105,9 @@
logger.error('Provide your username and password next time to fetch a new one.')
return None
+ # If the response failed with an error status other than 401, raise an exception
+ resp.raise_for_status()
+
return api_key
@@ -115,6 +121,10 @@
# Avoid circular import
from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances
+ # Strip trailing slash
+ if api_url.endswith('/'):
+ api_url = api_url.rstrip('/')
+
# Create initial client
client = RgdClient(api_url, username, password, save)
| {"golden_diff": "diff --git a/django-rgd/client/rgd_client/client.py b/django-rgd/client/rgd_client/client.py\n--- a/django-rgd/client/rgd_client/client.py\n+++ b/django-rgd/client/rgd_client/client.py\n@@ -74,16 +74,16 @@\n This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.\n If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.\n \"\"\"\n- token = os.getenv('RGD_API_TOKEN', None)\n- if token is not None:\n- return token\n-\n- try:\n- # read the first line of the text file at ~/.rgd/token\n- with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n- api_key = fd.readline().strip()\n- except FileNotFoundError:\n- return None\n+ api_key = os.getenv('RGD_API_TOKEN', None)\n+ save = False\n+ if api_key is None:\n+ try:\n+ # read the first line of the text file at ~/.rgd/token\n+ with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n+ api_key = fd.readline().strip()\n+ save = True # save any new api key to disk\n+ except FileNotFoundError:\n+ return None\n \n # Make sure API key works by hitting a protected endpoint\n resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})\n@@ -92,7 +92,10 @@\n if resp.status_code == 401:\n logger.error('API key is invalid.')\n # If username + password were provided, try to get a new API key with them\n- if username is not None and password is not None:\n+ # Note we only do this if `save` is `True`, i.e. if the user originally attempted to\n+ # instantiate the client with an API key located on disk. If they instead provided an env\n+ # var, do not assume that they want a key saved and do not attempt to fetch a new one.\n+ if save and username is not None and password is not None:\n logger.info('Attempting to fetch a new API key...')\n api_key = _get_api_key(api_url, username, password, save=True)\n if api_key is not None:\n@@ -102,6 +105,9 @@\n logger.error('Provide your username and password next time to fetch a new one.')\n return None\n \n+ # If the response failed with an error status other than 401, raise an exception\n+ resp.raise_for_status()\n+\n return api_key\n \n \n@@ -115,6 +121,10 @@\n # Avoid circular import\n from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances\n \n+ # Strip trailing slash\n+ if api_url.endswith('/'):\n+ api_url = api_url.rstrip('/')\n+\n # Create initial client\n client = RgdClient(api_url, username, password, save)\n", "issue": "Python client issues\n- trailing `/` on `api_url` causes failure\r\n- `create_rgd_client` silently failing if `api_url` is bad\r\n\r\n@mvandenburgh, would you please look into these\n", "before_files": [{"content": "import getpass\nimport logging\nimport os\nfrom typing import List, Optional, Type\n\nimport requests\n\nfrom .plugin import CorePlugin\nfrom .session import RgdClientSession, clone_session\nfrom .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API\n\nlogger = logging.getLogger(__name__)\n\n\nclass RgdClient:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n save: Optional[bool] = True,\n ) -> None:\n \"\"\"\n Initialize the base RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. 
If None, a prompt will be provided.\n save: Whether or not to save the logged-in user's API key to disk for future use.\n\n Returns:\n A base RgdClient instance.\n \"\"\"\n # Look for an API key in the environment. If it's not there, check username/password\n api_key = _read_api_key(api_url=api_url, username=username, password=password)\n if api_key is None:\n if username is not None and password is None:\n password = getpass.getpass()\n\n # Get an API key for this user and save it to disk\n if username and password:\n api_key = _get_api_key(api_url, username, password, save)\n if api_key is None:\n logger.error(\n 'Failed to retrieve API key; are your username and password correct?'\n )\n\n self.session = RgdClientSession(base_url=api_url, auth_token=api_key)\n self.rgd = CorePlugin(clone_session(self.session))\n\n def clear_token(self):\n \"\"\"Delete a locally-stored API key.\"\"\"\n (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)\n\n\ndef _get_api_key(api_url: str, username: str, password: str, save: bool) -> Optional[str]:\n \"\"\"Get an RGD API Key for the given user from the server, and save it if requested.\"\"\"\n resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})\n token = resp.json().get('token')\n if token is None:\n return None\n if save:\n API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:\n fd.write(token)\n return token\n\n\ndef _read_api_key(api_url: str, username: str = None, password: str = None) -> Optional[str]:\n \"\"\"\n Retrieve an RGD API Key from the users environment.\n\n This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.\n If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.\n \"\"\"\n token = os.getenv('RGD_API_TOKEN', None)\n if token is not None:\n return token\n\n try:\n # read the first line of the text file at ~/.rgd/token\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n api_key = fd.readline().strip()\n except FileNotFoundError:\n return None\n\n # Make sure API key works by hitting a protected endpoint\n resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})\n\n # If it doesn't, try to get a new one and save it to ~/.rgd/token, as the current one is corrupted\n if resp.status_code == 401:\n logger.error('API key is invalid.')\n # If username + password were provided, try to get a new API key with them\n if username is not None and password is not None:\n logger.info('Attempting to fetch a new API key...')\n api_key = _get_api_key(api_url, username, password, save=True)\n if api_key is not None:\n logger.info('Succeeded.')\n return api_key\n else:\n logger.error('Provide your username and password next time to fetch a new one.')\n return None\n\n return api_key\n\n\ndef create_rgd_client(\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n save: Optional[bool] = True,\n extra_plugins: Optional[List[Type]] = None,\n):\n # Avoid circular import\n from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances\n\n # Create initial client\n client = RgdClient(api_url, username, password, save)\n\n # Perform plugin initialization\n plugin_classes = _plugin_classes(extra_plugins=extra_plugins)\n plugin_instances = _plugin_instances(client, plugin_classes)\n _inject_plugin_deps(plugin_instances)\n\n return client\n", "path": 
"django-rgd/client/rgd_client/client.py"}]} | 1,958 | 700 |
gh_patches_debug_5872 | rasdani/github-patches | git_diff | gammapy__gammapy-3695 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warning incorrectly raised by SpectrumDatasetMaker.make_exposure with use_region_center=True
**Gammapy version**
v0.19
**Bug description**
If using `SpectrumDatasetMaker.make_exposure` with point-like IRFs, a warning is incorrectly raised if the option `use_region_center` is set to True.
See this line of code:
https://github.com/gammapy/gammapy/blob/ba7d377bf48bd53d268d2dc14be1c1eb013a1e42/gammapy/makers/spectrum.py#L67
**Expected behavior**
No warning should be raised.
**To Reproduce**
**Other information**
</issue>
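A minimal, self-contained sketch of the intended guard; the helper function is invented for illustration, and the real change belongs on the line linked above:

```python
import logging

log = logging.getLogger(__name__)

def warn_if_inaccurate(is_pointlike: bool, use_region_center: bool) -> None:
    # Only warn for the combination that is actually problematic.
    if is_pointlike and not use_region_center:
        log.warning(
            "MapMaker: use_region_center=False should not be used with point-like IRF. "
            "Results are likely inaccurate."
        )

warn_if_inaccurate(is_pointlike=True, use_region_center=True)   # expected: no warning
warn_if_inaccurate(is_pointlike=True, use_region_center=False)  # expected: warning
```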
<code>
[start of gammapy/makers/spectrum.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import logging
3 from regions import CircleSkyRegion
4 from .map import MapDatasetMaker
5
6 __all__ = ["SpectrumDatasetMaker"]
7
8 log = logging.getLogger(__name__)
9
10
11 class SpectrumDatasetMaker(MapDatasetMaker):
12 """Make spectrum for a single IACT observation.
13
14 The irfs and background are computed at a single fixed offset,
15 which is recommend only for point-sources.
16
17 Parameters
18 ----------
19 selection : list
20 List of str, selecting which maps to make.
21 Available: 'counts', 'exposure', 'background', 'edisp'
22 By default, all spectra are made.
23 containment_correction : bool
24 Apply containment correction for point sources and circular on regions.
25 background_oversampling : int
26 Background evaluation oversampling factor in energy.
27 use_region_center : bool
28 Approximate the IRFs by the value at the center of the region
29 """
30
31 tag = "SpectrumDatasetMaker"
32 available_selection = ["counts", "background", "exposure", "edisp"]
33
34 def __init__(
35 self,
36 selection=None,
37 containment_correction=False,
38 background_oversampling=None,
39 use_region_center=True,
40 ):
41 self.containment_correction = containment_correction
42 self.use_region_center = use_region_center
43 super().__init__(
44 selection=selection, background_oversampling=background_oversampling
45 )
46
47 def make_exposure(self, geom, observation):
48 """Make exposure.
49
50 Parameters
51 ----------
52 geom : `~gammapy.maps.RegionGeom`
53 Reference map geom.
54 observation: `~gammapy.data.Observation`
55 Observation to compute effective area for.
56
57 Returns
58 -------
59 exposure : `~gammapy.maps.RegionNDMap`
60 Exposure map.
61 """
62 exposure = super().make_exposure(
63 geom, observation, use_region_center=self.use_region_center
64 )
65
66 is_pointlike = exposure.meta.get("is_pointlike", False)
67 if is_pointlike:
68 log.warning(
69 "MapMaker: use_region_center=False should not be used with point-like IRF. "
70 "Results are likely inaccurate."
71 )
72
73 if self.containment_correction:
74 if is_pointlike:
75 raise ValueError(
76 "Cannot apply containment correction for point-like IRF."
77 )
78
79 if not isinstance(geom.region, CircleSkyRegion):
80 raise TypeError(
81 "Containment correction only supported for circular regions."
82 )
83 offset = geom.separation(observation.pointing_radec)
84 containment = observation.psf.containment(
85 rad=geom.region.radius,
86 offset=offset,
87 energy_true=geom.axes["energy_true"].center,
88 )
89 exposure.quantity *= containment.reshape(geom.data_shape)
90
91 return exposure
92
[end of gammapy/makers/spectrum.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/makers/spectrum.py b/gammapy/makers/spectrum.py
--- a/gammapy/makers/spectrum.py
+++ b/gammapy/makers/spectrum.py
@@ -64,7 +64,7 @@
)
is_pointlike = exposure.meta.get("is_pointlike", False)
- if is_pointlike:
+ if is_pointlike and self.use_region_center is False:
log.warning(
"MapMaker: use_region_center=False should not be used with point-like IRF. "
"Results are likely inaccurate."
| {"golden_diff": "diff --git a/gammapy/makers/spectrum.py b/gammapy/makers/spectrum.py\n--- a/gammapy/makers/spectrum.py\n+++ b/gammapy/makers/spectrum.py\n@@ -64,7 +64,7 @@\n )\n \n is_pointlike = exposure.meta.get(\"is_pointlike\", False)\n- if is_pointlike:\n+ if is_pointlike and self.use_region_center is False:\n log.warning(\n \"MapMaker: use_region_center=False should not be used with point-like IRF. \"\n \"Results are likely inaccurate.\"\n", "issue": "Warning incorrectly raised by SpectrumDatasetMaker.make_exposure with use_region_center=True\n**Gammapy version**\r\nv0.19\r\n\r\n**Bug description**\r\nIf using `SpectrumDatasetMaker.make_exposure` with pointlike IRFs, a warning is incorrectly raised if the option `user_region_center` is set to True.\r\n\r\nSee this lien of code:\r\nhttps://github.com/gammapy/gammapy/blob/ba7d377bf48bd53d268d2dc14be1c1eb013a1e42/gammapy/makers/spectrum.py#L67\r\n\r\n**Expected behavior**\r\nNo warning should be raised.\r\n\r\n**To Reproduce**\r\n\r\n**Other information**\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nfrom regions import CircleSkyRegion\nfrom .map import MapDatasetMaker\n\n__all__ = [\"SpectrumDatasetMaker\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass SpectrumDatasetMaker(MapDatasetMaker):\n \"\"\"Make spectrum for a single IACT observation.\n\n The irfs and background are computed at a single fixed offset,\n which is recommend only for point-sources.\n\n Parameters\n ----------\n selection : list\n List of str, selecting which maps to make.\n Available: 'counts', 'exposure', 'background', 'edisp'\n By default, all spectra are made.\n containment_correction : bool\n Apply containment correction for point sources and circular on regions.\n background_oversampling : int\n Background evaluation oversampling factor in energy.\n use_region_center : bool\n Approximate the IRFs by the value at the center of the region\n \"\"\"\n\n tag = \"SpectrumDatasetMaker\"\n available_selection = [\"counts\", \"background\", \"exposure\", \"edisp\"]\n\n def __init__(\n self,\n selection=None,\n containment_correction=False,\n background_oversampling=None,\n use_region_center=True,\n ):\n self.containment_correction = containment_correction\n self.use_region_center = use_region_center\n super().__init__(\n selection=selection, background_oversampling=background_oversampling\n )\n\n def make_exposure(self, geom, observation):\n \"\"\"Make exposure.\n\n Parameters\n ----------\n geom : `~gammapy.maps.RegionGeom`\n Reference map geom.\n observation: `~gammapy.data.Observation`\n Observation to compute effective area for.\n\n Returns\n -------\n exposure : `~gammapy.maps.RegionNDMap`\n Exposure map.\n \"\"\"\n exposure = super().make_exposure(\n geom, observation, use_region_center=self.use_region_center\n )\n\n is_pointlike = exposure.meta.get(\"is_pointlike\", False)\n if is_pointlike:\n log.warning(\n \"MapMaker: use_region_center=False should not be used with point-like IRF. 
\"\n \"Results are likely inaccurate.\"\n )\n\n if self.containment_correction:\n if is_pointlike:\n raise ValueError(\n \"Cannot apply containment correction for point-like IRF.\"\n )\n\n if not isinstance(geom.region, CircleSkyRegion):\n raise TypeError(\n \"Containment correction only supported for circular regions.\"\n )\n offset = geom.separation(observation.pointing_radec)\n containment = observation.psf.containment(\n rad=geom.region.radius,\n offset=offset,\n energy_true=geom.axes[\"energy_true\"].center,\n )\n exposure.quantity *= containment.reshape(geom.data_shape)\n\n return exposure\n", "path": "gammapy/makers/spectrum.py"}]} | 1,475 | 131 |
gh_patches_debug_54333 | rasdani/github-patches | git_diff | keras-team__keras-11960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Suggesting keras.utils.*_utils packages should not be part of the official API
In general, all `keras.utils.*_utils.*` functions and classes that are documented on keras.io are available directly in `keras.utils` and documented as such. However there are a few discrepancies:
* `keras.utils.vis_utils.model_to_dot` is not available in `keras.utils`.
* `keras.utils.np_utils.to_categorical` sometimes appears in the documentation, instead of `keras.utils.to_categorical`.
* `keras.utils.io_utils.HDF5Matrix` sometimes appears in the documentation, instead of `keras.utils.HDF5Matrix`.
This introduces some confusion as to what is part of the official Keras API or not: in particular, are `keras.utils.*_utils` packages part of the Keras API or not? Possibly as a result of this confusion, tf.keras is not consistent with keras-team/keras, as it has no `tf.keras.utils.*_utils` packages, and is missing `model_to_dot` altogether. Arguably this is a tf.keras issue, but the fact that only three utility functions are placed in `keras.utils.*_utils` packages is surprising IMHO.
I will propose a PR to fix this by:
* Adding `model_to_dot` to `keras.utils`
* Fixing the documentation to remove all references to `keras.utils.*_utils` packages.
</issue>
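To make the proposal concrete, this is roughly what it means for users (a sketch; only the longer `vis_utils` path currently works for `model_to_dot`, which is the inconsistency being described):

```python
# Works today, although keras.io sometimes documents the longer module paths:
from keras.utils import to_categorical, HDF5Matrix, plot_model
from keras.utils.vis_utils import model_to_dot

# Proposed: expose model_to_dot at the same level as plot_model, e.g.
# from keras.utils import model_to_dot
```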
<code>
[start of keras/utils/__init__.py]
1 from __future__ import absolute_import
2 from . import np_utils
3 from . import generic_utils
4 from . import data_utils
5 from . import io_utils
6 from . import conv_utils
7
8 # Globally-importable utils.
9 from .io_utils import HDF5Matrix
10 from .io_utils import H5Dict
11 from .data_utils import get_file
12 from .data_utils import Sequence
13 from .data_utils import GeneratorEnqueuer
14 from .data_utils import OrderedEnqueuer
15 from .generic_utils import CustomObjectScope
16 from .generic_utils import custom_object_scope
17 from .generic_utils import get_custom_objects
18 from .generic_utils import serialize_keras_object
19 from .generic_utils import deserialize_keras_object
20 from .generic_utils import Progbar
21 from .layer_utils import convert_all_kernels_in_model
22 from .layer_utils import get_source_inputs
23 from .layer_utils import print_summary
24 from .vis_utils import plot_model
25 from .np_utils import to_categorical
26 from .np_utils import normalize
27 from .multi_gpu_utils import multi_gpu_model
28
[end of keras/utils/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras/utils/__init__.py b/keras/utils/__init__.py
--- a/keras/utils/__init__.py
+++ b/keras/utils/__init__.py
@@ -21,6 +21,7 @@
from .layer_utils import convert_all_kernels_in_model
from .layer_utils import get_source_inputs
from .layer_utils import print_summary
+from .vis_utils import model_to_dot
from .vis_utils import plot_model
from .np_utils import to_categorical
from .np_utils import normalize
| {"golden_diff": "diff --git a/keras/utils/__init__.py b/keras/utils/__init__.py\n--- a/keras/utils/__init__.py\n+++ b/keras/utils/__init__.py\n@@ -21,6 +21,7 @@\n from .layer_utils import convert_all_kernels_in_model\n from .layer_utils import get_source_inputs\n from .layer_utils import print_summary\n+from .vis_utils import model_to_dot\n from .vis_utils import plot_model\n from .np_utils import to_categorical\n from .np_utils import normalize\n", "issue": "Suggesting keras.utils.*_utils packages should not be part of the official API\nIn general, all `keras.utils.*_utils.*` functions and classes that are documented on keras.io are available directly in `keras.utils` and documented as such. However there are a few discrepancies:\r\n* `keras.utils.vis_utils.model_to_dot` is not available in `keras.utils`.\r\n* `keras.utils.np_utils.to_categorical` sometimes appears in the documentation, instead of `keras.utils.to_categorical`.\r\n* `keras.utils.io_utils.HDF5Matrix` sometimes appears in the documentation, instead of `keras.utils.HDF5Matrix`.\r\n\r\nThis introduces some confusion as to what is part of the official Keras API or not: in particular, are `keras.utils.*_utils` packages part of the Keras API or not? Possibly as a result of this confusion, tf.keras is not consistent with keras-team/keras, as it has no `tf.keras.utils.*_utils` packages, and is missing `model_to_dot` altogether. Arguably this is a tf.keras issue, but the fact that only three utility functions are placed in `keras.utils.*_utils` packages is surprising IMHO.\r\n\r\nI will propose a PR to fix this by:\r\n* Adding `model_to_dot` to `keras.utils`\r\n* Fixing the documentation to remove all references to `keras.utils.*_utils` packages.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom . import np_utils\nfrom . import generic_utils\nfrom . import data_utils\nfrom . import io_utils\nfrom . import conv_utils\n\n# Globally-importable utils.\nfrom .io_utils import HDF5Matrix\nfrom .io_utils import H5Dict\nfrom .data_utils import get_file\nfrom .data_utils import Sequence\nfrom .data_utils import GeneratorEnqueuer\nfrom .data_utils import OrderedEnqueuer\nfrom .generic_utils import CustomObjectScope\nfrom .generic_utils import custom_object_scope\nfrom .generic_utils import get_custom_objects\nfrom .generic_utils import serialize_keras_object\nfrom .generic_utils import deserialize_keras_object\nfrom .generic_utils import Progbar\nfrom .layer_utils import convert_all_kernels_in_model\nfrom .layer_utils import get_source_inputs\nfrom .layer_utils import print_summary\nfrom .vis_utils import plot_model\nfrom .np_utils import to_categorical\nfrom .np_utils import normalize\nfrom .multi_gpu_utils import multi_gpu_model\n", "path": "keras/utils/__init__.py"}]} | 1,102 | 119 |
gh_patches_debug_39833 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5832 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Shell spider doesn't return any results for Bulgaria
There are around 100 Shell gas stations in Bulgaria, but none of them appear in the latest spider results. They are listed on Shell's Bulgarian website (https://www.shell.bg/motorists/shell-station-locator.html).
</issue>
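One way to reproduce the report is to query the underlying geoapp.me API directly for a bounding box around Bulgaria. The helper below is a sketch; the store-finder key (the `x` in `x.geoapp.me`) is whatever the Shell spider configures, which is not shown in this excerpt:

```python
import requests

def bulgaria_response(key: str) -> dict:
    # Bounding box roughly covering Bulgaria; URL pattern copied from the spider below.
    # Large boxes may come back as "clusters" rather than "locations" (see parse_bounding_box).
    sw, ne = (41.2, 22.3), (44.2, 28.6)
    url = (f"https://{key}.geoapp.me/api/v2/locations/within_bounds"
           f"?sw[]={sw[0]}&sw[]={sw[1]}&ne[]={ne[0]}&ne[]={ne[1]}")
    return requests.get(url).json()
```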
<code>
[start of locations/storefinders/geo_me.py]
1 from scrapy import Spider
2 from scrapy.http import JsonRequest
3 from scrapy.signals import spider_idle
4
5 from locations.dict_parser import DictParser
6 from locations.hours import DAYS, DAYS_EN, OpeningHours, day_range
7 from locations.items import Feature
8
9 # To use this store finder, specify key = x where x is the unique
10 # identifier of the store finder in domain x.geoapp.me.
11 #
12 # It is likely there are additional fields of data worth extracting
13 # from the store finder. These should be added by overriding the
14 # parse_item function. Two parameters are passed, item (and ATP
15 # "Feature" class) and location (a dict which is returned from the
16 # store locator JSON response for a particular location).
17 #
18 # This spider has two crawling steps which are executed in order:
19 # 1. Obtain list of all locations by using the API to do bounding
20 # box searches across the world. The only thing of interest
21 # returned for each location in this step is a unique identifier
22 # and coordinates.
23 # 2. Iterating through the all locations list produced by step (1),
24 # request the nearest 50 (API limit) locations for each location
25 # in the all locations list. Remove from the all locations list
26 #    any locations that were returned with a nearest location
27 # search. Repeat until the all locations list is empty. The
28 # nearest location search returns all details of a location.
29 #
30 # Note that due to the way the two crawling steps are required to
31 # operate, numerous duplicate locations will be dropped during
32 # extraction. It is common for locations to be present in more than
33 # one nearby cluster of locations that the "nearest to" search
34 # iterates through.
35
36
37 class GeoMeSpider(Spider):
38 key = ""
39 api_version = "2"
40 url_within_bounds_template = "https://{}.geoapp.me/api/v{}/locations/within_bounds?sw[]={}&sw[]={}&ne[]={}&ne[]={}"
41 url_nearest_to_template = "https://{}.geoapp.me/api/v{}/locations/nearest_to?lat={}&lng={}&limit=50"
42 locations_found = {}
43
44 def start_requests(self):
45 self.crawler.signals.connect(self.start_location_requests, signal=spider_idle)
46 yield JsonRequest(
47 url=self.url_within_bounds_template.format(self.key, self.api_version, -90, -180, 90, 180),
48 callback=self.parse_bounding_box,
49 )
50
51 def parse_bounding_box(self, response):
52 for cluster in response.json().get("clusters", []):
53 if b := cluster.get("bounds"):
54 yield JsonRequest(
55 url=self.url_within_bounds_template.format(
56 self.key, self.api_version, b["sw"][0], b["sw"][1], b["ne"][0], b["ne"][1]
57 ),
58 callback=self.parse_bounding_box,
59 )
60 for location in response.json().get("locations", []):
61 self.locations_found[location["id"]] = (float(location["lat"]), float(location["lng"]))
62
63 def start_location_requests(self):
64 self.crawler.signals.disconnect(self.start_location_requests, signal=spider_idle)
65 if len(self.locations_found) > 0:
66 first_search_location = self.locations_found.popitem()
67 first_request = JsonRequest(
68 url=self.url_nearest_to_template.format(
69 self.key, self.api_version, first_search_location[1][0], first_search_location[1][1]
70 ),
71 callback=self.parse_locations,
72 )
73 self.crawler.engine.crawl(first_request)
74
75 def parse_locations(self, response):
76 for location in response.json()["locations"]:
77 if location.get("inactive"):
78 continue
79 location["street_address"] = location.pop("address")
80 item = DictParser.parse(location)
81 self.extract_hours(item, location)
82 yield from self.parse_item(item, location) or []
83
84 # Remove found location from the list of locations which
85 # are still waiting to be found.
86 if self.locations_found.get(location["id"]):
87 self.locations_found.pop(location["id"])
88
89 # Get the next location to do a "nearest to" search from.
90 if len(self.locations_found) > 0:
91 next_search_location = self.locations_found.popitem()
92 yield JsonRequest(
93 url=self.url_nearest_to_template.format(
94 self.key, self.api_version, next_search_location[1][0], next_search_location[1][1]
95 ),
96 callback=self.parse_locations,
97 )
98
99 def extract_hours(self, item, location):
100 item["opening_hours"] = OpeningHours()
101 if location.get("open_status") == "twenty_four_hour":
102 item["opening_hours"].add_days_range(DAYS, "00:00", "23:59")
103 return
104 open_hours = location.get("opening_hours")
105 if not open_hours:
106 return
107 for spec in open_hours:
108 days = spec["days"]
109 day_from = day_to = days[0]
110 if len(days) == 2:
111 day_to = days[1]
112 for day in day_range(DAYS_EN[day_from], DAYS_EN[day_to]):
113 for hours in spec["hours"]:
114 item["opening_hours"].add_range(day, hours[0], hours[1])
115
116 def parse_item(self, item: Feature, location: dict, **kwargs):
117 yield item
118
[end of locations/storefinders/geo_me.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/storefinders/geo_me.py b/locations/storefinders/geo_me.py
--- a/locations/storefinders/geo_me.py
+++ b/locations/storefinders/geo_me.py
@@ -1,3 +1,5 @@
+import random
+
from scrapy import Spider
from scrapy.http import JsonRequest
from scrapy.signals import spider_idle
@@ -62,41 +64,42 @@
def start_location_requests(self):
self.crawler.signals.disconnect(self.start_location_requests, signal=spider_idle)
- if len(self.locations_found) > 0:
- first_search_location = self.locations_found.popitem()
- first_request = JsonRequest(
- url=self.url_nearest_to_template.format(
- self.key, self.api_version, first_search_location[1][0], first_search_location[1][1]
- ),
- callback=self.parse_locations,
- )
- self.crawler.engine.crawl(first_request)
+ self.crawler.engine.crawl(self.get_next_location())
def parse_locations(self, response):
for location in response.json()["locations"]:
+ # Remove found location from the list of locations which
+ # are still waiting to be found.
+ if self.locations_found.get(location["id"]):
+ self.locations_found.pop(location["id"])
+
if location.get("inactive"):
continue
+
location["street_address"] = location.pop("address")
item = DictParser.parse(location)
self.extract_hours(item, location)
yield from self.parse_item(item, location) or []
- # Remove found location from the list of locations which
- # are still waiting to be found.
- if self.locations_found.get(location["id"]):
- self.locations_found.pop(location["id"])
-
# Get the next location to do a "nearest to" search from.
- if len(self.locations_found) > 0:
- next_search_location = self.locations_found.popitem()
- yield JsonRequest(
- url=self.url_nearest_to_template.format(
- self.key, self.api_version, next_search_location[1][0], next_search_location[1][1]
- ),
- callback=self.parse_locations,
- )
+ yield self.get_next_location()
+
+ def get_next_location(self) -> JsonRequest:
+ if len(self.locations_found) == 0:
+ return
+ next_search_location_id = random.choice(list(self.locations_found))
+ next_search_location_coords = self.locations_found[next_search_location_id]
+ self.locations_found.pop(next_search_location_id)
+ return JsonRequest(
+ url=self.url_nearest_to_template.format(
+ self.key, self.api_version, next_search_location_coords[0], next_search_location_coords[1]
+ ),
+ callback=self.parse_locations,
+ dont_filter=True,
+ )
- def extract_hours(self, item, location):
+ @staticmethod
+ def extract_hours(item: Feature, location: dict):
item["opening_hours"] = OpeningHours()
if location.get("open_status") == "twenty_four_hour":
item["opening_hours"].add_days_range(DAYS, "00:00", "23:59")
| {"golden_diff": "diff --git a/locations/storefinders/geo_me.py b/locations/storefinders/geo_me.py\n--- a/locations/storefinders/geo_me.py\n+++ b/locations/storefinders/geo_me.py\n@@ -1,3 +1,5 @@\n+import random\n+\n from scrapy import Spider\n from scrapy.http import JsonRequest\n from scrapy.signals import spider_idle\n@@ -62,41 +64,42 @@\n \n def start_location_requests(self):\n self.crawler.signals.disconnect(self.start_location_requests, signal=spider_idle)\n- if len(self.locations_found) > 0:\n- first_search_location = self.locations_found.popitem()\n- first_request = JsonRequest(\n- url=self.url_nearest_to_template.format(\n- self.key, self.api_version, first_search_location[1][0], first_search_location[1][1]\n- ),\n- callback=self.parse_locations,\n- )\n- self.crawler.engine.crawl(first_request)\n+ self.crawler.engine.crawl(self.get_next_location())\n \n def parse_locations(self, response):\n for location in response.json()[\"locations\"]:\n+ # Remove found location from the list of locations which\n+ # are still waiting to be found.\n+ if self.locations_found.get(location[\"id\"]):\n+ self.locations_found.pop(location[\"id\"])\n+\n if location.get(\"inactive\"):\n continue\n+\n location[\"street_address\"] = location.pop(\"address\")\n item = DictParser.parse(location)\n self.extract_hours(item, location)\n yield from self.parse_item(item, location) or []\n \n- # Remove found location from the list of locations which\n- # are still waiting to be found.\n- if self.locations_found.get(location[\"id\"]):\n- self.locations_found.pop(location[\"id\"])\n-\n # Get the next location to do a \"nearest to\" search from.\n- if len(self.locations_found) > 0:\n- next_search_location = self.locations_found.popitem()\n- yield JsonRequest(\n- url=self.url_nearest_to_template.format(\n- self.key, self.api_version, next_search_location[1][0], next_search_location[1][1]\n- ),\n- callback=self.parse_locations,\n- )\n+ yield self.get_next_location()\n+\n+ def get_next_location(self) -> JsonRequest:\n+ if len(self.locations_found) == 0:\n+ return\n+ next_search_location_id = random.choice(list(self.locations_found))\n+ next_search_location_coords = self.locations_found[next_search_location_id]\n+ self.locations_found.pop(next_search_location_id)\n+ return JsonRequest(\n+ url=self.url_nearest_to_template.format(\n+ self.key, self.api_version, next_search_location_coords[0], next_search_location_coords[1]\n+ ),\n+ callback=self.parse_locations,\n+ dont_filter=True,\n+ )\n \n- def extract_hours(self, item, location):\n+ @staticmethod\n+ def extract_hours(item: Feature, location: dict):\n item[\"opening_hours\"] = OpeningHours()\n if location.get(\"open_status\") == \"twenty_four_hour\":\n item[\"opening_hours\"].add_days_range(DAYS, \"00:00\", \"23:59\")\n", "issue": "Shell spider doesn't return any results for Bulgaria\nThere are around 100 Shell gas stations in Bulgaria but none of them are shown in the latest spider result. 
They are listed on their Bulgarian website (https://www.shell.bg/motorists/shell-station-locator.html).\n", "before_files": [{"content": "from scrapy import Spider\nfrom scrapy.http import JsonRequest\nfrom scrapy.signals import spider_idle\n\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, DAYS_EN, OpeningHours, day_range\nfrom locations.items import Feature\n\n# To use this store finder, specify key = x where x is the unique\n# identifier of the store finder in domain x.geoapp.me.\n#\n# It is likely there are additional fields of data worth extracting\n# from the store finder. These should be added by overriding the\n# parse_item function. Two parameters are passed, item (and ATP\n# \"Feature\" class) and location (a dict which is returned from the\n# store locator JSON response for a particular location).\n#\n# This spider has two crawling steps which are executed in order:\n# 1. Obtain list of all locations by using the API to do bounding\n# box searches across the world. The only thing of interest\n# returned for each location in this step is a unique identifier\n# and coordinates.\n# 2. Iterating through the all locations list produced by step (1),\n# request the nearest 50 (API limit) locations for each location\n# in the all locations list. Remove from the all locations list\n# and locations that were returned with a nearest location\n# search. Repeat until the all locations list is empty. The\n# nearest location search returns all details of a location.\n#\n# Note that due to the way the two crawling steps are required to\n# operate, numerous duplicate locations will be dropped during\n# extraction. It is common for locations to be present in more than\n# one nearby cluster of locations that the \"nearest to\" search\n# iterates through.\n\n\nclass GeoMeSpider(Spider):\n key = \"\"\n api_version = \"2\"\n url_within_bounds_template = \"https://{}.geoapp.me/api/v{}/locations/within_bounds?sw[]={}&sw[]={}&ne[]={}&ne[]={}\"\n url_nearest_to_template = \"https://{}.geoapp.me/api/v{}/locations/nearest_to?lat={}&lng={}&limit=50\"\n locations_found = {}\n\n def start_requests(self):\n self.crawler.signals.connect(self.start_location_requests, signal=spider_idle)\n yield JsonRequest(\n url=self.url_within_bounds_template.format(self.key, self.api_version, -90, -180, 90, 180),\n callback=self.parse_bounding_box,\n )\n\n def parse_bounding_box(self, response):\n for cluster in response.json().get(\"clusters\", []):\n if b := cluster.get(\"bounds\"):\n yield JsonRequest(\n url=self.url_within_bounds_template.format(\n self.key, self.api_version, b[\"sw\"][0], b[\"sw\"][1], b[\"ne\"][0], b[\"ne\"][1]\n ),\n callback=self.parse_bounding_box,\n )\n for location in response.json().get(\"locations\", []):\n self.locations_found[location[\"id\"]] = (float(location[\"lat\"]), float(location[\"lng\"]))\n\n def start_location_requests(self):\n self.crawler.signals.disconnect(self.start_location_requests, signal=spider_idle)\n if len(self.locations_found) > 0:\n first_search_location = self.locations_found.popitem()\n first_request = JsonRequest(\n url=self.url_nearest_to_template.format(\n self.key, self.api_version, first_search_location[1][0], first_search_location[1][1]\n ),\n callback=self.parse_locations,\n )\n self.crawler.engine.crawl(first_request)\n\n def parse_locations(self, response):\n for location in response.json()[\"locations\"]:\n if location.get(\"inactive\"):\n continue\n location[\"street_address\"] = location.pop(\"address\")\n item = 
DictParser.parse(location)\n self.extract_hours(item, location)\n yield from self.parse_item(item, location) or []\n\n # Remove found location from the list of locations which\n # are still waiting to be found.\n if self.locations_found.get(location[\"id\"]):\n self.locations_found.pop(location[\"id\"])\n\n # Get the next location to do a \"nearest to\" search from.\n if len(self.locations_found) > 0:\n next_search_location = self.locations_found.popitem()\n yield JsonRequest(\n url=self.url_nearest_to_template.format(\n self.key, self.api_version, next_search_location[1][0], next_search_location[1][1]\n ),\n callback=self.parse_locations,\n )\n\n def extract_hours(self, item, location):\n item[\"opening_hours\"] = OpeningHours()\n if location.get(\"open_status\") == \"twenty_four_hour\":\n item[\"opening_hours\"].add_days_range(DAYS, \"00:00\", \"23:59\")\n return\n open_hours = location.get(\"opening_hours\")\n if not open_hours:\n return\n for spec in open_hours:\n days = spec[\"days\"]\n day_from = day_to = days[0]\n if len(days) == 2:\n day_to = days[1]\n for day in day_range(DAYS_EN[day_from], DAYS_EN[day_to]):\n for hours in spec[\"hours\"]:\n item[\"opening_hours\"].add_range(day, hours[0], hours[1])\n\n def parse_item(self, item: Feature, location: dict, **kwargs):\n yield item\n", "path": "locations/storefinders/geo_me.py"}]} | 1,993 | 695 |
gh_patches_debug_403 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1740 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2 link limit on nav items
affects the footer, need more than 2 items
</issue>
<code>
[start of meinberlin/apps/cms/models/navigation_menues.py]
1 from django.db import models
2 from modelcluster.fields import ParentalKey
3 from modelcluster.models import ClusterableModel
4 from wagtail.admin import edit_handlers
5 from wagtail.core.models import Orderable
6 from wagtail.snippets.models import register_snippet
7
8
9 class MenuItem(models.Model):
10 title = models.CharField(max_length=255)
11 link_page = models.ForeignKey('wagtailcore.Page')
12
13 @property
14 def url(self):
15 return self.link_page.url
16
17 def __str__(self):
18 return self.title
19
20 panels = [
21 edit_handlers.FieldPanel('title'),
22 edit_handlers.PageChooserPanel('link_page')
23 ]
24
25
26 @register_snippet
27 class NavigationMenu(ClusterableModel):
28 title = models.CharField(max_length=255, null=False, blank=False)
29
30 def __str__(self):
31 return self.title
32
33 panels = [
34 edit_handlers.FieldPanel('title'),
35 edit_handlers.InlinePanel('items', max_num=2)
36 ]
37
38
39 class NavigationMenuItem(Orderable, MenuItem):
40 parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')
41
[end of meinberlin/apps/cms/models/navigation_menues.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/cms/models/navigation_menues.py b/meinberlin/apps/cms/models/navigation_menues.py
--- a/meinberlin/apps/cms/models/navigation_menues.py
+++ b/meinberlin/apps/cms/models/navigation_menues.py
@@ -32,7 +32,7 @@
panels = [
edit_handlers.FieldPanel('title'),
- edit_handlers.InlinePanel('items', max_num=2)
+ edit_handlers.InlinePanel('items')
]
| {"golden_diff": "diff --git a/meinberlin/apps/cms/models/navigation_menues.py b/meinberlin/apps/cms/models/navigation_menues.py\n--- a/meinberlin/apps/cms/models/navigation_menues.py\n+++ b/meinberlin/apps/cms/models/navigation_menues.py\n@@ -32,7 +32,7 @@\n \n panels = [\n edit_handlers.FieldPanel('title'),\n- edit_handlers.InlinePanel('items', max_num=2)\n+ edit_handlers.InlinePanel('items')\n ]\n", "issue": "2 link limit on nav items\naffects the footer, need more than 2 items \n", "before_files": [{"content": "from django.db import models\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.core.models import Orderable\nfrom wagtail.snippets.models import register_snippet\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items', max_num=2)\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n", "path": "meinberlin/apps/cms/models/navigation_menues.py"}]} | 885 | 113 |
gh_patches_debug_18165 | rasdani/github-patches | git_diff | Textualize__textual-4234 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Type warning with `AwaitComplete`
It seems that there is a type error when it comes to awaiting `AwaitComplete`. As an example, given this code:
```python
from textual.app import App
from textual.widgets import TabbedContent, TabPane
class AwaitableTypeWarningApp(App[None]):
async def on_mount(self) -> None:
await self.query_one(TabbedContent).add_pane(TabPane("Test"))
await self.query_one(TabbedContent).remove_pane("some-tab")
```
pyright reports:
```
/Users/davep/develop/python/textual-sandbox/await_type_warning.py
/Users/davep/develop/python/textual-sandbox/await_type_warning.py:7:15 - error: "AwaitComplete" is not awaitable
"AwaitComplete" is incompatible with protocol "Awaitable[_T_co@Awaitable]"
"__await__" is an incompatible type
Type "() -> Iterator[None]" cannot be assigned to type "() -> Generator[Any, None, _T_co@Awaitable]"
Function return type "Iterator[None]" is incompatible with type "Generator[Any, None, _T_co@Awaitable]"
"Iterator[None]" is incompatible with "Generator[Any, None, _T_co@Awaitable]" (reportGeneralTypeIssues)
/Users/davep/develop/python/textual-sandbox/await_type_warning.py:8:15 - error: "AwaitComplete" is not awaitable
"AwaitComplete" is incompatible with protocol "Awaitable[_T_co@Awaitable]"
"__await__" is an incompatible type
Type "() -> Iterator[None]" cannot be assigned to type "() -> Generator[Any, None, _T_co@Awaitable]"
Function return type "Iterator[None]" is incompatible with type "Generator[Any, None, _T_co@Awaitable]"
"Iterator[None]" is incompatible with "Generator[Any, None, _T_co@Awaitable]" (reportGeneralTypeIssues)
2 errors, 0 warnings, 0 informations
```
</issue>
<code>
[start of src/textual/await_complete.py]
1 from __future__ import annotations
2
3 from asyncio import Future, gather
4 from typing import Any, Coroutine, Iterator, TypeVar
5
6 import rich.repr
7
8 ReturnType = TypeVar("ReturnType")
9
10
11 @rich.repr.auto(angular=True)
12 class AwaitComplete:
13 """An 'optionally-awaitable' object."""
14
15 def __init__(self, *coroutines: Coroutine[Any, Any, Any]) -> None:
16 """Create an AwaitComplete.
17
18 Args:
19 coroutines: One or more coroutines to execute.
20 """
21 self.coroutines: tuple[Coroutine[Any, Any, Any], ...] = coroutines
22 self._future: Future = gather(*self.coroutines)
23
24 async def __call__(self) -> Any:
25 return await self
26
27 def __await__(self) -> Iterator[None]:
28 return self._future.__await__()
29
30 @property
31 def is_done(self) -> bool:
32 """Returns True if the task has completed."""
33 return self._future.done()
34
35 @property
36 def exception(self) -> BaseException | None:
37 """An exception if it occurred in any of the coroutines."""
38 if self._future.done():
39 return self._future.exception()
40 return None
41
42 @classmethod
43 def nothing(cls):
44 """Returns an already completed instance of AwaitComplete."""
45 instance = cls()
46 instance._future = Future()
47 instance._future.set_result(None) # Mark it as completed with no result
48 return instance
49
[end of src/textual/await_complete.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/await_complete.py b/src/textual/await_complete.py
--- a/src/textual/await_complete.py
+++ b/src/textual/await_complete.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from asyncio import Future, gather
-from typing import Any, Coroutine, Iterator, TypeVar
+from typing import Any, Coroutine, Generator, TypeVar
import rich.repr
@@ -19,12 +19,12 @@
coroutines: One or more coroutines to execute.
"""
self.coroutines: tuple[Coroutine[Any, Any, Any], ...] = coroutines
- self._future: Future = gather(*self.coroutines)
+ self._future: Future[Any] = gather(*self.coroutines)
async def __call__(self) -> Any:
return await self
- def __await__(self) -> Iterator[None]:
+ def __await__(self) -> Generator[Any, None, Any]:
return self._future.__await__()
@property
| {"golden_diff": "diff --git a/src/textual/await_complete.py b/src/textual/await_complete.py\n--- a/src/textual/await_complete.py\n+++ b/src/textual/await_complete.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n from asyncio import Future, gather\n-from typing import Any, Coroutine, Iterator, TypeVar\n+from typing import Any, Coroutine, Generator, TypeVar\n \n import rich.repr\n \n@@ -19,12 +19,12 @@\n coroutines: One or more coroutines to execute.\n \"\"\"\n self.coroutines: tuple[Coroutine[Any, Any, Any], ...] = coroutines\n- self._future: Future = gather(*self.coroutines)\n+ self._future: Future[Any] = gather(*self.coroutines)\n \n async def __call__(self) -> Any:\n return await self\n \n- def __await__(self) -> Iterator[None]:\n+ def __await__(self) -> Generator[Any, None, Any]:\n return self._future.__await__()\n \n @property\n", "issue": "Type warning with `AwaitComplete`\nIt seems that there is a type error when it comes to awaiting `AwaitComplete`. As an example, given this code:\r\n\r\n```python\r\nfrom textual.app import App\r\nfrom textual.widgets import TabbedContent, TabPane\r\n\r\nclass AwaitableTypeWarningApp(App[None]):\r\n\r\n async def on_mount(self) -> None:\r\n await self.query_one(TabbedContent).add_pane(TabPane(\"Test\"))\r\n await self.query_one(TabbedContent).remove_pane(\"some-tab\")\r\n```\r\n\r\npyright reports:\r\n\r\n```\r\n/Users/davep/develop/python/textual-sandbox/await_type_warning.py\r\n /Users/davep/develop/python/textual-sandbox/await_type_warning.py:7:15 - error: \"AwaitComplete\" is not awaitable\r\n \u00a0\u00a0\"AwaitComplete\" is incompatible with protocol \"Awaitable[_T_co@Awaitable]\"\r\n \u00a0\u00a0\u00a0\u00a0\"__await__\" is an incompatible type\r\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Type \"() -> Iterator[None]\" cannot be assigned to type \"() -> Generator[Any, None, _T_co@Awaitable]\"\r\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Function return type \"Iterator[None]\" is incompatible with type \"Generator[Any, None, _T_co@Awaitable]\"\r\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\"Iterator[None]\" is incompatible with \"Generator[Any, None, _T_co@Awaitable]\" (reportGeneralTypeIssues)\r\n /Users/davep/develop/python/textual-sandbox/await_type_warning.py:8:15 - error: \"AwaitComplete\" is not awaitable\r\n \u00a0\u00a0\"AwaitComplete\" is incompatible with protocol \"Awaitable[_T_co@Awaitable]\"\r\n \u00a0\u00a0\u00a0\u00a0\"__await__\" is an incompatible type\r\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Type \"() -> Iterator[None]\" cannot be assigned to type \"() -> Generator[Any, None, _T_co@Awaitable]\"\r\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Function return type \"Iterator[None]\" is incompatible with type \"Generator[Any, None, _T_co@Awaitable]\"\r\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\"Iterator[None]\" is incompatible with \"Generator[Any, None, _T_co@Awaitable]\" (reportGeneralTypeIssues)\r\n2 errors, 0 warnings, 0 informations \r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom asyncio import Future, gather\nfrom typing import Any, Coroutine, Iterator, TypeVar\n\nimport rich.repr\n\nReturnType = TypeVar(\"ReturnType\")\n\n\[email protected](angular=True)\nclass AwaitComplete:\n \"\"\"An 'optionally-awaitable' object.\"\"\"\n\n def __init__(self, *coroutines: Coroutine[Any, Any, Any]) -> None:\n \"\"\"Create an AwaitComplete.\n\n Args:\n coroutines: One or more coroutines to execute.\n \"\"\"\n self.coroutines: 
tuple[Coroutine[Any, Any, Any], ...] = coroutines\n self._future: Future = gather(*self.coroutines)\n\n async def __call__(self) -> Any:\n return await self\n\n def __await__(self) -> Iterator[None]:\n return self._future.__await__()\n\n @property\n def is_done(self) -> bool:\n \"\"\"Returns True if the task has completed.\"\"\"\n return self._future.done()\n\n @property\n def exception(self) -> BaseException | None:\n \"\"\"An exception if it occurred in any of the coroutines.\"\"\"\n if self._future.done():\n return self._future.exception()\n return None\n\n @classmethod\n def nothing(cls):\n \"\"\"Returns an already completed instance of AwaitComplete.\"\"\"\n instance = cls()\n instance._future = Future()\n instance._future.set_result(None) # Mark it as completed with no result\n return instance\n", "path": "src/textual/await_complete.py"}]} | 1,417 | 229 |
gh_patches_debug_2649 | rasdani/github-patches | git_diff | webkom__lego-3128 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken link on weekly mails
The link used to unsubscribe from the mail is broken, because `frontend_url` is undefined. Probably due to the weekly mails being handled differently than all other notifications.
</issue>
<code>
[start of lego/apps/email/tasks.py]
1 from datetime import timedelta
2
3 from django.conf import settings
4 from django.template.loader import render_to_string
5 from django.utils import timezone
6
7 from premailer import transform
8 from structlog import get_logger
9
10 from lego import celery_app
11 from lego.apps.events.constants import EVENT_TYPE_TRANSLATIONS
12 from lego.apps.events.models import Event
13 from lego.apps.joblistings.constants import JOB_TYPE_TRANSLATIONS
14 from lego.apps.joblistings.models import Joblisting
15 from lego.apps.notifications.constants import EMAIL, WEEKLY_MAIL
16 from lego.apps.notifications.models import NotificationSetting
17 from lego.apps.permissions.utils import get_permission_handler
18 from lego.apps.restricted.message_processor import MessageProcessor
19 from lego.apps.tags.models import Tag
20 from lego.apps.users.models import AbakusGroup
21 from lego.utils.tasks import AbakusTask
22
23 log = get_logger()
24
25
26 def create_weekly_mail(user):
27 three_days_ago_timestamp = timezone.now() - timedelta(days=3)
28 last_sunday_timestamp = timezone.now() - timedelta(days=7)
29
30 weekly_tag = Tag.objects.filter(tag="weekly").first()
31 # Check if weekly tag exists so it does not crash if some idiot deletes the weekly tag
32 todays_weekly = (
33 weekly_tag.article_set.filter(created_at__gt=three_days_ago_timestamp).first()
34 if weekly_tag
35 else None
36 )
37
38 events_next_week = Event.objects.filter(
39 pools__activation_date__gt=timezone.now(),
40 pools__activation_date__lt=timezone.now() + timedelta(days=7),
41 ).distinct()
42
43 permission_handler = get_permission_handler(events_next_week.model)
44 filtered_events = permission_handler.filter_queryset(user, events_next_week)
45
46 filtered_events = filter(
47 lambda event: event.get_possible_pools(user, True) or event.is_admitted(user),
48 filtered_events,
49 )
50
51 joblistings_last_week = Joblisting.objects.filter(
52 created_at__gt=last_sunday_timestamp, visible_from__lt=timezone.now()
53 )
54
55 joblistings = []
56 for joblisting in joblistings_last_week:
57 joblistings.append(
58 {
59 "id": joblisting.id,
60 "company_name": joblisting.company.name,
61 "type": JOB_TYPE_TRANSLATIONS[joblisting.job_type],
62 "title": joblisting.title,
63 }
64 )
65
66 events = []
67 for event in filtered_events:
68 pools = []
69 for pool in event.pools.all():
70 pools.append(
71 {
72 "name": pool.name,
73 "activation_date": pool.activation_date.strftime("%d/%m kl. %H:%M"),
74 }
75 )
76
77 events.append(
78 {
79 "title": event.title,
80 "id": event.id,
81 "pools": pools,
82 "start_time": event.start_time.strftime("%d/%m kl %H:%M"),
83 "url": event.get_absolute_url(),
84 "type": EVENT_TYPE_TRANSLATIONS[event.event_type],
85 }
86 )
87
88 html_body = render_to_string(
89 "email/email/weekly_mail.html",
90 {
91 "events": events,
92 "todays_weekly": ""
93 if todays_weekly is None
94 else todays_weekly.get_absolute_url(),
95 "joblistings": joblistings,
96 },
97 )
98 if events or joblistings or todays_weekly:
99 return html_body
100 return None
101
102
103 @celery_app.task(serializer="json", bind=True, base=AbakusTask)
104 def send_weekly_email(self, logger_context=None):
105 self.setup_logger(logger_context)
106
107 week_number = timezone.now().isocalendar().week
108
109 # Set to just PR and Webkom for testing purposes
110 all_users = set(
111 AbakusGroup.objects.get(name="Webkom").restricted_lookup()[0]
112 + AbakusGroup.objects.get(name="PR").restricted_lookup()[0]
113 )
114 recipients = []
115
116 for user in all_users:
117 if not user.email_lists_enabled:
118 # Don't send emails to users that don't want mail.
119 continue
120
121 if EMAIL not in NotificationSetting.active_channels(user, WEEKLY_MAIL):
122 continue
123 recipients.append(user)
124
125 datatuple = (
126 (
127 f"Ukesmail uke {week_number}",
128 transform(html) if (html := create_weekly_mail(user)) is not None else None,
129 settings.DEFAULT_FROM_EMAIL,
130 [user.email],
131 )
132 for user in recipients
133 )
134 datatuple = tuple(tuppel for tuppel in datatuple if tuppel[1] is not None)
135 if datatuple:
136 MessageProcessor.send_mass_mail_html(datatuple)
137
[end of lego/apps/email/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lego/apps/email/tasks.py b/lego/apps/email/tasks.py
--- a/lego/apps/email/tasks.py
+++ b/lego/apps/email/tasks.py
@@ -93,6 +93,7 @@
if todays_weekly is None
else todays_weekly.get_absolute_url(),
"joblistings": joblistings,
+ "frontend_url": settings.FRONTEND_URL,
},
)
if events or joblistings or todays_weekly:
| {"golden_diff": "diff --git a/lego/apps/email/tasks.py b/lego/apps/email/tasks.py\n--- a/lego/apps/email/tasks.py\n+++ b/lego/apps/email/tasks.py\n@@ -93,6 +93,7 @@\n if todays_weekly is None\n else todays_weekly.get_absolute_url(),\n \"joblistings\": joblistings,\n+ \"frontend_url\": settings.FRONTEND_URL,\n },\n )\n if events or joblistings or todays_weekly:\n", "issue": "Broken link on weekly mails\nThe link used to unsubscribe from the mail is broken, because `frontend_url` is undefined. Probably due to the weekly mails being handled differently than all other notifications.\n", "before_files": [{"content": "from datetime import timedelta\n\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\n\nfrom premailer import transform\nfrom structlog import get_logger\n\nfrom lego import celery_app\nfrom lego.apps.events.constants import EVENT_TYPE_TRANSLATIONS\nfrom lego.apps.events.models import Event\nfrom lego.apps.joblistings.constants import JOB_TYPE_TRANSLATIONS\nfrom lego.apps.joblistings.models import Joblisting\nfrom lego.apps.notifications.constants import EMAIL, WEEKLY_MAIL\nfrom lego.apps.notifications.models import NotificationSetting\nfrom lego.apps.permissions.utils import get_permission_handler\nfrom lego.apps.restricted.message_processor import MessageProcessor\nfrom lego.apps.tags.models import Tag\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.utils.tasks import AbakusTask\n\nlog = get_logger()\n\n\ndef create_weekly_mail(user):\n three_days_ago_timestamp = timezone.now() - timedelta(days=3)\n last_sunday_timestamp = timezone.now() - timedelta(days=7)\n\n weekly_tag = Tag.objects.filter(tag=\"weekly\").first()\n # Check if weekly tag exists so it does not crash if some idiot deletes the weekly tag\n todays_weekly = (\n weekly_tag.article_set.filter(created_at__gt=three_days_ago_timestamp).first()\n if weekly_tag\n else None\n )\n\n events_next_week = Event.objects.filter(\n pools__activation_date__gt=timezone.now(),\n pools__activation_date__lt=timezone.now() + timedelta(days=7),\n ).distinct()\n\n permission_handler = get_permission_handler(events_next_week.model)\n filtered_events = permission_handler.filter_queryset(user, events_next_week)\n\n filtered_events = filter(\n lambda event: event.get_possible_pools(user, True) or event.is_admitted(user),\n filtered_events,\n )\n\n joblistings_last_week = Joblisting.objects.filter(\n created_at__gt=last_sunday_timestamp, visible_from__lt=timezone.now()\n )\n\n joblistings = []\n for joblisting in joblistings_last_week:\n joblistings.append(\n {\n \"id\": joblisting.id,\n \"company_name\": joblisting.company.name,\n \"type\": JOB_TYPE_TRANSLATIONS[joblisting.job_type],\n \"title\": joblisting.title,\n }\n )\n\n events = []\n for event in filtered_events:\n pools = []\n for pool in event.pools.all():\n pools.append(\n {\n \"name\": pool.name,\n \"activation_date\": pool.activation_date.strftime(\"%d/%m kl. 
%H:%M\"),\n }\n )\n\n events.append(\n {\n \"title\": event.title,\n \"id\": event.id,\n \"pools\": pools,\n \"start_time\": event.start_time.strftime(\"%d/%m kl %H:%M\"),\n \"url\": event.get_absolute_url(),\n \"type\": EVENT_TYPE_TRANSLATIONS[event.event_type],\n }\n )\n\n html_body = render_to_string(\n \"email/email/weekly_mail.html\",\n {\n \"events\": events,\n \"todays_weekly\": \"\"\n if todays_weekly is None\n else todays_weekly.get_absolute_url(),\n \"joblistings\": joblistings,\n },\n )\n if events or joblistings or todays_weekly:\n return html_body\n return None\n\n\n@celery_app.task(serializer=\"json\", bind=True, base=AbakusTask)\ndef send_weekly_email(self, logger_context=None):\n self.setup_logger(logger_context)\n\n week_number = timezone.now().isocalendar().week\n\n # Set to just PR and Webkom for testing purposes\n all_users = set(\n AbakusGroup.objects.get(name=\"Webkom\").restricted_lookup()[0]\n + AbakusGroup.objects.get(name=\"PR\").restricted_lookup()[0]\n )\n recipients = []\n\n for user in all_users:\n if not user.email_lists_enabled:\n # Don't send emails to users that don't want mail.\n continue\n\n if EMAIL not in NotificationSetting.active_channels(user, WEEKLY_MAIL):\n continue\n recipients.append(user)\n\n datatuple = (\n (\n f\"Ukesmail uke {week_number}\",\n transform(html) if (html := create_weekly_mail(user)) is not None else None,\n settings.DEFAULT_FROM_EMAIL,\n [user.email],\n )\n for user in recipients\n )\n datatuple = tuple(tuppel for tuppel in datatuple if tuppel[1] is not None)\n if datatuple:\n MessageProcessor.send_mass_mail_html(datatuple)\n", "path": "lego/apps/email/tasks.py"}]} | 1,877 | 111 |
gh_patches_debug_36490 | rasdani/github-patches | git_diff | DataDog__dd-agent-1339 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ssh] Multiple instances not differentiated
Any metrics sent through the ssh_check.py are not differentiated by instance. If multiple instances are running, the sftp.response_time metric will only be tagged with the host sending the metric: https://github.com/DataDog/dd-agent/blob/master/checks.d/ssh_check.py#L84
</issue>
<code>
[start of checks.d/ssh_check.py]
1 # stdlib
2 import time
3 import socket
4 # 3p
5 import paramiko
6 from collections import namedtuple
7 # project
8 from checks import AgentCheck
9
10 class CheckSSH(AgentCheck):
11
12 OPTIONS = [
13 ('host', True, None, str),
14 ('port', False, 22, int),
15 ('username', True, None, str),
16 ('password', False, None, str),
17 ('private_key_file', False, None, str),
18 ('sftp_check', False, True, bool),
19 ('add_missing_keys', False, False, bool),
20 ]
21
22 Config = namedtuple('Config', [
23 'host',
24 'port',
25 'username',
26 'password',
27 'private_key_file',
28 'sftp_check',
29 'add_missing_keys',
30 ]
31 )
32 def _load_conf(self, instance):
33 params = []
34 for option, required, default, expected_type in self.OPTIONS:
35 value = instance.get(option)
36 if required and (not value or type(value)) != expected_type :
37 raise Exception("Please specify a valid {0}".format(option))
38
39 if value is None or type(value) != expected_type:
40 self.log.debug("Bad or missing value for {0} parameter. Using default".format(option))
41 value = default
42
43 params.append(value)
44 return self.Config._make(params)
45
46 def check(self, instance):
47 conf = self._load_conf(instance)
48
49 try:
50 private_key = paramiko.RSAKey.from_private_key_file (conf.private_key_file)
51 except Exception:
52 self.warning("Private key could not be found")
53 private_key = None
54
55 client = paramiko.SSHClient()
56 if conf.add_missing_keys:
57 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
58 client.load_system_host_keys()
59
60 exception_message = None
61 #Service Availability to check status of SSH
62 try:
63 client.connect(conf.host, port=conf.port, username=conf.username, password=conf.password, pkey=private_key)
64 self.service_check('ssh.can_connect', AgentCheck.OK, message=exception_message)
65
66 except Exception as e:
67 exception_message = str(e)
68 status = AgentCheck.CRITICAL
69 self.service_check('ssh.can_connect', status, message=exception_message)
70 if conf.sftp_check:
71 self.service_check('sftp.can_connect', status, message=exception_message)
72 raise Exception (e)
73
74 #Service Availability to check status of SFTP
75 if conf.sftp_check:
76 try:
77 sftp = client.open_sftp()
78 #Check response time of SFTP
79 start_time = time.time()
80 result = sftp.listdir('.')
81 status = AgentCheck.OK
82 end_time = time.time()
83 time_taken = end_time - start_time
84 self.gauge('sftp.response_time', time_taken)
85
86 except Exception as e:
87 exception_message = str(e)
88 status = AgentCheck.CRITICAL
89
90 if exception_message is None:
91 exception_message = "No errors occured"
92
93 self.service_check('sftp.can_connect', status, message=exception_message)
94
[end of checks.d/ssh_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py
--- a/checks.d/ssh_check.py
+++ b/checks.d/ssh_check.py
@@ -45,6 +45,7 @@
def check(self, instance):
conf = self._load_conf(instance)
+ tags = ["instance:{0}-{1}".format(conf.host, conf.port)]
try:
private_key = paramiko.RSAKey.from_private_key_file (conf.private_key_file)
@@ -60,15 +61,19 @@
exception_message = None
#Service Availability to check status of SSH
try:
- client.connect(conf.host, port=conf.port, username=conf.username, password=conf.password, pkey=private_key)
- self.service_check('ssh.can_connect', AgentCheck.OK, message=exception_message)
+ client.connect(conf.host, port=conf.port, username=conf.username,
+ password=conf.password, pkey=private_key)
+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
+ message=exception_message)
except Exception as e:
exception_message = str(e)
status = AgentCheck.CRITICAL
- self.service_check('ssh.can_connect', status, message=exception_message)
+ self.service_check('ssh.can_connect', status, tags=tags,
+ message=exception_message)
if conf.sftp_check:
- self.service_check('sftp.can_connect', status, message=exception_message)
+ self.service_check('sftp.can_connect', status, tags=tags,
+ message=exception_message)
raise Exception (e)
#Service Availability to check status of SFTP
@@ -81,7 +86,7 @@
status = AgentCheck.OK
end_time = time.time()
time_taken = end_time - start_time
- self.gauge('sftp.response_time', time_taken)
+ self.gauge('sftp.response_time', time_taken, tags=tags)
except Exception as e:
exception_message = str(e)
@@ -90,4 +95,5 @@
if exception_message is None:
exception_message = "No errors occured"
- self.service_check('sftp.can_connect', status, message=exception_message)
+ self.service_check('sftp.can_connect', status, tags=tags,
+ message=exception_message)
| {"golden_diff": "diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py\n--- a/checks.d/ssh_check.py\n+++ b/checks.d/ssh_check.py\n@@ -45,6 +45,7 @@\n \n def check(self, instance):\n conf = self._load_conf(instance)\n+ tags = [\"instance:{0}-{1}\".format(conf.host, conf.port)] \n \n try:\n private_key = paramiko.RSAKey.from_private_key_file (conf.private_key_file)\n@@ -60,15 +61,19 @@\n exception_message = None\n #Service Availability to check status of SSH\n try:\n- client.connect(conf.host, port=conf.port, username=conf.username, password=conf.password, pkey=private_key)\n- self.service_check('ssh.can_connect', AgentCheck.OK, message=exception_message)\n+ client.connect(conf.host, port=conf.port, username=conf.username,\n+ password=conf.password, pkey=private_key)\n+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n+ message=exception_message)\n \n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n- self.service_check('ssh.can_connect', status, message=exception_message)\n+ self.service_check('ssh.can_connect', status, tags=tags,\n+ message=exception_message)\n if conf.sftp_check:\n- self.service_check('sftp.can_connect', status, message=exception_message)\n+ self.service_check('sftp.can_connect', status, tags=tags,\n+ message=exception_message)\n raise Exception (e)\n \n #Service Availability to check status of SFTP\n@@ -81,7 +86,7 @@\n status = AgentCheck.OK\n end_time = time.time()\n time_taken = end_time - start_time\n- self.gauge('sftp.response_time', time_taken)\n+ self.gauge('sftp.response_time', time_taken, tags=tags)\n \n except Exception as e:\n exception_message = str(e)\n@@ -90,4 +95,5 @@\n if exception_message is None:\n exception_message = \"No errors occured\"\n \n- self.service_check('sftp.can_connect', status, message=exception_message)\n+ self.service_check('sftp.can_connect', status, tags=tags,\n+ message=exception_message)\n", "issue": "[ssh] Multiple instances not differentiated\nAny metrics sent through the ssh_check.py are not differentiated by instance. If multiple instances are running, the sftp.response_time metric will only be tagged with the host sending the metric: https://github.com/DataDog/dd-agent/blob/master/checks.d/ssh_check.py#L84\n\n", "before_files": [{"content": "# stdlib\nimport time\nimport socket\n# 3p\nimport paramiko\nfrom collections import namedtuple\n# project\nfrom checks import AgentCheck\n\nclass CheckSSH(AgentCheck):\n\n OPTIONS = [\n ('host', True, None, str),\n ('port', False, 22, int),\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n\n Config = namedtuple('Config', [\n 'host',\n 'port',\n 'username',\n 'password',\n 'private_key_file',\n 'sftp_check',\n 'add_missing_keys',\n ]\n )\n def _load_conf(self, instance):\n params = []\n for option, required, default, expected_type in self.OPTIONS:\n value = instance.get(option)\n if required and (not value or type(value)) != expected_type :\n raise Exception(\"Please specify a valid {0}\".format(option))\n\n if value is None or type(value) != expected_type:\n self.log.debug(\"Bad or missing value for {0} parameter. 
Using default\".format(option))\n value = default\n\n params.append(value)\n return self.Config._make(params)\n\n def check(self, instance):\n conf = self._load_conf(instance)\n\n try:\n private_key = paramiko.RSAKey.from_private_key_file (conf.private_key_file)\n except Exception:\n self.warning(\"Private key could not be found\")\n private_key = None\n\n client = paramiko.SSHClient()\n if conf.add_missing_keys:\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.load_system_host_keys()\n\n exception_message = None\n #Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username, password=conf.password, pkey=private_key)\n self.service_check('ssh.can_connect', AgentCheck.OK, message=exception_message)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n self.service_check('ssh.can_connect', status, message=exception_message)\n if conf.sftp_check:\n self.service_check('sftp.can_connect', status, message=exception_message)\n raise Exception (e)\n\n #Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n #Check response time of SFTP\n start_time = time.time()\n result = sftp.listdir('.')\n status = AgentCheck.OK\n end_time = time.time()\n time_taken = end_time - start_time\n self.gauge('sftp.response_time', time_taken)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n\n if exception_message is None:\n exception_message = \"No errors occured\"\n\n self.service_check('sftp.can_connect', status, message=exception_message)\n", "path": "checks.d/ssh_check.py"}]} | 1,455 | 529 |
gh_patches_debug_26905 | rasdani/github-patches | git_diff | saleor__saleor-2978 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make sure all GraphQL types that accept or return a country code use an enum type
Currently some types return an enum, some return a string and `VoucherInput` takes a list of strings.
</issue>
<code>
[start of saleor/graphql/shipping/types.py]
1 import decimal
2
3 import graphene
4 from graphene import relay
5 from graphene.types import Scalar
6 from graphene_django import DjangoObjectType
7 from measurement.measures import Weight
8
9 from ...core.weight import convert_weight, get_default_weight_unit
10 from ...shipping import ShippingMethodType, models
11 from ..core.types.common import CountableDjangoObjectType
12 from ..core.types.money import MoneyRange
13
14 ShippingMethodTypeEnum = graphene.Enum(
15 'ShippingMethodTypeEnum',
16 [(code.upper(), code) for code, name in ShippingMethodType.CHOICES])
17
18
19 class ShippingMethod(DjangoObjectType):
20 type = ShippingMethodTypeEnum(description='Type of the shipping method.')
21
22 class Meta:
23 description = """
24 Shipping method are the methods you'll use to get
25 customer's orders to them.
26 They are directly exposed to the customers."""
27 model = models.ShippingMethod
28 interfaces = [relay.Node]
29 exclude_fields = ['shipping_zone', 'orders']
30
31
32 class ShippingZone(CountableDjangoObjectType):
33 price_range = graphene.Field(
34 MoneyRange, description='Lowest and highest prices for the shipping.')
35 countries = graphene.List(
36 graphene.String,
37 description='List of countries available for the method.')
38 shipping_methods = graphene.List(
39 ShippingMethod,
40 description=(
41 'List of shipping methods available for orders'
42 ' shipped to countries within this shipping zone.'))
43
44 class Meta:
45 description = """
46 Represents a shipping zone in the shop. Zones are the concept
47 used only for grouping shipping methods in the dashboard,
48 and are never exposed to the customers directly."""
49 model = models.ShippingZone
50 interfaces = [relay.Node]
51 filter_fields = {
52 'name': ['icontains'],
53 'countries': ['icontains'],
54 'shipping_methods__price': ['gte', 'lte']}
55
56 def resolve_price_range(self, info):
57 return self.price_range
58
59 def resolve_countries(self, info):
60 return self.countries
61
62 def resolve_shipping_methods(self, info):
63 return self.shipping_methods.all()
64
65
66 class WeightScalar(Scalar):
67 @staticmethod
68 def parse_value(value):
69         """Expects value to be a string "amount unit"
70 separated by a single space.
71 """
72 try:
73 value = decimal.Decimal(value)
74 except decimal.DecimalException:
75 return None
76 default_unit = get_default_weight_unit()
77 return Weight(**{default_unit: value})
78
79 @staticmethod
80 def serialize(weight):
81 if isinstance(weight, Weight):
82 default_unit = get_default_weight_unit()
83 if weight.unit != default_unit:
84 weight = convert_weight(weight, default_unit)
85 return str(weight)
86 return None
87
88 @staticmethod
89 def parse_literal(node):
90 return node
91
[end of saleor/graphql/shipping/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/graphql/shipping/types.py b/saleor/graphql/shipping/types.py
--- a/saleor/graphql/shipping/types.py
+++ b/saleor/graphql/shipping/types.py
@@ -8,7 +8,7 @@
from ...core.weight import convert_weight, get_default_weight_unit
from ...shipping import ShippingMethodType, models
-from ..core.types.common import CountableDjangoObjectType
+from ..core.types.common import CountableDjangoObjectType, CountryDisplay
from ..core.types.money import MoneyRange
ShippingMethodTypeEnum = graphene.Enum(
@@ -33,7 +33,7 @@
price_range = graphene.Field(
MoneyRange, description='Lowest and highest prices for the shipping.')
countries = graphene.List(
- graphene.String,
+ CountryDisplay,
description='List of countries available for the method.')
shipping_methods = graphene.List(
ShippingMethod,
@@ -57,7 +57,9 @@
return self.price_range
def resolve_countries(self, info):
- return self.countries
+ return [
+ CountryDisplay(code=country.code, country=country.name)
+ for country in self.countries]
def resolve_shipping_methods(self, info):
return self.shipping_methods.all()
| {"golden_diff": "diff --git a/saleor/graphql/shipping/types.py b/saleor/graphql/shipping/types.py\n--- a/saleor/graphql/shipping/types.py\n+++ b/saleor/graphql/shipping/types.py\n@@ -8,7 +8,7 @@\n \n from ...core.weight import convert_weight, get_default_weight_unit\n from ...shipping import ShippingMethodType, models\n-from ..core.types.common import CountableDjangoObjectType\n+from ..core.types.common import CountableDjangoObjectType, CountryDisplay\n from ..core.types.money import MoneyRange\n \n ShippingMethodTypeEnum = graphene.Enum(\n@@ -33,7 +33,7 @@\n price_range = graphene.Field(\n MoneyRange, description='Lowest and highest prices for the shipping.')\n countries = graphene.List(\n- graphene.String,\n+ CountryDisplay,\n description='List of countries available for the method.')\n shipping_methods = graphene.List(\n ShippingMethod,\n@@ -57,7 +57,9 @@\n return self.price_range\n \n def resolve_countries(self, info):\n- return self.countries\n+ return [\n+ CountryDisplay(code=country.code, country=country.name)\n+ for country in self.countries]\n \n def resolve_shipping_methods(self, info):\n return self.shipping_methods.all()\n", "issue": "Make sure all GraphQL types that accept or return a country code use an enum type\nCurrently some types return an enum, some return a string and `VoucherInput` takes a list of strings.\n", "before_files": [{"content": "import decimal\n\nimport graphene\nfrom graphene import relay\nfrom graphene.types import Scalar\nfrom graphene_django import DjangoObjectType\nfrom measurement.measures import Weight\n\nfrom ...core.weight import convert_weight, get_default_weight_unit\nfrom ...shipping import ShippingMethodType, models\nfrom ..core.types.common import CountableDjangoObjectType\nfrom ..core.types.money import MoneyRange\n\nShippingMethodTypeEnum = graphene.Enum(\n 'ShippingMethodTypeEnum',\n [(code.upper(), code) for code, name in ShippingMethodType.CHOICES])\n\n\nclass ShippingMethod(DjangoObjectType):\n type = ShippingMethodTypeEnum(description='Type of the shipping method.')\n\n class Meta:\n description = \"\"\"\n Shipping method are the methods you'll use to get\n customer's orders to them.\n They are directly exposed to the customers.\"\"\"\n model = models.ShippingMethod\n interfaces = [relay.Node]\n exclude_fields = ['shipping_zone', 'orders']\n\n\nclass ShippingZone(CountableDjangoObjectType):\n price_range = graphene.Field(\n MoneyRange, description='Lowest and highest prices for the shipping.')\n countries = graphene.List(\n graphene.String,\n description='List of countries available for the method.')\n shipping_methods = graphene.List(\n ShippingMethod,\n description=(\n 'List of shipping methods available for orders'\n ' shipped to countries within this shipping zone.'))\n\n class Meta:\n description = \"\"\"\n Represents a shipping zone in the shop. 
Zones are the concept\n used only for grouping shipping methods in the dashboard,\n and are never exposed to the customers directly.\"\"\"\n model = models.ShippingZone\n interfaces = [relay.Node]\n filter_fields = {\n 'name': ['icontains'],\n 'countries': ['icontains'],\n 'shipping_methods__price': ['gte', 'lte']}\n\n def resolve_price_range(self, info):\n return self.price_range\n\n def resolve_countries(self, info):\n return self.countries\n\n def resolve_shipping_methods(self, info):\n return self.shipping_methods.all()\n\n\nclass WeightScalar(Scalar):\n @staticmethod\n def parse_value(value):\n \"\"\"Excepts value to be a string \"amount unit\"\n separated by a single space.\n \"\"\"\n try:\n value = decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n default_unit = get_default_weight_unit()\n return Weight(**{default_unit: value})\n\n @staticmethod\n def serialize(weight):\n if isinstance(weight, Weight):\n default_unit = get_default_weight_unit()\n if weight.unit != default_unit:\n weight = convert_weight(weight, default_unit)\n return str(weight)\n return None\n\n @staticmethod\n def parse_literal(node):\n return node\n", "path": "saleor/graphql/shipping/types.py"}]} | 1,327 | 273 |
gh_patches_debug_3082 | rasdani/github-patches | git_diff | Pyomo__pyomo-1385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error with TransformationFactory('core.relax_integers')
When I use `TransformationFactory('core.relax_integrality').apply_to(m)`, a warning came up.
`WARNING: DEPRECATED: core.relax_integrality is deprecated. Use core.relax_integers (deprecated in TBD)`
When I changed the code to `TransformationFactory('core.relax_integers').apply_to(m)`, an error came up:
```
TransformationFactory('core.relax_integers').apply_to(m)
AttributeError: 'NoneType' object has no attribute 'apply_to'
```
</issue>
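For context: `TransformationFactory(name)` returns `None` when `name` is not a registered transformation, which is why the call above dies with `AttributeError` instead of a clearer error. Judging by the corrected deprecation message in the patch below, the integer-relaxation transformation is registered as `core.relax_integer_vars`. A minimal sketch of the working call, assuming that registered name (verify against your Pyomo version):

```python
from pyomo.environ import ConcreteModel, NonNegativeIntegers, TransformationFactory, Var

m = ConcreteModel()
m.x = Var(domain=NonNegativeIntegers)

xfrm = TransformationFactory('core.relax_integer_vars')  # registered name of RelaxIntegerVars
assert xfrm is not None  # an unregistered name such as 'core.relax_integers' would give None
xfrm.apply_to(m)         # m.x's domain is relaxed to a continuous one
```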
<code>
[start of pyomo/core/plugins/transform/relax_integrality.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 from pyomo.common import deprecated
12 from pyomo.core.base import TransformationFactory
13 from pyomo.core.plugins.transform.discrete_vars import RelaxIntegerVars
14
15
16 @TransformationFactory.register(
17 'core.relax_integrality',
18 doc="[DEPRECATED] Create a model where integer variables are replaced with "
19 "real variables.")
20 class RelaxIntegrality(RelaxIntegerVars):
21 """
22 This plugin relaxes integrality in a Pyomo model.
23 """
24
25 @deprecated(
26 "core.relax_integrality is deprecated. Use core.relax_integers",
27 version='TBD')
28 def __init__(self, **kwds):
29 super(RelaxIntegrality, self).__init__(**kwds)
30
[end of pyomo/core/plugins/transform/relax_integrality.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/core/plugins/transform/relax_integrality.py b/pyomo/core/plugins/transform/relax_integrality.py
--- a/pyomo/core/plugins/transform/relax_integrality.py
+++ b/pyomo/core/plugins/transform/relax_integrality.py
@@ -23,7 +23,7 @@
"""
@deprecated(
- "core.relax_integrality is deprecated. Use core.relax_integers",
+ "core.relax_integrality is deprecated. Use core.relax_integer_vars",
version='TBD')
def __init__(self, **kwds):
super(RelaxIntegrality, self).__init__(**kwds)
| {"golden_diff": "diff --git a/pyomo/core/plugins/transform/relax_integrality.py b/pyomo/core/plugins/transform/relax_integrality.py\n--- a/pyomo/core/plugins/transform/relax_integrality.py\n+++ b/pyomo/core/plugins/transform/relax_integrality.py\n@@ -23,7 +23,7 @@\n \"\"\"\n \n @deprecated(\n- \"core.relax_integrality is deprecated. Use core.relax_integers\",\n+ \"core.relax_integrality is deprecated. Use core.relax_integer_vars\",\n version='TBD')\n def __init__(self, **kwds):\n super(RelaxIntegrality, self).__init__(**kwds)\n", "issue": "Error with TransformationFactory('core.relax_integers')\nWhen I use `TransformationFactory('core.relax_integrality').apply_to(m)`, a warning came up.\r\n\r\n`WARNING: DEPRECATED: core.relax_integrality is deprecated. Use core.relax_integers (deprecated in TBD)`\r\n\r\nWhen I changed the code to `TransformationFactory('core.relax_integers').apply_to(m)`, an error came up:\r\n\r\n```\r\nTransformationFactory('core.relax_integers').apply_to(m)\r\nAttributeError: 'NoneType' object has no attribute 'apply_to'\r\n```\r\n\r\n\nError with TransformationFactory('core.relax_integers')\nWhen I use `TransformationFactory('core.relax_integrality').apply_to(m)`, a warning came up.\r\n\r\n`WARNING: DEPRECATED: core.relax_integrality is deprecated. Use core.relax_integers (deprecated in TBD)`\r\n\r\nWhen I changed the code to `TransformationFactory('core.relax_integers').apply_to(m)`, an error came up:\r\n\r\n```\r\nTransformationFactory('core.relax_integers').apply_to(m)\r\nAttributeError: 'NoneType' object has no attribute 'apply_to'\r\n```\r\n\r\n\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom pyomo.common import deprecated\nfrom pyomo.core.base import TransformationFactory\nfrom pyomo.core.plugins.transform.discrete_vars import RelaxIntegerVars\n\n\[email protected](\n 'core.relax_integrality',\n doc=\"[DEPRECATED] Create a model where integer variables are replaced with \"\n \"real variables.\")\nclass RelaxIntegrality(RelaxIntegerVars):\n \"\"\"\n This plugin relaxes integrality in a Pyomo model.\n \"\"\"\n\n @deprecated(\n \"core.relax_integrality is deprecated. Use core.relax_integers\",\n version='TBD')\n def __init__(self, **kwds):\n super(RelaxIntegrality, self).__init__(**kwds)\n", "path": "pyomo/core/plugins/transform/relax_integrality.py"}]} | 1,108 | 158 |
gh_patches_debug_31825 | rasdani/github-patches | git_diff | Pylons__pyramid-3332 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Hello World example to align with TryPyramid.com
We've updated the Hello World example on https://trypyramid.com per https://github.com/Pylons/trypyramid.com/pull/205, so we should update it in our official docs.
Verify that line numbers and narrative still align in docs whenever a `literalinclude` of Python files occurs.
- [x] README.rst
- [x] docs/designdefense.rst
- [x] docs/index.rst
- [x] docs/narr/helloworld.py
- [x] docs/narr/hellotraversal.py (take care to adapt code)
- [x] docs/narr/configuration.rst ("Declarative Configuration" section only)
- [x] docs/narr/firstapp.rst
- [x] docs/narr/hellotraversal.rst
- [x] docs/quick_tour/hello_world/app.py
- [x] docs/quick_tour.rst ("Hello World" section only)
Note that I excluded `docs/quick_tutorial/*` because it deliberately starts with imperative configuration, then introduces declarative configuration in [Step 7](http://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tutorial/views.html).
</issue>
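One adaptation worth noting (the checklist flags `docs/narr/hellotraversal.py` with "take care to adapt code"): the trypyramid.com style builds the app inside `with Configurator() as config:`, so the root factory moves from a constructor argument to a method call. A hedged sketch of the pattern, not the exact final docs text; `get_root` here is a placeholder:

```python
from pyramid.config import Configurator


def get_root(request):
    # placeholder root resource, for illustration only
    return {}


# Configurator(root_factory=get_root) and config.set_root_factory(get_root)
# are equivalent; the context-manager form uses the latter.
with Configurator() as config:
    config.set_root_factory(get_root)
    app = config.make_wsgi_app()
```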
<code>
[start of docs/quick_tour/hello_world/app.py]
1 from wsgiref.simple_server import make_server
2 from pyramid.config import Configurator
3 from pyramid.response import Response
4
5
6 def hello_world(request):
7 return Response('<h1>Hello World!</h1>')
8
9
10 if __name__ == '__main__':
11 with Configurator() as config:
12 config.add_route('hello', '/')
13 config.add_view(hello_world, route_name='hello')
14 app = config.make_wsgi_app()
15 server = make_server('0.0.0.0', 6543, app)
16 server.serve_forever()
17
[end of docs/quick_tour/hello_world/app.py]
[start of docs/narr/helloworld.py]
1 from wsgiref.simple_server import make_server
2 from pyramid.config import Configurator
3 from pyramid.response import Response
4
5
6 def hello_world(request):
7 return Response('Hello %(name)s!' % request.matchdict)
8
9 if __name__ == '__main__':
10 with Configurator() as config:
11 config.add_route('hello', '/hello/{name}')
12 config.add_view(hello_world, route_name='hello')
13 app = config.make_wsgi_app()
14 server = make_server('0.0.0.0', 8080, app)
15 server.serve_forever()
16
[end of docs/narr/helloworld.py]
[start of docs/narr/hellotraversal.py]
1 from wsgiref.simple_server import make_server
2 from pyramid.config import Configurator
3 from pyramid.response import Response
4
5 class Resource(dict):
6 pass
7
8 def get_root(request):
9 return Resource({'a': Resource({'b': Resource({'c': Resource()})})})
10
11 def hello_world_of_resources(context, request):
12 output = "Here's a resource and its children: %s" % context
13 return Response(output)
14
15 if __name__ == '__main__':
16 config = Configurator(root_factory=get_root)
17 config.add_view(hello_world_of_resources, context=Resource)
18 app = config.make_wsgi_app()
19 server = make_server('0.0.0.0', 8080, app)
20 server.serve_forever()
21
22
23
[end of docs/narr/hellotraversal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/narr/hellotraversal.py b/docs/narr/hellotraversal.py
--- a/docs/narr/hellotraversal.py
+++ b/docs/narr/hellotraversal.py
@@ -2,21 +2,24 @@
from pyramid.config import Configurator
from pyramid.response import Response
+
class Resource(dict):
pass
+
def get_root(request):
return Resource({'a': Resource({'b': Resource({'c': Resource()})})})
+
def hello_world_of_resources(context, request):
output = "Here's a resource and its children: %s" % context
return Response(output)
+
if __name__ == '__main__':
- config = Configurator(root_factory=get_root)
- config.add_view(hello_world_of_resources, context=Resource)
- app = config.make_wsgi_app()
- server = make_server('0.0.0.0', 8080, app)
+ with Configurator() as config:
+ config.set_root_factory(get_root)
+ config.add_view(hello_world_of_resources, context=Resource)
+ app = config.make_wsgi_app()
+ server = make_server('0.0.0.0', 6543, app)
server.serve_forever()
-
-
diff --git a/docs/narr/helloworld.py b/docs/narr/helloworld.py
--- a/docs/narr/helloworld.py
+++ b/docs/narr/helloworld.py
@@ -4,12 +4,13 @@
def hello_world(request):
- return Response('Hello %(name)s!' % request.matchdict)
+ return Response('Hello World!')
+
if __name__ == '__main__':
with Configurator() as config:
- config.add_route('hello', '/hello/{name}')
+ config.add_route('hello', '/')
config.add_view(hello_world, route_name='hello')
app = config.make_wsgi_app()
- server = make_server('0.0.0.0', 8080, app)
+ server = make_server('0.0.0.0', 6543, app)
server.serve_forever()
diff --git a/docs/quick_tour/hello_world/app.py b/docs/quick_tour/hello_world/app.py
--- a/docs/quick_tour/hello_world/app.py
+++ b/docs/quick_tour/hello_world/app.py
@@ -4,7 +4,7 @@
def hello_world(request):
- return Response('<h1>Hello World!</h1>')
+ return Response('Hello World!')
if __name__ == '__main__':
| {"golden_diff": "diff --git a/docs/narr/hellotraversal.py b/docs/narr/hellotraversal.py\n--- a/docs/narr/hellotraversal.py\n+++ b/docs/narr/hellotraversal.py\n@@ -2,21 +2,24 @@\n from pyramid.config import Configurator\n from pyramid.response import Response\n \n+\n class Resource(dict):\n pass\n \n+\n def get_root(request):\n return Resource({'a': Resource({'b': Resource({'c': Resource()})})})\n \n+\n def hello_world_of_resources(context, request):\n output = \"Here's a resource and its children: %s\" % context\n return Response(output)\n \n+\n if __name__ == '__main__':\n- config = Configurator(root_factory=get_root)\n- config.add_view(hello_world_of_resources, context=Resource)\n- app = config.make_wsgi_app()\n- server = make_server('0.0.0.0', 8080, app)\n+ with Configurator() as config:\n+ config.set_root_factory(get_root)\n+ config.add_view(hello_world_of_resources, context=Resource)\n+ app = config.make_wsgi_app()\n+ server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n-\n-\ndiff --git a/docs/narr/helloworld.py b/docs/narr/helloworld.py\n--- a/docs/narr/helloworld.py\n+++ b/docs/narr/helloworld.py\n@@ -4,12 +4,13 @@\n \n \n def hello_world(request):\n- return Response('Hello %(name)s!' % request.matchdict)\n+ return Response('Hello World!')\n+\n \n if __name__ == '__main__':\n with Configurator() as config:\n- config.add_route('hello', '/hello/{name}')\n+ config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n- server = make_server('0.0.0.0', 8080, app)\n+ server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\ndiff --git a/docs/quick_tour/hello_world/app.py b/docs/quick_tour/hello_world/app.py\n--- a/docs/quick_tour/hello_world/app.py\n+++ b/docs/quick_tour/hello_world/app.py\n@@ -4,7 +4,7 @@\n \n \n def hello_world(request):\n- return Response('<h1>Hello World!</h1>')\n+ return Response('Hello World!')\n \n \n if __name__ == '__main__':\n", "issue": "Update Hello World example to align with TryPyramid.com\nWe've updated the Hello World example on https://trypyramid.com per https://github.com/Pylons/trypyramid.com/pull/205 so we should update it on our official docs.\r\n\r\nVerify that line numbers and narrative still align in docs whenever a `literalinclude` of Python files occurs.\r\n\r\n- [x] README.rst\r\n- [x] docs/designdefense.rst\r\n- [x] docs/index.rst\r\n- [x] docs/narr/helloworld.py\r\n- [x] docs/narr/hellotraversal.py (take care to adapt code)\r\n- [x] docs/narr/configuration.rst (\"Declarative Configuration\" section only)\r\n- [x] docs/narr/firstapp.rst\r\n- [x] docs/narr/hellotraversal.rst\r\n- [x] docs/quick_tour/hello_world/app.py\r\n- [x] docs/quick_tour.rst (\"Hello World\" section only)\r\n\r\nNote that I excluded `docs/quick_tutorial/*` because it deliberately starts with imperative configuration, then introduces declarative configuration in [Step 7](http://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tutorial/views.html).\n", "before_files": [{"content": "from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\n\n\ndef hello_world(request):\n return Response('<h1>Hello World!</h1>')\n\n\nif __name__ == '__main__':\n with Configurator() as config:\n config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n", "path": "docs/quick_tour/hello_world/app.py"}, {"content": 
"from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\n\n\ndef hello_world(request):\n return Response('Hello %(name)s!' % request.matchdict)\n\nif __name__ == '__main__':\n with Configurator() as config:\n config.add_route('hello', '/hello/{name}')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 8080, app)\n server.serve_forever()\n", "path": "docs/narr/helloworld.py"}, {"content": "from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\n\nclass Resource(dict):\n pass\n\ndef get_root(request):\n return Resource({'a': Resource({'b': Resource({'c': Resource()})})})\n\ndef hello_world_of_resources(context, request):\n output = \"Here's a resource and its children: %s\" % context\n return Response(output)\n\nif __name__ == '__main__':\n config = Configurator(root_factory=get_root)\n config.add_view(hello_world_of_resources, context=Resource)\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 8080, app)\n server.serve_forever()\n\n\n", "path": "docs/narr/hellotraversal.py"}]} | 1,346 | 571 |
gh_patches_debug_2562 | rasdani/github-patches | git_diff | saulpw__visidata-1887 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
resize-cols-input missing from Column -> Resize menu
**Small description**
resize-cols-input is missing from the Column -> Resize menu
**Expected result**
When I go to the menu under Column -> Resize, I expect resize-cols-input (gz_) to be listed as an option.
**Additional context**
checked against v2.11
(Submitting pull request to fix this now.)
</issue>
<code>
[start of visidata/features/layout.py]
1 from visidata import VisiData, vd, Column, Sheet, Fanout
2
3 @Column.api
4 def setWidth(self, w):
5 if self.width != w:
6 if self.width == 0 or w == 0: # hide/unhide
7 vd.addUndo(setattr, self, '_width', self.width)
8 self._width = w
9
10
11 @Column.api
12 def toggleWidth(self, width):
13 'Change column width to either given `width` or default value.'
14 if self.width != width:
15 self.width = width
16 else:
17 self.width = int(self.sheet.options.default_width)
18
19
20 @Column.api
21 def toggleVisibility(self):
22 if self.height == 1:
23 self.height = self.sheet.options.default_height
24 else:
25 self.height = 1
26
27 @VisiData.api
28 def unhide_cols(vd, cols, rows):
29 'sets appropriate width if column was either hidden (0) or unseen (None)'
30 for c in cols:
31 c.setWidth(abs(c.width or 0) or c.getMaxWidth(rows))
32
33
34 Sheet.addCommand('_', 'resize-col-max', 'cursorCol.toggleWidth(cursorCol.getMaxWidth(visibleRows))', 'toggle width of current column between full and default width'),
35 Sheet.addCommand('z_', 'resize-col-input', 'width = int(input("set width= ", value=cursorCol.width)); cursorCol.setWidth(width)', 'adjust width of current column to N')
36 Sheet.addCommand('g_', 'resize-cols-max', 'for c in visibleCols: c.setWidth(c.getMaxWidth(visibleRows))', 'toggle widths of all visible columns between full and default width'),
37 Sheet.addCommand('gz_', 'resize-cols-input', 'width = int(input("set width= ", value=cursorCol.width)); Fanout(visibleCols).setWidth(width)', 'adjust widths of all visible columns to N')
38
39 Sheet.addCommand('-', 'hide-col', 'cursorCol.hide()', 'Hide current column')
40 Sheet.addCommand('z-', 'resize-col-half', 'cursorCol.setWidth(cursorCol.width//2)', 'reduce width of current column by half'),
41
42 Sheet.addCommand('gv', 'unhide-cols', 'unhide_cols(columns, visibleRows)', 'Show all columns')
43 Sheet.addCommand('v', 'visibility-sheet', 'for c in visibleCols: c.toggleVisibility()')
44 Sheet.addCommand('zv', 'visibility-col', 'cursorCol.toggleVisibility()')
45
46 vd.addMenuItems('''
47 Column > Hide > hide-col
48 Column > Unhide all > unhide-cols
49 Column > Resize > half > resize-col-half
50 Column > Resize > current column to max > resize-col-max
51 Column > Resize > current column to input > resize-col-input
52 Column > Resize > all columns max > resize-cols-max
53 ''')
54
[end of visidata/features/layout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/visidata/features/layout.py b/visidata/features/layout.py
--- a/visidata/features/layout.py
+++ b/visidata/features/layout.py
@@ -50,4 +50,5 @@
Column > Resize > current column to max > resize-col-max
Column > Resize > current column to input > resize-col-input
Column > Resize > all columns max > resize-cols-max
+ Column > Resize > all columns to input > resize-cols-input
''')
| {"golden_diff": "diff --git a/visidata/features/layout.py b/visidata/features/layout.py\n--- a/visidata/features/layout.py\n+++ b/visidata/features/layout.py\n@@ -50,4 +50,5 @@\n Column > Resize > current column to max > resize-col-max\n Column > Resize > current column to input > resize-col-input\n Column > Resize > all columns max > resize-cols-max\n+ Column > Resize > all columns to input > resize-cols-input\n ''')\n", "issue": "resize-cols-input missing from Column -> Resize menu\n**Small description**\r\nresize-cols-input is missing from the Column -> Resize menu\r\n\r\n**Expected result**\r\nwhen I go to the menu under Column -> Resize, I expect resize-cols-input (gz_) to be given as an option\r\n\r\n**Additional context**\r\nchecked against v2.11\r\n\r\n(Submitting pull request to fix this now.)\r\n\r\n\n", "before_files": [{"content": "from visidata import VisiData, vd, Column, Sheet, Fanout\n\[email protected]\ndef setWidth(self, w):\n if self.width != w:\n if self.width == 0 or w == 0: # hide/unhide\n vd.addUndo(setattr, self, '_width', self.width)\n self._width = w\n\n\[email protected]\ndef toggleWidth(self, width):\n 'Change column width to either given `width` or default value.'\n if self.width != width:\n self.width = width\n else:\n self.width = int(self.sheet.options.default_width)\n\n\[email protected]\ndef toggleVisibility(self):\n if self.height == 1:\n self.height = self.sheet.options.default_height\n else:\n self.height = 1\n\[email protected]\ndef unhide_cols(vd, cols, rows):\n 'sets appropriate width if column was either hidden (0) or unseen (None)'\n for c in cols:\n c.setWidth(abs(c.width or 0) or c.getMaxWidth(rows))\n\n\nSheet.addCommand('_', 'resize-col-max', 'cursorCol.toggleWidth(cursorCol.getMaxWidth(visibleRows))', 'toggle width of current column between full and default width'),\nSheet.addCommand('z_', 'resize-col-input', 'width = int(input(\"set width= \", value=cursorCol.width)); cursorCol.setWidth(width)', 'adjust width of current column to N')\nSheet.addCommand('g_', 'resize-cols-max', 'for c in visibleCols: c.setWidth(c.getMaxWidth(visibleRows))', 'toggle widths of all visible columns between full and default width'),\nSheet.addCommand('gz_', 'resize-cols-input', 'width = int(input(\"set width= \", value=cursorCol.width)); Fanout(visibleCols).setWidth(width)', 'adjust widths of all visible columns to N')\n\nSheet.addCommand('-', 'hide-col', 'cursorCol.hide()', 'Hide current column')\nSheet.addCommand('z-', 'resize-col-half', 'cursorCol.setWidth(cursorCol.width//2)', 'reduce width of current column by half'),\n\nSheet.addCommand('gv', 'unhide-cols', 'unhide_cols(columns, visibleRows)', 'Show all columns')\nSheet.addCommand('v', 'visibility-sheet', 'for c in visibleCols: c.toggleVisibility()')\nSheet.addCommand('zv', 'visibility-col', 'cursorCol.toggleVisibility()')\n\nvd.addMenuItems('''\n Column > Hide > hide-col\n Column > Unhide all > unhide-cols\n Column > Resize > half > resize-col-half\n Column > Resize > current column to max > resize-col-max\n Column > Resize > current column to input > resize-col-input\n Column > Resize > all columns max > resize-cols-max\n''')\n", "path": "visidata/features/layout.py"}]} | 1,301 | 107 |
gh_patches_debug_17321 | rasdani/github-patches | git_diff | sopel-irc__sopel-1155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[etymology] HTML entities not decoded before output
The `etymology` module does not decode HTML entities in the snippet before sending it to the channel. This results in printing snippets like this:
````
<Sopel> "Old English wæter, from Proto-Germanic *watar (source also
of Old Saxon watar, Old Frisian wetir, Dutch water, Old High German
wazzar, German Wasser, Old Norse vatn, Gothic wato 'water'), from
PIE *wod-or, from root *wed- (1) 'water, wet' (source also of
Hittite [...]" - http://etymonline.com/?term=water
````
</issue>
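The stray `æ` and similar sequences are HTML character references left over from scraping etymonline. The standard library can decode them; a minimal illustration for Python 3 (the fix below also keeps an `HTMLParser`-based fallback for Python 2):

```python
from html import unescape

snippet = "Old English wæter, from Proto-Germanic *watar"
print(unescape(snippet))  # -> Old English wæter, from Proto-Germanic *watar
```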
<code>
[start of sopel/modules/etymology.py]
1 # coding=utf-8
2 """
3 etymology.py - Sopel Etymology Module
4 Copyright 2007-9, Sean B. Palmer, inamidst.com
5 Licensed under the Eiffel Forum License 2.
6
7 http://sopel.chat
8 """
9 from __future__ import unicode_literals, absolute_import, print_function, division
10
11 import re
12 from sopel import web
13 from sopel.module import commands, example, NOLIMIT
14
15 etyuri = 'http://etymonline.com/?term=%s'
16 etysearch = 'http://etymonline.com/?search=%s'
17
18 r_definition = re.compile(r'(?ims)<dd[^>]*>.*?</dd>')
19 r_tag = re.compile(r'<(?!!)[^>]+>')
20 r_whitespace = re.compile(r'[\t\r\n ]+')
21
22 abbrs = [
23 'cf', 'lit', 'etc', 'Ger', 'Du', 'Skt', 'Rus', 'Eng', 'Amer.Eng', 'Sp',
24 'Fr', 'N', 'E', 'S', 'W', 'L', 'Gen', 'J.C', 'dial', 'Gk',
25 '19c', '18c', '17c', '16c', 'St', 'Capt', 'obs', 'Jan', 'Feb', 'Mar',
26 'Apr', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'c', 'tr', 'e', 'g'
27 ]
28 t_sentence = r'^.*?(?<!%s)(?:\.(?= [A-Z0-9]|\Z)|\Z)'
29 r_sentence = re.compile(t_sentence % ')(?<!'.join(abbrs))
30
31
32 def unescape(s):
33 s = s.replace('>', '>')
34 s = s.replace('<', '<')
35 s = s.replace('&', '&')
36 return s
37
38
39 def text(html):
40 html = r_tag.sub('', html)
41 html = r_whitespace.sub(' ', html)
42 return unescape(html).strip()
43
44
45 def etymology(word):
46 # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc
47 # entries? - http://swhack.com/logs/2006-07-19#T15-05-29
48
49 if len(word) > 25:
50 raise ValueError("Word too long: %s[...]" % word[:10])
51 word = {'axe': 'ax/axe'}.get(word, word)
52
53 bytes = web.get(etyuri % word)
54 definitions = r_definition.findall(bytes)
55
56 if not definitions:
57 return None
58
59 defn = text(definitions[0])
60 m = r_sentence.match(defn)
61 if not m:
62 return None
63 sentence = m.group(0)
64
65 maxlength = 275
66 if len(sentence) > maxlength:
67 sentence = sentence[:maxlength]
68 words = sentence[:-5].split(' ')
69 words.pop()
70 sentence = ' '.join(words) + ' [...]'
71
72 sentence = '"' + sentence.replace('"', "'") + '"'
73 return sentence + ' - ' + (etyuri % word)
74
75
76 @commands('ety')
77 @example('.ety word')
78 def f_etymology(bot, trigger):
79 """Look up the etymology of a word"""
80 word = trigger.group(2)
81
82 try:
83 result = etymology(word)
84 except IOError:
85 msg = "Can't connect to etymonline.com (%s)" % (etyuri % word)
86 bot.msg(trigger.sender, msg)
87 return NOLIMIT
88 except (AttributeError, TypeError):
89 result = None
90
91 if result is not None:
92 bot.msg(trigger.sender, result)
93 else:
94 uri = etysearch % word
95 msg = 'Can\'t find the etymology for "%s". Try %s' % (word, uri)
96 bot.msg(trigger.sender, msg)
97 return NOLIMIT
98
[end of sopel/modules/etymology.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/etymology.py b/sopel/modules/etymology.py
--- a/sopel/modules/etymology.py
+++ b/sopel/modules/etymology.py
@@ -8,6 +8,14 @@
"""
from __future__ import unicode_literals, absolute_import, print_function, division
+try:
+ from html import unescape
+except ImportError:
+ from HTMLParser import HTMLParser
+
+ # pep8 dictates a blank line here...
+ def unescape(s):
+ return HTMLParser.unescape.__func__(HTMLParser, s)
import re
from sopel import web
from sopel.module import commands, example, NOLIMIT
@@ -29,13 +37,6 @@
r_sentence = re.compile(t_sentence % ')(?<!'.join(abbrs))
-def unescape(s):
- s = s.replace('>', '>')
- s = s.replace('<', '<')
- s = s.replace('&', '&')
- return s
-
-
def text(html):
html = r_tag.sub('', html)
html = r_whitespace.sub(' ', html)
| {"golden_diff": "diff --git a/sopel/modules/etymology.py b/sopel/modules/etymology.py\n--- a/sopel/modules/etymology.py\n+++ b/sopel/modules/etymology.py\n@@ -8,6 +8,14 @@\n \"\"\"\n from __future__ import unicode_literals, absolute_import, print_function, division\n \n+try:\n+ from html import unescape\n+except ImportError:\n+ from HTMLParser import HTMLParser\n+\n+ # pep8 dictates a blank line here...\n+ def unescape(s):\n+ return HTMLParser.unescape.__func__(HTMLParser, s)\n import re\n from sopel import web\n from sopel.module import commands, example, NOLIMIT\n@@ -29,13 +37,6 @@\n r_sentence = re.compile(t_sentence % ')(?<!'.join(abbrs))\n \n \n-def unescape(s):\n- s = s.replace('>', '>')\n- s = s.replace('<', '<')\n- s = s.replace('&', '&')\n- return s\n-\n-\n def text(html):\n html = r_tag.sub('', html)\n html = r_whitespace.sub(' ', html)\n", "issue": "[etymology] HTML entities not decoded before output\nThe `etymology` module does not decode HTML entities in the snippet before sending it to the channel. This results in printing snippets like this:\r\n\r\n````\r\n<Sopel> \"Old English wæter, from Proto-Germanic *watar (source also\r\n of Old Saxon watar, Old Frisian wetir, Dutch water, Old High German\r\n wazzar, German Wasser, Old Norse vatn, Gothic wato 'water'), from\r\n PIE *wod-or, from root *wed- (1) 'water, wet' (source also of\r\n Hittite [...]\" -\u00a0http://etymonline.com/?term=water\r\n````\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\netymology.py - Sopel Etymology Module\nCopyright 2007-9, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web\nfrom sopel.module import commands, example, NOLIMIT\n\netyuri = 'http://etymonline.com/?term=%s'\netysearch = 'http://etymonline.com/?search=%s'\n\nr_definition = re.compile(r'(?ims)<dd[^>]*>.*?</dd>')\nr_tag = re.compile(r'<(?!!)[^>]+>')\nr_whitespace = re.compile(r'[\\t\\r\\n ]+')\n\nabbrs = [\n 'cf', 'lit', 'etc', 'Ger', 'Du', 'Skt', 'Rus', 'Eng', 'Amer.Eng', 'Sp',\n 'Fr', 'N', 'E', 'S', 'W', 'L', 'Gen', 'J.C', 'dial', 'Gk',\n '19c', '18c', '17c', '16c', 'St', 'Capt', 'obs', 'Jan', 'Feb', 'Mar',\n 'Apr', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'c', 'tr', 'e', 'g'\n]\nt_sentence = r'^.*?(?<!%s)(?:\\.(?= [A-Z0-9]|\\Z)|\\Z)'\nr_sentence = re.compile(t_sentence % ')(?<!'.join(abbrs))\n\n\ndef unescape(s):\n s = s.replace('>', '>')\n s = s.replace('<', '<')\n s = s.replace('&', '&')\n return s\n\n\ndef text(html):\n html = r_tag.sub('', html)\n html = r_whitespace.sub(' ', html)\n return unescape(html).strip()\n\n\ndef etymology(word):\n # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc\n # entries? 
- http://swhack.com/logs/2006-07-19#T15-05-29\n\n if len(word) > 25:\n raise ValueError(\"Word too long: %s[...]\" % word[:10])\n word = {'axe': 'ax/axe'}.get(word, word)\n\n bytes = web.get(etyuri % word)\n definitions = r_definition.findall(bytes)\n\n if not definitions:\n return None\n\n defn = text(definitions[0])\n m = r_sentence.match(defn)\n if not m:\n return None\n sentence = m.group(0)\n\n maxlength = 275\n if len(sentence) > maxlength:\n sentence = sentence[:maxlength]\n words = sentence[:-5].split(' ')\n words.pop()\n sentence = ' '.join(words) + ' [...]'\n\n sentence = '\"' + sentence.replace('\"', \"'\") + '\"'\n return sentence + ' - ' + (etyuri % word)\n\n\n@commands('ety')\n@example('.ety word')\ndef f_etymology(bot, trigger):\n \"\"\"Look up the etymology of a word\"\"\"\n word = trigger.group(2)\n\n try:\n result = etymology(word)\n except IOError:\n msg = \"Can't connect to etymonline.com (%s)\" % (etyuri % word)\n bot.msg(trigger.sender, msg)\n return NOLIMIT\n except (AttributeError, TypeError):\n result = None\n\n if result is not None:\n bot.msg(trigger.sender, result)\n else:\n uri = etysearch % word\n msg = 'Can\\'t find the etymology for \"%s\". Try %s' % (word, uri)\n bot.msg(trigger.sender, msg)\n return NOLIMIT\n", "path": "sopel/modules/etymology.py"}]} | 1,768 | 253 |
gh_patches_debug_3625 | rasdani/github-patches | git_diff | coala__coala-1597 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
coala_delete_orig: Modify message
Modify message about `Couldn't delete... <filename>`
@sils1297 please suggest a better message.
</issue>
<code>
[start of coalib/coala_delete_orig.py]
1 import os
2
3 from pyprint.ConsolePrinter import ConsolePrinter
4
5 from coalib.output.printers.LogPrinter import LogPrinter
6 from coalib.parsing import Globbing
7 from coalib.settings.ConfigurationGathering import get_config_directory
8 from coalib.settings.Section import Section
9
10
11 def main(log_printer=None, section: Section=None):
12 start_path = get_config_directory(section)
13 log_printer = log_printer or LogPrinter(ConsolePrinter())
14
15 if start_path is None:
16 return 255
17
18 orig_files = Globbing.glob(os.path.abspath(
19 os.path.join(start_path, '**', '*.orig')))
20
21 not_deleted = 0
22 for ofile in orig_files:
23 log_printer.info("Deleting old backup file... "
24 + os.path.relpath(ofile))
25 try:
26 os.remove(ofile)
27 except OSError as oserror:
28 not_deleted += 1
29 log_printer.warn("Couldn't delete... {}. {}".format(
30 os.path.relpath(ofile), oserror.strerror))
31
32 if not_deleted:
33 log_printer.warn(str(not_deleted) + " .orig backup files could not be"
34 " deleted, possibly because you lack the permission"
35 " to do so. coala may not be able to create"
36 " backup files when patches are applied.")
37 return 0
38
[end of coalib/coala_delete_orig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/coala_delete_orig.py b/coalib/coala_delete_orig.py
--- a/coalib/coala_delete_orig.py
+++ b/coalib/coala_delete_orig.py
@@ -26,7 +26,7 @@
os.remove(ofile)
except OSError as oserror:
not_deleted += 1
- log_printer.warn("Couldn't delete... {}. {}".format(
+ log_printer.warn("Couldn't delete {}. {}".format(
os.path.relpath(ofile), oserror.strerror))
if not_deleted:
| {"golden_diff": "diff --git a/coalib/coala_delete_orig.py b/coalib/coala_delete_orig.py\n--- a/coalib/coala_delete_orig.py\n+++ b/coalib/coala_delete_orig.py\n@@ -26,7 +26,7 @@\n os.remove(ofile)\n except OSError as oserror:\n not_deleted += 1\n- log_printer.warn(\"Couldn't delete... {}. {}\".format(\n+ log_printer.warn(\"Couldn't delete {}. {}\".format(\n os.path.relpath(ofile), oserror.strerror))\n \n if not_deleted:\n", "issue": "coala_delete_orig: Modify message\nModify message about `Couldn't delete... <filename>`\n\n@sils1297 please suggest a better message.\n\n", "before_files": [{"content": "import os\n\nfrom pyprint.ConsolePrinter import ConsolePrinter\n\nfrom coalib.output.printers.LogPrinter import LogPrinter\nfrom coalib.parsing import Globbing\nfrom coalib.settings.ConfigurationGathering import get_config_directory\nfrom coalib.settings.Section import Section\n\n\ndef main(log_printer=None, section: Section=None):\n start_path = get_config_directory(section)\n log_printer = log_printer or LogPrinter(ConsolePrinter())\n\n if start_path is None:\n return 255\n\n orig_files = Globbing.glob(os.path.abspath(\n os.path.join(start_path, '**', '*.orig')))\n\n not_deleted = 0\n for ofile in orig_files:\n log_printer.info(\"Deleting old backup file... \"\n + os.path.relpath(ofile))\n try:\n os.remove(ofile)\n except OSError as oserror:\n not_deleted += 1\n log_printer.warn(\"Couldn't delete... {}. {}\".format(\n os.path.relpath(ofile), oserror.strerror))\n\n if not_deleted:\n log_printer.warn(str(not_deleted) + \" .orig backup files could not be\"\n \" deleted, possibly because you lack the permission\"\n \" to do so. coala may not be able to create\"\n \" backup files when patches are applied.\")\n return 0\n", "path": "coalib/coala_delete_orig.py"}]} | 916 | 123 |
gh_patches_debug_26164 | rasdani/github-patches | git_diff | spack__spack-19604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issue: ucx
I'm trying to build openmpi with ucx, but when compiling ucx I get build errors with `error: implicit declaration of function`.
```console
637 CC libuct_rdmacm_la-rdmacm_iface.lo
638 CC libuct_rdmacm_la-rdmacm_ep.lo
639 CC libuct_rdmacm_la-rdmacm_cm.lo
640 CC libuct_rdmacm_la-rdmacm_listener.lo
641 CC libuct_rdmacm_la-rdmacm_cm_ep.lo
642 rdmacm_cm.c: In function 'uct_rdmacm_cm_id_to_dev_addr':
>> 643 rdmacm_cm.c:146:9: error: implicit declaration of function 'rdma_init_qp_attr' [-Werror=implicit-function-declaration]
644 146 | if (rdma_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask)) {
645 | ^~~~~~~~~~~~~~~~~
646 rdmacm_cm.c: In function 'uct_rdmacm_cm_handle_event_connect_response':
>> 647 rdmacm_cm.c:269:9: error: implicit declaration of function 'rdma_establish' [-Werror=implicit-function-declaration]
648 269 | if (rdma_establish(event->id)) {
649 | ^~~~~~~~~~~~~~
650 cc1: all warnings being treated as errors
651 make[4]: *** [Makefile:670: libuct_rdmacm_la-rdmacm_cm.lo] Error 1
652 make[4]: *** Waiting for unfinished jobs....
653 make[4]: Leaving directory '/tmp/root/spack-stage/spack-stage-ucx-1.8.0-344rhrrnr7m3kpod3hg6bbwi4ml3nn5k/spack-src/src/uct/ib/rdmacm'
```
### Steps to reproduce the issue
```console
$ spack install openmpi+thread_multiple+pmi fabrics=ucx schedulers=slurm %[email protected] ucx%[email protected]
```
### Information on your system
* **Spack:** 0.15.3-387-3a02d1a84
* **Python:** 3.6.8
* **Platform:** linux-centos8-zen2
### Additional information
* [spack-build-out.txt](https://github.com/spack/spack/files/5021896/spack-build-out.txt)
I have slurm installed locally, with the following in my .spack/packages.yaml:
```
slurm:
buildable: False
paths:
slurm@20-02-3-1: /usr
```
@hppritcha
### General information
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [x] I have uploaded the build log and environment files
- [x] I have searched the issues of this repo and believe this is not a duplicate
</issue>
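The two missing functions (`rdma_init_qp_attr`, `rdma_establish`) are provided by librdmacm from rdma-core, and the newest rdma-core release Spack packaged at this point was v20, which appears to predate them; that would explain the implicit-declaration errors when UCX's rdmacm transport is compiled. A hypothetical recipe fragment (not the actual UCX package, and the exact minimum version would need checking against rdma-core's changelog) showing how a lower bound could be expressed once newer releases are available:

```python
from spack import *


class Ucx(AutotoolsPackage):
    """Hypothetical fragment for illustration only, not the real UCX recipe."""

    # rdma_establish() / rdma_init_qp_attr() live in librdmacm, so require an
    # rdma-core release new enough to export them; 23.4 is simply the oldest
    # of the versions the patch below adds, not a verified minimum.
    depends_on('rdma-core@23.4:')
```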
<code>
[start of var/spack/repos/builtin/packages/rdma-core/package.py]
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class RdmaCore(CMakePackage):
10 """RDMA core userspace libraries and daemons"""
11
12 homepage = "https://github.com/linux-rdma/rdma-core"
13 url = "https://github.com/linux-rdma/rdma-core/releases/download/v17.1/rdma-core-17.1.tar.gz"
14
15 version('20', sha256='bc846989f807cd2b03643927d2b99fbf6f849cb1e766ab49bc9e81ce769d5421')
16 version('17.1', sha256='b47444b7c05d3906deb8771eec3e634984dd83f5e620d5e37d3a83f74f0cc1ba')
17 version('13', sha256='e5230fd7cda610753ad1252b40a28b1e9cf836423a10d8c2525b081527760d97')
18
19 depends_on('pkgconfig', type='build')
20 depends_on('libnl')
21 conflicts('platform=darwin', msg='rdma-core requires FreeBSD or Linux')
22 conflicts('%intel', msg='rdma-core cannot be built with intel (use gcc instead)')
23
24 # NOTE: specify CMAKE_INSTALL_RUNDIR explicitly to prevent rdma-core from
25 # using the spack staging build dir (which may be a very long file
26 # system path) as a component in compile-time static strings such as
27 # IBACM_SERVER_PATH.
28 def cmake_args(self):
29 cmake_args = ["-DCMAKE_INSTALL_SYSCONFDIR=" +
30 self.spec.prefix.etc,
31 "-DCMAKE_INSTALL_RUNDIR=/var/run"]
32 return cmake_args
33
[end of var/spack/repos/builtin/packages/rdma-core/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/rdma-core/package.py b/var/spack/repos/builtin/packages/rdma-core/package.py
--- a/var/spack/repos/builtin/packages/rdma-core/package.py
+++ b/var/spack/repos/builtin/packages/rdma-core/package.py
@@ -12,6 +12,15 @@
homepage = "https://github.com/linux-rdma/rdma-core"
url = "https://github.com/linux-rdma/rdma-core/releases/download/v17.1/rdma-core-17.1.tar.gz"
+ version('32.0', sha256='8197e20a59990b9b06a2e4c83f4a96802fc080ec1669392b643b59b6023931fc')
+ version('31.0', sha256='51ae9a3ab81cd6834436813fafc310c8b7007feae9d09a53fdd5c169e648d50b')
+ version('30.0', sha256='23e1bd2d7b38149a1621ee577a3428ac652e305adb8e0eee923cbe71356a9bf9')
+ version('28.1', sha256='d9961fd9b0867f17cb6a30a728562f00528b63dd72d1168d838220ab44e5c713')
+ version('27.1', sha256='39eeb3ab5f868ef3a5f7623d1ee69adca04efabe2a37de8080f354b8f4ef0ad7')
+ version('26.2', sha256='115087ab438bea3530a0d520640f1eeb5872b902ee2263acf83dcc7835d296c6')
+ version('25.4', sha256='f622491b0aac819f05c73174e0c7a9e630cc02fc0914d5ba1bb1d87fc4d313fd')
+ version('24.3', sha256='3a02d2d864258acc763849c635c815e3fa6a798a1464511cd3a2a370ddd6ee89')
+ version('23.4', sha256='6bfe009e9a382085def3b004d9396f7255a2e0c90c36647d1df0b86773d21a79')
version('20', sha256='bc846989f807cd2b03643927d2b99fbf6f849cb1e766ab49bc9e81ce769d5421')
version('17.1', sha256='b47444b7c05d3906deb8771eec3e634984dd83f5e620d5e37d3a83f74f0cc1ba')
version('13', sha256='e5230fd7cda610753ad1252b40a28b1e9cf836423a10d8c2525b081527760d97')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/rdma-core/package.py b/var/spack/repos/builtin/packages/rdma-core/package.py\n--- a/var/spack/repos/builtin/packages/rdma-core/package.py\n+++ b/var/spack/repos/builtin/packages/rdma-core/package.py\n@@ -12,6 +12,15 @@\n homepage = \"https://github.com/linux-rdma/rdma-core\"\n url = \"https://github.com/linux-rdma/rdma-core/releases/download/v17.1/rdma-core-17.1.tar.gz\"\n \n+ version('32.0', sha256='8197e20a59990b9b06a2e4c83f4a96802fc080ec1669392b643b59b6023931fc')\n+ version('31.0', sha256='51ae9a3ab81cd6834436813fafc310c8b7007feae9d09a53fdd5c169e648d50b')\n+ version('30.0', sha256='23e1bd2d7b38149a1621ee577a3428ac652e305adb8e0eee923cbe71356a9bf9')\n+ version('28.1', sha256='d9961fd9b0867f17cb6a30a728562f00528b63dd72d1168d838220ab44e5c713')\n+ version('27.1', sha256='39eeb3ab5f868ef3a5f7623d1ee69adca04efabe2a37de8080f354b8f4ef0ad7')\n+ version('26.2', sha256='115087ab438bea3530a0d520640f1eeb5872b902ee2263acf83dcc7835d296c6')\n+ version('25.4', sha256='f622491b0aac819f05c73174e0c7a9e630cc02fc0914d5ba1bb1d87fc4d313fd')\n+ version('24.3', sha256='3a02d2d864258acc763849c635c815e3fa6a798a1464511cd3a2a370ddd6ee89')\n+ version('23.4', sha256='6bfe009e9a382085def3b004d9396f7255a2e0c90c36647d1df0b86773d21a79')\n version('20', sha256='bc846989f807cd2b03643927d2b99fbf6f849cb1e766ab49bc9e81ce769d5421')\n version('17.1', sha256='b47444b7c05d3906deb8771eec3e634984dd83f5e620d5e37d3a83f74f0cc1ba')\n version('13', sha256='e5230fd7cda610753ad1252b40a28b1e9cf836423a10d8c2525b081527760d97')\n", "issue": "Installation issue: ucx\nI'm trying to build openmpi with ucx but on compiling ucx I get build errors with `error: implicit declaration of function`.\r\n\r\n```console\r\n 637 CC libuct_rdmacm_la-rdmacm_iface.lo\r\n 638 CC libuct_rdmacm_la-rdmacm_ep.lo\r\n 639 CC libuct_rdmacm_la-rdmacm_cm.lo\r\n 640 CC libuct_rdmacm_la-rdmacm_listener.lo\r\n 641 CC libuct_rdmacm_la-rdmacm_cm_ep.lo\r\n 642 rdmacm_cm.c: In function 'uct_rdmacm_cm_id_to_dev_addr':\r\n >> 643 rdmacm_cm.c:146:9: error: implicit declaration of function 'rdma_init_qp_attr' [-Werror=implicit-function-declaration]\r\n 644 146 | if (rdma_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask)) {\r\n 645 | ^~~~~~~~~~~~~~~~~\r\n 646 rdmacm_cm.c: In function 'uct_rdmacm_cm_handle_event_connect_response':\r\n >> 647 rdmacm_cm.c:269:9: error: implicit declaration of function 'rdma_establish' [-Werror=implicit-function-declaration]\r\n 648 269 | if (rdma_establish(event->id)) {\r\n 649 | ^~~~~~~~~~~~~~\r\n 650 cc1: all warnings being treated as errors\r\n 651 make[4]: *** [Makefile:670: libuct_rdmacm_la-rdmacm_cm.lo] Error 1\r\n 652 make[4]: *** Waiting for unfinished jobs....\r\n 653 make[4]: Leaving directory '/tmp/root/spack-stage/spack-stage-ucx-1.8.0-344rhrrnr7m3kpod3hg6bbwi4ml3nn5k/spack-src/src/uct/ib/rdmacm'\r\n\r\n```\r\n\r\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ spack install openmpi+thread_multiple+pmi fabrics=ucx schedulers=slurm %[email protected] ucx%[email protected]\r\n```\r\n\r\n### Information on your system\r\n\r\n* **Spack:** 0.15.3-387-3a02d1a84\r\n* **Python:** 3.6.8\r\n* **Platform:** linux-centos8-zen2\r\n\r\n### Additional information\r\n\r\n* [spack-build-out.txt](https://github.com/spack/spack/files/5021896/spack-build-out.txt)\r\n\r\nI have slurm locally installed, with the following in my .spack/packages.yaml :\r\n\r\n```\r\n slurm:\r\n buildable: False\r\n paths:\r\n slurm@20-02-3-1: /usr\r\n```\r\n\r\n\r\n@hppritcha \r\n\r\n### General 
information\r\n\r\n<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->\r\n- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [x] I have uploaded the build log and environment files\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass RdmaCore(CMakePackage):\n \"\"\"RDMA core userspace libraries and daemons\"\"\"\n\n homepage = \"https://github.com/linux-rdma/rdma-core\"\n url = \"https://github.com/linux-rdma/rdma-core/releases/download/v17.1/rdma-core-17.1.tar.gz\"\n\n version('20', sha256='bc846989f807cd2b03643927d2b99fbf6f849cb1e766ab49bc9e81ce769d5421')\n version('17.1', sha256='b47444b7c05d3906deb8771eec3e634984dd83f5e620d5e37d3a83f74f0cc1ba')\n version('13', sha256='e5230fd7cda610753ad1252b40a28b1e9cf836423a10d8c2525b081527760d97')\n\n depends_on('pkgconfig', type='build')\n depends_on('libnl')\n conflicts('platform=darwin', msg='rdma-core requires FreeBSD or Linux')\n conflicts('%intel', msg='rdma-core cannot be built with intel (use gcc instead)')\n\n# NOTE: specify CMAKE_INSTALL_RUNDIR explicitly to prevent rdma-core from\n# using the spack staging build dir (which may be a very long file\n# system path) as a component in compile-time static strings such as\n# IBACM_SERVER_PATH.\n def cmake_args(self):\n cmake_args = [\"-DCMAKE_INSTALL_SYSCONFDIR=\" +\n self.spec.prefix.etc,\n \"-DCMAKE_INSTALL_RUNDIR=/var/run\"]\n return cmake_args\n", "path": "var/spack/repos/builtin/packages/rdma-core/package.py"}]} | 1,921 | 990 |
gh_patches_debug_6708 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-554 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Security Group filter "default-vpc" does not function correctly.
Using the following policy:
```
policies:
- name: default-sg-allows-all-traffic
description: |
Find whether the default security group allows all traffic.
resource: security-group
filters:
- type: default-vpc
```
Comes up with no results, even when I have confirmed that my account has a default vpc and a default security group associated with it.
</issue>
<code>
[start of c7n/filters/vpc.py]
1 # Copyright 2016 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from c7n.utils import local_session, type_schema
16
17 from .core import Filter, ValueFilter
18 from .related import RelatedResourceFilter
19
20
21 class SecurityGroupFilter(RelatedResourceFilter):
22
23 schema = type_schema(
24 'security-group', rinherit=ValueFilter.schema,
25 match_resource={'type': 'boolean'},
26 operator={'enum': ['and', 'or']})
27
28 RelatedResource = "c7n.resources.vpc.SecurityGroup"
29 AnnotationKey = "matched-security-groups"
30
31
32 class SubnetFilter(RelatedResourceFilter):
33
34 schema = type_schema(
35 'subnet', rinherit=ValueFilter.schema,
36 match_resource={'type': 'boolean'},
37 operator={'enum': ['and', 'or']})
38
39 RelatedResource = "c7n.resources.vpc.Subnet"
40 AnnotationKey = "matched-subnets"
41
42
43 class DefaultVpcBase(Filter):
44
45 vpcs = None
46 default_vpc = None
47
48 def match(self, vpc_id):
49 if self.default_vpc is None:
50 self.log.debug("querying default vpc %s" % vpc_id)
51 client = local_session(self.manager.session_factory).client('ec2')
52 vpcs = [v['VpcId'] for v
53 in client.describe_vpcs(VpcIds=[vpc_id])['Vpcs']
54 if v['IsDefault']]
55 if not vpcs:
56 self.default_vpc = ""
57 else:
58 self.default_vpc = vpcs.pop()
59 return vpc_id == self.default_vpc and True or False
60
[end of c7n/filters/vpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/filters/vpc.py b/c7n/filters/vpc.py
--- a/c7n/filters/vpc.py
+++ b/c7n/filters/vpc.py
@@ -52,8 +52,6 @@
vpcs = [v['VpcId'] for v
in client.describe_vpcs(VpcIds=[vpc_id])['Vpcs']
if v['IsDefault']]
- if not vpcs:
- self.default_vpc = ""
- else:
+ if vpcs:
self.default_vpc = vpcs.pop()
return vpc_id == self.default_vpc and True or False
| {"golden_diff": "diff --git a/c7n/filters/vpc.py b/c7n/filters/vpc.py\n--- a/c7n/filters/vpc.py\n+++ b/c7n/filters/vpc.py\n@@ -52,8 +52,6 @@\n vpcs = [v['VpcId'] for v\n in client.describe_vpcs(VpcIds=[vpc_id])['Vpcs']\n if v['IsDefault']]\n- if not vpcs:\n- self.default_vpc = \"\"\n- else:\n+ if vpcs:\n self.default_vpc = vpcs.pop()\n return vpc_id == self.default_vpc and True or False\n", "issue": "Security Group filter \"default-vpc\" does not function correctly.\nUsing the following policy:\n\n```\npolicies:\n - name: default-sg-allows-all-traffic\n description: |\n Find whether the default security group allows all traffic.\n resource: security-group\n filters:\n - type: default-vpc\n```\n\nComes up with no results, even when I have confirmed that my account has a default vpc and a default security group associated with it.\n\n", "before_files": [{"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n.utils import local_session, type_schema\n\nfrom .core import Filter, ValueFilter\nfrom .related import RelatedResourceFilter\n\n\nclass SecurityGroupFilter(RelatedResourceFilter):\n\n schema = type_schema(\n 'security-group', rinherit=ValueFilter.schema,\n match_resource={'type': 'boolean'},\n operator={'enum': ['and', 'or']})\n\n RelatedResource = \"c7n.resources.vpc.SecurityGroup\"\n AnnotationKey = \"matched-security-groups\"\n\n\nclass SubnetFilter(RelatedResourceFilter):\n\n schema = type_schema(\n 'subnet', rinherit=ValueFilter.schema,\n match_resource={'type': 'boolean'},\n operator={'enum': ['and', 'or']})\n\n RelatedResource = \"c7n.resources.vpc.Subnet\"\n AnnotationKey = \"matched-subnets\" \n\n\nclass DefaultVpcBase(Filter):\n\n vpcs = None\n default_vpc = None\n\n def match(self, vpc_id):\n if self.default_vpc is None:\n self.log.debug(\"querying default vpc %s\" % vpc_id)\n client = local_session(self.manager.session_factory).client('ec2')\n vpcs = [v['VpcId'] for v\n in client.describe_vpcs(VpcIds=[vpc_id])['Vpcs']\n if v['IsDefault']]\n if not vpcs:\n self.default_vpc = \"\"\n else:\n self.default_vpc = vpcs.pop()\n return vpc_id == self.default_vpc and True or False\n", "path": "c7n/filters/vpc.py"}]} | 1,220 | 146 |
gh_patches_debug_34822 | rasdani/github-patches | git_diff | angr__angr-840 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClaripyZeroDivisionError without actual zero
When stepping through a BB with symbolic registers,
division by symbolic register causes state.step() to fail with ClaripyZeroDivisionError.
Example binary [MALWARE!!!]: https://www.dropbox.com/s/n9drwyle246ai86/E022DE72CCE8129BD5AC8A0675996318?dl=0
Example code:
```
# coding=utf-8
import angr
p = angr.Project('E022DE72CCE8129BD5AC8A0675996318', load_options={"auto_load_libs": False})
rebase_delta = p.loader.main_object.image_base_delta
start_state = p.factory.blank_state(addr=rebase_delta + 0x4470)
start_state.step()
```
</issue>
<code>
[start of angr/concretization_strategies/__init__.py]
1 class SimConcretizationStrategy(object):
2 """
3 Concretization strategies control the resolution of symbolic memory indices
4 in SimuVEX. By subclassing this class and setting it as a concretization strategy
5 (on state.memory.read_strategies and state.memory.write_strategies), SimuVEX's
6 memory index concretization behavior can be modified.
7 """
8
9 def __init__(self, filter=None, exact=True): #pylint:disable=redefined-builtin
10 """
11 Initializes the base SimConcretizationStrategy.
12
13 :param filter: A function, taking arguments of (SimMemory, claripy.AST) that determins
14 if this strategy can handle resolving the provided AST.
15 :param exact: A flag (default: True) that determines if the convenience resolution
16 functions provided by this class use exact or approximate resolution.
17 """
18 self._exact = exact
19 self._filter = filter
20
21 def _min(self, memory, addr, **kwargs):
22 """
23 Gets the minimum solution of an address.
24 """
25 return memory.state.se.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
26
27 def _max(self, memory, addr, **kwargs):
28 """
29 Gets the maximum solution of an address.
30 """
31 return memory.state.se.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
32
33 def _any(self, memory, addr, **kwargs):
34 """
35 Gets any solution of an address.
36 """
37 return memory.state.se.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
38
39 def _eval(self, memory, addr, n, **kwargs):
40 """
41 Gets n solutions for an address.
42 """
43 return memory.state.se.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs)
44
45 def _range(self, memory, addr, **kwargs):
46 """
47 Gets the (min, max) range of solutions for an address.
48 """
49 return (self._min(memory, addr, **kwargs), self._max(memory, addr, **kwargs))
50
51 def concretize(self, memory, addr):
52 """
53 Concretizes the address into a list of values.
54 If this strategy cannot handle this address, returns None.
55 """
56 if self._filter is None or self._filter(memory, addr):
57 return self._concretize(memory, addr)
58
59 def _concretize(self, memory, addr):
60 """
61 Should be implemented by child classes to handle concretization.
62 """
63 raise NotImplementedError()
64
65 def copy(self):
66 """
67 Returns a copy of the strategy, if there is data that should be kept separate between
68 states. If not, returns self.
69 """
70 return self
71
72 def merge(self, others):
73 """
74 Merges this strategy with others (if there is data that should be kept separate between
75 states. If not, is a no-op.
76 """
77 pass
78
79 from .any import SimConcretizationStrategyAny
80 from .controlled_data import SimConcretizationStrategyControlledData
81 from .eval import SimConcretizationStrategyEval
82 from .max import SimConcretizationStrategyMax
83 from .nonzero import SimConcretizationStrategyNonzero
84 from .nonzero_range import SimConcretizationStrategyNonzeroRange
85 from .norepeats import SimConcretizationStrategyNorepeats
86 from .norepeats_range import SimConcretizationStrategyNorepeatsRange
87 from .range import SimConcretizationStrategyRange
88 from .single import SimConcretizationStrategySingle
89 from .solutions import SimConcretizationStrategySolutions
90
[end of angr/concretization_strategies/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/angr/concretization_strategies/__init__.py b/angr/concretization_strategies/__init__.py
--- a/angr/concretization_strategies/__init__.py
+++ b/angr/concretization_strategies/__init__.py
@@ -1,3 +1,5 @@
+import claripy
+
class SimConcretizationStrategy(object):
"""
Concretization strategies control the resolution of symbolic memory indices
@@ -18,29 +20,41 @@
self._exact = exact
self._filter = filter
+ @staticmethod
+ def _tweak(addr, kwargs):
+ """
+ Utility method used from in here that adds a bogus constraint to extra_constraints making it so that the addr
+ expression can actually be evaluated in all cases
+ """
+ kwargs['extra_constraints'] = kwargs.get('extra_constraints', ()) + (addr == claripy.BVS('TEMP', len(addr)),)
+
def _min(self, memory, addr, **kwargs):
"""
Gets the minimum solution of an address.
"""
- return memory.state.se.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
+ self._tweak(addr, kwargs)
+ return memory.state.solver.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
def _max(self, memory, addr, **kwargs):
"""
Gets the maximum solution of an address.
"""
- return memory.state.se.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
+ self._tweak(addr, kwargs)
+ return memory.state.solver.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
def _any(self, memory, addr, **kwargs):
"""
Gets any solution of an address.
"""
- return memory.state.se.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
+ self._tweak(addr, kwargs)
+ return memory.state.solver.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
def _eval(self, memory, addr, n, **kwargs):
"""
Gets n solutions for an address.
"""
- return memory.state.se.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs)
+ self._tweak(addr, kwargs)
+ return memory.state.solver.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs)
def _range(self, memory, addr, **kwargs):
"""
| {"golden_diff": "diff --git a/angr/concretization_strategies/__init__.py b/angr/concretization_strategies/__init__.py\n--- a/angr/concretization_strategies/__init__.py\n+++ b/angr/concretization_strategies/__init__.py\n@@ -1,3 +1,5 @@\n+import claripy\n+\n class SimConcretizationStrategy(object):\n \"\"\"\n Concretization strategies control the resolution of symbolic memory indices\n@@ -18,29 +20,41 @@\n self._exact = exact\n self._filter = filter\n \n+ @staticmethod\n+ def _tweak(addr, kwargs):\n+ \"\"\"\n+ Utility method used from in here that adds a bogus constraint to extra_constraints making it so that the addr\n+ expression can actually be evaluated in all cases\n+ \"\"\"\n+ kwargs['extra_constraints'] = kwargs.get('extra_constraints', ()) + (addr == claripy.BVS('TEMP', len(addr)),)\n+\n def _min(self, memory, addr, **kwargs):\n \"\"\"\n Gets the minimum solution of an address.\n \"\"\"\n- return memory.state.se.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n+ self._tweak(addr, kwargs)\n+ return memory.state.solver.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n \n def _max(self, memory, addr, **kwargs):\n \"\"\"\n Gets the maximum solution of an address.\n \"\"\"\n- return memory.state.se.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n+ self._tweak(addr, kwargs)\n+ return memory.state.solver.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n \n def _any(self, memory, addr, **kwargs):\n \"\"\"\n Gets any solution of an address.\n \"\"\"\n- return memory.state.se.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n+ self._tweak(addr, kwargs)\n+ return memory.state.solver.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n \n def _eval(self, memory, addr, n, **kwargs):\n \"\"\"\n Gets n solutions for an address.\n \"\"\"\n- return memory.state.se.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs)\n+ self._tweak(addr, kwargs)\n+ return memory.state.solver.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs)\n \n def _range(self, memory, addr, **kwargs):\n \"\"\"\n", "issue": "ClaripyZeroDivisionError without actual zero\nWhen stepping through a BB with symbolic registers, \r\ndivision by symbolic register causes state.step() to fail with ClaripyZeroDivisionError.\r\n\r\nExample binary [MALWARE!!!]: https://www.dropbox.com/s/n9drwyle246ai86/E022DE72CCE8129BD5AC8A0675996318?dl=0\r\n\r\n\r\nExample code:\r\n\r\n```\r\n# coding=utf-8\r\nimport angr\r\np = angr.Project('E022DE72CCE8129BD5AC8A0675996318', load_options={\"auto_load_libs\": False})\r\nrebase_delta = p.loader.main_object.image_base_delta\r\nstart_state = p.factory.blank_state(addr=rebase_delta + 0x4470)\r\nstart_state.step()\r\n```\n", "before_files": [{"content": "class SimConcretizationStrategy(object):\n \"\"\"\n Concretization strategies control the resolution of symbolic memory indices\n in SimuVEX. 
By subclassing this class and setting it as a concretization strategy\n (on state.memory.read_strategies and state.memory.write_strategies), SimuVEX's\n memory index concretization behavior can be modified.\n \"\"\"\n\n def __init__(self, filter=None, exact=True): #pylint:disable=redefined-builtin\n \"\"\"\n Initializes the base SimConcretizationStrategy.\n\n :param filter: A function, taking arguments of (SimMemory, claripy.AST) that determins\n if this strategy can handle resolving the provided AST.\n :param exact: A flag (default: True) that determines if the convenience resolution\n functions provided by this class use exact or approximate resolution.\n \"\"\"\n self._exact = exact\n self._filter = filter\n\n def _min(self, memory, addr, **kwargs):\n \"\"\"\n Gets the minimum solution of an address.\n \"\"\"\n return memory.state.se.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n\n def _max(self, memory, addr, **kwargs):\n \"\"\"\n Gets the maximum solution of an address.\n \"\"\"\n return memory.state.se.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n\n def _any(self, memory, addr, **kwargs):\n \"\"\"\n Gets any solution of an address.\n \"\"\"\n return memory.state.se.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs)\n\n def _eval(self, memory, addr, n, **kwargs):\n \"\"\"\n Gets n solutions for an address.\n \"\"\"\n return memory.state.se.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs)\n\n def _range(self, memory, addr, **kwargs):\n \"\"\"\n Gets the (min, max) range of solutions for an address.\n \"\"\"\n return (self._min(memory, addr, **kwargs), self._max(memory, addr, **kwargs))\n\n def concretize(self, memory, addr):\n \"\"\"\n Concretizes the address into a list of values.\n If this strategy cannot handle this address, returns None.\n \"\"\"\n if self._filter is None or self._filter(memory, addr):\n return self._concretize(memory, addr)\n\n def _concretize(self, memory, addr):\n \"\"\"\n Should be implemented by child classes to handle concretization.\n \"\"\"\n raise NotImplementedError()\n\n def copy(self):\n \"\"\"\n Returns a copy of the strategy, if there is data that should be kept separate between\n states. If not, returns self.\n \"\"\"\n return self\n\n def merge(self, others):\n \"\"\"\n Merges this strategy with others (if there is data that should be kept separate between\n states. If not, is a no-op.\n \"\"\"\n pass\n\nfrom .any import SimConcretizationStrategyAny\nfrom .controlled_data import SimConcretizationStrategyControlledData\nfrom .eval import SimConcretizationStrategyEval\nfrom .max import SimConcretizationStrategyMax\nfrom .nonzero import SimConcretizationStrategyNonzero\nfrom .nonzero_range import SimConcretizationStrategyNonzeroRange\nfrom .norepeats import SimConcretizationStrategyNorepeats\nfrom .norepeats_range import SimConcretizationStrategyNorepeatsRange\nfrom .range import SimConcretizationStrategyRange\nfrom .single import SimConcretizationStrategySingle\nfrom .solutions import SimConcretizationStrategySolutions\n", "path": "angr/concretization_strategies/__init__.py"}]} | 1,693 | 573 |
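For the row above, the core of the fix is the `_tweak` helper: every solver query gets an extra throwaway constraint `addr == claripy.BVS('TEMP', len(addr))` appended to `extra_constraints`, so the expression is resolved through the solver with those constraints in place. The snippet below is a hypothetical, stand-alone illustration of that call shape using claripy's own `Solver` rather than angr's `state.solver` wrapper; the names `base`, `divisor`, and `fetch_one` are invented for the example.

```python
import claripy


def _tweak(addr, kwargs):
    # Same idea as the helper in the golden diff: append a bogus constraint so the
    # expression can be evaluated with extra_constraints present.
    kwargs['extra_constraints'] = kwargs.get('extra_constraints', ()) + \
        (addr == claripy.BVS('TEMP', len(addr)),)


def fetch_one(solver, addr, **kwargs):
    _tweak(addr, kwargs)
    return solver.eval(addr, 1, **kwargs)


solver = claripy.Solver()
divisor = claripy.BVS('divisor', 32)
addr = claripy.BVS('base', 32) / divisor   # symbolic (unsigned) division, as in the report
print(fetch_one(solver, addr))             # evaluated with the throwaway TEMP constraint attached
```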
gh_patches_debug_32386 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1683 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a feature to show the authentication token to the user on the MyProfile Page
**Deliverables:**
- [x] Add API endpoint to fetch or create (if doesn't exists) the token from database.
- [x] Add Frontend with features to show, copy and download the token in JSON format
</issue>
<code>
[start of apps/accounts/urls.py]
1 from django.conf.urls import url
2
3 from . import views
4
5 urlpatterns = [
6
7 url(r'^user/disable$', views.disable_user, name='disable_user'),
8
9 ]
10
[end of apps/accounts/urls.py]
[start of apps/accounts/views.py]
1 from django.contrib.auth import logout
2
3 from rest_framework.response import Response
4 from rest_framework import permissions, status
5 from rest_framework.decorators import (api_view,
6 authentication_classes,
7 permission_classes,)
8 from rest_framework_expiring_authtoken.authentication import (ExpiringTokenAuthentication,)
9
10
11 @api_view(['POST'])
12 @permission_classes((permissions.IsAuthenticated,))
13 @authentication_classes((ExpiringTokenAuthentication,))
14 def disable_user(request):
15
16 user = request.user
17 user.is_active = False
18 user.save()
19 logout(request)
20 return Response(status=status.HTTP_200_OK)
21
[end of apps/accounts/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/accounts/urls.py b/apps/accounts/urls.py
--- a/apps/accounts/urls.py
+++ b/apps/accounts/urls.py
@@ -5,5 +5,6 @@
urlpatterns = [
url(r'^user/disable$', views.disable_user, name='disable_user'),
+ url(r'^user/get_auth_token$', views.get_auth_token, name='get_auth_token'),
]
diff --git a/apps/accounts/views.py b/apps/accounts/views.py
--- a/apps/accounts/views.py
+++ b/apps/accounts/views.py
@@ -1,12 +1,18 @@
from django.contrib.auth import logout
+from django.contrib.auth.models import User
+from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework import permissions, status
from rest_framework.decorators import (api_view,
authentication_classes,
- permission_classes,)
+ permission_classes,
+ throttle_classes,)
+from rest_framework.throttling import UserRateThrottle
from rest_framework_expiring_authtoken.authentication import (ExpiringTokenAuthentication,)
+from .permissions import HasVerifiedEmail
+
@api_view(['POST'])
@permission_classes((permissions.IsAuthenticated,))
@@ -18,3 +24,24 @@
user.save()
logout(request)
return Response(status=status.HTTP_200_OK)
+
+
+@throttle_classes([UserRateThrottle])
+@api_view(['GET'])
+@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
+@authentication_classes((ExpiringTokenAuthentication,))
+def get_auth_token(request):
+ try:
+ user = User.objects.get(email=request.user.email)
+ except User.DoesNotExist:
+ response_data = {"error": "This User account doesn't exist."}
+ Response(response_data, status.HTTP_404_NOT_FOUND)
+
+ try:
+ token = Token.objects.get(user=user)
+ except Token.DoesNotExist:
+ token = Token.objects.create(user=user)
+ token.save()
+
+ response_data = {"token": "{}".format(token)}
+ return Response(response_data, status=status.HTTP_200_OK)
| {"golden_diff": "diff --git a/apps/accounts/urls.py b/apps/accounts/urls.py\n--- a/apps/accounts/urls.py\n+++ b/apps/accounts/urls.py\n@@ -5,5 +5,6 @@\n urlpatterns = [\n \n url(r'^user/disable$', views.disable_user, name='disable_user'),\n+ url(r'^user/get_auth_token$', views.get_auth_token, name='get_auth_token'),\n \n ]\ndiff --git a/apps/accounts/views.py b/apps/accounts/views.py\n--- a/apps/accounts/views.py\n+++ b/apps/accounts/views.py\n@@ -1,12 +1,18 @@\n from django.contrib.auth import logout\n+from django.contrib.auth.models import User\n \n+from rest_framework.authtoken.models import Token\n from rest_framework.response import Response\n from rest_framework import permissions, status\n from rest_framework.decorators import (api_view,\n authentication_classes,\n- permission_classes,)\n+ permission_classes,\n+ throttle_classes,)\n+from rest_framework.throttling import UserRateThrottle\n from rest_framework_expiring_authtoken.authentication import (ExpiringTokenAuthentication,)\n \n+from .permissions import HasVerifiedEmail\n+\n \n @api_view(['POST'])\n @permission_classes((permissions.IsAuthenticated,))\n@@ -18,3 +24,24 @@\n user.save()\n logout(request)\n return Response(status=status.HTTP_200_OK)\n+\n+\n+@throttle_classes([UserRateThrottle])\n+@api_view(['GET'])\n+@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))\n+@authentication_classes((ExpiringTokenAuthentication,))\n+def get_auth_token(request):\n+ try:\n+ user = User.objects.get(email=request.user.email)\n+ except User.DoesNotExist:\n+ response_data = {\"error\": \"This User account doesn't exist.\"}\n+ Response(response_data, status.HTTP_404_NOT_FOUND)\n+\n+ try:\n+ token = Token.objects.get(user=user)\n+ except Token.DoesNotExist:\n+ token = Token.objects.create(user=user)\n+ token.save()\n+\n+ response_data = {\"token\": \"{}\".format(token)}\n+ return Response(response_data, status=status.HTTP_200_OK)\n", "issue": "Add a feature to show the authentication token to the user on the MyProfile Page\n**Deliverables:**\r\n\r\n- [x] Add API endpoint to fetch or create (if doesn't exists) the token from database.\r\n\r\n- [x] Add Frontend with features to show, copy and download the token in JSON format\r\n\n", "before_files": [{"content": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\n url(r'^user/disable$', views.disable_user, name='disable_user'),\n\n]\n", "path": "apps/accounts/urls.py"}, {"content": "from django.contrib.auth import logout\n\nfrom rest_framework.response import Response\nfrom rest_framework import permissions, status\nfrom rest_framework.decorators import (api_view,\n authentication_classes,\n permission_classes,)\nfrom rest_framework_expiring_authtoken.authentication import (ExpiringTokenAuthentication,)\n\n\n@api_view(['POST'])\n@permission_classes((permissions.IsAuthenticated,))\n@authentication_classes((ExpiringTokenAuthentication,))\ndef disable_user(request):\n\n user = request.user\n user.is_active = False\n user.save()\n logout(request)\n return Response(status=status.HTTP_200_OK)\n", "path": "apps/accounts/views.py"}]} | 818 | 454 |