| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
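The summary above describes the dataset's columns as rendered by the viewer. Assuming the Hub repo id matches the `source` column (rasdani/github-patches) and that a "train" split exists (both are assumptions, not stated in this dump), a minimal sketch for loading it and inspecting one row:

```python
# Minimal sketch: load the dataset and peek at one row.
# Assumptions: the repo id is taken from the `source` column above,
# and the split name "train" is a guess.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# `verification_info` is a JSON string carrying the issue text,
# the pre-patch files, and the reference diff.
info = json.loads(row["verification_info"])
print(sorted(info))  # ['before_files', 'golden_diff', 'issue']
```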
gh_patches_debug_10785 | rasdani/github-patches | git_diff | chainer__chainer-916 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show output of nvcc after stacktrace
In the current implementation, `nvcc` prints error messages in stdout/stderr before stack trace is printed. It is hard to read.
</issue>
<code>
[start of cupy/cuda/compiler.py]
1 import hashlib
2 import os
3 import re
4 import subprocess
5 import sys
6 import tempfile
7
8 import filelock
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13
14
15 def _get_arch():
16 cc = device.Device().compute_capability
17 return 'sm_%s' % cc
18
19
20 class TemporaryDirectory(object):
21
22 def __enter__(self):
23 self.path = tempfile.mkdtemp()
24 return self.path
25
26 def __exit__(self, exc_type, exc_value, traceback):
27 if exc_value is not None:
28 return
29
30 for name in os.listdir(self.path):
31 os.unlink(os.path.join(self.path, name))
32 os.rmdir(self.path)
33
34
35 def _run_nvcc(cmd, cwd):
36 try:
37 return subprocess.check_output(cmd, cwd=cwd)
38 except OSError as e:
39 msg = 'Failed to run `nvcc` command. ' \
40 'Check PATH environment variable: ' \
41 + str(e)
42 raise OSError(msg)
43
44
45 def nvcc(source, options=(), arch=None):
46 if not arch:
47 arch = _get_arch()
48 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)
49
50 with TemporaryDirectory() as root_dir:
51 path = os.path.join(root_dir, 'kern')
52 cu_path = '%s.cu' % path
53 cubin_path = '%s.cubin' % path
54
55 with open(cu_path, 'w') as cu_file:
56 cu_file.write(source)
57
58 cmd.append(cu_path)
59 _run_nvcc(cmd, root_dir)
60
61 with open(cubin_path, 'rb') as bin_file:
62 return bin_file.read()
63
64
65 def preprocess(source, options=()):
66 cmd = ['nvcc', '--preprocess'] + list(options)
67 with TemporaryDirectory() as root_dir:
68 path = os.path.join(root_dir, 'kern')
69 cu_path = '%s.cu' % path
70
71 with open(cu_path, 'w') as cu_file:
72 cu_file.write(source)
73
74 cmd.append(cu_path)
75 pp_src = _run_nvcc(cmd, root_dir)
76
77 if isinstance(pp_src, six.binary_type):
78 pp_src = pp_src.decode('utf-8')
79 return re.sub('(?m)^#.*$', '', pp_src)
80
81
82 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
83
84
85 def get_cache_dir():
86 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
87
88
89 _empty_file_preprocess_cache = {}
90
91
92 def compile_with_cache(source, options=(), arch=None, cache_dir=None):
93 global _empty_file_preprocess_cache
94 if cache_dir is None:
95 cache_dir = get_cache_dir()
96 if arch is None:
97 arch = _get_arch()
98
99 if 'win32' == sys.platform:
100 options += ('-Xcompiler', '/wd 4819')
101 if sys.maxsize == 9223372036854775807:
102 options += '-m64',
103 elif sys.maxsize == 2147483647:
104 options += '-m32',
105
106 env = (arch, options)
107 if '#include' in source:
108 pp_src = '%s %s' % (env, preprocess(source, options))
109 else:
110 base = _empty_file_preprocess_cache.get(env, None)
111 if base is None:
112 base = _empty_file_preprocess_cache[env] = preprocess('', options)
113 pp_src = '%s %s %s' % (env, base, source)
114
115 if isinstance(pp_src, six.text_type):
116 pp_src = pp_src.encode('utf-8')
117 name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
118
119 mod = function.Module()
120
121 if not os.path.exists(cache_dir):
122 os.makedirs(cache_dir)
123
124 lock_path = os.path.join(cache_dir, 'lock_file.lock')
125
126 path = os.path.join(cache_dir, name)
127 with filelock.FileLock(lock_path) as lock:
128 if os.path.exists(path):
129 with open(path, 'rb') as file:
130 cubin = file.read()
131 mod.load(cubin)
132 else:
133 lock.release()
134 cubin = nvcc(source, options, arch)
135 mod.load(cubin)
136 lock.acquire()
137 with open(path, 'wb') as cubin_file:
138 cubin_file.write(cubin)
139
140 return mod
141
[end of cupy/cuda/compiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -34,7 +34,14 @@
def _run_nvcc(cmd, cwd):
try:
- return subprocess.check_output(cmd, cwd=cwd)
+ return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ msg = ('`nvcc` command returns non-zero exit status. \n'
+ 'command: {0}\n'
+ 'return-code: {1}\n'
+ 'stdout/stderr: \n'
+ '{2}'.format(e.cmd, e.returncode, e.output))
+ raise RuntimeError(msg)
except OSError as e:
msg = 'Failed to run `nvcc` command. ' \
'Check PATH environment variable: ' \
| {"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -34,7 +34,14 @@\n \n def _run_nvcc(cmd, cwd):\n try:\n- return subprocess.check_output(cmd, cwd=cwd)\n+ return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n+ except subprocess.CalledProcessError as e:\n+ msg = ('`nvcc` command returns non-zero exit status. \\n'\n+ 'command: {0}\\n'\n+ 'return-code: {1}\\n'\n+ 'stdout/stderr: \\n'\n+ '{2}'.format(e.cmd, e.returncode, e.output))\n+ raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n", "issue": "Show output of nvcc after stacktrace\nIn the current implementation, `nvcc` prints error messages in stdout/stderr before stack trace is printed. It is hard to read.\n\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = 
pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}]} | 1,883 | 206 |
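Each row pairs its prompt with a reference `golden_diff`, and the row's `verification_info` cell serializes the same material (the issue text, a `before_files` list of `{"content", "path"}` objects, and the diff) as one JSON string. Below is a hedged sketch of a checker that materializes that snapshot and tests a candidate patch with `git apply`; the temporary-repo layout and the function name are illustrative assumptions:

```python
# Sketch: rebuild the pre-patch snapshot from a row's verification_info,
# then check whether a candidate patch applies cleanly.
# Only the {"issue", "before_files", "golden_diff"} schema is taken from
# the dataset; everything else here is an illustrative assumption.
import json
import pathlib
import subprocess
import tempfile


def patch_applies(verification_info: str, patch_text: str) -> bool:
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as root:
        repo = pathlib.Path(root)
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
        # Materialize the pre-patch files.
        for f in info["before_files"]:
            target = repo / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        # "-" makes `git apply` read the diff from stdin.
        result = subprocess.run(
            ["git", "apply", "-"],
            cwd=repo,
            input=patch_text.encode(),
            capture_output=True,
        )
        return result.returncode == 0
```

A passing return only means the diff applies; checking a candidate against the `golden_diff` semantically would need the repository's tests, which these rows do not include.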
gh_patches_debug_26736 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-446 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add axis labels to ITER parameters plot
The ITER parameters plot [here](http://docs.plasmapy.org/en/stable/auto_examples/plot_physics.html#sphx-glr-auto-examples-plot-physics-py) could use some labels and units in said labels.
</issue>
<code>
[start of plasmapy/__init__.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2
3 # Packages may add whatever they like to this file, but
4 # should keep this content at the top.
5 # ----------------------------------------------------------------------------
6 from ._base_init import *
7 # ----------------------------------------------------------------------------
8
9 # Enforce Python version check during package import.
10 # This is the same check as the one at the top of setup.py
11 import sys
12
13 __name__ = "plasmapy"
14
15 __doc__ = ("A community-developed and community-driven open source "
16 "core Python package for plasma physics.")
17
18
19 class UnsupportedPythonError(Exception):
20 pass
21
22
23 if sys.version_info < tuple((int(val) for val in "3.6".split('.'))):
24 raise UnsupportedPythonError("plasmapy does not support Python < {}".format(3.6))
25
26 if not _ASTROPY_SETUP_:
27 # For egg_info test builds to pass, put package imports here.
28 from . import atomic
29 from . import classes
30 from . import constants
31 from . import diagnostics
32 from . import mathematics
33 from . import physics
34 from . import utils
35
36 __citation__ = """@misc{plasmapy_community_2018_1238132,
37 author = {PlasmaPy Community and
38 Murphy, Nicholas A. and
39 Leonard, Andrew J. and
40 Sta\'nczak, Dominik and
41 Kozlowski, Pawel M. and
42 Langendorf, Samuel J. and
43 Haggerty, Colby C. and
44 Beckers, Jasper P. and
45 Mumford, Stuart J. and
46 Parashar, Tulasi N. and
47 Huang, Yi-Min},
48 title = {{PlasmaPy: an open source community-developed
49 Python package for plasma physics}},
50 month = apr,
51 year = 2018,
52 doi = {10.5281/zenodo.1238132},
53 url = {https://doi.org/10.5281/zenodo.1238132}
54 }"""
55
[end of plasmapy/__init__.py]
[start of plasmapy/examples/plot_physics.py]
1 """
2 Analysing ITER parameters
3 =========================
4
5 Let's try to look at ITER plasma conditions using the `physics` subpackage.
6 """
7
8 from astropy import units as u
9 from plasmapy import physics
10 import matplotlib.pyplot as plt
11 import numpy as np
12 from mpl_toolkits.mplot3d import Axes3D
13
14 ######################################################
15 # The radius of electric field shielding clouds, also known as the Debye length,
16 # would be
17
18 electron_temperature = 8.8 * u.keV
19 electron_concentration = 10.1e19 / u.m**3
20 print(physics.Debye_length(electron_temperature, electron_concentration))
21
22 ############################################################
23 # Note that we can also neglect the unit for the concentration, as
24 # 1/m^3 is the a standard unit for this kind of Quantity:
25
26 print(physics.Debye_length(electron_temperature, 10.1e19))
27
28 ############################################################
29 # Assuming the magnetic field as 5.3 Teslas (which is the value at the major
30 # radius):
31
32 B = 5.3 * u.T
33
34 print(physics.gyrofrequency(B, particle='e'))
35
36 print(physics.gyroradius(B, T_i=electron_temperature, particle='e'))
37
38 ######################################################################
39 # The electron inertial length would be
40 print(physics.inertial_length(electron_concentration, particle='e'))
41
42 ######################################################################
43 # In these conditions, they should reach thermal velocities of about
44 print(physics.thermal_speed(T=electron_temperature, particle='e'))
45
46 ######################################################################
47 # And the Langmuir wave plasma frequency should be on the order of
48 print(physics.plasma_frequency(electron_concentration))
49
50 ############################################################
51 # Let's try to recreate some plots and get a feel for some of these quantities.
52
53 n_e = np.logspace(4, 30, 100) / u.m**3
54 plt.plot(n_e, physics.plasma_frequency(n_e))
55 plt.scatter(
56 electron_concentration,
57 physics.plasma_frequency(electron_concentration))
58
[end of plasmapy/examples/plot_physics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -33,6 +33,29 @@
from . import physics
from . import utils
+def online_help(query):
+ """
+ Search the online PlasmaPy documentation for the given query from plasmapy.org
+ Opens the results in the default web browser.
+ Requires an active Internet connection.
+ Redirects to Astropy.units in case of query 'unit' or 'units'
+
+ Parameters
+ ----------
+ query : str
+ The search query.
+ """
+ from urllib.parse import urlencode
+ import webbrowser
+
+ url = 'http://docs.plasmapy.org/en/stable/search.html?\
+ {0}&check_keywords=yes&area=default'.format(urlencode({'q': query}))
+
+ if(query.lower() in ('unit', 'units')):
+ url = 'http://docs.astropy.org/en/stable/units/'
+
+ webbrowser.open(url)
+
__citation__ = """@misc{plasmapy_community_2018_1238132,
author = {PlasmaPy Community and
Murphy, Nicholas A. and
@@ -51,4 +74,4 @@
year = 2018,
doi = {10.5281/zenodo.1238132},
url = {https://doi.org/10.5281/zenodo.1238132}
-}"""
+}"""
\ No newline at end of file
diff --git a/plasmapy/examples/plot_physics.py b/plasmapy/examples/plot_physics.py
--- a/plasmapy/examples/plot_physics.py
+++ b/plasmapy/examples/plot_physics.py
@@ -55,3 +55,6 @@
plt.scatter(
electron_concentration,
physics.plasma_frequency(electron_concentration))
+plt.xlabel("Electron Concentration (m^-3)")
+plt.ylabel("Langmuir Wave Plasma Frequency (rad/s)")
+plt.show()
| {"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -33,6 +33,29 @@\n from . import physics\n from . import utils\n \n+def online_help(query):\n+ \"\"\"\n+ Search the online PlasmaPy documentation for the given query from plasmapy.org\n+ Opens the results in the default web browser.\n+ Requires an active Internet connection.\n+ Redirects to Astropy.units in case of query 'unit' or 'units'\n+\n+ Parameters\n+ ----------\n+ query : str\n+ The search query.\n+ \"\"\"\n+ from urllib.parse import urlencode\n+ import webbrowser\n+\n+ url = 'http://docs.plasmapy.org/en/stable/search.html?\\\n+ {0}&check_keywords=yes&area=default'.format(urlencode({'q': query}))\n+\n+ if(query.lower() in ('unit', 'units')):\n+ url = 'http://docs.astropy.org/en/stable/units/'\n+\n+ webbrowser.open(url)\n+\n __citation__ = \"\"\"@misc{plasmapy_community_2018_1238132,\n author = {PlasmaPy Community and\n Murphy, Nicholas A. and\n@@ -51,4 +74,4 @@\n year = 2018,\n doi = {10.5281/zenodo.1238132},\n url = {https://doi.org/10.5281/zenodo.1238132}\n-}\"\"\"\n+}\"\"\"\n\\ No newline at end of file\ndiff --git a/plasmapy/examples/plot_physics.py b/plasmapy/examples/plot_physics.py\n--- a/plasmapy/examples/plot_physics.py\n+++ b/plasmapy/examples/plot_physics.py\n@@ -55,3 +55,6 @@\n plt.scatter(\n electron_concentration,\n physics.plasma_frequency(electron_concentration))\n+plt.xlabel(\"Electron Concentration (m^-3)\")\n+plt.ylabel(\"Langmuir Wave Plasma Frequency (rad/s)\")\n+plt.show()\n", "issue": "add axis labels to ITER parameters plot\nThe ITER parameters plot [here](http://docs.plasmapy.org/en/stable/auto_examples/plot_physics.html#sphx-glr-auto-examples-plot-physics-py) could use some labels and units in said labels.\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\n__doc__ = (\"A community-developed and community-driven open source \"\n \"core Python package for plasma physics.\")\n\n\nclass UnsupportedPythonError(Exception):\n pass\n\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise UnsupportedPythonError(\"plasmapy does not support Python < {}\".format(3.6))\n\nif not _ASTROPY_SETUP_:\n # For egg_info test builds to pass, put package imports here.\n from . import atomic\n from . import classes\n from . import constants\n from . import diagnostics\n from . import mathematics\n from . import physics\n from . import utils\n\n__citation__ = \"\"\"@misc{plasmapy_community_2018_1238132,\n author = {PlasmaPy Community and\n Murphy, Nicholas A. and\n Leonard, Andrew J. and\n Sta\\'nczak, Dominik and\n Kozlowski, Pawel M. and\n Langendorf, Samuel J. and\n Haggerty, Colby C. and\n Beckers, Jasper P. and\n Mumford, Stuart J. and\n Parashar, Tulasi N. 
and\n Huang, Yi-Min},\n title = {{PlasmaPy: an open source community-developed \n Python package for plasma physics}},\n month = apr,\n year = 2018,\n doi = {10.5281/zenodo.1238132},\n url = {https://doi.org/10.5281/zenodo.1238132}\n}\"\"\"\n", "path": "plasmapy/__init__.py"}, {"content": "\"\"\"\nAnalysing ITER parameters\n=========================\n\nLet's try to look at ITER plasma conditions using the `physics` subpackage.\n\"\"\"\n\nfrom astropy import units as u\nfrom plasmapy import physics\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\n######################################################\n# The radius of electric field shielding clouds, also known as the Debye length,\n# would be\n\nelectron_temperature = 8.8 * u.keV\nelectron_concentration = 10.1e19 / u.m**3\nprint(physics.Debye_length(electron_temperature, electron_concentration))\n\n############################################################\n# Note that we can also neglect the unit for the concentration, as\n# 1/m^3 is the a standard unit for this kind of Quantity:\n\nprint(physics.Debye_length(electron_temperature, 10.1e19))\n\n############################################################\n# Assuming the magnetic field as 5.3 Teslas (which is the value at the major\n# radius):\n\nB = 5.3 * u.T\n\nprint(physics.gyrofrequency(B, particle='e'))\n\nprint(physics.gyroradius(B, T_i=electron_temperature, particle='e'))\n\n######################################################################\n# The electron inertial length would be\nprint(physics.inertial_length(electron_concentration, particle='e'))\n\n######################################################################\n# In these conditions, they should reach thermal velocities of about\nprint(physics.thermal_speed(T=electron_temperature, particle='e'))\n\n######################################################################\n# And the Langmuir wave plasma frequency should be on the order of\nprint(physics.plasma_frequency(electron_concentration))\n\n############################################################\n# Let's try to recreate some plots and get a feel for some of these quantities.\n\nn_e = np.logspace(4, 30, 100) / u.m**3\nplt.plot(n_e, physics.plasma_frequency(n_e))\nplt.scatter(\n electron_concentration,\n physics.plasma_frequency(electron_concentration))\n", "path": "plasmapy/examples/plot_physics.py"}]} | 1,736 | 502 |
gh_patches_debug_1817 | rasdani/github-patches | git_diff | pyca__cryptography-8318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect docstrings in x25519 and x448 `.public_key()` methods
See:
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64
In both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.
</issue>
<code>
[start of src/cryptography/hazmat/primitives/asymmetric/x25519.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import abc
7
8 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
9 from cryptography.hazmat.primitives import _serialization
10
11
12 class X25519PublicKey(metaclass=abc.ABCMeta):
13 @classmethod
14 def from_public_bytes(cls, data: bytes) -> "X25519PublicKey":
15 from cryptography.hazmat.backends.openssl.backend import backend
16
17 if not backend.x25519_supported():
18 raise UnsupportedAlgorithm(
19 "X25519 is not supported by this version of OpenSSL.",
20 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
21 )
22
23 return backend.x25519_load_public_bytes(data)
24
25 @abc.abstractmethod
26 def public_bytes(
27 self,
28 encoding: _serialization.Encoding,
29 format: _serialization.PublicFormat,
30 ) -> bytes:
31 """
32 The serialized bytes of the public key.
33 """
34
35
36 class X25519PrivateKey(metaclass=abc.ABCMeta):
37 @classmethod
38 def generate(cls) -> "X25519PrivateKey":
39 from cryptography.hazmat.backends.openssl.backend import backend
40
41 if not backend.x25519_supported():
42 raise UnsupportedAlgorithm(
43 "X25519 is not supported by this version of OpenSSL.",
44 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
45 )
46 return backend.x25519_generate_key()
47
48 @classmethod
49 def from_private_bytes(cls, data: bytes) -> "X25519PrivateKey":
50 from cryptography.hazmat.backends.openssl.backend import backend
51
52 if not backend.x25519_supported():
53 raise UnsupportedAlgorithm(
54 "X25519 is not supported by this version of OpenSSL.",
55 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
56 )
57
58 return backend.x25519_load_private_bytes(data)
59
60 @abc.abstractmethod
61 def public_key(self) -> X25519PublicKey:
62 """
63 The serialized bytes of the public key.
64 """
65
66 @abc.abstractmethod
67 def private_bytes(
68 self,
69 encoding: _serialization.Encoding,
70 format: _serialization.PrivateFormat,
71 encryption_algorithm: _serialization.KeySerializationEncryption,
72 ) -> bytes:
73 """
74 The serialized bytes of the private key.
75 """
76
77 @abc.abstractmethod
78 def exchange(self, peer_public_key: X25519PublicKey) -> bytes:
79 """
80 Performs a key exchange operation using the provided peer's public key.
81 """
82
[end of src/cryptography/hazmat/primitives/asymmetric/x25519.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/asymmetric/x25519.py b/src/cryptography/hazmat/primitives/asymmetric/x25519.py
--- a/src/cryptography/hazmat/primitives/asymmetric/x25519.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/x25519.py
@@ -60,7 +60,7 @@
@abc.abstractmethod
def public_key(self) -> X25519PublicKey:
"""
- The serialized bytes of the public key.
+ Returns the public key assosciated with this private key
"""
@abc.abstractmethod
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/x25519.py b/src/cryptography/hazmat/primitives/asymmetric/x25519.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/x25519.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/x25519.py\n@@ -60,7 +60,7 @@\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n- The serialized bytes of the public key.\n+ Returns the public key assosciated with this private key\n \"\"\"\n \n @abc.abstractmethod\n", "issue": "Incorrect docstrings in x25519 and x448 `.public_key()` methods\nSee:\r\n\r\nhttps://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64\r\n\r\nhttps://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64\r\n\r\nIn both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X25519PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X25519PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X25519PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x25519_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X25519PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", 
"path": "src/cryptography/hazmat/primitives/asymmetric/x25519.py"}]} | 1,559 | 149 |
gh_patches_debug_20188 | rasdani/github-patches | git_diff | getsentry__sentry-python-2080 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Threading integration breaks OpenCensus' threading integration
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.21.1
### Steps to Reproduce
1. Enable threading Sentry integration (it seems to be enabled by default)
2. Enable [threading OpenCensus integration](https://pypi.org/project/opencensus-ext-threading/)
3. Start a thread
```python
import threading
import sentry_sdk.integrations.threading
import opencensus.trace.config_integration
sentry_sdk.init(integrations=[
sentry_sdk.integrations.threading.ThreadingIntegration(),
])
opencensus.trace.config_integration.trace_integrations(
integrations=["threading"],
)
thread = threading.Thread(target=print, args=("foo",))
thread.start()
thread.join()
```
### Expected Result
No error, output: `foo`
### Actual Result
Exceptions raised on every thread's startup
```python
Exception in thread Thread-1 (print):
Traceback (most recent call last):
File "/usr/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
self.run()
File ".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py", line 69, in run
reraise(*_capture_exception())
File ".venv/lib/python3.10/site-packages/sentry_sdk/_compat.py", line 60, in reraise
raise value
File ".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py", line 67, in run
return old_run_func(self, *a, **kw)
File ".venv/lib/python3.10/site-packages/opencensus/ext/threading/trace.py", line 80, in call
*self._opencensus_context
AttributeError: 'Thread' object has no attribute '_opencensus_context'
```
### Additional details
* Python: 3.10
* OpenCensus threading extension: 0.1.2
My suggestion is to use [`functools.update_wrapper`](https://docs.python.org/3/library/functools.html#functools.update_wrapper) (or the convenience decorator [`functools.wraps`](https://docs.python.org/3/library/functools.html#functools.wraps)) to set the name of [`sentry_start`](https://github.com/getsentry/sentry-python/blob/f763061ed9d9e99d85b3e95adc3ed63b623fc4a0/sentry_sdk/integrations/threading.py#L35-54) to `"start"` (and maybe the same for the result of `_wrap_run`, to `"run"`), so it "holds up to basic introspection".
PS: please include 'Additional details' block in issue template
</issue>
<code>
[start of sentry_sdk/integrations/threading.py]
1 from __future__ import absolute_import
2
3 import sys
4 from threading import Thread, current_thread
5
6 from sentry_sdk import Hub
7 from sentry_sdk._compat import reraise
8 from sentry_sdk._types import TYPE_CHECKING
9 from sentry_sdk.integrations import Integration
10 from sentry_sdk.utils import event_from_exception, capture_internal_exceptions
11
12 if TYPE_CHECKING:
13 from typing import Any
14 from typing import TypeVar
15 from typing import Callable
16 from typing import Optional
17
18 from sentry_sdk._types import ExcInfo
19
20 F = TypeVar("F", bound=Callable[..., Any])
21
22
23 class ThreadingIntegration(Integration):
24 identifier = "threading"
25
26 def __init__(self, propagate_hub=False):
27 # type: (bool) -> None
28 self.propagate_hub = propagate_hub
29
30 @staticmethod
31 def setup_once():
32 # type: () -> None
33 old_start = Thread.start
34
35 def sentry_start(self, *a, **kw):
36 # type: (Thread, *Any, **Any) -> Any
37 hub = Hub.current
38 integration = hub.get_integration(ThreadingIntegration)
39 if integration is not None:
40 if not integration.propagate_hub:
41 hub_ = None
42 else:
43 hub_ = Hub(hub)
44 # Patching instance methods in `start()` creates a reference cycle if
45 # done in a naive way. See
46 # https://github.com/getsentry/sentry-python/pull/434
47 #
48 # In threading module, using current_thread API will access current thread instance
49 # without holding it to avoid a reference cycle in an easier way.
50 with capture_internal_exceptions():
51 new_run = _wrap_run(hub_, getattr(self.run, "__func__", self.run))
52 self.run = new_run # type: ignore
53
54 return old_start(self, *a, **kw)
55
56 Thread.start = sentry_start # type: ignore
57
58
59 def _wrap_run(parent_hub, old_run_func):
60 # type: (Optional[Hub], F) -> F
61 def run(*a, **kw):
62 # type: (*Any, **Any) -> Any
63 hub = parent_hub or Hub.current
64 with hub:
65 try:
66 self = current_thread()
67 return old_run_func(self, *a, **kw)
68 except Exception:
69 reraise(*_capture_exception())
70
71 return run # type: ignore
72
73
74 def _capture_exception():
75 # type: () -> ExcInfo
76 hub = Hub.current
77 exc_info = sys.exc_info()
78
79 if hub.get_integration(ThreadingIntegration) is not None:
80 # If an integration is there, a client has to be there.
81 client = hub.client # type: Any
82
83 event, hint = event_from_exception(
84 exc_info,
85 client_options=client.options,
86 mechanism={"type": "threading", "handled": False},
87 )
88 hub.capture_event(event, hint=hint)
89
90 return exc_info
91
[end of sentry_sdk/integrations/threading.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/threading.py b/sentry_sdk/integrations/threading.py
--- a/sentry_sdk/integrations/threading.py
+++ b/sentry_sdk/integrations/threading.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import sys
+from functools import wraps
from threading import Thread, current_thread
from sentry_sdk import Hub
@@ -32,6 +33,7 @@
# type: () -> None
old_start = Thread.start
+ @wraps(old_start)
def sentry_start(self, *a, **kw):
# type: (Thread, *Any, **Any) -> Any
hub = Hub.current
@@ -58,6 +60,7 @@
def _wrap_run(parent_hub, old_run_func):
# type: (Optional[Hub], F) -> F
+ @wraps(old_run_func)
def run(*a, **kw):
# type: (*Any, **Any) -> Any
hub = parent_hub or Hub.current
| {"golden_diff": "diff --git a/sentry_sdk/integrations/threading.py b/sentry_sdk/integrations/threading.py\n--- a/sentry_sdk/integrations/threading.py\n+++ b/sentry_sdk/integrations/threading.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import\n \n import sys\n+from functools import wraps\n from threading import Thread, current_thread\n \n from sentry_sdk import Hub\n@@ -32,6 +33,7 @@\n # type: () -> None\n old_start = Thread.start\n \n+ @wraps(old_start)\n def sentry_start(self, *a, **kw):\n # type: (Thread, *Any, **Any) -> Any\n hub = Hub.current\n@@ -58,6 +60,7 @@\n \n def _wrap_run(parent_hub, old_run_func):\n # type: (Optional[Hub], F) -> F\n+ @wraps(old_run_func)\n def run(*a, **kw):\n # type: (*Any, **Any) -> Any\n hub = parent_hub or Hub.current\n", "issue": "Threading integration breaks OpenCensus' threading integration\n### How do you use Sentry?\r\n\r\nSentry Saas (sentry.io)\r\n\r\n### Version\r\n\r\n1.21.1\r\n\r\n### Steps to Reproduce\r\n\r\n1. Enable threading Sentry integration (it seems to be enabled by default)\r\n2. Enable [threading OpenCensus integration](https://pypi.org/project/opencensus-ext-threading/)\r\n3. Start a thread\r\n\r\n```python\r\nimport threading\r\n\r\nimport sentry_sdk.integrations.threading\r\nimport opencensus.trace.config_integration\r\n\r\nsentry_sdk.init(integrations=[\r\n sentry_sdk.integrations.threading.ThreadingIntegration(),\r\n])\r\n\r\nopencensus.trace.config_integration.trace_integrations(\r\n integrations=[\"threading\"],\r\n)\r\n\r\nthread = threading.Thread(target=print, args=(\"foo\",))\r\nthread.start()\r\nthread.join()\r\n```\r\n\r\n### Expected Result\r\n\r\nNo error, output: `foo`\r\n\r\n### Actual Result\r\n\r\nExceptions raised on every thread's startup\r\n\r\n```python\r\nException in thread Thread-1 (print):\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py\", line 69, in run\r\n reraise(*_capture_exception())\r\n File \".venv/lib/python3.10/site-packages/sentry_sdk/_compat.py\", line 60, in reraise\r\n raise value\r\n File \".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py\", line 67, in run\r\n return old_run_func(self, *a, **kw)\r\n File \".venv/lib/python3.10/site-packages/opencensus/ext/threading/trace.py\", line 80, in call\r\n *self._opencensus_context\r\nAttributeError: 'Thread' object has no attribute '_opencensus_context'\r\n```\r\n\r\n### Additional details\r\n\r\n* Python: 3.10\r\n* OpenCensus threading extension: 0.1.2\r\n\r\nMy suggestion is to use [`functools.update_wrapper`](https://docs.python.org/3/library/functools.html#functools.update_wrapper) (or the convenience decorator [`functools.wraps`](https://docs.python.org/3/library/functools.html#functools.wraps)) to set the name of [`sentry_start`](https://github.com/getsentry/sentry-python/blob/f763061ed9d9e99d85b3e95adc3ed63b623fc4a0/sentry_sdk/integrations/threading.py#L35-54) to `\"start\"` (and maybe the same for the result of `_wrap_run`, to `\"run\"`), so it \"holds up to basic introspection\".\r\n\r\nPS: please include 'Additional details' block in issue template\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\nfrom threading import Thread, current_thread\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk._types import TYPE_CHECKING\nfrom sentry_sdk.integrations import 
Integration\nfrom sentry_sdk.utils import event_from_exception, capture_internal_exceptions\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import TypeVar\n from typing import Callable\n from typing import Optional\n\n from sentry_sdk._types import ExcInfo\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n\nclass ThreadingIntegration(Integration):\n identifier = \"threading\"\n\n def __init__(self, propagate_hub=False):\n # type: (bool) -> None\n self.propagate_hub = propagate_hub\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_start = Thread.start\n\n def sentry_start(self, *a, **kw):\n # type: (Thread, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(ThreadingIntegration)\n if integration is not None:\n if not integration.propagate_hub:\n hub_ = None\n else:\n hub_ = Hub(hub)\n # Patching instance methods in `start()` creates a reference cycle if\n # done in a naive way. See\n # https://github.com/getsentry/sentry-python/pull/434\n #\n # In threading module, using current_thread API will access current thread instance\n # without holding it to avoid a reference cycle in an easier way.\n with capture_internal_exceptions():\n new_run = _wrap_run(hub_, getattr(self.run, \"__func__\", self.run))\n self.run = new_run # type: ignore\n\n return old_start(self, *a, **kw)\n\n Thread.start = sentry_start # type: ignore\n\n\ndef _wrap_run(parent_hub, old_run_func):\n # type: (Optional[Hub], F) -> F\n def run(*a, **kw):\n # type: (*Any, **Any) -> Any\n hub = parent_hub or Hub.current\n with hub:\n try:\n self = current_thread()\n return old_run_func(self, *a, **kw)\n except Exception:\n reraise(*_capture_exception())\n\n return run # type: ignore\n\n\ndef _capture_exception():\n # type: () -> ExcInfo\n hub = Hub.current\n exc_info = sys.exc_info()\n\n if hub.get_integration(ThreadingIntegration) is not None:\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"threading\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n", "path": "sentry_sdk/integrations/threading.py"}]} | 2,010 | 239 |
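The fix suggested in the issue above, wrapping the replacement `start` with `functools.wraps` so the patched method still "holds up to basic introspection", can be shown with toy names (none of this is Sentry's code):

```python
# Toy illustration of the suggested fix: without functools.wraps the
# wrapper's __name__ is "wrapper", which confuses integrations that
# introspect thread methods; @wraps copies the original metadata over.
from functools import wraps


def patch(func):
    @wraps(func)  # copies __name__, __doc__, __module__, ... from func
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper


def start():
    """Original start."""


patched = patch(start)
print(patched.__name__)  # "start", not "wrapper"
print(patched.__doc__)   # "Original start."
```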
gh_patches_debug_29915 | rasdani/github-patches | git_diff | deepchecks__deepchecks-398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: confusion matrix chart displays wrong output
To reproduce: https://www.kaggle.com/itay94/notebook05f499eb19

</issue>
<code>
[start of deepchecks/checks/performance/confusion_matrix_report.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The confusion_matrix_report check module."""
12 import numpy as np
13 import sklearn
14 from sklearn.base import BaseEstimator
15
16 import plotly.figure_factory as ff
17 from deepchecks import CheckResult, Dataset
18 from deepchecks.base.check import SingleDatasetBaseCheck
19 from deepchecks.utils.metrics import ModelType, task_type_validation
20
21
22 __all__ = ['ConfusionMatrixReport']
23
24
25 class ConfusionMatrixReport(SingleDatasetBaseCheck):
26 """Calculate the confusion matrix of the model on the given dataset."""
27
28 def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:
29 """Run check.
30
31 Args:
32 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance
33 dataset: a Dataset object
34
35 Returns:
36 CheckResult: value is numpy array of the confusion matrix, displays the confusion matrix
37
38 Raises:
39 DeepchecksValueError: If the object is not a Dataset instance with a label
40 """
41 return self._confusion_matrix_report(dataset, model)
42
43 def _confusion_matrix_report(self, dataset: Dataset, model):
44 Dataset.validate_dataset(dataset)
45 dataset.validate_label()
46 task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])
47
48 label = dataset.label_name
49 ds_x = dataset.data[dataset.features]
50 ds_y = dataset.data[label]
51 y_pred = model.predict(ds_x)
52
53 confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
54
55 labels = [str(val) for val in np.unique(ds_y)]
56 fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')
57 fig.update_layout(width=600, height=600)
58 fig.update_xaxes(title='Predicted Value')
59 fig.update_yaxes(title='True value', autorange='reversed')
60 fig['data'][0]['showscale'] = True
61 fig['layout']['xaxis']['side'] = 'bottom'
62
63 return CheckResult(confusion_matrix, display=fig)
64
[end of deepchecks/checks/performance/confusion_matrix_report.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/checks/performance/confusion_matrix_report.py b/deepchecks/checks/performance/confusion_matrix_report.py
--- a/deepchecks/checks/performance/confusion_matrix_report.py
+++ b/deepchecks/checks/performance/confusion_matrix_report.py
@@ -9,11 +9,10 @@
# ----------------------------------------------------------------------------
#
"""The confusion_matrix_report check module."""
-import numpy as np
import sklearn
from sklearn.base import BaseEstimator
-import plotly.figure_factory as ff
+import plotly.express as px
from deepchecks import CheckResult, Dataset
from deepchecks.base.check import SingleDatasetBaseCheck
from deepchecks.utils.metrics import ModelType, task_type_validation
@@ -49,15 +48,12 @@
ds_x = dataset.data[dataset.features]
ds_y = dataset.data[label]
y_pred = model.predict(ds_x)
-
confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
- labels = [str(val) for val in np.unique(ds_y)]
- fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')
+ # Figure
+ fig = px.imshow(confusion_matrix, x=dataset.classes, y=dataset.classes, text_auto=True)
fig.update_layout(width=600, height=600)
- fig.update_xaxes(title='Predicted Value')
- fig.update_yaxes(title='True value', autorange='reversed')
- fig['data'][0]['showscale'] = True
- fig['layout']['xaxis']['side'] = 'bottom'
+ fig.update_xaxes(title='Predicted Value', type='category')
+ fig.update_yaxes(title='True value', type='category')
return CheckResult(confusion_matrix, display=fig)
| {"golden_diff": "diff --git a/deepchecks/checks/performance/confusion_matrix_report.py b/deepchecks/checks/performance/confusion_matrix_report.py\n--- a/deepchecks/checks/performance/confusion_matrix_report.py\n+++ b/deepchecks/checks/performance/confusion_matrix_report.py\n@@ -9,11 +9,10 @@\n # ----------------------------------------------------------------------------\n #\n \"\"\"The confusion_matrix_report check module.\"\"\"\n-import numpy as np\n import sklearn\n from sklearn.base import BaseEstimator\n \n-import plotly.figure_factory as ff\n+import plotly.express as px\n from deepchecks import CheckResult, Dataset\n from deepchecks.base.check import SingleDatasetBaseCheck\n from deepchecks.utils.metrics import ModelType, task_type_validation\n@@ -49,15 +48,12 @@\n ds_x = dataset.data[dataset.features]\n ds_y = dataset.data[label]\n y_pred = model.predict(ds_x)\n-\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n \n- labels = [str(val) for val in np.unique(ds_y)]\n- fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')\n+ # Figure\n+ fig = px.imshow(confusion_matrix, x=dataset.classes, y=dataset.classes, text_auto=True)\n fig.update_layout(width=600, height=600)\n- fig.update_xaxes(title='Predicted Value')\n- fig.update_yaxes(title='True value', autorange='reversed')\n- fig['data'][0]['showscale'] = True\n- fig['layout']['xaxis']['side'] = 'bottom'\n+ fig.update_xaxes(title='Predicted Value', type='category')\n+ fig.update_yaxes(title='True value', type='category')\n \n return CheckResult(confusion_matrix, display=fig)\n", "issue": "BUG: confusion matrix chart displays wrong output\nTo reproduce: https://www.kaggle.com/itay94/notebook05f499eb19\r\n\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The confusion_matrix_report check module.\"\"\"\nimport numpy as np\nimport sklearn\nfrom sklearn.base import BaseEstimator\n\nimport plotly.figure_factory as ff\nfrom deepchecks import CheckResult, Dataset\nfrom deepchecks.base.check import SingleDatasetBaseCheck\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\nclass ConfusionMatrixReport(SingleDatasetBaseCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n dataset: a Dataset object\n\n Returns:\n CheckResult: value is numpy array of the confusion matrix, displays the confusion matrix\n\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._confusion_matrix_report(dataset, model)\n\n def _confusion_matrix_report(self, dataset: Dataset, model):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n\n label = dataset.label_name\n ds_x = dataset.data[dataset.features]\n ds_y = dataset.data[label]\n y_pred = model.predict(ds_x)\n\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n\n labels = [str(val) for val in np.unique(ds_y)]\n fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value')\n fig.update_yaxes(title='True value', autorange='reversed')\n fig['data'][0]['showscale'] = True\n fig['layout']['xaxis']['side'] = 'bottom'\n\n return CheckResult(confusion_matrix, display=fig)\n", "path": "deepchecks/checks/performance/confusion_matrix_report.py"}]} | 1,303 | 400 |
gh_patches_debug_42763 | rasdani/github-patches | git_diff | getredash__redash-1002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add events to track embeds usage
Add events to track embeds usage
</issue>
<code>
[start of redash/handlers/embed.py]
1 import json
2
3 from funcy import project
4 from flask import render_template, request
5 from flask_login import login_required, current_user
6 from flask_restful import abort
7
8 from redash import models, settings
9 from redash import serializers
10 from redash.utils import json_dumps
11 from redash.handlers import routes
12 from redash.handlers.base import org_scoped_rule
13 from redash.permissions import require_access, view_only
14 from authentication import current_org
15
16
17 @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
18 @login_required
19 def embed(query_id, visualization_id, org_slug=None):
20 # TODO: add event for embed access
21 query = models.Query.get_by_id_and_org(query_id, current_org)
22 require_access(query.groups, current_user, view_only)
23 vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
24 qr = {}
25
26 if vis is not None:
27 vis = vis.to_dict()
28 qr = query.latest_query_data
29 if qr is None:
30 abort(400, message="No Results for this query")
31 else:
32 qr = qr.to_dict()
33 else:
34 abort(404, message="Visualization not found.")
35
36 client_config = {}
37 client_config.update(settings.COMMON_CLIENT_CONFIG)
38
39 qr = project(qr, ('data', 'id', 'retrieved_at'))
40 vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))
41 vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
42
43 return render_template("embed.html",
44 client_config=json_dumps(client_config),
45 visualization=json_dumps(vis),
46 query_result=json_dumps(qr))
47
48
49 @routes.route(org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])
50 @login_required
51 def public_dashboard(token, org_slug=None):
52 # TODO: verify object is a dashboard?
53 if not isinstance(current_user, models.ApiUser):
54 api_key = models.ApiKey.get_by_api_key(token)
55 dashboard = api_key.object
56 else:
57 dashboard = current_user.object
58
59 user = {
60 'permissions': [],
61 'apiKey': current_user.id
62 }
63
64 headers = {
65 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
66 }
67
68 response = render_template("public.html",
69 headless='embed' in request.args,
70 user=json.dumps(user),
71 seed_data=json_dumps({
72 'dashboard': serializers.public_dashboard(dashboard)
73 }),
74 client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))
75
76 return response, 200, headers
77
[end of redash/handlers/embed.py]
[start of redash/handlers/base.py]
1 import time
2 from flask import request, Blueprint
3 from flask_restful import Resource, abort
4 from flask_login import current_user, login_required
5 from peewee import DoesNotExist
6
7 from redash import settings
8 from redash.tasks import record_event
9 from redash.models import ApiUser
10 from redash.authentication import current_org
11
12 routes = Blueprint('redash', __name__, template_folder=settings.fix_assets_path('templates'))
13
14
15 class BaseResource(Resource):
16 decorators = [login_required]
17
18 def __init__(self, *args, **kwargs):
19 super(BaseResource, self).__init__(*args, **kwargs)
20 self._user = None
21
22 def dispatch_request(self, *args, **kwargs):
23 kwargs.pop('org_slug', None)
24
25 return super(BaseResource, self).dispatch_request(*args, **kwargs)
26
27 @property
28 def current_user(self):
29 return current_user._get_current_object()
30
31 @property
32 def current_org(self):
33 return current_org._get_current_object()
34
35 def record_event(self, options):
36 if isinstance(self.current_user, ApiUser):
37 options.update({
38 'api_key': self.current_user.name,
39 'org_id': self.current_org.id
40 })
41 else:
42 options.update({
43 'user_id': self.current_user.id,
44 'org_id': self.current_org.id
45 })
46
47 options.update({
48 'user_agent': request.user_agent.string,
49 'ip': request.remote_addr
50 })
51
52 if 'timestamp' not in options:
53 options['timestamp'] = int(time.time())
54
55 record_event.delay(options)
56
57
58 def require_fields(req, fields):
59 for f in fields:
60 if f not in req:
61 abort(400)
62
63
64 def get_object_or_404(fn, *args, **kwargs):
65 try:
66 return fn(*args, **kwargs)
67 except DoesNotExist:
68 abort(404)
69
70
71 def org_scoped_rule(rule):
72 if settings.MULTI_ORG:
73 return "/<org_slug:org_slug>{}".format(rule)
74
75 return rule
76
[end of redash/handlers/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/base.py b/redash/handlers/base.py
--- a/redash/handlers/base.py
+++ b/redash/handlers/base.py
@@ -5,7 +5,7 @@
from peewee import DoesNotExist
from redash import settings
-from redash.tasks import record_event
+from redash.tasks import record_event as record_event_task
from redash.models import ApiUser
from redash.authentication import current_org
@@ -33,26 +33,30 @@
return current_org._get_current_object()
def record_event(self, options):
- if isinstance(self.current_user, ApiUser):
- options.update({
- 'api_key': self.current_user.name,
- 'org_id': self.current_org.id
- })
- else:
- options.update({
- 'user_id': self.current_user.id,
- 'org_id': self.current_org.id
- })
+ record_event(self.current_org, self.current_user, options)
+
+def record_event(org, user, options):
+ if isinstance(user, ApiUser):
+ options.update({
+ 'api_key': user.name,
+ 'org_id': org.id
+ })
+ else:
options.update({
- 'user_agent': request.user_agent.string,
- 'ip': request.remote_addr
+ 'user_id': user.id,
+ 'org_id': org.id
})
- if 'timestamp' not in options:
- options['timestamp'] = int(time.time())
+ options.update({
+ 'user_agent': request.user_agent.string,
+ 'ip': request.remote_addr
+ })
+
+ if 'timestamp' not in options:
+ options['timestamp'] = int(time.time())
- record_event.delay(options)
+ record_event_task.delay(options)
def require_fields(req, fields):
diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py
--- a/redash/handlers/embed.py
+++ b/redash/handlers/embed.py
@@ -9,7 +9,7 @@
from redash import serializers
from redash.utils import json_dumps
from redash.handlers import routes
-from redash.handlers.base import org_scoped_rule
+from redash.handlers.base import org_scoped_rule, record_event
from redash.permissions import require_access, view_only
from authentication import current_org
@@ -17,7 +17,6 @@
@routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
@login_required
def embed(query_id, visualization_id, org_slug=None):
- # TODO: add event for embed access
query = models.Query.get_by_id_and_org(query_id, current_org)
require_access(query.groups, current_user, view_only)
vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
@@ -33,6 +32,15 @@
else:
abort(404, message="Visualization not found.")
+ record_event(current_org, current_user, {
+ 'action': 'view',
+ 'object_id': visualization_id,
+ 'object_type': 'visualization',
+ 'query_id': query_id,
+ 'embed': True,
+ 'referer': request.headers.get('Referer')
+ })
+
client_config = {}
client_config.update(settings.COMMON_CLIENT_CONFIG)
@@ -65,6 +73,15 @@
'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
}
+ record_event(current_org, current_user, {
+ 'action': 'view',
+ 'object_id': dashboard.id,
+ 'object_type': 'dashboard',
+ 'public': True,
+ 'headless': 'embed' in request.args,
+ 'referer': request.headers.get('Referer')
+ })
+
response = render_template("public.html",
headless='embed' in request.args,
user=json.dumps(user),
| {"golden_diff": "diff --git a/redash/handlers/base.py b/redash/handlers/base.py\n--- a/redash/handlers/base.py\n+++ b/redash/handlers/base.py\n@@ -5,7 +5,7 @@\n from peewee import DoesNotExist\n \n from redash import settings\n-from redash.tasks import record_event\n+from redash.tasks import record_event as record_event_task\n from redash.models import ApiUser\n from redash.authentication import current_org\n \n@@ -33,26 +33,30 @@\n return current_org._get_current_object()\n \n def record_event(self, options):\n- if isinstance(self.current_user, ApiUser):\n- options.update({\n- 'api_key': self.current_user.name,\n- 'org_id': self.current_org.id\n- })\n- else:\n- options.update({\n- 'user_id': self.current_user.id,\n- 'org_id': self.current_org.id\n- })\n+ record_event(self.current_org, self.current_user, options)\n \n+\n+def record_event(org, user, options):\n+ if isinstance(user, ApiUser):\n+ options.update({\n+ 'api_key': user.name,\n+ 'org_id': org.id\n+ })\n+ else:\n options.update({\n- 'user_agent': request.user_agent.string,\n- 'ip': request.remote_addr\n+ 'user_id': user.id,\n+ 'org_id': org.id\n })\n \n- if 'timestamp' not in options:\n- options['timestamp'] = int(time.time())\n+ options.update({\n+ 'user_agent': request.user_agent.string,\n+ 'ip': request.remote_addr\n+ })\n+\n+ if 'timestamp' not in options:\n+ options['timestamp'] = int(time.time())\n \n- record_event.delay(options)\n+ record_event_task.delay(options)\n \n \n def require_fields(req, fields):\ndiff --git a/redash/handlers/embed.py b/redash/handlers/embed.py\n--- a/redash/handlers/embed.py\n+++ b/redash/handlers/embed.py\n@@ -9,7 +9,7 @@\n from redash import serializers\n from redash.utils import json_dumps\n from redash.handlers import routes\n-from redash.handlers.base import org_scoped_rule\n+from redash.handlers.base import org_scoped_rule, record_event\n from redash.permissions import require_access, view_only\n from authentication import current_org\n \n@@ -17,7 +17,6 @@\n @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n @login_required\n def embed(query_id, visualization_id, org_slug=None):\n- # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n@@ -33,6 +32,15 @@\n else:\n abort(404, message=\"Visualization not found.\")\n \n+ record_event(current_org, current_user, {\n+ 'action': 'view',\n+ 'object_id': visualization_id,\n+ 'object_type': 'visualization',\n+ 'query_id': query_id,\n+ 'embed': True,\n+ 'referer': request.headers.get('Referer')\n+ })\n+\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n \n@@ -65,6 +73,15 @@\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n \n+ record_event(current_org, current_user, {\n+ 'action': 'view',\n+ 'object_id': dashboard.id,\n+ 'object_type': 'dashboard',\n+ 'public': True,\n+ 'headless': 'embed' in request.args,\n+ 'referer': request.headers.get('Referer')\n+ })\n+\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n", "issue": "Add events to track embeds usage\n\nAdd events to track embeds usage\n\n", "before_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom 
redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}, {"content": "import time\nfrom flask import request, Blueprint\nfrom flask_restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\n\nfrom redash import settings\nfrom redash.tasks import record_event\nfrom redash.models import ApiUser\nfrom redash.authentication import current_org\n\nroutes = Blueprint('redash', __name__, template_folder=settings.fix_assets_path('templates'))\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n if isinstance(self.current_user, ApiUser):\n options.update({\n 'api_key': self.current_user.name,\n 'org_id': self.current_org.id\n })\n else:\n options.update({\n 'user_id': self.current_user.id,\n 'org_id': self.current_org.id\n })\n\n options.update({\n 
'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n })\n\n if 'timestamp' not in options:\n options['timestamp'] = int(time.time())\n\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n\n\ndef org_scoped_rule(rule):\n if settings.MULTI_ORG:\n return \"/<org_slug:org_slug>{}\".format(rule)\n\n return rule\n", "path": "redash/handlers/base.py"}]} | 1,916 | 887 |
gh_patches_debug_23847 | rasdani/github-patches | git_diff | AppDaemon__appdaemon-1661 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve packaging system
The `requirements.txt` file is used in a somewhat confusing way. It is usual practice for this file to contain all the dependencies present in the project virtualenv, created using `pip freeze > requirements.txt`, so that the virtualenv can be easily re-created by any developer locally using `pip install -r requirements.txt`.
But this file is also used as the `install_requires` inside `setup.py`, creating a bit of a conflict.
If one installs appdaemon locally with `pip install .` and then runs `pip freeze > requirements.txt`, the `requirements.txt` file is modified, since all the transitive dependencies are also listed, as expected. So the question is: shouldn't the declaration of the project dependencies be moved to a more appropriate place, separate from `requirements.txt`?
## Proposed solution
I propose to declare the project dependencies more clearly, by upgrading the way we use the Python packaging system.
At the moment the package is built from `setup.py`, which `setuptools` reads when building the package.
The way to package applications has changed a lot over the years. Nowadays if one were to follow the official [Python packaging guide](https://packaging.python.org/en/latest/tutorials/packaging-projects/), the recommended setup is to use a `pyproject.toml` to describe all the metadata of the project, including the build tool and the project dependencies.
I suggest we move from the current `setup.py` to a more modern and "standard" solution, by migrating to a `pyproject.toml` file to contain all the project dependencies.
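A minimal sketch of what that migration could look like, transcribing the metadata already present in `setup.py`; the dependency entries below are placeholders that the maintainers would still need to curate:
```toml
[build-system]
requires = ["setuptools>=61"]
build-backend = "setuptools.build_meta"

[project]
name = "appdaemon"
description = "Apps for the Home Assistant home automation package."
requires-python = ">=3.7"
dynamic = ["version"]
# Direct runtime dependencies only; pinned `pip freeze` output belongs
# in a requirements/lock file, not in the packaging metadata.
dependencies = [
    # "aiohttp>=3.8",  # placeholder example
]

[project.scripts]
appdaemon = "appdaemon.__main__:main"

[tool.setuptools.dynamic]
version = {attr = "appdaemon.version.__version__"}
```
With this in place, `requirements.txt` can go back to being a plain `pip freeze` snapshot of the development virtualenv.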
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from setuptools import setup, find_packages
5
6 from appdaemon.version import __version__
7
8 # sudo apt-get install python3-aiohttp-dbg
9
10 with open("requirements.txt") as f:
11 install_requires = [x for x in f.read().split("\n") if x]
12
13 with open("README.md") as f:
14 long_description = f.read()
15
16 setup(
17 name="appdaemon",
18 version=__version__,
19 description="Apps for the Home Assistant home automation package.",
20 long_description=long_description,
21 long_description_content_type="text/markdown",
22 author="Andrew I Cockburn",
23 author_email="[email protected]",
24 url="https://github.com/home-assistant/appdaemon.git",
25 packages=find_packages(exclude=["contrib", "docs", "tests*"]),
26 include_package_data=True,
27 install_requires=install_requires,
28 license="Apache License 2.0",
29 python_requires=">=3.7",
30 zip_safe=False,
31 keywords=["appdaemon", "home", "automation"],
32 entry_points={"console_scripts": ["appdaemon = appdaemon.__main__:main"]},
33 classifiers=[
34 "Development Status :: 5 - Production/Stable",
35 "Intended Audience :: Developers",
36 "License :: OSI Approved :: Apache Software License",
37 "Natural Language :: English",
38 "Programming Language :: Python :: 3.7",
39 "Programming Language :: Python :: 3.8",
40 "Programming Language :: Python :: 3.9",
41 "Programming Language :: Python :: 3.10",
42 "Topic :: Home Automation",
43 ],
44 )
45
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
deleted file mode 100755
--- a/setup.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from setuptools import setup, find_packages
-
-from appdaemon.version import __version__
-
-# sudo apt-get install python3-aiohttp-dbg
-
-with open("requirements.txt") as f:
- install_requires = [x for x in f.read().split("\n") if x]
-
-with open("README.md") as f:
- long_description = f.read()
-
-setup(
- name="appdaemon",
- version=__version__,
- description="Apps for the Home Assistant home automation package.",
- long_description=long_description,
- long_description_content_type="text/markdown",
- author="Andrew I Cockburn",
- author_email="[email protected]",
- url="https://github.com/home-assistant/appdaemon.git",
- packages=find_packages(exclude=["contrib", "docs", "tests*"]),
- include_package_data=True,
- install_requires=install_requires,
- license="Apache License 2.0",
- python_requires=">=3.7",
- zip_safe=False,
- keywords=["appdaemon", "home", "automation"],
- entry_points={"console_scripts": ["appdaemon = appdaemon.__main__:main"]},
- classifiers=[
- "Development Status :: 5 - Production/Stable",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: Apache Software License",
- "Natural Language :: English",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Topic :: Home Automation",
- ],
-)
| {"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100755\n--- a/setup.py\n+++ /dev/null\n@@ -1,44 +0,0 @@\n-#!/usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\n-from setuptools import setup, find_packages\n-\n-from appdaemon.version import __version__\n-\n-# sudo apt-get install python3-aiohttp-dbg\n-\n-with open(\"requirements.txt\") as f:\n- install_requires = [x for x in f.read().split(\"\\n\") if x]\n-\n-with open(\"README.md\") as f:\n- long_description = f.read()\n-\n-setup(\n- name=\"appdaemon\",\n- version=__version__,\n- description=\"Apps for the Home Assistant home automation package.\",\n- long_description=long_description,\n- long_description_content_type=\"text/markdown\",\n- author=\"Andrew I Cockburn\",\n- author_email=\"[email protected]\",\n- url=\"https://github.com/home-assistant/appdaemon.git\",\n- packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\"]),\n- include_package_data=True,\n- install_requires=install_requires,\n- license=\"Apache License 2.0\",\n- python_requires=\">=3.7\",\n- zip_safe=False,\n- keywords=[\"appdaemon\", \"home\", \"automation\"],\n- entry_points={\"console_scripts\": [\"appdaemon = appdaemon.__main__:main\"]},\n- classifiers=[\n- \"Development Status :: 5 - Production/Stable\",\n- \"Intended Audience :: Developers\",\n- \"License :: OSI Approved :: Apache Software License\",\n- \"Natural Language :: English\",\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n- \"Programming Language :: Python :: 3.9\",\n- \"Programming Language :: Python :: 3.10\",\n- \"Topic :: Home Automation\",\n- ],\n-)\n", "issue": "Improve packaging system\nThe `requirements.txt` file is used in a bit confusing way. It it usual practice for this file to contain all the dependencies present in the project virtualenv, created using `pip freeze > requirements.txt`, so that the virtualenv can be easily re-created by any developer locally using `pip install -r requirements.txt.`\r\nBut this file is also used as the `install_requires` inside `setup.py`, creating a bit of a conflict.\r\nIf one installs the appdaemon locally with `pip install .`, then run `pip freeze > requirements.txt`, the `requirements.txt` file is modified, since all the transitive dependencies are also loaded, as is expected. So the question is: shouldn't the declaration of the project dependencies be moved in a more appropriate place, separate from `requirements.txt`?\r\n\r\n## Proposed solution\r\nI propose to more crealry declare the project dependencies, by upgrading the way we use the Python packaging system.\r\nAt the moment the package is created from the `setup.py`, read by `setuptools` when building the package.\r\nThe way to package applications has changed a lot over the years. 
Nowadays if one were to follow the official [Python packaging guide](https://packaging.python.org/en/latest/tutorials/packaging-projects/), the recommended setup is to use a `pyproject.toml` to describe all the metadata of the project, including the build tool and the project dependencies.\r\nI suggest we move from the current `setup.py` to a more modern and \"standard\" solution, by migrating to a `pyproject.toml` file to contain all the project dependencies.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nfrom appdaemon.version import __version__\n\n# sudo apt-get install python3-aiohttp-dbg\n\nwith open(\"requirements.txt\") as f:\n install_requires = [x for x in f.read().split(\"\\n\") if x]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nsetup(\n name=\"appdaemon\",\n version=__version__,\n description=\"Apps for the Home Assistant home automation package.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Andrew I Cockburn\",\n author_email=\"[email protected]\",\n url=\"https://github.com/home-assistant/appdaemon.git\",\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\"]),\n include_package_data=True,\n install_requires=install_requires,\n license=\"Apache License 2.0\",\n python_requires=\">=3.7\",\n zip_safe=False,\n keywords=[\"appdaemon\", \"home\", \"automation\"],\n entry_points={\"console_scripts\": [\"appdaemon = appdaemon.__main__:main\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Home Automation\",\n ],\n)\n", "path": "setup.py"}]} | 1,299 | 428 |
gh_patches_debug_6209 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3021 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A suspicious user trying to log in results in a 500
https://app.getsentry.com/gratipay/gratipay-com/group/32039756/
</issue>
<code>
[start of gratipay/security/user.py]
1
2 from datetime import timedelta
3 import uuid
4
5 from aspen.utils import utcnow
6 from gratipay.models.participant import Participant
7 from gratipay.utils import set_cookie
8
9
10 SESSION = b'session'
11 SESSION_REFRESH = timedelta(hours=1)
12 SESSION_TIMEOUT = timedelta(hours=6)
13
14
15 class User(object):
16 """Represent a user of our website.
17 """
18
19 participant = None
20
21
22 # Constructors
23 # ============
24
25 @classmethod
26 def from_session_token(cls, token):
27 """Find a participant based on token and return a User.
28 """
29 self = cls()
30 self.participant = Participant.from_session_token(token)
31 return self
32
33 @classmethod
34 def from_api_key(cls, api_key):
35 """Find a participant based on token and return a User.
36 """
37 self = cls()
38 self.participant = Participant.from_api_key(api_key)
39 return self
40
41 @classmethod
42 def from_username(cls, username):
43 """Find a participant based on username and return a User.
44 """
45 self = cls()
46 self.participant = Participant.from_username(username)
47 return self
48
49 def __str__(self):
50 if self.participant is None:
51 out = '<Anonymous>'
52 else:
53 out = '<User: %s>' % self.participant.username
54 return out
55 __repr__ = __str__
56
57
58 # Authentication Helpers
59 # ======================
60
61 def sign_in(self, cookies):
62 """Start a new session for the user.
63 """
64 token = uuid.uuid4().hex
65 expires = utcnow() + SESSION_TIMEOUT
66 self.participant.update_session(token, expires)
67 set_cookie(cookies, SESSION, token, expires)
68
69 def keep_signed_in(self, cookies):
70 """Extend the user's current session.
71 """
72 new_expires = utcnow() + SESSION_TIMEOUT
73 if new_expires - self.participant.session_expires > SESSION_REFRESH:
74 self.participant.set_session_expires(new_expires)
75 token = self.participant.session_token
76 set_cookie(cookies, SESSION, token, expires=new_expires)
77
78 def sign_out(self, cookies):
79 """End the user's current session.
80 """
81 self.participant.update_session(None, None)
82 self.participant = None
83 set_cookie(cookies, SESSION, '')
84
85
86 # Roles
87 # =====
88
89 @property
90 def ADMIN(self):
91 return not self.ANON and self.participant.is_admin
92
93 @property
94 def ANON(self):
95 return self.participant is None or self.participant.is_suspicious is True
96 # Append "is True" here because otherwise Python will return the result
97 # of evaluating the right side of the or expression, which can be None.
98
99 def get_highest_role(self, owner):
100 """Return a string representing the highest role this user has.
101
102 :param string owner: the username of the owner of the resource we're
103 concerned with, or None
104
105 """
106 def is_owner():
107 if self.participant is not None:
108 if owner is not None:
109 if self.participant.username == owner:
110 return True
111 return False
112
113 if self.ADMIN:
114 return 'admin'
115 elif is_owner():
116 return 'owner'
117 elif not self.ANON:
118 return 'authenticated'
119 else:
120 return 'anonymous'
121
[end of gratipay/security/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gratipay/security/user.py b/gratipay/security/user.py
--- a/gratipay/security/user.py
+++ b/gratipay/security/user.py
@@ -92,9 +92,7 @@
@property
def ANON(self):
- return self.participant is None or self.participant.is_suspicious is True
- # Append "is True" here because otherwise Python will return the result
- # of evaluating the right side of the or expression, which can be None.
+ return self.participant is None
def get_highest_role(self, owner):
"""Return a string representing the highest role this user has.
| {"golden_diff": "diff --git a/gratipay/security/user.py b/gratipay/security/user.py\n--- a/gratipay/security/user.py\n+++ b/gratipay/security/user.py\n@@ -92,9 +92,7 @@\n \n @property\n def ANON(self):\n- return self.participant is None or self.participant.is_suspicious is True\n- # Append \"is True\" here because otherwise Python will return the result\n- # of evaluating the right side of the or expression, which can be None.\n+ return self.participant is None\n \n def get_highest_role(self, owner):\n \"\"\"Return a string representing the highest role this user has.\n", "issue": "A suspicious user trying to log in results in a 500\nhttps://app.getsentry.com/gratipay/gratipay-com/group/32039756/\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/4727299-a-suspicious-user-trying-to-log-in-results-in-a-500?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\nA suspicious user trying to log in results in a 500\nhttps://app.getsentry.com/gratipay/gratipay-com/group/32039756/\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/4727299-a-suspicious-user-trying-to-log-in-results-in-a-500?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "\nfrom datetime import timedelta\nimport uuid\n\nfrom aspen.utils import utcnow\nfrom gratipay.models.participant import Participant\nfrom gratipay.utils import set_cookie\n\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\n\nclass User(object):\n \"\"\"Represent a user of our website.\n \"\"\"\n\n participant = None\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_session_token(cls, token):\n \"\"\"Find a participant based on token and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_session_token(token)\n return self\n\n @classmethod\n def from_api_key(cls, api_key):\n \"\"\"Find a participant based on token and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_api_key(api_key)\n return self\n\n @classmethod\n def from_username(cls, username):\n \"\"\"Find a participant based on username and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_username(username)\n return self\n\n def __str__(self):\n if self.participant is None:\n out = '<Anonymous>'\n else:\n out = '<User: %s>' % self.participant.username\n return out\n __repr__ = __str__\n\n\n # Authentication Helpers\n # ======================\n\n def sign_in(self, cookies):\n \"\"\"Start a new session for the user.\n \"\"\"\n token = uuid.uuid4().hex\n expires = utcnow() + SESSION_TIMEOUT\n self.participant.update_session(token, expires)\n set_cookie(cookies, SESSION, token, expires)\n\n def keep_signed_in(self, cookies):\n \"\"\"Extend the user's current session.\n \"\"\"\n new_expires = utcnow() + SESSION_TIMEOUT\n if new_expires - self.participant.session_expires > SESSION_REFRESH:\n self.participant.set_session_expires(new_expires)\n token = 
self.participant.session_token\n set_cookie(cookies, SESSION, token, expires=new_expires)\n\n def sign_out(self, cookies):\n \"\"\"End the user's current session.\n \"\"\"\n self.participant.update_session(None, None)\n self.participant = None\n set_cookie(cookies, SESSION, '')\n\n\n # Roles\n # =====\n\n @property\n def ADMIN(self):\n return not self.ANON and self.participant.is_admin\n\n @property\n def ANON(self):\n return self.participant is None or self.participant.is_suspicious is True\n # Append \"is True\" here because otherwise Python will return the result\n # of evaluating the right side of the or expression, which can be None.\n\n def get_highest_role(self, owner):\n \"\"\"Return a string representing the highest role this user has.\n\n :param string owner: the username of the owner of the resource we're\n concerned with, or None\n\n \"\"\"\n def is_owner():\n if self.participant is not None:\n if owner is not None:\n if self.participant.username == owner:\n return True\n return False\n\n if self.ADMIN:\n return 'admin'\n elif is_owner():\n return 'owner'\n elif not self.ANON:\n return 'authenticated'\n else:\n return 'anonymous'\n", "path": "gratipay/security/user.py"}]} | 1,862 | 146 |
gh_patches_debug_14055 | rasdani/github-patches | git_diff | spack__spack-12932 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issue: py-adios
### Steps to reproduce the issue
```console
$ spack install -j 64 py-adios ^[email protected]:
```
The result is lots of errors like:
```adios_mpi.cpp:47080:21: error: 'PyThreadState' {aka 'struct _ts'} has no member named 'exc_type'; did you mean 'curexc_type'?```
And similar.
### Platform and user environment
```commandline
$ uname -a
Linux quokka1.ccs.ornl.gov 4.14.0-115.el7a.aarch64 #1 SMP Tue Sep 25 12:32:51 EDT 2018 aarch64 aarch64 aarch64 GNU/Linux
```
### Additional information
py-adios includes a file (adios_mpi.cpp) which was generated using Cython 0.28.2. This version of Cython generates code that won't compile against the Python 3.7 C API. (This was fixed in the 0.29 release.) The Adios developers have been notified (https://github.com/ornladios/ADIOS/issues/202).
I think the workaround is to add a ```depends_on()``` line for python <=3.6.x. If/when the issue is fixed in Adios, we can add a ```when``` clause to the dependency. I'll issue a pull request shortly.
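A minimal sketch of that constraint in the Spack recipe; the commented-out `when=` bound is hypothetical until upstream regenerates the bindings:
```python
# Excerpt from var/spack/repos/builtin/packages/py-adios/package.py
from spack import *


class PyAdios(PythonPackage):
    """NumPy bindings of ADIOS1"""

    # adios_mpi.cpp was generated with Cython 0.28.2, whose output does
    # not compile against the Python 3.7 C API (fixed in Cython 0.29),
    # so cap Python at 3.6 for now.
    depends_on('python@:3.6')

    # Hypothetical follow-up: once upstream regenerates the wrappers
    # with Cython >= 0.29, scope the cap to the affected releases:
    # depends_on('python@:3.6', when='@:1.13.0')
```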
</issue>
<code>
[start of var/spack/repos/builtin/packages/py-adios/package.py]
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class PyAdios(PythonPackage):
10 """NumPy bindings of ADIOS1"""
11
12 homepage = "https://www.olcf.ornl.gov/center-projects/adios/"
13 url = "https://github.com/ornladios/ADIOS/archive/v1.12.0.tar.gz"
14 git = "https://github.com/ornladios/ADIOS.git"
15
16 maintainers = ['ax3l']
17
18 version('develop', branch='master')
19 version('1.13.0', '68af36b821debbdf4748b20320a990ce')
20 version('1.12.0', '84a1c71b6698009224f6f748c5257fc9')
21 version('1.11.1', '5639bfc235e50bf17ba9dafb14ea4185')
22 version('1.11.0', '5eead5b2ccf962f5e6d5f254d29d5238')
23 version('1.10.0', 'eff450a4c0130479417cfd63186957f3')
24 version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')
25
26 variant('mpi', default=True,
27 description='Enable MPI support')
28
29 for v in ['1.9.0', '1.10.0', '1.11.0', '1.11.1', '1.12.0', '1.13.0',
30 'develop']:
31 depends_on('adios@{0} ~mpi'.format(v),
32 when='@{0} ~mpi'.format(v),
33 type=['build', 'link', 'run'])
34 depends_on('adios@{0} +mpi'.format(v),
35 when='@{0} +mpi'.format(v),
36 type=['build', 'link', 'run'])
37
38 depends_on('py-numpy', type=['build', 'run'])
39 depends_on('mpi', when='+mpi')
40 depends_on('py-mpi4py', type=['run'], when='+mpi')
41 depends_on('py-cython', type=['build'])
42
43 phases = ['build_clib', 'install']
44 build_directory = 'wrappers/numpy'
45
46 def setup_file(self):
47 """Returns the name of the setup file to use."""
48 if '+mpi' in self.spec:
49 return 'setup_mpi.py'
50 else:
51 return 'setup.py'
52
53 def build_clib(self, spec, prefix):
54 # calls: make [MPI=y] python
55 args = ''
56 if '+mpi' in self.spec:
57 args = 'MPI=y '
58 args += 'python'
59 with working_dir(self.build_directory):
60 make(args)
61
[end of var/spack/repos/builtin/packages/py-adios/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/py-adios/package.py b/var/spack/repos/builtin/packages/py-adios/package.py
--- a/var/spack/repos/builtin/packages/py-adios/package.py
+++ b/var/spack/repos/builtin/packages/py-adios/package.py
@@ -35,6 +35,14 @@
when='@{0} +mpi'.format(v),
type=['build', 'link', 'run'])
+ # NOTE: this dependency is a work-around for a bug in Adios itself.
+ # Specifically, Adios uses code that was generated by Cython 0.28.2.
+ # This code won't compile against the Python 3.7 C API.
+ # See https://github.com/ornladios/ADIOS/issues/202 and
+ # the first entry under "Bug Fixes" at
+ # https://github.com/cython/cython/blob/0.29.x/CHANGES.rst
+ depends_on('python@:3.6')
+
depends_on('py-numpy', type=['build', 'run'])
depends_on('mpi', when='+mpi')
depends_on('py-mpi4py', type=['run'], when='+mpi')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-adios/package.py b/var/spack/repos/builtin/packages/py-adios/package.py\n--- a/var/spack/repos/builtin/packages/py-adios/package.py\n+++ b/var/spack/repos/builtin/packages/py-adios/package.py\n@@ -35,6 +35,14 @@\n when='@{0} +mpi'.format(v),\n type=['build', 'link', 'run'])\n \n+ # NOTE: this dependency is a work-around for a bug in Adios itself.\n+ # Specifically, Adios uses code that was generated by Cython 0.28.2.\n+ # This code won't compile against the Python 3.7 C API.\n+ # See https://github.com/ornladios/ADIOS/issues/202 and\n+ # the first entry under \"Bug Fixes\" at\n+ # https://github.com/cython/cython/blob/0.29.x/CHANGES.rst\n+ depends_on('python@:3.6')\n+\n depends_on('py-numpy', type=['build', 'run'])\n depends_on('mpi', when='+mpi')\n depends_on('py-mpi4py', type=['run'], when='+mpi')\n", "issue": "Installation issue: py-adios\n### Steps to reproduce the issue\r\n```console\r\n$ spack install -j 64 py-adios ^[email protected]:\r\n```\r\n\r\nThe result is lots of errors like:\r\n```adios_mpi.cpp:47080:21: error: 'PyThreadState' {aka 'struct _ts'} has no member named 'exc_type'; did you mean 'curexc_type'?```\r\n\r\nAnd similar.\r\n\r\n### Platform and user environment\r\n```commandline\r\n$ uname -a\r\nLinux quokka1.ccs.ornl.gov 4.14.0-115.el7a.aarch64 #1 SMP Tue Sep 25 12:32:51 EDT 2018 aarch64 aarch64 aarch64 GNU/Linux\r\n``` \r\n\r\n### Additional information\r\npy-adios includes a file (adios_mpi.cpp) which was generated using Cython 0.28.2. This version of Cython generates code that won't compile against the Python 3.7 C API. (This was fixed in the 0.29 release.) The Adios developers have been notified (https://github.com/ornladios/ADIOS/issues/202).\r\n\r\nI think the workaround is to add a ```depends_on()``` line for python <=3.6.x. If/when the issue is fixed in Adios, we can add a ```when``` clause to the dependency. I'll issue a pull request shortly.\r\n\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyAdios(PythonPackage):\n \"\"\"NumPy bindings of ADIOS1\"\"\"\n\n homepage = \"https://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS/archive/v1.12.0.tar.gz\"\n git = \"https://github.com/ornladios/ADIOS.git\"\n\n maintainers = ['ax3l']\n\n version('develop', branch='master')\n version('1.13.0', '68af36b821debbdf4748b20320a990ce')\n version('1.12.0', '84a1c71b6698009224f6f748c5257fc9')\n version('1.11.1', '5639bfc235e50bf17ba9dafb14ea4185')\n version('1.11.0', '5eead5b2ccf962f5e6d5f254d29d5238')\n version('1.10.0', 'eff450a4c0130479417cfd63186957f3')\n version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')\n\n variant('mpi', default=True,\n description='Enable MPI support')\n\n for v in ['1.9.0', '1.10.0', '1.11.0', '1.11.1', '1.12.0', '1.13.0',\n 'develop']:\n depends_on('adios@{0} ~mpi'.format(v),\n when='@{0} ~mpi'.format(v),\n type=['build', 'link', 'run'])\n depends_on('adios@{0} +mpi'.format(v),\n when='@{0} +mpi'.format(v),\n type=['build', 'link', 'run'])\n\n depends_on('py-numpy', type=['build', 'run'])\n depends_on('mpi', when='+mpi')\n depends_on('py-mpi4py', type=['run'], when='+mpi')\n depends_on('py-cython', type=['build'])\n\n phases = ['build_clib', 'install']\n build_directory = 'wrappers/numpy'\n\n def setup_file(self):\n \"\"\"Returns the name of the setup file to use.\"\"\"\n if '+mpi' in self.spec:\n return 'setup_mpi.py'\n else:\n return 'setup.py'\n\n def build_clib(self, spec, prefix):\n # calls: make [MPI=y] python\n args = ''\n if '+mpi' in self.spec:\n args = 'MPI=y '\n args += 'python'\n with working_dir(self.build_directory):\n make(args)\n", "path": "var/spack/repos/builtin/packages/py-adios/package.py"}]} | 1,744 | 279 |
gh_patches_debug_11980 | rasdani/github-patches | git_diff | pypi__warehouse-1659 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update elasticsearch-dsl to 5.1.0
There's a new version of [elasticsearch-dsl](https://pypi.python.org/pypi/elasticsearch-dsl) available.
You are currently using **5.0.0**. I have updated it to **5.1.0**
These links might come in handy: <a href="http://pypi.python.org/pypi/elasticsearch-dsl">PyPI</a> | <a href="https://pyup.io/changelogs/elasticsearch-dsl/">Changelog</a> | <a href="https://github.com/elasticsearch/elasticsearch-dsl-py">Repo</a>
### Changelog
>
>### 5.1.0
> * Renamed ``Result`` and ``ResultMeta`` to ``Hit`` and ``HitMeta`` respectively
> * ``Response`` now stores ``Search`` which it gets as first arg to ``__init__``
> * aggregation results are now wrapped in classes and properly deserialized
> * ``Date`` fields now allow for numerical timestamps in the java format (in millis)
> * Added API documentation
> * replaced generated classes with manually created
*Got merge conflicts? Close this PR and delete the branch. I'll create a new PR for you.*
Happy merging! 🤖
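
Beyond bumping the pin, one follow-up worth considering when the low-level `elasticsearch` client is constructed separately from the DSL is to hand it the DSL's serializer, so DSL-produced values (e.g. `datetime` fields) are JSON-encoded correctly. A minimal sketch against the public APIs, with a hypothetical endpoint:
```python
import certifi
import elasticsearch
from elasticsearch_dsl import serializer

# Pass the DSL's serializer to the transport layer so types emitted by
# elasticsearch-dsl round-trip the way the library expects.
client = elasticsearch.Elasticsearch(
    ["https://search.example.com:9200"],  # hypothetical endpoint
    verify_certs=True,
    ca_certs=certifi.where(),
    serializer=serializer.serializer,
)
```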
</issue>
<code>
[start of warehouse/search.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import urllib.parse
14
15 import certifi
16 import elasticsearch
17 import venusian
18
19 from elasticsearch_dsl import Index
20
21
22 def doc_type(cls):
23 def callback(scanner, _name, item):
24 types_ = scanner.config.registry.setdefault("search.doc_types", set())
25 types_.add(item)
26
27 venusian.attach(cls, callback)
28
29 return cls
30
31
32 def get_index(name, doc_types, *, using, shards=1, replicas=0, interval="1s"):
33 index = Index(name, using=using)
34 for doc_type in doc_types:
35 index.doc_type(doc_type)
36 index.settings(
37 number_of_shards=shards,
38 number_of_replicas=replicas,
39 refresh_interval=interval,
40 )
41 return index
42
43
44 def es(request):
45 client = request.registry["elasticsearch.client"]
46 doc_types = request.registry.get("search.doc_types", set())
47 index_name = request.registry["elasticsearch.index"]
48 index = get_index(
49 index_name,
50 doc_types,
51 using=client,
52 shards=request.registry.get("elasticsearch.shards", 1),
53 replicas=request.registry.get("elasticsearch.replicas", 0),
54 )
55 return index.search()
56
57
58 def includeme(config):
59 p = urllib.parse.urlparse(config.registry.settings["elasticsearch.url"])
60 qs = urllib.parse.parse_qs(p.query)
61 config.registry["elasticsearch.client"] = elasticsearch.Elasticsearch(
62 [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
63 verify_certs=True,
64 ca_certs=certifi.where(),
65 timeout=30,
66 retry_on_timeout=True,
67 )
68 config.registry["elasticsearch.index"] = p.path.strip("/")
69 config.registry["elasticsearch.shards"] = int(qs.get("shards", ["1"])[0])
70 config.registry["elasticsearch.replicas"] = \
71 int(qs.get("replicas", ["0"])[0])
72 config.add_request_method(es, name="es", reify=True)
73
[end of warehouse/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/search.py b/warehouse/search.py
--- a/warehouse/search.py
+++ b/warehouse/search.py
@@ -16,7 +16,7 @@
import elasticsearch
import venusian
-from elasticsearch_dsl import Index
+from elasticsearch_dsl import Index, serializer
def doc_type(cls):
@@ -64,6 +64,7 @@
ca_certs=certifi.where(),
timeout=30,
retry_on_timeout=True,
+ serializer=serializer.serializer,
)
config.registry["elasticsearch.index"] = p.path.strip("/")
config.registry["elasticsearch.shards"] = int(qs.get("shards", ["1"])[0])
| {"golden_diff": "diff --git a/warehouse/search.py b/warehouse/search.py\n--- a/warehouse/search.py\n+++ b/warehouse/search.py\n@@ -16,7 +16,7 @@\n import elasticsearch\n import venusian\n \n-from elasticsearch_dsl import Index\n+from elasticsearch_dsl import Index, serializer\n \n \n def doc_type(cls):\n@@ -64,6 +64,7 @@\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n+ serializer=serializer.serializer,\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n", "issue": "Update elasticsearch-dsl to 5.1.0\n\nThere's a new version of [elasticsearch-dsl](https://pypi.python.org/pypi/elasticsearch-dsl) available.\nYou are currently using **5.0.0**. I have updated it to **5.1.0**\n\n\n\nThese links might come in handy: <a href=\"http://pypi.python.org/pypi/elasticsearch-dsl\">PyPI</a> | <a href=\"https://pyup.io/changelogs/elasticsearch-dsl/\">Changelog</a> | <a href=\"https://github.com/elasticsearch/elasticsearch-dsl-py\">Repo</a> \n\n\n\n### Changelog\n> \n>### 5.1.0\n\n>------------------\n> * Renamed ``Result`` and ``ResultMeta`` to ``Hit`` and ``HitMeta`` respectively\n> * ``Response`` now stores ``Search`` which it gets as first arg to ``__init__``\n> * aggregation results are now wrapped in classes and properly deserialized\n> * ``Date`` fields now allow for numerical timestamps in the java format (in millis)\n> * Added API documentation\n> * replaced generated classes with manually created\n\n\n\n\n\n\n\n*Got merge conflicts? Close this PR and delete the branch. I'll create a new PR for you.*\n\nHappy merging! \ud83e\udd16\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse\n\nimport certifi\nimport elasticsearch\nimport venusian\n\nfrom elasticsearch_dsl import Index\n\n\ndef doc_type(cls):\n def callback(scanner, _name, item):\n types_ = scanner.config.registry.setdefault(\"search.doc_types\", set())\n types_.add(item)\n\n venusian.attach(cls, callback)\n\n return cls\n\n\ndef get_index(name, doc_types, *, using, shards=1, replicas=0, interval=\"1s\"):\n index = Index(name, using=using)\n for doc_type in doc_types:\n index.doc_type(doc_type)\n index.settings(\n number_of_shards=shards,\n number_of_replicas=replicas,\n refresh_interval=interval,\n )\n return index\n\n\ndef es(request):\n client = request.registry[\"elasticsearch.client\"]\n doc_types = request.registry.get(\"search.doc_types\", set())\n index_name = request.registry[\"elasticsearch.index\"]\n index = get_index(\n index_name,\n doc_types,\n using=client,\n shards=request.registry.get(\"elasticsearch.shards\", 1),\n replicas=request.registry.get(\"elasticsearch.replicas\", 0),\n )\n return index.search()\n\n\ndef includeme(config):\n p = urllib.parse.urlparse(config.registry.settings[\"elasticsearch.url\"])\n qs = urllib.parse.parse_qs(p.query)\n config.registry[\"elasticsearch.client\"] = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + 
(\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n config.registry[\"elasticsearch.replicas\"] = \\\n int(qs.get(\"replicas\", [\"0\"])[0])\n config.add_request_method(es, name=\"es\", reify=True)\n", "path": "warehouse/search.py"}]} | 1,503 | 154 |
gh_patches_debug_638 | rasdani/github-patches | git_diff | pex-tool__pex-2278 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.150
On the docket:
+ [x] Add support for Pip 23.3.1. #2276
+ [x] Support .egg-info dist metadata. #2264
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.149"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.149"
+__version__ = "2.1.150"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.149\"\n+__version__ = \"2.1.150\"\n", "issue": "Release 2.1.150\nOn the docket:\r\n+ [x] Add support for Pip 23.3.1. #2276\r\n+ [x] Support .egg-info dist metadata. #2264\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.149\"\n", "path": "pex/version.py"}]} | 638 | 99 |
gh_patches_debug_2529 | rasdani/github-patches | git_diff | systemd__mkosi-1847 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tput smam breaks build
Using latest on Debian Sid.
```
‣ Running finalize script…
‣ Creating tar archive /home/ander/Desktop/mkosi/tools/mkosi.workspace/.mkosi-tmp9zitpbja/staging/image.tar…
‣ /home/ander/Desktop/mkosi/tools/mkosi.output/image size is 1016.1M, consumes 1016.1M.
‣ "tput smam" returned non-zero exit code 1.
make: *** [Makefile:13: build] Error 1
```
`tput` in Debian does not understand `smam`?
```
$ tput smam; echo $?
1
```
Introduced in e651b88.
Ugly workaround:
```
$ ln -s /usr/bin/true ~/.local/bin/tput
```
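
Rather than shadowing `tput`, the terminal-reset calls can be treated as best-effort cleanup so a missing terminfo capability never fails the build. A minimal sketch using the standard library directly; mkosi's own `run()` wrapper takes the analogous `check=False`:
```python
import shutil
import subprocess
import sys


def reset_terminal() -> None:
    # Best-effort: restore the cursor ("cnorm") and automatic margins
    # ("smam"). Some terminfo entries, e.g. on Debian, lack "smam", so
    # a non-zero tput exit must not abort the build.
    if sys.stderr.isatty() and shutil.which("tput"):
        subprocess.run(["tput", "cnorm"], check=False)
        subprocess.run(["tput", "smam"], check=False)
```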
</issue>
<code>
[start of mkosi/__main__.py]
1 # SPDX-License-Identifier: LGPL-2.1+
2 # PYTHON_ARGCOMPLETE_OK
3
4 import contextlib
5 import logging
6 import shutil
7 import subprocess
8 import sys
9 from collections.abc import Iterator
10
11 from mkosi import run_verb
12 from mkosi.config import MkosiConfigParser
13 from mkosi.log import ARG_DEBUG, log_setup
14 from mkosi.run import ensure_exc_info, run
15
16
17 @contextlib.contextmanager
18 def propagate_failed_return() -> Iterator[None]:
19 try:
20 yield
21 except SystemExit as e:
22 if ARG_DEBUG.get():
23 sys.excepthook(*ensure_exc_info())
24
25 sys.exit(e.code)
26 except KeyboardInterrupt:
27 if ARG_DEBUG.get():
28 sys.excepthook(*ensure_exc_info())
29 else:
30 logging.error("Interrupted")
31
32 sys.exit(1)
33 except subprocess.CalledProcessError as e:
34 # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.
35 if ARG_DEBUG.get() and e.cmd and e.cmd[0] not in ("qemu", "ssh", "systemd-nspawn"):
36 sys.excepthook(*ensure_exc_info())
37
38 # We always log when subprocess.CalledProcessError is raised, so we don't log again here.
39 sys.exit(e.returncode)
40
41
42 @propagate_failed_return()
43 def main() -> None:
44 log_setup()
45 args, presets = MkosiConfigParser().parse()
46
47 if ARG_DEBUG.get():
48 logging.getLogger().setLevel(logging.DEBUG)
49
50 try:
51 run_verb(args, presets)
52 finally:
53 if sys.stderr.isatty() and shutil.which("tput"):
54 run(["tput", "cnorm"])
55 run(["tput", "smam"])
56
57
58 if __name__ == "__main__":
59 main()
60
[end of mkosi/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/mkosi/__main__.py b/mkosi/__main__.py
--- a/mkosi/__main__.py
+++ b/mkosi/__main__.py
@@ -51,8 +51,8 @@
run_verb(args, presets)
finally:
if sys.stderr.isatty() and shutil.which("tput"):
- run(["tput", "cnorm"])
- run(["tput", "smam"])
+ run(["tput", "cnorm"], check=False)
+ run(["tput", "smam"], check=False)
if __name__ == "__main__":
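As context for the fix above, the general pattern is to treat terminal restoration as best effort: `cnorm` and `smam` are cosmetic, and a missing terminfo capability (as with `smam` on Debian Sid) must not abort the build. A minimal stand-alone sketch of the same idea, using `subprocess` directly rather than mkosi's own `run()` wrapper, which is assumed to forward `check=` to `subprocess.run`:
```python
import shutil
import subprocess
import sys


def restore_terminal() -> None:
    """Best-effort cursor/margin reset; tolerates missing capabilities."""
    if not (sys.stderr.isatty() and shutil.which("tput")):
        return
    for capability in ("cnorm", "smam"):
        # On terminfo entries that lack a capability, tput exits non-zero;
        # check=False keeps that from raising CalledProcessError.
        subprocess.run(["tput", capability], check=False)
```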
| {"golden_diff": "diff --git a/mkosi/__main__.py b/mkosi/__main__.py\n--- a/mkosi/__main__.py\n+++ b/mkosi/__main__.py\n@@ -51,8 +51,8 @@\n run_verb(args, presets)\n finally:\n if sys.stderr.isatty() and shutil.which(\"tput\"):\n- run([\"tput\", \"cnorm\"])\n- run([\"tput\", \"smam\"])\n+ run([\"tput\", \"cnorm\"], check=False)\n+ run([\"tput\", \"smam\"], check=False)\n \n \n if __name__ == \"__main__\":\n", "issue": "tput smam breaks build\nUsing latest on Debian Sid.\r\n\r\n```\r\n\u2023 Running finalize script\u2026\r\n\u2023 Creating tar archive /home/ander/Desktop/mkosi/tools/mkosi.workspace/.mkosi-tmp9zitpbja/staging/image.tar\u2026\r\n\u2023 /home/ander/Desktop/mkosi/tools/mkosi.output/image size is 1016.1M, consumes 1016.1M.\r\n\u2023 \"tput smam\" returned non-zero exit code 1.\r\nmake: *** [Makefile:13: build] Error 1\r\n```\r\n\r\n`tput` in Debian do not understand `smam`?\r\n\r\n```\r\n$ tput smam; echo $?\r\n1\r\n```\r\n\r\nIntroduced in e651b88.\r\n\r\nUgly workaround:\r\n```\r\n$ ln -s /usr/bin/true ~/.local/bin/tput\r\n```\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\n\nimport contextlib\nimport logging\nimport shutil\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\n\nfrom mkosi import run_verb\nfrom mkosi.config import MkosiConfigParser\nfrom mkosi.log import ARG_DEBUG, log_setup\nfrom mkosi.run import ensure_exc_info, run\n\n\[email protected]\ndef propagate_failed_return() -> Iterator[None]:\n try:\n yield\n except SystemExit as e:\n if ARG_DEBUG.get():\n sys.excepthook(*ensure_exc_info())\n\n sys.exit(e.code)\n except KeyboardInterrupt:\n if ARG_DEBUG.get():\n sys.excepthook(*ensure_exc_info())\n else:\n logging.error(\"Interrupted\")\n\n sys.exit(1)\n except subprocess.CalledProcessError as e:\n # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.\n if ARG_DEBUG.get() and e.cmd and e.cmd[0] not in (\"qemu\", \"ssh\", \"systemd-nspawn\"):\n sys.excepthook(*ensure_exc_info())\n\n # We always log when subprocess.CalledProcessError is raised, so we don't log again here.\n sys.exit(e.returncode)\n\n\n@propagate_failed_return()\ndef main() -> None:\n log_setup()\n args, presets = MkosiConfigParser().parse()\n\n if ARG_DEBUG.get():\n logging.getLogger().setLevel(logging.DEBUG)\n\n try:\n run_verb(args, presets)\n finally:\n if sys.stderr.isatty() and shutil.which(\"tput\"):\n run([\"tput\", \"cnorm\"])\n run([\"tput\", \"smam\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mkosi/__main__.py"}]} | 1,226 | 137 |
gh_patches_debug_18579 | rasdani/github-patches | git_diff | falconry__falcon-62 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove responder exception handling
It can hide problems and encourages bad coding practices.
</issue>
<code>
[start of falcon/api.py]
1 """Defines the API class.
2
3 Copyright 2013 by Rackspace Hosting, Inc.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 """
18
19 import traceback
20
21 from .request import Request
22 from .response import Response
23 from . import responders
24 from .status_codes import *
25 from .api_helpers import *
26
27 from .http_error import HTTPError
28
29
30 class API(object):
31 """Provides routing and such for building a web service application
32
33 This class is the main entry point into a Falcon-based app. It provides a
34 callable WSGI interface and a simple routing engine based on URI templates.
35
36 """
37
38 __slots__ = ('_routes')
39
40 def __init__(self):
41 """Initialize default values"""
42 self._routes = []
43
44 def __call__(self, env, start_response):
45 """WSGI "app" method
46
47 Makes instances of API callable by any WSGI server. See also PEP 333.
48
49 Args:
50 env: A WSGI environment dictionary
51 start_response: A WSGI helper method for setting status and headers
52 on a response.
53
54 """
55
56 req = Request(env)
57 resp = Response()
58
59 responder, params = self._get_responder(req.path, req.method)
60
61 try:
62 responder(req, resp, **params)
63
64 except HTTPError as ex:
65 resp.status = ex.status
66 if ex.headers is not None:
67 resp.set_headers(ex.headers)
68
69 if req.client_accepts_json():
70 resp.body = ex.json()
71
72 except Exception as ex:
73 # Reset to a known state and respond with a generic error
74 req = Request(env)
75 resp = Response()
76
77 message = ['Responder raised ', ex.__class__.__name__]
78
79 details = str(ex)
80 if details:
81 message.append(': ')
82 message.append(details)
83
84 stack = traceback.format_exc()
85 message.append('\n')
86 message.append(stack)
87
88 req.log_error(''.join(message))
89 responders.server_error(req, resp)
90
91 #
92 # Set status and headers
93 #
94 use_body = not should_ignore_body(resp.status, req.method)
95 if use_body:
96 set_content_length(resp)
97
98 start_response(resp.status, resp._wsgi_headers())
99
100 # Return an iterable for the body, per the WSGI spec
101 if use_body:
102 return prepare_wsgi_content(resp)
103
104 # Default: return an empty body
105 return []
106
107 def add_route(self, uri_template, resource):
108 """Associate a URI path with a resource
109
110 Args:
111 uri_template: Relative URI template. Currently only Level 1
112 templates are supported. See also RFC 6570.
113 resource: Object which represents an HTTP/REST "resource". Falcon
114 will pass "GET" requests to on_get, "PUT" requests to on_put,
115 etc. If any HTTP methods are not supported by your resource,
116 simply don't define the corresponding request handlers, and
117 Falcon will do the right thing.
118
119 """
120
121 if not uri_template:
122 uri_template = '/'
123
124 path_template = compile_uri_template(uri_template)
125 method_map = create_http_method_map(resource)
126
127 # Insert at the head of the list in case we get duplicate
128 # adds (will cause the last one to win).
129 self._routes.insert(0, (path_template, method_map))
130
131 def _get_responder(self, path, method):
132 """Searches routes for a matching responder
133
134 Args:
135             path: URI path to search (without query string)
136 method: HTTP method (uppercase) requested
137 Returns:
138 A 2-member tuple, containing a responder callable and a dict
139 containing parsed path fields, if any were specified in
140 the matching route's URI template
141
142 """
143
144 for path_template, method_map in self._routes:
145 m = path_template.match(path)
146 if m:
147 params = m.groupdict()
148
149 try:
150 responder = method_map[method]
151 except KeyError:
152 responder = responders.bad_request
153
154 break
155 else:
156 responder = responders.path_not_found
157 params = {}
158
159 return (responder, params)
160
[end of falcon/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/falcon/api.py b/falcon/api.py
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -16,8 +16,6 @@
"""
-import traceback
-
from .request import Request
from .response import Response
from . import responders
@@ -69,25 +67,6 @@
if req.client_accepts_json():
resp.body = ex.json()
- except Exception as ex:
- # Reset to a known state and respond with a generic error
- req = Request(env)
- resp = Response()
-
- message = ['Responder raised ', ex.__class__.__name__]
-
- details = str(ex)
- if details:
- message.append(': ')
- message.append(details)
-
- stack = traceback.format_exc()
- message.append('\n')
- message.append(stack)
-
- req.log_error(''.join(message))
- responders.server_error(req, resp)
-
#
# Set status and headers
#
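For contrast, here is a hedged sketch of where the generic error handling could live once it is out of the framework: ordinary WSGI middleware that a deployer opts into. The middleware class and its logger argument are illustrative, not part of Falcon:
```python
import sys


class ErrorLoggingMiddleware(object):
    """Wraps a WSGI app; logs unhandled responder errors and returns a 500."""

    def __init__(self, app, logger):
        self.app = app
        self.logger = logger

    def __call__(self, env, start_response):
        try:
            return self.app(env, start_response)
        except Exception:
            # The failure is logged visibly here instead of being
            # swallowed inside the framework's request loop.
            self.logger.exception('Unhandled exception in responder')
            start_response('500 Internal Server Error',
                           [('Content-Type', 'text/plain')],
                           sys.exc_info())
            return [b'Internal Server Error']
```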
| {"golden_diff": "diff --git a/falcon/api.py b/falcon/api.py\n--- a/falcon/api.py\n+++ b/falcon/api.py\n@@ -16,8 +16,6 @@\n \n \"\"\"\n \n-import traceback\n-\n from .request import Request\n from .response import Response\n from . import responders\n@@ -69,25 +67,6 @@\n if req.client_accepts_json():\n resp.body = ex.json()\n \n- except Exception as ex:\n- # Reset to a known state and respond with a generic error\n- req = Request(env)\n- resp = Response()\n-\n- message = ['Responder raised ', ex.__class__.__name__]\n-\n- details = str(ex)\n- if details:\n- message.append(': ')\n- message.append(details)\n-\n- stack = traceback.format_exc()\n- message.append('\\n')\n- message.append(stack)\n-\n- req.log_error(''.join(message))\n- responders.server_error(req, resp)\n-\n #\n # Set status and headers\n #\n", "issue": "Remove responder exception handling\nCan hide problems, encourage bad coding practices.\n\n", "before_files": [{"content": "\"\"\"Defines the API class.\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport traceback\n\nfrom .request import Request\nfrom .response import Response\nfrom . import responders\nfrom .status_codes import *\nfrom .api_helpers import *\n\nfrom .http_error import HTTPError\n\n\nclass API(object):\n \"\"\"Provides routing and such for building a web service application\n\n This class is the main entry point into a Falcon-based app. It provides a\n callable WSGI interface and a simple routing engine based on URI templates.\n\n \"\"\"\n\n __slots__ = ('_routes')\n\n def __init__(self):\n \"\"\"Initialize default values\"\"\"\n self._routes = []\n\n def __call__(self, env, start_response):\n \"\"\"WSGI \"app\" method\n\n Makes instances of API callable by any WSGI server. 
See also PEP 333.\n\n Args:\n env: A WSGI environment dictionary\n start_response: A WSGI helper method for setting status and headers\n on a response.\n\n \"\"\"\n\n req = Request(env)\n resp = Response()\n\n responder, params = self._get_responder(req.path, req.method)\n\n try:\n responder(req, resp, **params)\n\n except HTTPError as ex:\n resp.status = ex.status\n if ex.headers is not None:\n resp.set_headers(ex.headers)\n\n if req.client_accepts_json():\n resp.body = ex.json()\n\n except Exception as ex:\n # Reset to a known state and respond with a generic error\n req = Request(env)\n resp = Response()\n\n message = ['Responder raised ', ex.__class__.__name__]\n\n details = str(ex)\n if details:\n message.append(': ')\n message.append(details)\n\n stack = traceback.format_exc()\n message.append('\\n')\n message.append(stack)\n\n req.log_error(''.join(message))\n responders.server_error(req, resp)\n\n #\n # Set status and headers\n #\n use_body = not should_ignore_body(resp.status, req.method)\n if use_body:\n set_content_length(resp)\n\n start_response(resp.status, resp._wsgi_headers())\n\n # Return an iterable for the body, per the WSGI spec\n if use_body:\n return prepare_wsgi_content(resp)\n\n # Default: return an empty body\n return []\n\n def add_route(self, uri_template, resource):\n \"\"\"Associate a URI path with a resource\n\n Args:\n uri_template: Relative URI template. Currently only Level 1\n templates are supported. See also RFC 6570.\n resource: Object which represents an HTTP/REST \"resource\". Falcon\n will pass \"GET\" requests to on_get, \"PUT\" requests to on_put,\n etc. If any HTTP methods are not supported by your resource,\n simply don't define the corresponding request handlers, and\n Falcon will do the right thing.\n\n \"\"\"\n\n if not uri_template:\n uri_template = '/'\n\n path_template = compile_uri_template(uri_template)\n method_map = create_http_method_map(resource)\n\n # Insert at the head of the list in case we get duplicate\n # adds (will cause the last one to win).\n self._routes.insert(0, (path_template, method_map))\n\n def _get_responder(self, path, method):\n \"\"\"Searches routes for a matching responder\n\n Args:\n path: URI path to search (without query stirng)\n method: HTTP method (uppercase) requested\n Returns:\n A 2-member tuple, containing a responder callable and a dict\n containing parsed path fields, if any were specified in\n the matching route's URI template\n\n \"\"\"\n\n for path_template, method_map in self._routes:\n m = path_template.match(path)\n if m:\n params = m.groupdict()\n\n try:\n responder = method_map[method]\n except KeyError:\n responder = responders.bad_request\n\n break\n else:\n responder = responders.path_not_found\n params = {}\n\n return (responder, params)\n", "path": "falcon/api.py"}]} | 1,927 | 224 |
gh_patches_debug_8483 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-2171 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switching to English speech synthesis fails with: get_input_ids() got an unexpected keyword argument 'get_tone_ids'
To switch to English speech synthesis, I changed the acoustic model and vocoder under tts_python in the /paddlespeech/server/conf/application.yaml config file, using fastspeech2_ljspeech as the acoustic model and pwgan_ljspeech as the vocoder, and set lang to en, but the request fails with: get_input_ids() got an unexpected keyword argument 'get_tone_ids'
</issue>
<code>
[start of paddlespeech/server/engine/engine_warmup.py]
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import time
15
16 from paddlespeech.cli.log import logger
17 from paddlespeech.server.engine.engine_pool import get_engine_pool
18
19
20 def warm_up(engine_and_type: str, warm_up_time: int=3) -> bool:
21 engine_pool = get_engine_pool()
22
23 if "tts" in engine_and_type:
24 tts_engine = engine_pool['tts']
25 flag_online = False
26 if tts_engine.lang == 'zh':
27 sentence = "您好,欢迎使用语音合成服务。"
28 elif tts_engine.lang == 'en':
29 sentence = "Hello and welcome to the speech synthesis service."
30 else:
31 logger.error("tts engine only support lang: zh or en.")
32 sys.exit(-1)
33
34 if engine_and_type == "tts_python":
35 from paddlespeech.server.engine.tts.python.tts_engine import PaddleTTSConnectionHandler
36 elif engine_and_type == "tts_inference":
37 from paddlespeech.server.engine.tts.paddleinference.tts_engine import PaddleTTSConnectionHandler
38 elif engine_and_type == "tts_online":
39 from paddlespeech.server.engine.tts.online.python.tts_engine import PaddleTTSConnectionHandler
40 flag_online = True
41 elif engine_and_type == "tts_online-onnx":
42 from paddlespeech.server.engine.tts.online.onnx.tts_engine import PaddleTTSConnectionHandler
43 flag_online = True
44 else:
45             logger.error("Please check tts engine type.")
46
47 try:
48 logger.debug("Start to warm up tts engine.")
49 for i in range(warm_up_time):
50 connection_handler = PaddleTTSConnectionHandler(tts_engine)
51 if flag_online:
52 for wav in connection_handler.infer(
53 text=sentence,
54 lang=tts_engine.lang,
55 am=tts_engine.config.am):
56 logger.debug(
57 f"The first response time of the {i} warm up: {connection_handler.first_response_time} s"
58 )
59 break
60
61 else:
62 st = time.time()
63 connection_handler.infer(text=sentence)
64 et = time.time()
65 logger.debug(
66 f"The response time of the {i} warm up: {et - st} s")
67 except Exception as e:
68 logger.error("Failed to warm up on tts engine.")
69 logger.error(e)
70 return False
71
72 else:
73 pass
74
75 return True
76
[end of paddlespeech/server/engine/engine_warmup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/paddlespeech/server/engine/engine_warmup.py b/paddlespeech/server/engine/engine_warmup.py
--- a/paddlespeech/server/engine/engine_warmup.py
+++ b/paddlespeech/server/engine/engine_warmup.py
@@ -60,7 +60,10 @@
else:
st = time.time()
- connection_handler.infer(text=sentence)
+ connection_handler.infer(
+ text=sentence,
+ lang=tts_engine.lang,
+ am=tts_engine.config.am)
et = time.time()
logger.debug(
f"The response time of the {i} warm up: {et - st} s")
| {"golden_diff": "diff --git a/paddlespeech/server/engine/engine_warmup.py b/paddlespeech/server/engine/engine_warmup.py\n--- a/paddlespeech/server/engine/engine_warmup.py\n+++ b/paddlespeech/server/engine/engine_warmup.py\n@@ -60,7 +60,10 @@\n \n else:\n st = time.time()\n- connection_handler.infer(text=sentence)\n+ connection_handler.infer(\n+ text=sentence,\n+ lang=tts_engine.lang,\n+ am=tts_engine.config.am)\n et = time.time()\n logger.debug(\n f\"The response time of the {i} warm up: {et - st} s\")\n", "issue": "\u5207\u6362\u82f1\u6587\u8bed\u97f3\u5408\u6210\u62a5\u9519 get_input_ids() got an unexpected keyword argument 'get_tone_ids'\n\u8981\u5207\u6362\u6210\u82f1\u6587\u8bed\u97f3\u5408\u6210\u65f6\uff0c\u66f4\u6539\u4e86/paddlespeech/server/conf/application.yaml\u8fd9\u4e2a\u914d\u7f6e\u6587\u4ef6\u4e2d\u7684tts_python\u91cc\u9762\u7684\u58f0\u5b66\u6a21\u578b\u548c\u58f0\u7801\u5668\uff0c\u58f0\u5b66\u6a21\u578b\u7528\u7684\u662ffastspeech2_ljspeech\uff0c\u58f0\u7801\u5668\u7528\u7684pwgan_ljspeech\uff0c\u5e76\u4e14lang\u6539\u4e3aen\uff0c\u4f46\u662f\u62a5\u9519 get_input_ids() got an unexpected keyword argument 'get_tone_ids'\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nfrom paddlespeech.cli.log import logger\nfrom paddlespeech.server.engine.engine_pool import get_engine_pool\n\n\ndef warm_up(engine_and_type: str, warm_up_time: int=3) -> bool:\n engine_pool = get_engine_pool()\n\n if \"tts\" in engine_and_type:\n tts_engine = engine_pool['tts']\n flag_online = False\n if tts_engine.lang == 'zh':\n sentence = \"\u60a8\u597d\uff0c\u6b22\u8fce\u4f7f\u7528\u8bed\u97f3\u5408\u6210\u670d\u52a1\u3002\"\n elif tts_engine.lang == 'en':\n sentence = \"Hello and welcome to the speech synthesis service.\"\n else:\n logger.error(\"tts engine only support lang: zh or en.\")\n sys.exit(-1)\n\n if engine_and_type == \"tts_python\":\n from paddlespeech.server.engine.tts.python.tts_engine import PaddleTTSConnectionHandler\n elif engine_and_type == \"tts_inference\":\n from paddlespeech.server.engine.tts.paddleinference.tts_engine import PaddleTTSConnectionHandler\n elif engine_and_type == \"tts_online\":\n from paddlespeech.server.engine.tts.online.python.tts_engine import PaddleTTSConnectionHandler\n flag_online = True\n elif engine_and_type == \"tts_online-onnx\":\n from paddlespeech.server.engine.tts.online.onnx.tts_engine import PaddleTTSConnectionHandler\n flag_online = True\n else:\n logger.error(\"Please check tte engine type.\")\n\n try:\n logger.debug(\"Start to warm up tts engine.\")\n for i in range(warm_up_time):\n connection_handler = PaddleTTSConnectionHandler(tts_engine)\n if flag_online:\n for wav in connection_handler.infer(\n text=sentence,\n lang=tts_engine.lang,\n am=tts_engine.config.am):\n logger.debug(\n f\"The first response time of the {i} warm up: {connection_handler.first_response_time} s\"\n )\n break\n\n else:\n st = time.time()\n 
connection_handler.infer(text=sentence)\n et = time.time()\n logger.debug(\n f\"The response time of the {i} warm up: {et - st} s\")\n except Exception as e:\n logger.error(\"Failed to warm up on tts engine.\")\n logger.error(e)\n return False\n\n else:\n pass\n\n return True\n", "path": "paddlespeech/server/engine/engine_warmup.py"}]} | 1,439 | 150 |
gh_patches_debug_35491 | rasdani/github-patches | git_diff | aws__aws-cli-4874 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proposal: aws ecr get-login-password
This is a proposal for a new AWS CLI command for ECR
```
$ aws ecr get-login-password
cGFzc3dvcmQ=
```
This command can be used in the following ways:
```
$ aws ecr get-login-password | docker login --username AWS --password-stdin 111111111111.dkr.ecr.us-west-2.amazonaws.com
Login Succeeded
$ docker login --username AWS --password "$(aws ecr get-login-password)" 111111111111.dkr.ecr.us-west-2.amazonaws.com
Login Succeeded
```
This idea has been previously proposed by @theY4Kman https://github.com/aws/aws-cli/issues/2875#issuecomment-433565983 and @kojiromike https://github.com/aws/aws-cli/issues/3687#issue-374397564
</issue>
<code>
[start of awscli/customizations/ecr.py]
1 # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 from awscli.customizations.commands import BasicCommand
14 from awscli.customizations.utils import create_client_from_parsed_globals
15
16 from base64 import b64decode
17 import sys
18
19
20 def register_ecr_commands(cli):
21 cli.register('building-command-table.ecr', _inject_get_login)
22
23
24 def _inject_get_login(command_table, session, **kwargs):
25 command_table['get-login'] = ECRLogin(session)
26
27
28 class ECRLogin(BasicCommand):
29 """Log in with docker login"""
30 NAME = 'get-login'
31
32 DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')
33
34 ARG_TABLE = [
35 {
36 'name': 'registry-ids',
37 'help_text': 'A list of AWS account IDs that correspond to the '
38 'Amazon ECR registries that you want to log in to.',
39 'required': False,
40 'nargs': '+'
41 },
42 {
43 'name': 'include-email',
44 'action': 'store_true',
45 'group_name': 'include-email',
46 'dest': 'include_email',
47 'default': True,
48 'required': False,
49 'help_text': (
50 "Specify if the '-e' flag should be included in the "
51 "'docker login' command. The '-e' option has been deprecated "
52 "and is removed in docker version 17.06 and later. You must "
53 "specify --no-include-email if you're using docker version "
54 "17.06 or later. The default behavior is to include the "
55 "'-e' flag in the 'docker login' output."),
56 },
57 {
58 'name': 'no-include-email',
59 'help_text': 'Include email arg',
60 'action': 'store_false',
61 'default': True,
62 'group_name': 'include-email',
63 'dest': 'include_email',
64 'required': False,
65 },
66 ]
67
68 def _run_main(self, parsed_args, parsed_globals):
69 ecr_client = create_client_from_parsed_globals(
70 self._session, 'ecr', parsed_globals)
71 if not parsed_args.registry_ids:
72 result = ecr_client.get_authorization_token()
73 else:
74 result = ecr_client.get_authorization_token(
75 registryIds=parsed_args.registry_ids)
76 for auth in result['authorizationData']:
77 auth_token = b64decode(auth['authorizationToken']).decode()
78 username, password = auth_token.split(':')
79 command = ['docker', 'login', '-u', username, '-p', password]
80 if parsed_args.include_email:
81 command.extend(['-e', 'none'])
82 command.append(auth['proxyEndpoint'])
83 sys.stdout.write(' '.join(command))
84 sys.stdout.write('\n')
85 return 0
86
[end of awscli/customizations/ecr.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/awscli/customizations/ecr.py b/awscli/customizations/ecr.py
--- a/awscli/customizations/ecr.py
+++ b/awscli/customizations/ecr.py
@@ -18,15 +18,16 @@
def register_ecr_commands(cli):
- cli.register('building-command-table.ecr', _inject_get_login)
+ cli.register('building-command-table.ecr', _inject_commands)
-def _inject_get_login(command_table, session, **kwargs):
+def _inject_commands(command_table, session, **kwargs):
command_table['get-login'] = ECRLogin(session)
+ command_table['get-login-password'] = ECRGetLoginPassword(session)
class ECRLogin(BasicCommand):
- """Log in with docker login"""
+ """Log in with 'docker login'"""
NAME = 'get-login'
DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')
@@ -49,8 +50,8 @@
'help_text': (
"Specify if the '-e' flag should be included in the "
"'docker login' command. The '-e' option has been deprecated "
- "and is removed in docker version 17.06 and later. You must "
- "specify --no-include-email if you're using docker version "
+ "and is removed in Docker version 17.06 and later. You must "
+ "specify --no-include-email if you're using Docker version "
"17.06 or later. The default behavior is to include the "
"'-e' flag in the 'docker login' output."),
},
@@ -83,3 +84,24 @@
sys.stdout.write(' '.join(command))
sys.stdout.write('\n')
return 0
+
+
+class ECRGetLoginPassword(BasicCommand):
+ """Get a password to be used with container clients such as Docker"""
+ NAME = 'get-login-password'
+
+ DESCRIPTION = BasicCommand.FROM_FILE(
+ 'ecr/get-login-password_description.rst')
+
+ def _run_main(self, parsed_args, parsed_globals):
+ ecr_client = create_client_from_parsed_globals(
+ self._session,
+ 'ecr',
+ parsed_globals)
+ result = ecr_client.get_authorization_token()
+ auth = result['authorizationData'][0]
+ auth_token = b64decode(auth['authorizationToken']).decode()
+ _, password = auth_token.split(':')
+ sys.stdout.write(password)
+ sys.stdout.write('\n')
+ return 0
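Outside the CLI, the same password is reachable with a few lines of boto3, roughly mirroring what `_run_main` above does (a sketch that assumes default credentials and region are already configured):
```python
import base64

import boto3

ecr = boto3.client("ecr")
token = ecr.get_authorization_token()["authorizationData"][0]["authorizationToken"]
# The token decodes to "AWS:<password>"; the part after the colon is what
# `docker login --username AWS --password-stdin` expects.
username, password = base64.b64decode(token).decode().split(":", 1)
print(password)
```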
| {"golden_diff": "diff --git a/awscli/customizations/ecr.py b/awscli/customizations/ecr.py\n--- a/awscli/customizations/ecr.py\n+++ b/awscli/customizations/ecr.py\n@@ -18,15 +18,16 @@\n \n \n def register_ecr_commands(cli):\n- cli.register('building-command-table.ecr', _inject_get_login)\n+ cli.register('building-command-table.ecr', _inject_commands)\n \n \n-def _inject_get_login(command_table, session, **kwargs):\n+def _inject_commands(command_table, session, **kwargs):\n command_table['get-login'] = ECRLogin(session)\n+ command_table['get-login-password'] = ECRGetLoginPassword(session)\n \n \n class ECRLogin(BasicCommand):\n- \"\"\"Log in with docker login\"\"\"\n+ \"\"\"Log in with 'docker login'\"\"\"\n NAME = 'get-login'\n \n DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')\n@@ -49,8 +50,8 @@\n 'help_text': (\n \"Specify if the '-e' flag should be included in the \"\n \"'docker login' command. The '-e' option has been deprecated \"\n- \"and is removed in docker version 17.06 and later. You must \"\n- \"specify --no-include-email if you're using docker version \"\n+ \"and is removed in Docker version 17.06 and later. You must \"\n+ \"specify --no-include-email if you're using Docker version \"\n \"17.06 or later. The default behavior is to include the \"\n \"'-e' flag in the 'docker login' output.\"),\n },\n@@ -83,3 +84,24 @@\n sys.stdout.write(' '.join(command))\n sys.stdout.write('\\n')\n return 0\n+\n+\n+class ECRGetLoginPassword(BasicCommand):\n+ \"\"\"Get a password to be used with container clients such as Docker\"\"\"\n+ NAME = 'get-login-password'\n+\n+ DESCRIPTION = BasicCommand.FROM_FILE(\n+ 'ecr/get-login-password_description.rst')\n+\n+ def _run_main(self, parsed_args, parsed_globals):\n+ ecr_client = create_client_from_parsed_globals(\n+ self._session,\n+ 'ecr',\n+ parsed_globals)\n+ result = ecr_client.get_authorization_token()\n+ auth = result['authorizationData'][0]\n+ auth_token = b64decode(auth['authorizationToken']).decode()\n+ _, password = auth_token.split(':')\n+ sys.stdout.write(password)\n+ sys.stdout.write('\\n')\n+ return 0\n", "issue": "Proposal: aws ecr get-login-password\nThis is a proposal for a new AWS CLI command for ECR\r\n\r\n```\r\n$ aws ecr get-login-password\r\ncGFzc3dvcmQ=\r\n```\r\n\r\nThis command can be used in the following ways:\r\n\r\n```\r\n$ aws ecr get-login-password | docker login --username AWS --password-stdin 111111111111.dkr.ecr.us-west-2.amazonaws.com\r\nLogin Succeeded\r\n\r\n$ docker login --username AWS --password \"$(aws ecr get-login-password)\" 111111111111.dkr.ecr.us-west-2.amazonaws.com\r\nLogin Succeeded\r\n```\r\n\r\nThis idea has been previously proposed by @theY4Kman https://github.com/aws/aws-cli/issues/2875#issuecomment-433565983 and @kojiromike https://github.com/aws/aws-cli/issues/3687#issue-374397564\n", "before_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import create_client_from_parsed_globals\n\nfrom base64 import b64decode\nimport sys\n\n\ndef register_ecr_commands(cli):\n cli.register('building-command-table.ecr', _inject_get_login)\n\n\ndef _inject_get_login(command_table, session, **kwargs):\n command_table['get-login'] = ECRLogin(session)\n\n\nclass ECRLogin(BasicCommand):\n \"\"\"Log in with docker login\"\"\"\n NAME = 'get-login'\n\n DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')\n\n ARG_TABLE = [\n {\n 'name': 'registry-ids',\n 'help_text': 'A list of AWS account IDs that correspond to the '\n 'Amazon ECR registries that you want to log in to.',\n 'required': False,\n 'nargs': '+'\n },\n {\n 'name': 'include-email',\n 'action': 'store_true',\n 'group_name': 'include-email',\n 'dest': 'include_email',\n 'default': True,\n 'required': False,\n 'help_text': (\n \"Specify if the '-e' flag should be included in the \"\n \"'docker login' command. The '-e' option has been deprecated \"\n \"and is removed in docker version 17.06 and later. You must \"\n \"specify --no-include-email if you're using docker version \"\n \"17.06 or later. The default behavior is to include the \"\n \"'-e' flag in the 'docker login' output.\"),\n },\n {\n 'name': 'no-include-email',\n 'help_text': 'Include email arg',\n 'action': 'store_false',\n 'default': True,\n 'group_name': 'include-email',\n 'dest': 'include_email',\n 'required': False,\n },\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n ecr_client = create_client_from_parsed_globals(\n self._session, 'ecr', parsed_globals)\n if not parsed_args.registry_ids:\n result = ecr_client.get_authorization_token()\n else:\n result = ecr_client.get_authorization_token(\n registryIds=parsed_args.registry_ids)\n for auth in result['authorizationData']:\n auth_token = b64decode(auth['authorizationToken']).decode()\n username, password = auth_token.split(':')\n command = ['docker', 'login', '-u', username, '-p', password]\n if parsed_args.include_email:\n command.extend(['-e', 'none'])\n command.append(auth['proxyEndpoint'])\n sys.stdout.write(' '.join(command))\n sys.stdout.write('\\n')\n return 0\n", "path": "awscli/customizations/ecr.py"}]} | 1,651 | 577 |
gh_patches_debug_13272 | rasdani/github-patches | git_diff | arviz-devs__arviz-1133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `Matplotlib` framework classifier to `setup.py`
`Matplotlib` now has a [trove classifier on pypi](https://twitter.com/matplotlib/status/1235216347925286913). We can add:
```python
classifiers = [
'Framework :: Matplotlib',
]
```
to `arviz`'s `setup.py` to acknowledge that it is part of the `Matplotlib` ecosystem.
I believe that `arviz` currently doesn't have any classifiers ([there are many!](https://pypi.org/classifiers/)). We could add something like the following to `setup.py`:
```python
classifiers = [
'Framework :: Matplotlib',
'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
]
```
I'm not sure whether you would say `arviz` is:
```
'Development Status :: 5 - Production/Stable',
```
or
```
'Development Status :: 4 - Beta',
```
There may be thoughts on other classifiers to add, but I can quickly put together a PR for this.
</issue>
<code>
[start of setup.py]
1 import codecs
2 import os
3 import re
4
5 import setuptools
6 from setuptools import setup, find_packages
7 from setuptools.command.install import install
8 from setuptools.command.develop import develop
9
10
11 PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
12 REQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, "requirements.txt")
13 REQUIREMENTS_OPTIONAL_FILE = os.path.join(PROJECT_ROOT, "requirements-optional.txt")
14 REQUIREMENTS_DEV_FILE = os.path.join(PROJECT_ROOT, "requirements-dev.txt")
15 README_FILE = os.path.join(PROJECT_ROOT, "README.md")
16 VERSION_FILE = os.path.join(PROJECT_ROOT, "arviz", "__init__.py")
17
18
19 def get_requirements():
20 with codecs.open(REQUIREMENTS_FILE) as buff:
21 return buff.read().splitlines()
22
23
24 def get_requirements_dev():
25 with codecs.open(REQUIREMENTS_DEV_FILE) as buff:
26 return buff.read().splitlines()
27
28
29 def get_requirements_optional():
30 with codecs.open(REQUIREMENTS_OPTIONAL_FILE) as buff:
31 return buff.read().splitlines()
32
33
34 def get_long_description():
35 with codecs.open(README_FILE, "rt") as buff:
36 return buff.read()
37
38
39 def get_version():
40 lines = open(VERSION_FILE, "rt").readlines()
41 version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
42 for line in lines:
43 mo = re.search(version_regex, line, re.M)
44 if mo:
45 return mo.group(1)
46 raise RuntimeError("Unable to find version in %s." % (VERSION_FILE,))
47
48
49 setup(
50 name="arviz",
51 license="Apache-2.0",
52 version=get_version(),
53 description="Exploratory analysis of Bayesian models",
54 author="ArviZ Developers",
55 url="http://github.com/arviz-devs/arviz",
56 packages=find_packages(),
57 install_requires=get_requirements(),
58 extras_require=dict(all=get_requirements_optional()), # test=get_requirements_dev(),
59 long_description=get_long_description(),
60 long_description_content_type="text/markdown",
61 include_package_data=True,
62 )
63
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,4 +59,19 @@
long_description=get_long_description(),
long_description_content_type="text/markdown",
include_package_data=True,
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Framework :: Matplotlib",
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: Education",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Visualization",
+ "Topic :: Scientific/Engineering :: Mathematics",
+ ],
)
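A quick, hedged way to confirm the classifiers land in the built metadata, assuming Python 3.8+ and an installed arviz build that includes this change:
```python
from importlib.metadata import metadata

# Each Classifier header of the installed distribution is printed once.
for classifier in metadata("arviz").get_all("Classifier"):
    print(classifier)
```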
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,4 +59,19 @@\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n+ classifiers=[\n+ \"Development Status :: 4 - Beta\",\n+ \"Framework :: Matplotlib\",\n+ \"Intended Audience :: Science/Research\",\n+ \"Intended Audience :: Education\",\n+ \"License :: OSI Approved :: Apache Software License\",\n+ \"Programming Language :: Python\",\n+ \"Programming Language :: Python :: 3\",\n+ \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Topic :: Scientific/Engineering\",\n+ \"Topic :: Scientific/Engineering :: Visualization\",\n+ \"Topic :: Scientific/Engineering :: Mathematics\",\n+ ],\n )\n", "issue": "Add `Matplotlib` framework classifier to `setup.py`\n`Matplotlib` now has a [trove classifier on pypi](https://twitter.com/matplotlib/status/1235216347925286913). We can add:\r\n\r\n```python\r\nclassifiers = [\r\n 'Framework :: Matplotlib',\r\n ]\r\n```\r\nto `arviz`'s `setup.py` to acknowledge that it is part of `Matplotlib` ecosystem.\r\n\r\nI believe that `arviz` currently doesn't have any classifiers ([there are many!](https://pypi.org/classifiers/)). We could add something like the following to `setup.py`:\r\n\r\n```python\r\nclassifiers = [\r\n 'Framework :: Matplotlib',\r\n 'Intended Audience :: Science/Research',\r\n 'License :: OSI Approved :: Apache Software License'\r\n 'Programming Language :: Python',\r\n 'Programming Language :: Python :: 3',\r\n 'Programming Language :: Python :: 3.5',\r\n 'Programming Language :: Python :: 3.6',\r\n 'Programming Language :: Python :: 3.7',\r\n 'Topic :: Scientific/Engineering :: Visualization',\r\n ]\r\n```\r\n\r\nI'm not sure if you would say if `arviz` is:\r\n```\r\n'Development Status :: 5 - Production/Stable',\r\n```\r\nor\r\n```\r\n'Development Status :: 4 - Beta',\r\n```\r\n\r\nThere may be thoughts on other classifiers to add, but I can quickly put together a PR for this\n", "before_files": [{"content": "import codecs\nimport os\nimport re\n\nimport setuptools\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\n\nPROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\nREQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, \"requirements.txt\")\nREQUIREMENTS_OPTIONAL_FILE = os.path.join(PROJECT_ROOT, \"requirements-optional.txt\")\nREQUIREMENTS_DEV_FILE = os.path.join(PROJECT_ROOT, \"requirements-dev.txt\")\nREADME_FILE = os.path.join(PROJECT_ROOT, \"README.md\")\nVERSION_FILE = os.path.join(PROJECT_ROOT, \"arviz\", \"__init__.py\")\n\n\ndef get_requirements():\n with codecs.open(REQUIREMENTS_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_requirements_dev():\n with codecs.open(REQUIREMENTS_DEV_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_requirements_optional():\n with codecs.open(REQUIREMENTS_OPTIONAL_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_long_description():\n with codecs.open(README_FILE, \"rt\") as buff:\n return buff.read()\n\n\ndef get_version():\n lines = open(VERSION_FILE, \"rt\").readlines()\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in lines:\n mo = re.search(version_regex, line, re.M)\n if mo:\n return mo.group(1)\n raise RuntimeError(\"Unable to find version in %s.\" % (VERSION_FILE,))\n\n\nsetup(\n 
name=\"arviz\",\n license=\"Apache-2.0\",\n version=get_version(),\n description=\"Exploratory analysis of Bayesian models\",\n author=\"ArviZ Developers\",\n url=\"http://github.com/arviz-devs/arviz\",\n packages=find_packages(),\n install_requires=get_requirements(),\n extras_require=dict(all=get_requirements_optional()), # test=get_requirements_dev(),\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 1,396 | 205 |
gh_patches_debug_30956 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3602 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider superdrug is broken
During the global build at 2021-06-30-14-42-26, spider **superdrug** failed with **0 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/superdrug.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson))
</issue>
<code>
[start of locations/spiders/superdrug.py]
1 # -*- coding: utf-8 -*-
2 import json
3
4 import scrapy
5
6 from locations.items import GeojsonPointItem
7
8
9 class SuperdrugSpider(scrapy.Spider):
10 name = "superdrug"
11 item_attributes = {"brand": "Superdrug", "brand_wikidata": "Q7643261"}
12 allowed_domains = ["superdrug.com"]
13 download_delay = 0.5
14
15 start_urls = ["https://www.superdrug.com/stores/a-to-z"]
16
17 def parse(self, response):
18 urls = response.xpath('//a[@class="row store-link"]/@href').extract()
19
20 for url in urls:
21 yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
22
23 def parse_location(self, response):
24 data = json.loads(
25 response.xpath(
26 '//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()'
27 ).extract_first()
28 )
29
30 properties = {
31 "name": data["name"],
32 "ref": data["name"],
33 "addr_full": data["address"]["streetAddress"],
34 "city": data["address"]["addressLocality"],
35 "state": data["address"]["addressRegion"],
36 "postcode": data["address"]["postalCode"],
37 "country": data["address"]["addressCountry"],
38 "phone": data.get("telephone"),
39 "website": response.url,
40 "lat": float(
41 response.xpath(
42 '//div[@class="store-locator store-locator__overview"]/@data-lat'
43 ).extract_first()
44 ),
45 "lon": float(
46 response.xpath(
47 '//div[@class="store-locator store-locator__overview"]/@data-lng'
48 ).extract_first()
49 ),
50 }
51 yield GeojsonPointItem(**properties)
52
[end of locations/spiders/superdrug.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/locations/spiders/superdrug.py b/locations/spiders/superdrug.py
--- a/locations/spiders/superdrug.py
+++ b/locations/spiders/superdrug.py
@@ -4,6 +4,7 @@
import scrapy
from locations.items import GeojsonPointItem
+from locations.hours import OpeningHours
class SuperdrugSpider(scrapy.Spider):
@@ -14,6 +15,10 @@
start_urls = ["https://www.superdrug.com/stores/a-to-z"]
+ custom_settings = {
+ "USER_AGENT": "Mozilla/5.0 (X11; Linux x86_64; rv:99.0) Gecko/20100101 Firefox/99.0"
+ }
+
def parse(self, response):
urls = response.xpath('//a[@class="row store-link"]/@href').extract()
@@ -28,9 +33,11 @@
)
properties = {
- "name": data["name"],
- "ref": data["name"],
- "addr_full": data["address"]["streetAddress"],
+ "name": data["name"].replace("Superdrug", "").strip(),
+ "ref": data["@id"],
+ "street_address": data["address"]["streetAddress"]
+ .replace("Superdrug", "")
+ .strip(),
"city": data["address"]["addressLocality"],
"state": data["address"]["addressRegion"],
"postcode": data["address"]["postalCode"],
@@ -48,4 +55,15 @@
).extract_first()
),
}
+
+ oh = OpeningHours()
+
+ for rule in data["OpeningHoursSpecification"]:
+ oh.add_range(
+ day=rule["dayOfWeek"][0:2],
+ open_time=rule["opens"],
+ close_time=rule["closes"],
+ time_format="%I:%M %p",
+ )
+
yield GeojsonPointItem(**properties)
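To illustrate the hours-parsing pattern the fix relies on, here is a stand-alone use of `OpeningHours.add_range` with a made-up ld+json rule; the field names mirror what the spider reads, while the sample values are invented:
```python
from locations.hours import OpeningHours

rule = {"dayOfWeek": "Monday", "opens": "09:00 AM", "closes": "05:30 PM"}

oh = OpeningHours()
oh.add_range(
    day=rule["dayOfWeek"][0:2],  # "Mo": two-letter day abbreviation
    open_time=rule["opens"],
    close_time=rule["closes"],
    time_format="%I:%M %p",      # the site publishes 12-hour clock times
)
```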
| {"golden_diff": "diff --git a/locations/spiders/superdrug.py b/locations/spiders/superdrug.py\n--- a/locations/spiders/superdrug.py\n+++ b/locations/spiders/superdrug.py\n@@ -4,6 +4,7 @@\n import scrapy\n \n from locations.items import GeojsonPointItem\n+from locations.hours import OpeningHours\n \n \n class SuperdrugSpider(scrapy.Spider):\n@@ -14,6 +15,10 @@\n \n start_urls = [\"https://www.superdrug.com/stores/a-to-z\"]\n \n+ custom_settings = {\n+ \"USER_AGENT\": \"Mozilla/5.0 (X11; Linux x86_64; rv:99.0) Gecko/20100101 Firefox/99.0\"\n+ }\n+\n def parse(self, response):\n urls = response.xpath('//a[@class=\"row store-link\"]/@href').extract()\n \n@@ -28,9 +33,11 @@\n )\n \n properties = {\n- \"name\": data[\"name\"],\n- \"ref\": data[\"name\"],\n- \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"name\": data[\"name\"].replace(\"Superdrug\", \"\").strip(),\n+ \"ref\": data[\"@id\"],\n+ \"street_address\": data[\"address\"][\"streetAddress\"]\n+ .replace(\"Superdrug\", \"\")\n+ .strip(),\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n@@ -48,4 +55,15 @@\n ).extract_first()\n ),\n }\n+\n+ oh = OpeningHours()\n+\n+ for rule in data[\"OpeningHoursSpecification\"]:\n+ oh.add_range(\n+ day=rule[\"dayOfWeek\"][0:2],\n+ open_time=rule[\"opens\"],\n+ close_time=rule[\"closes\"],\n+ time_format=\"%I:%M %p\",\n+ )\n+\n yield GeojsonPointItem(**properties)\n", "issue": "Spider superdrug is broken\nDuring the global build at 2021-06-30-14-42-26, spider **superdrug** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/superdrug.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass SuperdrugSpider(scrapy.Spider):\n name = \"superdrug\"\n item_attributes = {\"brand\": \"Superdrug\", \"brand_wikidata\": \"Q7643261\"}\n allowed_domains = [\"superdrug.com\"]\n download_delay = 0.5\n\n start_urls = [\"https://www.superdrug.com/stores/a-to-z\"]\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"row store-link\"]/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n\n def parse_location(self, response):\n data = json.loads(\n response.xpath(\n '//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()'\n ).extract_first()\n )\n\n properties = {\n \"name\": data[\"name\"],\n \"ref\": data[\"name\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"country\": data[\"address\"][\"addressCountry\"],\n \"phone\": data.get(\"telephone\"),\n \"website\": response.url,\n \"lat\": float(\n response.xpath(\n '//div[@class=\"store-locator store-locator__overview\"]/@data-lat'\n ).extract_first()\n ),\n \"lon\": float(\n response.xpath(\n '//div[@class=\"store-locator store-locator__overview\"]/@data-lng'\n ).extract_first()\n ),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/superdrug.py"}]} | 1,200 | 445 |
gh_patches_debug_59056 | rasdani/github-patches | git_diff | google__jax-19166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unexpected behavior of `jax.scipy.stats.binom.pmf`
### Description
The pmf of a random variable should be zero outside its support. While plotting `jax.scipy.stats.binom.pmf`, I noticed that for $n>5$ and $p>0.5$ there are oscillations in the pmf values that should not be there. As evidence, I am attaching a plot.
```python
import jax
from jax import numpy as jnp
from matplotlib import pyplot as plt
x = jnp.linspace(-1, 10, 1000)
xxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)
plt.plot(x, xxf)
plt.tight_layout()
plt.show()
```

The side to the left of zero is as expected.
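The anomaly is also visible without a plot by evaluating the pmf just past the support (a sketch; the non-zero tail values are what the report implies, not verified output):
```python
import jax.numpy as jnp
from jax.scipy.stats import binom

k = jnp.array([-1.0, 0.0, 5.0, 6.0, 8.0])
# Every entry past k = n = 5 should be exactly zero; with the bug,
# some of them come back non-zero.
print(binom.pmf(k, n=5, p=0.8))
```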
### What jax/jaxlib version are you using?
jax v0.4.23
### Which accelerator(s) are you using?
CPU
</issue>
<code>
[start of jax/_src/scipy/stats/binom.py]
1 # Copyright 2023 The JAX Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14
15
16 import scipy.stats as osp_stats
17
18 from jax import lax
19 import jax.numpy as jnp
20 from jax._src.numpy.util import _wraps, promote_args_inexact
21 from jax._src.scipy.special import gammaln, xlogy, xlog1py
22 from jax._src.typing import Array, ArrayLike
23
24
25 @_wraps(osp_stats.nbinom.logpmf, update_doc=False)
26 def logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
27 """JAX implementation of scipy.stats.binom.logpmf."""
28 k, n, p, loc = promote_args_inexact("binom.logpmf", k, n, p, loc)
29 y = lax.sub(k, loc)
30 comb_term = lax.sub(
31 gammaln(n + 1),
32 lax.add(gammaln(y + 1), gammaln(n - y + 1))
33 )
34 log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
35 log_probs = lax.add(comb_term, log_linear_term)
36 return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
37
38
39 @_wraps(osp_stats.nbinom.pmf, update_doc=False)
40 def pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
41 """JAX implementation of scipy.stats.binom.pmf."""
42 return lax.exp(logpmf(k, n, p, loc))
43
[end of jax/_src/scipy/stats/binom.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py
--- a/jax/_src/scipy/stats/binom.py
+++ b/jax/_src/scipy/stats/binom.py
@@ -33,7 +33,7 @@
)
log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
log_probs = lax.add(comb_term, log_linear_term)
- return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
+ return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)
@_wraps(osp_stats.nbinom.pmf, update_doc=False)
| {"golden_diff": "diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py\n--- a/jax/_src/scipy/stats/binom.py\n+++ b/jax/_src/scipy/stats/binom.py\n@@ -33,7 +33,7 @@\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n- return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)\n+ return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)\n \n \n @_wraps(osp_stats.nbinom.pmf, update_doc=False)\n", "issue": "Unexpected behavior of `jax.scipy.stats.binom.pmf`\n### Description\r\n\r\npmf of a random variable should be zero outside of its range. While plotting the graph for `jax.scipy.stats.binom.pmf`, I notice that for $n>5$ and $p>0.5$, there are some oscillations in the values of the pmf, which should not be there. For evidence, I am attaching a plot too.\r\n\r\n```python\r\nimport jax\r\nfrom jax import numpy as jnp\r\nfrom matplotlib import pyplot as plt\r\n\r\nx = jnp.linspace(-1, 10, 1000)\r\nxxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)\r\n\r\nplt.plot(x, xxf)\r\nplt.tight_layout()\r\nplt.show()\r\n```\r\n\r\nThe side left to the zero is as expected.\r\n\r\n### What jax/jaxlib version are you using?\r\n\r\njax v0.4.23\r\n\r\n### Which accelerator(s) are you using?\r\n\r\nCPU\n", "before_files": [{"content": "# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\n\nimport scipy.stats as osp_stats\n\nfrom jax import lax\nimport jax.numpy as jnp\nfrom jax._src.numpy.util import _wraps, promote_args_inexact\nfrom jax._src.scipy.special import gammaln, xlogy, xlog1py\nfrom jax._src.typing import Array, ArrayLike\n\n\n@_wraps(osp_stats.nbinom.logpmf, update_doc=False)\ndef logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.logpmf.\"\"\"\n k, n, p, loc = promote_args_inexact(\"binom.logpmf\", k, n, p, loc)\n y = lax.sub(k, loc)\n comb_term = lax.sub(\n gammaln(n + 1),\n lax.add(gammaln(y + 1), gammaln(n - y + 1))\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)\n\n\n@_wraps(osp_stats.nbinom.pmf, update_doc=False)\ndef pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.pmf.\"\"\"\n return lax.exp(logpmf(k, n, p, loc))\n", "path": "jax/_src/scipy/stats/binom.py"}]} | 1,375 | 179 |
gh_patches_debug_1261 | rasdani/github-patches | git_diff | swcarpentry__python-novice-inflammation-736 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lesson 10 - numpy.mean(data) and data.mean
In lesson 10, when the lesson refers to readings_03.py, the code shows that to calculate the mean over 'data' across all days, numpy.mean is used: numpy.mean(data, axis=1). However, the file readings_03.py itself (at least the version I downloaded recently) uses the instruction data.mean(axis=1). Both lead to the same result, but for consistency I would suggest either modifying the readings_*.py files to use numpy.mean (as this is what has been used throughout the entire lesson), or explaining explicitly that both expressions lead to the same result (it would be a good time to remind students about object attributes).
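To see the equivalence at a glance, a minimal sketch (the array below is a made-up stand-in for the inflammation data):

```python
import numpy

data = numpy.array([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])

# data.mean(axis=1) is simply the method spelling of numpy.mean(data, axis=1);
# both compute the per-row mean.
print(numpy.mean(data, axis=1))  # [2. 5.]
print(data.mean(axis=1))         # [2. 5.]
```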
</issue>
<code>
[start of code/readings_03.py]
1 import sys
2 import numpy
3
4
5 def main():
6 script = sys.argv[0]
7 for filename in sys.argv[1:]:
8 data = numpy.loadtxt(filename, delimiter=',')
9 for m in data.mean(axis=1):
10 print(m)
11
12
13 if __name__ == '__main__':
14 main()
15
[end of code/readings_03.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/code/readings_03.py b/code/readings_03.py
--- a/code/readings_03.py
+++ b/code/readings_03.py
@@ -6,7 +6,7 @@
script = sys.argv[0]
for filename in sys.argv[1:]:
data = numpy.loadtxt(filename, delimiter=',')
- for m in data.mean(axis=1):
+ for m in numpy.mean(data, axis=1):
print(m)
| {"golden_diff": "diff --git a/code/readings_03.py b/code/readings_03.py\n--- a/code/readings_03.py\n+++ b/code/readings_03.py\n@@ -6,7 +6,7 @@\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n- for m in data.mean(axis=1):\n+ for m in numpy.mean(data, axis=1):\n print(m)\n", "issue": "Lesson 10 - numpy.mean(data) and data.mean\nIn lesson 10, when the lesson refers to readings_03.py, the code shows that to calculate the mean over 'data' across all days, numpy.mean is used: numpy.mean(data, axis=1). However when looking at the file readings_03.py (at least the version I downloaded recently) uses the instruction data.mean(axis=1). Both lead to the same result, but for consistency I would suggest to either modify the readings_*.py to use numpy.mean (as this is what it has been used throughout the entire lesson), or explain explicitly that both expressions lead to the same result (it would be a good time to remind students about object attributes). \n", "before_files": [{"content": "import sys\nimport numpy\n\n\ndef main():\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n for m in data.mean(axis=1):\n print(m)\n\n\nif __name__ == '__main__':\n main()\n", "path": "code/readings_03.py"}]} | 781 | 106 |
gh_patches_debug_11035 | rasdani/github-patches | git_diff | python-pillow__Pillow-821 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyPy performance on test_image_point is awful
Hoisted from #476: test_image_point.py takes ~2 minutes to run under PyPy, vs. <1 sec for CPython.
</issue>
<code>
[start of profile-installed.py]
1 #!/usr/bin/env python
2 import nose
3 import os
4 import sys
5 import glob
6
7 import profile
8
9 # monkey with the path, removing the local directory but adding the Tests/
10 # directory for helper.py and the other local imports there.
11
12 del(sys.path[0])
13 sys.path.insert(0, os.path.abspath('./Tests'))
14
15 # if there's no test selected (mostly) choose a working default.
16 # Something is required, because if we import the tests from the local
17 # directory, once again, we've got the non-installed PIL in the way
18 if len(sys.argv) == 1:
19 sys.argv.extend(glob.glob('Tests/test*.py'))
20
21 # Make sure that nose doesn't muck with our paths.
22 if ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):
23 sys.argv.insert(1, '--no-path-adjustment')
24
25 if 'NOSE_PROCESSES' not in os.environ:
26 for arg in sys.argv:
27 if '--processes' in arg:
28 break
29 else: # for
30 sys.argv.insert(1, '--processes=-1') # -1 == number of cores
31 sys.argv.insert(1, '--process-timeout=30')
32
33 if __name__ == '__main__':
34 profile.run("nose.main()", sort=2)
35
[end of profile-installed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profile-installed.py b/profile-installed.py
--- a/profile-installed.py
+++ b/profile-installed.py
@@ -21,14 +21,6 @@
# Make sure that nose doesn't muck with our paths.
if ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):
sys.argv.insert(1, '--no-path-adjustment')
-
-if 'NOSE_PROCESSES' not in os.environ:
- for arg in sys.argv:
- if '--processes' in arg:
- break
- else: # for
- sys.argv.insert(1, '--processes=-1') # -1 == number of cores
- sys.argv.insert(1, '--process-timeout=30')
if __name__ == '__main__':
profile.run("nose.main()", sort=2)
| {"golden_diff": "diff --git a/profile-installed.py b/profile-installed.py\n--- a/profile-installed.py\n+++ b/profile-installed.py\n@@ -21,14 +21,6 @@\n # Make sure that nose doesn't muck with our paths.\n if ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):\n sys.argv.insert(1, '--no-path-adjustment')\n-\n-if 'NOSE_PROCESSES' not in os.environ:\n- for arg in sys.argv:\n- if '--processes' in arg:\n- break\n- else: # for\n- sys.argv.insert(1, '--processes=-1') # -1 == number of cores\n- sys.argv.insert(1, '--process-timeout=30') \n \n if __name__ == '__main__':\n profile.run(\"nose.main()\", sort=2)\n", "issue": "PyPy performance on test_image_point is awful\nHoisted from #476, test_image_point.py takes ~ 2 minutes to run, vs < 1 sec for cpython.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport nose\nimport os\nimport sys\nimport glob\n\nimport profile\n\n# monkey with the path, removing the local directory but adding the Tests/\n# directory for helper.py and the other local imports there.\n\ndel(sys.path[0])\nsys.path.insert(0, os.path.abspath('./Tests'))\n\n# if there's no test selected (mostly) choose a working default.\n# Something is required, because if we import the tests from the local\n# directory, once again, we've got the non-installed PIL in the way\nif len(sys.argv) == 1:\n sys.argv.extend(glob.glob('Tests/test*.py'))\n\n# Make sure that nose doesn't muck with our paths.\nif ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):\n sys.argv.insert(1, '--no-path-adjustment')\n\nif 'NOSE_PROCESSES' not in os.environ:\n for arg in sys.argv:\n if '--processes' in arg:\n break\n else: # for\n sys.argv.insert(1, '--processes=-1') # -1 == number of cores\n sys.argv.insert(1, '--process-timeout=30') \n \nif __name__ == '__main__':\n profile.run(\"nose.main()\", sort=2)\n", "path": "profile-installed.py"}]} | 923 | 195 |
gh_patches_debug_2448 | rasdani/github-patches | git_diff | docker__docker-py-3200 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't create config object
Much like https://github.com/docker/docker-py/issues/2025, the config model is failing to create a new object due to a 'name' KeyError:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "docker\models\configs.py", line 10, in __repr__
return f"<{self.__class__.__name__}: '{self.name}'>"
File "docker\models\configs.py", line 14, in name
return self.attrs['Spec']['Name']
```
This https://github.com/docker/docker-py/pull/2793 appears to be the fix that was implemented there, and it should likely be implemented for configs as well (if not for other models that might have this issue).
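For reference, the pattern from #2793 back-fills the name that the Swarm API response omits; carried over to configs it would look roughly like this (a sketch reconstructed from that PR, not verified against it):
```python
def create(self, **kwargs):
    obj = self.client.api.create_config(**kwargs)
    # The create endpoint only returns an ID, so restore the name before
    # wrapping the response in a model (mirrors the secrets fix).
    obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
    return self.prepare_model(obj)
```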
</issue>
<code>
[start of docker/models/configs.py]
1 from ..api import APIClient
2 from .resource import Model, Collection
3
4
5 class Config(Model):
6 """A config."""
7 id_attribute = 'ID'
8
9 def __repr__(self):
10 return f"<{self.__class__.__name__}: '{self.name}'>"
11
12 @property
13 def name(self):
14 return self.attrs['Spec']['Name']
15
16 def remove(self):
17 """
18 Remove this config.
19
20 Raises:
21 :py:class:`docker.errors.APIError`
22 If config failed to remove.
23 """
24 return self.client.api.remove_config(self.id)
25
26
27 class ConfigCollection(Collection):
28 """Configs on the Docker server."""
29 model = Config
30
31 def create(self, **kwargs):
32 obj = self.client.api.create_config(**kwargs)
33 return self.prepare_model(obj)
34 create.__doc__ = APIClient.create_config.__doc__
35
36 def get(self, config_id):
37 """
38 Get a config.
39
40 Args:
41 config_id (str): Config ID.
42
43 Returns:
44 (:py:class:`Config`): The config.
45
46 Raises:
47 :py:class:`docker.errors.NotFound`
48 If the config does not exist.
49 :py:class:`docker.errors.APIError`
50 If the server returns an error.
51 """
52 return self.prepare_model(self.client.api.inspect_config(config_id))
53
54 def list(self, **kwargs):
55 """
56 List configs. Similar to the ``docker config ls`` command.
57
58 Args:
59 filters (dict): Server-side list filtering options.
60
61 Returns:
62 (list of :py:class:`Config`): The configs.
63
64 Raises:
65 :py:class:`docker.errors.APIError`
66 If the server returns an error.
67 """
68 resp = self.client.api.configs(**kwargs)
69 return [self.prepare_model(obj) for obj in resp]
70
[end of docker/models/configs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/models/configs.py b/docker/models/configs.py
--- a/docker/models/configs.py
+++ b/docker/models/configs.py
@@ -30,6 +30,7 @@
def create(self, **kwargs):
obj = self.client.api.create_config(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_config.__doc__
| {"golden_diff": "diff --git a/docker/models/configs.py b/docker/models/configs.py\n--- a/docker/models/configs.py\n+++ b/docker/models/configs.py\n@@ -30,6 +30,7 @@\n \n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n+ obj.setdefault(\"Spec\", {})[\"Name\"] = kwargs.get(\"name\")\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n", "issue": "Can't create config object\nMuch like https://github.com/docker/docker-py/issues/2025 the config model is failing to create a new object due to 'name' KeyError\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"docker\\models\\configs.py\", line 10, in __repr__\r\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\r\n File \"docker\\models\\configs.py\", line 14, in name\r\n return self.attrs['Spec']['Name']\r\n```\r\n\r\nThis https://github.com/docker/docker-py/pull/2793 appears to be the fix that was implemented and should likely be implements for configs as well (if not other models that might have this issue)\n", "before_files": [{"content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Config(Model):\n \"\"\"A config.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this config.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If config failed to remove.\n \"\"\"\n return self.client.api.remove_config(self.id)\n\n\nclass ConfigCollection(Collection):\n \"\"\"Configs on the Docker server.\"\"\"\n model = Config\n\n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n\n def get(self, config_id):\n \"\"\"\n Get a config.\n\n Args:\n config_id (str): Config ID.\n\n Returns:\n (:py:class:`Config`): The config.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the config does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_config(config_id))\n\n def list(self, **kwargs):\n \"\"\"\n List configs. Similar to the ``docker config ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Config`): The configs.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.configs(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/configs.py"}]} | 1,225 | 102 |
gh_patches_debug_16039 | rasdani/github-patches | git_diff | kserve__kserve-2342 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Kserve defaulting causing duplicates of environment variable
/kind bug
**What steps did you take and what happened:**
Create an example xgboost isvc and enable gRPC:
```
apiVersion: "serving.kserve.io/v1beta1"
kind: "InferenceService"
metadata:
name: "xgboost-iris"
spec:
predictor:
xgboost:
protocolVersion: "v2"
storageUri: "gs://kfserving-examples/models/xgboost/iris"
ports:
- containerPort: 9000
name: h2c
protocol: TCP
```
The pod spec has duplicated environment variables:
```
Environment:
MLSERVER_MODEL_NAME: xgboost-iris
MLSERVER_MODEL_URI: /mnt/models
MLSERVER_MODEL_NAME: xgboost-iris
MLSERVER_MODEL_URI: /mnt/models
```
Additionally, attempting to override the defaults leads to duplicated environment variables with different values:
```
apiVersion: "serving.kserve.io/v1beta1"
kind: "InferenceService"
metadata:
name: "xgboost-iris"
spec:
predictor:
xgboost:
protocolVersion: "v2"
storageUri: "gs://kfserving-examples/models/xgboost/iris"
ports:
- containerPort: 9000
name: h2c
protocol: TCP
env:
- name: MLSERVER_MODEL_NAME
value: my-model
```
The pod spec:
```
Environment:
MLSERVER_MODEL_NAME: my-model
MLSERVER_MODEL_NAME: xgboost-iris
MLSERVER_MODEL_URI: /mnt/models
```
**What did you expect to happen:**
- Defaulting should not duplicate environment variables and should prioritise the user's defined environment variables
**Anything else you would like to add:**
I believe it's because the defaulter always appends to `.Env` without checking for the presence of an existing environment variable. (https://github.com/kserve/kserve/blob/a6ed8e4b006e27433de2336e0e8b7cec11137dc1/pkg/apis/serving/v1beta1/inference_service_defaults.go#L264)
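The suggested fix amounts to a merge that keeps user-set values — sketched below in Python purely for illustration (the real defaulter is Go, and these names are hypothetical):
```python
def merge_env(user_env, default_env):
    """Append a default variable only when the user has not already set that name."""
    seen = {e["name"] for e in user_env}
    return user_env + [e for e in default_env if e["name"] not in seen]

merged = merge_env(
    [{"name": "MLSERVER_MODEL_NAME", "value": "my-model"}],
    [{"name": "MLSERVER_MODEL_NAME", "value": "xgboost-iris"},
     {"name": "MLSERVER_MODEL_URI", "value": "/mnt/models"}],
)
# The user's MLSERVER_MODEL_NAME wins and MLSERVER_MODEL_URI is still defaulted.
```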
**Environment:**
- Kserve: 0.8.0
</issue>
<code>
[start of python/custom_transformer/model_grpc.py]
1 # Copyright 2022 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import base64
17 from typing import Dict, Union
18
19 from kserve import Model, ModelServer, model_server
20 from kserve.grpc.grpc_predict_v2_pb2 import ModelInferRequest
21 from kserve.handlers.v2_datamodels import InferenceRequest
22
23
24 class ImageTransformer(Model):
25 def __init__(self, name: str, predictor_host: str, protocol: str):
26 super().__init__(name)
27 self.predictor_host = predictor_host
28 self.protocol = protocol
29 self.model_name = name
30
31 def preprocess(self, request: Union[Dict, ModelInferRequest, InferenceRequest], headers=None) -> ModelInferRequest:
32 if isinstance(request, ModelInferRequest):
33 return request
34 else:
35 payload = [
36 {
37 "name": "input-0",
38 "shape": [],
39 "datatype": "BYTES",
40 "contents": {
41 "bytes_contents": [base64.b64decode(request["inputs"][0]["data"][0])]
42 }
43 }
44 ]
45 return ModelInferRequest(model_name=self.model_name, inputs=payload)
46
47
48 parser = argparse.ArgumentParser(parents=[model_server.parser])
49 parser.add_argument(
50 "--predictor_host", help="The URL for the model predict function", required=True
51 )
52 parser.add_argument(
53 "--protocol", help="The protocol for the predictor", default="v1"
54 )
55 parser.add_argument(
56 "--model_name", help="The name that the model is served under."
57 )
58 args, _ = parser.parse_known_args()
59
60 if __name__ == "__main__":
61 model = ImageTransformer(args.model_name, predictor_host=args.predictor_host,
62 protocol=args.protocol)
63 ModelServer(workers=1).start([model])
64
[end of python/custom_transformer/model_grpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/custom_transformer/model_grpc.py b/python/custom_transformer/model_grpc.py
--- a/python/custom_transformer/model_grpc.py
+++ b/python/custom_transformer/model_grpc.py
@@ -18,7 +18,6 @@
from kserve import Model, ModelServer, model_server
from kserve.grpc.grpc_predict_v2_pb2 import ModelInferRequest
-from kserve.handlers.v2_datamodels import InferenceRequest
class ImageTransformer(Model):
@@ -28,7 +27,7 @@
self.protocol = protocol
self.model_name = name
- def preprocess(self, request: Union[Dict, ModelInferRequest, InferenceRequest], headers=None) -> ModelInferRequest:
+ def preprocess(self, request: Union[Dict, ModelInferRequest], headers=None) -> ModelInferRequest:
if isinstance(request, ModelInferRequest):
return request
else:
| {"golden_diff": "diff --git a/python/custom_transformer/model_grpc.py b/python/custom_transformer/model_grpc.py\n--- a/python/custom_transformer/model_grpc.py\n+++ b/python/custom_transformer/model_grpc.py\n@@ -18,7 +18,6 @@\n \n from kserve import Model, ModelServer, model_server\n from kserve.grpc.grpc_predict_v2_pb2 import ModelInferRequest\n-from kserve.handlers.v2_datamodels import InferenceRequest\n \n \n class ImageTransformer(Model):\n@@ -28,7 +27,7 @@\n self.protocol = protocol\n self.model_name = name\n \n- def preprocess(self, request: Union[Dict, ModelInferRequest, InferenceRequest], headers=None) -> ModelInferRequest:\n+ def preprocess(self, request: Union[Dict, ModelInferRequest], headers=None) -> ModelInferRequest:\n if isinstance(request, ModelInferRequest):\n return request\n else:\n", "issue": "Kserve defaulting causing duplicates of environment variable \n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nCreate example xgboost isvc and enable gRPC\r\n```\r\napiVersion: \"serving.kserve.io/v1beta1\"\r\nkind: \"InferenceService\"\r\nmetadata:\r\n name: \"xgboost-iris\"\r\nspec:\r\n predictor:\r\n xgboost:\r\n protocolVersion: \"v2\"\r\n storageUri: \"gs://kfserving-examples/models/xgboost/iris\"\r\n ports:\r\n - containerPort: 9000\r\n name: h2c\r\n protocol: TCP\r\n```\r\n\r\nThe pod spec has duplicated environment variable\r\n```\r\n Environment:\r\n MLSERVER_MODEL_NAME: xgboost-iris\r\n MLSERVER_MODEL_URI: /mnt/models\r\n MLSERVER_MODEL_NAME: xgboost-iris\r\n MLSERVER_MODEL_URI: /mnt/models\r\n```\r\n\r\nAdditionally, attempt to override the defaults leads to duplicated environment variable with different values\r\n\r\n```\r\napiVersion: \"serving.kserve.io/v1beta1\"\r\nkind: \"InferenceService\"\r\nmetadata:\r\n name: \"xgboost-iris\"\r\nspec:\r\n predictor:\r\n xgboost:\r\n protocolVersion: \"v2\"\r\n storageUri: \"gs://kfserving-examples/models/xgboost/iris\"\r\n ports:\r\n - containerPort: 9000\r\n name: h2c\r\n protocol: TCP\r\n env:\r\n - name: MLSERVER_MODEL_NAME\r\n value: my-model\r\n```\r\n\r\nThe pod spec:\r\n```\r\n Environment:\r\n MLSERVER_MODEL_NAME: my-model\r\n MLSERVER_MODEL_NAME: xgboost-iris\r\n MLSERVER_MODEL_URI: /mnt/models\r\n```\r\n\r\n**What did you expect to happen:**\r\n- Defaulting should not duplicate environment variable and should prioritise user's defined environment variable\r\n\r\n**Anything else you would like to add:**\r\nI believe it's because the defaulter always append `.Env` without checking the presence of existing environment variable. 
(https://github.com/kserve/kserve/blob/a6ed8e4b006e27433de2336e0e8b7cec11137dc1/pkg/apis/serving/v1beta1/inference_service_defaults.go#L264)\r\n\r\n\r\n**Environment:**\r\n\r\n- Kserve: 0.8.0\n", "before_files": [{"content": "# Copyright 2022 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport base64\nfrom typing import Dict, Union\n\nfrom kserve import Model, ModelServer, model_server\nfrom kserve.grpc.grpc_predict_v2_pb2 import ModelInferRequest\nfrom kserve.handlers.v2_datamodels import InferenceRequest\n\n\nclass ImageTransformer(Model):\n def __init__(self, name: str, predictor_host: str, protocol: str):\n super().__init__(name)\n self.predictor_host = predictor_host\n self.protocol = protocol\n self.model_name = name\n\n def preprocess(self, request: Union[Dict, ModelInferRequest, InferenceRequest], headers=None) -> ModelInferRequest:\n if isinstance(request, ModelInferRequest):\n return request\n else:\n payload = [\n {\n \"name\": \"input-0\",\n \"shape\": [],\n \"datatype\": \"BYTES\",\n \"contents\": {\n \"bytes_contents\": [base64.b64decode(request[\"inputs\"][0][\"data\"][0])]\n }\n }\n ]\n return ModelInferRequest(model_name=self.model_name, inputs=payload)\n\n\nparser = argparse.ArgumentParser(parents=[model_server.parser])\nparser.add_argument(\n \"--predictor_host\", help=\"The URL for the model predict function\", required=True\n)\nparser.add_argument(\n \"--protocol\", help=\"The protocol for the predictor\", default=\"v1\"\n)\nparser.add_argument(\n \"--model_name\", help=\"The name that the model is served under.\"\n)\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n model = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n protocol=args.protocol)\n ModelServer(workers=1).start([model])\n", "path": "python/custom_transformer/model_grpc.py"}]} | 1,681 | 204 |
gh_patches_debug_20425 | rasdani/github-patches | git_diff | wenet-e2e__wenet-1221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DLL load failed while importing _wenet: The specified module could not be found.
I installed wenet with pip install wenet.
The installation reported success.
I used the example program to do recognition.
The program is as follows:
```python
import sys
import wenet
def get_text_from_wav(dir, wav):
model_dir = dir
wav_file = wav
decoder = wenet.Decoder(model_dir)
ans = decoder.decode_wav(wav_file)
print(ans)
if __name__ == '__main__':
dir = "./models"
wav = "./1.wav"
get_text_from_wav(dir,wav)
```
But running it reports the following error:
```
Traceback (most recent call last):
File "D:\codes\speech2word\main.py", line 2, in <module>
import wenet
File "D:\codes\speech2word\venv\lib\site-packages\wenet\__init__.py", line 1, in <module>
from .decoder import Decoder # noqa
File "D:\codes\speech2word\venv\lib\site-packages\wenet\decoder.py", line 17, in <module>
import _wenet
ImportError: DLL load failed while importing _wenet: 找不到指定的模块。 (The specified module could not be found.)
```
How can I solve this?
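A diagnostic sketch (not from the original report) that lists what the wheel actually shipped next to `_wenet`, without triggering the failing import:
```python
import glob
import importlib.util
import os

# find_spec locates the package without executing wenet/__init__.py.
spec = importlib.util.find_spec("wenet")
site_dir = os.path.dirname(os.path.dirname(spec.origin))  # site-packages root

# setup.py copies _wenet.pyd and its DLL dependencies into this directory.
print(sorted(glob.glob(os.path.join(site_dir, "_wenet*.pyd"))))
print(sorted(glob.glob(os.path.join(site_dir, "*.dll"))))
```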
</issue>
<code>
[start of runtime/binding/python/setup.py]
1 #!/usr/bin/env python3
2 # Copyright (c) 2020 Xiaomi Corporation (author: Fangjun Kuang)
3 # 2022 Binbin Zhang([email protected])
4
5 import glob
6 import os
7 import platform
8 import shutil
9 import sys
10
11 import setuptools
12 from setuptools.command.build_ext import build_ext
13
14
15 def is_windows():
16 return platform.system() == "Windows"
17
18
19 def cmake_extension(name, *args, **kwargs) -> setuptools.Extension:
20 kwargs["language"] = "c++"
21 sources = []
22 return setuptools.Extension(name, sources, *args, **kwargs)
23
24
25 class BuildExtension(build_ext):
26 def build_extension(self, ext: setuptools.extension.Extension):
27 os.makedirs(self.build_temp, exist_ok=True)
28 os.makedirs(self.build_lib, exist_ok=True)
29
30 cmake_args = os.environ.get("WENET_CMAKE_ARGS",
31 "-DCMAKE_BUILD_TYPE=Release")
32 if "PYTHON_EXECUTABLE" not in cmake_args:
33 print(f"Setting PYTHON_EXECUTABLE to {sys.executable}")
34 cmake_args += f" -DPYTHON_EXECUTABLE={sys.executable}"
35
36 src_dir = os.path.dirname(os.path.abspath(__file__))
37 os.system(f"cmake {cmake_args} -B {self.build_temp} -S {src_dir}")
38 ret = os.system(f"""
39 cmake --build {self.build_temp} --target _wenet --config Release
40 """)
41 if ret != 0:
42 raise Exception(
43 "\nBuild wenet failed. Please check the error message.\n"
44 "You can ask for help by creating an issue on GitHub.\n"
45 "\nClick:\n https://github.com/wenet-e2e/wenet/issues/new\n"
46 )
47
48 libs = []
49 torch_lib = 'fc_base/libtorch-src/lib'
50 for ext in ['so', 'pyd']:
51 libs.extend(glob.glob(
52 f"{self.build_temp}/**/_wenet*.{ext}", recursive=True))
53 for ext in ['so', 'dylib', 'dll']:
54 libs.extend(glob.glob(
55 f"{self.build_temp}/**/*wenet_api.{ext}", recursive=True))
56 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/*c10.{ext}'))
57 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/*torch_cpu.{ext}'))
58
59 if not is_windows():
60 fst_lib = 'fc_base/openfst-build/src/lib/.libs'
61 for ext in ['so', 'dylib']:
62 libs.extend(glob.glob(f'{src_dir}/{fst_lib}/libfst.{ext}'))
63 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libgomp*')) # linux
64 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libiomp*')) # macos
65 else:
66 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/asmjit.dll'))
67 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/fbgemm.dll'))
68 libs.extend(glob.glob(f'{src_dir}/{torch_lib}/uv.dll'))
69
70 for lib in libs:
71 print(f"Copying {lib} to {self.build_lib}/")
72 shutil.copy(f"{lib}", f"{self.build_lib}/")
73
74
75 def read_long_description():
76 with open("README.md", encoding="utf8") as f:
77 readme = f.read()
78 return readme
79
80
81 package_name = "wenet"
82
83 setuptools.setup(
84 name=package_name,
85 version='1.0.4',
86 author="Binbin Zhang",
87 author_email="[email protected]",
88 package_dir={
89 package_name: "py",
90 },
91 packages=[package_name],
92 url="https://github.com/wenet-e2e/wenet",
93 long_description=read_long_description(),
94 long_description_content_type="text/markdown",
95 ext_modules=[cmake_extension("_wenet")],
96 cmdclass={"build_ext": BuildExtension},
97 zip_safe=False,
98 classifiers=[
99 "Programming Language :: C++",
100 "Programming Language :: Python",
101 "Topic :: Scientific/Engineering :: Artificial Intelligence",
102 ],
103 license="Apache licensed, as found in the LICENSE file",
104 python_requires=">=3.6",
105 )
106
[end of runtime/binding/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/runtime/binding/python/setup.py b/runtime/binding/python/setup.py
--- a/runtime/binding/python/setup.py
+++ b/runtime/binding/python/setup.py
@@ -60,12 +60,12 @@
fst_lib = 'fc_base/openfst-build/src/lib/.libs'
for ext in ['so', 'dylib']:
libs.extend(glob.glob(f'{src_dir}/{fst_lib}/libfst.{ext}'))
- libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libgomp*')) # linux
- libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libiomp*')) # macos
else:
libs.extend(glob.glob(f'{src_dir}/{torch_lib}/asmjit.dll'))
libs.extend(glob.glob(f'{src_dir}/{torch_lib}/fbgemm.dll'))
libs.extend(glob.glob(f'{src_dir}/{torch_lib}/uv.dll'))
+ libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libgomp*')) # linux
+ libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libiomp5*')) # macos/win
for lib in libs:
print(f"Copying {lib} to {self.build_lib}/")
| {"golden_diff": "diff --git a/runtime/binding/python/setup.py b/runtime/binding/python/setup.py\n--- a/runtime/binding/python/setup.py\n+++ b/runtime/binding/python/setup.py\n@@ -60,12 +60,12 @@\n fst_lib = 'fc_base/openfst-build/src/lib/.libs'\n for ext in ['so', 'dylib']:\n libs.extend(glob.glob(f'{src_dir}/{fst_lib}/libfst.{ext}'))\n- libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libgomp*')) # linux\n- libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libiomp*')) # macos\n else:\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/asmjit.dll'))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/fbgemm.dll'))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/uv.dll'))\n+ libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libgomp*')) # linux\n+ libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libiomp5*')) # macos/win\n \n for lib in libs:\n print(f\"Copying {lib} to {self.build_lib}/\")\n", "issue": "DLL load failed while importing _wenet: \u627e\u4e0d\u5230\u6307\u5b9a\u7684\u6a21\u5757\u3002\n\u6211\u5b89\u88c5\u4e86wenet, pip install wenet.\r\n\u5b89\u88c5\u63d0\u793a\u6210\u529f\u4e86\u3002\r\n\u6211\u7528\u4f8b\u5b50\u7a0b\u5e8f\u505a\u8bc6\u522b\u3002\r\n\u7a0b\u5e8f\u5982\u4e0b\uff1a\r\nimport sys\r\nimport wenet\r\n\r\ndef get_text_from_wav(dir, wav):\r\n model_dir = dir\r\n wav_file = wav\r\n decoder = wenet.Decoder(model_dir)\r\n ans = decoder.decode_wav(wav_file)\r\n print(ans)\r\n\r\nif __name__ == '__main__':\r\n dir = \"./models\"\r\n wav = \"./1.wav\"\r\n get_text_from_wav(dir,wav)\r\n\r\n\u4f46\u662f\u8fd0\u884c\u62a5\u9519\u5982\u4e0b\uff1a\r\nTraceback (most recent call last):\r\n File \"D:\\codes\\speech2word\\main.py\", line 2, in <module>\r\n import wenet\r\n File \"D:\\codes\\speech2word\\venv\\lib\\site-packages\\wenet\\__init__.py\", line 1, in <module>\r\n from .decoder import Decoder # noqa\r\n File \"D:\\codes\\speech2word\\venv\\lib\\site-packages\\wenet\\decoder.py\", line 17, in <module>\r\n import _wenet\r\nImportError: DLL load failed while importing _wenet: \u627e\u4e0d\u5230\u6307\u5b9a\u7684\u6a21\u5757\u3002\r\n\r\n\u8bf7\u95ee\u5982\u4f55\u89e3\u51b3\uff1f\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) 2020 Xiaomi Corporation (author: Fangjun Kuang)\n# 2022 Binbin Zhang([email protected])\n\nimport glob\nimport os\nimport platform\nimport shutil\nimport sys\n\nimport setuptools\nfrom setuptools.command.build_ext import build_ext\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef cmake_extension(name, *args, **kwargs) -> setuptools.Extension:\n kwargs[\"language\"] = \"c++\"\n sources = []\n return setuptools.Extension(name, sources, *args, **kwargs)\n\n\nclass BuildExtension(build_ext):\n def build_extension(self, ext: setuptools.extension.Extension):\n os.makedirs(self.build_temp, exist_ok=True)\n os.makedirs(self.build_lib, exist_ok=True)\n\n cmake_args = os.environ.get(\"WENET_CMAKE_ARGS\",\n \"-DCMAKE_BUILD_TYPE=Release\")\n if \"PYTHON_EXECUTABLE\" not in cmake_args:\n print(f\"Setting PYTHON_EXECUTABLE to {sys.executable}\")\n cmake_args += f\" -DPYTHON_EXECUTABLE={sys.executable}\"\n\n src_dir = os.path.dirname(os.path.abspath(__file__))\n os.system(f\"cmake {cmake_args} -B {self.build_temp} -S {src_dir}\")\n ret = os.system(f\"\"\"\n cmake --build {self.build_temp} --target _wenet --config Release\n \"\"\")\n if ret != 0:\n raise Exception(\n \"\\nBuild wenet failed. 
Please check the error message.\\n\"\n \"You can ask for help by creating an issue on GitHub.\\n\"\n \"\\nClick:\\n https://github.com/wenet-e2e/wenet/issues/new\\n\"\n )\n\n libs = []\n torch_lib = 'fc_base/libtorch-src/lib'\n for ext in ['so', 'pyd']:\n libs.extend(glob.glob(\n f\"{self.build_temp}/**/_wenet*.{ext}\", recursive=True))\n for ext in ['so', 'dylib', 'dll']:\n libs.extend(glob.glob(\n f\"{self.build_temp}/**/*wenet_api.{ext}\", recursive=True))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/*c10.{ext}'))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/*torch_cpu.{ext}'))\n\n if not is_windows():\n fst_lib = 'fc_base/openfst-build/src/lib/.libs'\n for ext in ['so', 'dylib']:\n libs.extend(glob.glob(f'{src_dir}/{fst_lib}/libfst.{ext}'))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libgomp*')) # linux\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/libiomp*')) # macos\n else:\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/asmjit.dll'))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/fbgemm.dll'))\n libs.extend(glob.glob(f'{src_dir}/{torch_lib}/uv.dll'))\n\n for lib in libs:\n print(f\"Copying {lib} to {self.build_lib}/\")\n shutil.copy(f\"{lib}\", f\"{self.build_lib}/\")\n\n\ndef read_long_description():\n with open(\"README.md\", encoding=\"utf8\") as f:\n readme = f.read()\n return readme\n\n\npackage_name = \"wenet\"\n\nsetuptools.setup(\n name=package_name,\n version='1.0.4',\n author=\"Binbin Zhang\",\n author_email=\"[email protected]\",\n package_dir={\n package_name: \"py\",\n },\n packages=[package_name],\n url=\"https://github.com/wenet-e2e/wenet\",\n long_description=read_long_description(),\n long_description_content_type=\"text/markdown\",\n ext_modules=[cmake_extension(\"_wenet\")],\n cmdclass={\"build_ext\": BuildExtension},\n zip_safe=False,\n classifiers=[\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n license=\"Apache licensed, as found in the LICENSE file\",\n python_requires=\">=3.6\",\n)\n", "path": "runtime/binding/python/setup.py"}]} | 1,949 | 275 |
gh_patches_debug_63270 | rasdani/github-patches | git_diff | google__turbinia-1086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sphinx docs build broken
Getting an error when trying to build the docs:
```
$ sphinx-build -b html -d build/doctrees docs dist/docs
Running Sphinx v4.5.0
WARNING: html_static_path entry '_static' does not exist
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 19 source files that are out of date
updating environment: [new config] 19 added, 0 changed, 0 removed
reading sources... [ 5%] developer/contributing
Extension error (sphinx_markdown_tables):
Handler <function process_tables at 0x7fb9b1b0a700> for event 'source-read' threw an exception (exception: __init__() missing 1 required positional argument: 'config')
```
Trying an earlier version of Sphinx and an earlier version of the repo does not resolve the issue. It seems to be something in the sphinx-markdown-tables module, but that doesn't seem to have changed recently either (the latest change was more than a month ago: https://pypi.org/project/sphinx-markdown-tables/0.0.15/#history).
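Since sphinx-markdown-tables itself had not changed, a transitively updated dependency is worth checking too — a diagnostic sketch:
```python
# Print the installed versions of the packages named in (or pulled in by)
# the failing extension; "markdown" is its main transitive dependency.
import importlib.metadata as metadata

for pkg in ("sphinx", "sphinx-markdown-tables", "markdown"):
    print(pkg, metadata.version(pkg))
```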
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 # import os
14 # import sys
15 # sys.path.insert(0, os.path.abspath('.'))
16
17 from __future__ import unicode_literals
18 import re
19
20 from recommonmark.parser import CommonMarkParser
21 from recommonmark.transform import AutoStructify
22 from docutils import nodes, transforms
23
24 # -- Project information -----------------------------------------------------
25
26 project = 'Turbinia'
27 copyright = '2020, Google Inc'
28 author = 'Turbinia maintainers'
29
30 # -- General configuration ---------------------------------------------------
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = [
36 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
37 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',
38 'recommonmark'
39 ]
40
41 # Add any paths that contain templates here, relative to this directory.
42 templates_path = ['_templates']
43
44 # List of patterns, relative to source directory, that match files and
45 # directories to ignore when looking for source files.
46 # This pattern also affects html_static_path and html_extra_path.
47 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']
48
49 # -- Options for HTML output -------------------------------------------------
50
51 # The theme to use for HTML and HTML Help pages. See the documentation for
52 # a list of builtin themes.
53 #
54 html_theme = 'sphinx_rtd_theme'
55
56 # The master toctree document.
57 master_doc = 'index'
58
59 # The name of the Pygments (syntax highlighting) style to use.
60 pygments_style = 'sphinx'
61
62 # Add any paths that contain custom static files (such as style sheets) here,
63 # relative to this directory. They are copied after the builtin static files,
64 # so a file named "default.css" will overwrite the builtin "default.css".
65 html_static_path = ['_static']
66
67 # The default sidebars (for documents that don't match any pattern) are
68 # defined by theme itself. Builtin themes are using these templates by
69 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
70 # 'searchbox.html']``.
71 #
72 html_sidebars = {
73 '**': [
74 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',
75 'searchbox.html'
76 ]
77 }
78
79 # Adding retries to linkchecks before declaring a link broken
80 linkcheck_retries = 3
81
82 # Output file base name for HTML help builder.
83 htmlhelp_basename = 'turbiniadoc'
84
85 html_logo = "images/turbinia-logo.jpg"
86
87
88 class ProcessLink(transforms.Transform):
89 """Transform definition to parse .md references to internal pages."""
90
91 default_priority = 1000
92
93 def find_replace(self, node):
94 """Parses URIs containing .md and replaces them with their HTML page."""
95 if isinstance(node, nodes.reference) and 'refuri' in node:
96 r = node['refuri']
97 if r.endswith('.md'):
98 r = r[:-3] + '.html'
99 node['refuri'] = r
100
101 return node
102
103 def traverse(self, node):
104 """Traverse the document tree rooted at node.
105 node : docutil node
106 current root node to traverse
107 """
108 self.find_replace(node)
109
110 for c in node.children:
111 self.traverse(c)
112
113 # pylint: disable=arguments-differ,attribute-defined-outside-init
114 # this was taken from GRR's config file for documentation
115 def apply(self):
116 self.current_level = 0
117 self.traverse(self.document)
118
119
120 def setup(app):
121 """Add custom parsers to Sphinx generation."""
122 app.add_config_value(
123 'recommonmark_config', {
124 'enable_auto_doc_ref': False,
125 }, True)
126 app.add_transform(AutoStructify)
127 app.add_transform(ProcessLink)
128
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -34,8 +34,7 @@
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
- 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',
- 'recommonmark'
+ 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'recommonmark'
]
# Add any paths that contain templates here, relative to this directory.
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -34,8 +34,7 @@\n # ones.\n extensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n- 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n- 'recommonmark'\n+ 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'recommonmark'\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n", "issue": "sphinx docs build broken\nGetting an error when trying to build the docs:\r\n```\r\n$ sphinx-build -b html -d build/doctrees docs dist/docs\r\nRunning Sphinx v4.5.0\r\nWARNING: html_static_path entry '_static' does not exist\r\nbuilding [mo]: targets for 0 po files that are out of date\r\nbuilding [html]: targets for 19 source files that are out of date\r\nupdating environment: [new config] 19 added, 0 changed, 0 removed\r\nreading sources... [ 5%] developer/contributing \r\nExtension error (sphinx_markdown_tables):\r\nHandler <function process_tables at 0x7fb9b1b0a700> for event 'source-read' threw an exception (exception: __init__() missing 1 required positional argument: 'config')\r\n```\r\n\r\nTrying an earlier version of sphinx and an earlier version of the repo does not resolve the issue. It seems to be something in the sphinx-markdown-tables module, but that doesn't seem to have changed that recently either (more than a month ago: https://pypi.org/project/sphinx-markdown-tables/0.0.15/#history).\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n# Adding retries to linkchecks before declaring a link broken\nlinkcheck_retries = 3\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n", "path": "docs/conf.py"}]} | 2,036 | 135 |
gh_patches_debug_3111 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-128 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lists being marked as Optional
When defining a list, the resulting schema marks the list as optional (nullable in GraphQL terms) even if it wasn't wrapped in `typing.Optional`; we should fix that :)
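For illustration, a minimal schema sketch (type and field names are hypothetical) showing the mismatch:

```python
import typing

import strawberry


@strawberry.type
class Query:
    # not wrapped in typing.Optional, so the list itself should be non-nullable
    names: typing.List[str]

# schema currently generated: names: [String!]   (nullable list)
# schema expected:            names: [String!]!  (non-null list)
```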
</issue>
<code>
[start of strawberry/type_converter.py]
1 from collections.abc import AsyncGenerator
2
3 from graphql import (
4 GraphQLBoolean,
5 GraphQLFloat,
6 GraphQLID,
7 GraphQLInt,
8 GraphQLList,
9 GraphQLNonNull,
10 GraphQLString,
11 GraphQLUnionType,
12 )
13
14 from .exceptions import UnallowedReturnTypeForUnion, WrongReturnTypeForUnion
15 from .scalars import ID
16 from .utils.typing import is_union
17
18
19 REGISTRY = {
20 str: GraphQLString,
21 int: GraphQLInt,
22 float: GraphQLFloat,
23 bool: GraphQLBoolean,
24 ID: GraphQLID,
25 }
26
27
28 # TODO: make so that we don't pass force optional
29 # we use that when trying to get the type for a
30 # option field (which can either be a scalar or an object type)
31 def get_graphql_type_for_annotation(
32 annotation, field_name: str, force_optional: bool = False
33 ):
34 # TODO: this might lead to issues with types that have a field value
35 is_field_optional = force_optional
36
37 if hasattr(annotation, "field"):
38 graphql_type = annotation.field
39 else:
40 annotation_name = getattr(annotation, "_name", None)
41
42 if annotation_name == "List":
43 list_of_type = get_graphql_type_for_annotation(
44 annotation.__args__[0], field_name
45 )
46
47 return GraphQLList(list_of_type)
48
49 annotation_origin = getattr(annotation, "__origin__", None)
50
51 if annotation_origin == AsyncGenerator:
52 # async generators are used in subscription, we only need the yield type
53 # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator
54 return get_graphql_type_for_annotation(annotation.__args__[0], field_name)
55
56 elif is_union(annotation):
57 types = annotation.__args__
58 non_none_types = [x for x in types if x != None.__class__] # noqa:E721
59
60 # optionals are represented as Union[type, None]
61 if len(non_none_types) == 1:
62 is_field_optional = True
63 graphql_type = get_graphql_type_for_annotation(
64 non_none_types[0], field_name, force_optional=True
65 )
66 else:
67 is_field_optional = None.__class__ in types
68
69 def _resolve_type(self, value, _type):
70 if not hasattr(self, "field"):
71 raise WrongReturnTypeForUnion(value.field_name, str(type(self)))
72
73 if self.field not in _type.types:
74 raise UnallowedReturnTypeForUnion(
75 value.field_name, str(type(self)), _type.types
76 )
77
78 return self.field
79
80 # TODO: union types don't work with scalar types
81 # so we want to return a nice error
82 # also we want to make sure we have been passed
83 # strawberry types
84 graphql_type = GraphQLUnionType(
85 field_name, [type.field for type in types]
86 )
87 graphql_type.resolve_type = _resolve_type
88 else:
89 graphql_type = REGISTRY.get(annotation)
90
91 if not graphql_type:
92 raise ValueError(f"Unable to get GraphQL type for {annotation}")
93
94 if is_field_optional:
95 return graphql_type
96
97 return GraphQLNonNull(graphql_type)
98
[end of strawberry/type_converter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/type_converter.py b/strawberry/type_converter.py
--- a/strawberry/type_converter.py
+++ b/strawberry/type_converter.py
@@ -44,7 +44,9 @@
annotation.__args__[0], field_name
)
- return GraphQLList(list_of_type)
+ list_type = GraphQLList(list_of_type)
+
+ return list_type if is_field_optional else GraphQLNonNull(list_type)
annotation_origin = getattr(annotation, "__origin__", None)
| {"golden_diff": "diff --git a/strawberry/type_converter.py b/strawberry/type_converter.py\n--- a/strawberry/type_converter.py\n+++ b/strawberry/type_converter.py\n@@ -44,7 +44,9 @@\n annotation.__args__[0], field_name\n )\n \n- return GraphQLList(list_of_type)\n+ list_type = GraphQLList(list_of_type)\n+\n+ return list_type if is_field_optional else GraphQLNonNull(list_type)\n \n annotation_origin = getattr(annotation, \"__origin__\", None)\n", "issue": "Lists being marked as Optional\nWhen defining a list the resulting schema marks the list as optional (or nullable in GraphQL terms) even if it wasn't wrapped in `typing.Optional`, we should fix that :)\n", "before_files": [{"content": "from collections.abc import AsyncGenerator\n\nfrom graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n GraphQLID,\n GraphQLInt,\n GraphQLList,\n GraphQLNonNull,\n GraphQLString,\n GraphQLUnionType,\n)\n\nfrom .exceptions import UnallowedReturnTypeForUnion, WrongReturnTypeForUnion\nfrom .scalars import ID\nfrom .utils.typing import is_union\n\n\nREGISTRY = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n bool: GraphQLBoolean,\n ID: GraphQLID,\n}\n\n\n# TODO: make so that we don't pass force optional\n# we use that when trying to get the type for a\n# option field (which can either be a scalar or an object type)\ndef get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n):\n # TODO: this might lead to issues with types that have a field value\n is_field_optional = force_optional\n\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n annotation_name = getattr(annotation, \"_name\", None)\n\n if annotation_name == \"List\":\n list_of_type = get_graphql_type_for_annotation(\n annotation.__args__[0], field_name\n )\n\n return GraphQLList(list_of_type)\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n if annotation_origin == AsyncGenerator:\n # async generators are used in subscription, we only need the yield type\n # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator\n return get_graphql_type_for_annotation(annotation.__args__[0], field_name)\n\n elif is_union(annotation):\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n\n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n is_field_optional = True\n graphql_type = get_graphql_type_for_annotation(\n non_none_types[0], field_name, force_optional=True\n )\n else:\n is_field_optional = None.__class__ in types\n\n def _resolve_type(self, value, _type):\n if not hasattr(self, \"field\"):\n raise WrongReturnTypeForUnion(value.field_name, str(type(self)))\n\n if self.field not in _type.types:\n raise UnallowedReturnTypeForUnion(\n value.field_name, str(type(self)), _type.types\n )\n\n return self.field\n\n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n # also we want to make sure we have been passed\n # strawberry types\n graphql_type = GraphQLUnionType(\n field_name, [type.field for type in types]\n )\n graphql_type.resolve_type = _resolve_type\n else:\n graphql_type = REGISTRY.get(annotation)\n\n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n\n if is_field_optional:\n return graphql_type\n\n return GraphQLNonNull(graphql_type)\n", "path": "strawberry/type_converter.py"}]} | 1,436 | 116 |
gh_patches_debug_1075 | rasdani/github-patches | git_diff | e2nIEE__pandapower-563 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
from_mpc failed to load the case generated by to_mpc
After checking the source code, I found that the to_mpc function saves the fields in a loose format. According to the from_mpc function, all the fields should be stored under a variable called "mpc" (the default), but to_mpc does not follow this convention. As a result, from_mpc cannot load a case generated by to_mpc.
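A minimal round trip that reproduces the failure (file name illustrative):

```python
import pandapower.converter as pc
import pandapower.networks as pn

net = pn.case9()
pc.to_mpc(net, "case9.mat")      # writes the ppc fields at the top level of the .mat file
net2 = pc.from_mpc("case9.mat")  # looks for them under the "mpc" variable, so loading fails
```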
</issue>
<code>
[start of pandapower/converter/matpower/to_mpc.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6
7 import copy
8
9 import numpy as np
10 from scipy.io import savemat
11
12 from pandapower.converter.pypower import to_ppc
13
14 try:
15 import pplog as logging
16 except ImportError:
17 import logging
18
19 logger = logging.getLogger(__name__)
20
21
22 def to_mpc(net, filename=None, **kwargs):
23 """
24 This function converts a pandapower net to a matpower case files (.mat) version 2.
25 Note: python is 0-based while Matlab is 1-based.
26
27 INPUT:
28 **net** - The pandapower net.
29
30 OPTIONAL:
31 **filename** (str, None) - File path + name of the mat file which will be created. If None
32 the mpc will only be returned
33
34 ****kwargs** - please look at to_ppc() documentation
35
36 EXAMPLE:
37 import pandapower.converter as pc
38 import pandapower.networks as pn
39 net = pn.case9()
40 pc.to_mpc(net, "case9.mat")
41
42 """
43 ppc = to_ppc(net, **kwargs)
44
45 mpc = _ppc2mpc(ppc)
46 if filename is not None:
47 # savemat
48 savemat(filename, mpc)
49
50 return mpc
51
52
53 def _ppc2mpc(ppc):
54 """
55 Convert network in Pypower/Matpower format
56 Convert 0-based python to 1-based Matlab
57
58 **INPUT**:
59 * net - The pandapower format network
60 * filename - File path + name of the mat file which is created
61 """
62
63 # convert to matpower
64 # Matlab is one-based, so all entries (buses, lines, gens) have to start with 1 instead of 0
65 mpc = copy.deepcopy(ppc)
66 if len(np.where(mpc["bus"][:, 0] == 0)[0]):
67 mpc["bus"][:, 0] = mpc["bus"][:, 0] + 1
68 mpc["gen"][:, 0] = mpc["gen"][:, 0] + 1
69 mpc["branch"][:, 0:2] = mpc["branch"][:, 0:2] + 1
70 # adjust for the matpower converter -> taps should be 0 when there is no transformer, but are 1
71 mpc["branch"][np.where(mpc["branch"][:, 8] == 1), 8] = 0
72 # version is a string
73 mpc["version"] = str(mpc["version"])
74 # baseMVA has to be a float instead of int
75 mpc["baseMVA"] = mpc["baseMVA"] * 1.0
76 return mpc
77
78
79 if "__main__" == __name__:
80 pass
81
[end of pandapower/converter/matpower/to_mpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandapower/converter/matpower/to_mpc.py b/pandapower/converter/matpower/to_mpc.py
--- a/pandapower/converter/matpower/to_mpc.py
+++ b/pandapower/converter/matpower/to_mpc.py
@@ -42,7 +42,8 @@
"""
ppc = to_ppc(net, **kwargs)
- mpc = _ppc2mpc(ppc)
+ mpc = dict()
+ mpc["mpc"] = _ppc2mpc(ppc)
if filename is not None:
# savemat
savemat(filename, mpc)
| {"golden_diff": "diff --git a/pandapower/converter/matpower/to_mpc.py b/pandapower/converter/matpower/to_mpc.py\n--- a/pandapower/converter/matpower/to_mpc.py\n+++ b/pandapower/converter/matpower/to_mpc.py\n@@ -42,7 +42,8 @@\n \"\"\"\n ppc = to_ppc(net, **kwargs)\n \n- mpc = _ppc2mpc(ppc)\n+ mpc = dict()\n+ mpc[\"mpc\"] = _ppc2mpc(ppc)\n if filename is not None:\n # savemat\n savemat(filename, mpc)\n", "issue": "from_mpc failed to load the case generated by to_mpc\nAfter checking the source code, I found the to_mpc function saves the fields in a loose format. According to the from_mpc function, all the fields should be under a variable called \"mpc\" (default), however the to_mpc function does not follow this, which leads to a situation that the from_mpc function cannot load the case generated by the to_mpc function.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport copy\n\nimport numpy as np\nfrom scipy.io import savemat\n\nfrom pandapower.converter.pypower import to_ppc\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef to_mpc(net, filename=None, **kwargs):\n \"\"\"\n This function converts a pandapower net to a matpower case files (.mat) version 2.\n Note: python is 0-based while Matlab is 1-based.\n\n INPUT:\n **net** - The pandapower net.\n\n OPTIONAL:\n **filename** (str, None) - File path + name of the mat file which will be created. If None\n the mpc will only be returned\n\n ****kwargs** - please look at to_ppc() documentation\n\n EXAMPLE:\n import pandapower.converter as pc\n import pandapower.networks as pn\n net = pn.case9()\n pc.to_mpc(net, \"case9.mat\")\n\n \"\"\"\n ppc = to_ppc(net, **kwargs)\n\n mpc = _ppc2mpc(ppc)\n if filename is not None:\n # savemat\n savemat(filename, mpc)\n\n return mpc\n\n\ndef _ppc2mpc(ppc):\n \"\"\"\n Convert network in Pypower/Matpower format\n Convert 0-based python to 1-based Matlab\n\n **INPUT**:\n * net - The pandapower format network\n * filename - File path + name of the mat file which is created\n \"\"\"\n\n # convert to matpower\n # Matlab is one-based, so all entries (buses, lines, gens) have to start with 1 instead of 0\n mpc = copy.deepcopy(ppc)\n if len(np.where(mpc[\"bus\"][:, 0] == 0)[0]):\n mpc[\"bus\"][:, 0] = mpc[\"bus\"][:, 0] + 1\n mpc[\"gen\"][:, 0] = mpc[\"gen\"][:, 0] + 1\n mpc[\"branch\"][:, 0:2] = mpc[\"branch\"][:, 0:2] + 1\n # adjust for the matpower converter -> taps should be 0 when there is no transformer, but are 1\n mpc[\"branch\"][np.where(mpc[\"branch\"][:, 8] == 1), 8] = 0\n # version is a string\n mpc[\"version\"] = str(mpc[\"version\"])\n # baseMVA has to be a float instead of int\n mpc[\"baseMVA\"] = mpc[\"baseMVA\"] * 1.0\n return mpc\n\n\nif \"__main__\" == __name__:\n pass\n", "path": "pandapower/converter/matpower/to_mpc.py"}]} | 1,469 | 147 |
gh_patches_debug_43407 | rasdani/github-patches | git_diff | deepset-ai__haystack-5083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add FileClassifier media support
**Is your feature request related to a problem? Please describe.**
As a user, I want to add WhisperTranscriber to my pipeline. I would like to use FileClassifier to classify my documents/media and route them to the correct node.
**Describe the solution you'd like**
- Add support for the media file types that Whisper accepts to the FileClassifier
**Describe alternatives you've considered**
Keep it as it is and don't integrate it into the current pipelines
**Additional context**
This feature request should be considered after the current Whisper PR #4335 is merged.
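A sketch of the intended indexing pipeline (node wiring is illustrative; it assumes WhisperTranscriber is exposed in `haystack.nodes` after PR #4335 and that the classifier gains a media route, which is what this request asks for):

```python
from haystack import Pipeline
from haystack.nodes import FileTypeClassifier, TextConverter, WhisperTranscriber

pipeline = Pipeline()
pipeline.add_node(FileTypeClassifier(), name="classifier", inputs=["File"])
# txt files are routed to the text converter on the first edge
pipeline.add_node(TextConverter(), name="text_converter", inputs=["classifier.output_1"])
# media files would be routed to Whisper on a new edge (edge number assumed)
pipeline.add_node(WhisperTranscriber(api_key="..."), name="whisper", inputs=["classifier.output_6"])
```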
</issue>
<code>
[start of haystack/nodes/file_classifier/file_type.py]
1 import mimetypes
2 from typing import Any, Dict, List, Union, Optional
3
4 import logging
5 from pathlib import Path
6
7 from haystack.nodes.base import BaseComponent
8 from haystack.lazy_imports import LazyImport
9
10
11 logger = logging.getLogger(__name__)
12
13 with LazyImport() as magic_import:
14 import magic
15
16
17 DEFAULT_TYPES = ["txt", "pdf", "md", "docx", "html"]
18
19
20 class FileTypeClassifier(BaseComponent):
21 """
22 Route files in an Indexing Pipeline to corresponding file converters.
23 """
24
25 outgoing_edges = len(DEFAULT_TYPES)
26
27 def __init__(self, supported_types: Optional[List[str]] = None):
28 """
29 Node that sends out files on a different output edge depending on their extension.
30
31 :param supported_types: The file types that this node can distinguish between.
32 If no value is provided, the value created by default comprises: `txt`, `pdf`, `md`, `docx`, and `html`.
33 Lists with duplicate elements are not allowed.
34 """
35 if supported_types is None:
36 supported_types = DEFAULT_TYPES
37 if len(set(supported_types)) != len(supported_types):
38 duplicates = supported_types
39 for item in set(supported_types):
40 duplicates.remove(item)
41 raise ValueError(f"supported_types can't contain duplicate values ({duplicates}).")
42
43 super().__init__()
44
45 self.supported_types = supported_types
46
47 @classmethod
48 def _calculate_outgoing_edges(cls, component_params: Dict[str, Any]) -> int:
49 supported_types = component_params.get("supported_types", DEFAULT_TYPES)
50 return len(supported_types)
51
52 def _estimate_extension(self, file_path: Path) -> str:
53 """
54 Return the extension found based on the contents of the given file
55
56 :param file_path: the path to extract the extension from
57 """
58 try:
59 magic_import.check()
60 extension = magic.from_file(str(file_path), mime=True)
61 return mimetypes.guess_extension(extension) or ""
62 except (NameError, ImportError):
63 logger.error(
64 "The type of '%s' could not be guessed, probably because 'python-magic' is not installed. Ignoring this error."
65 "Please make sure the necessary OS libraries are installed if you need this functionality ('python-magic' or 'python-magic-bin' on Windows).",
66 file_path,
67 )
68 return ""
69
70 def _get_extension(self, file_paths: List[Path]) -> str:
71 """
72 Return the extension found in the given list of files.
73 Also makes sure that all files have the same extension.
74 If this is not true, it throws an exception.
75
76 :param file_paths: the paths to extract the extension from
77 :return: a set of strings with all the extensions (without duplicates), the extension will be guessed if the file has none
78 """
79 extension = file_paths[0].suffix.lower()
80 if extension == "":
81 extension = self._estimate_extension(file_paths[0])
82
83 for path in file_paths:
84 path_suffix = path.suffix.lower()
85 if path_suffix == "":
86 path_suffix = self._estimate_extension(path)
87 if path_suffix != extension:
88 raise ValueError("Multiple file types are not allowed at once.")
89
90 return extension.lstrip(".")
91
92 def run(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore
93 """
94 Sends out files on a different output edge depending on their extension.
95
96 :param file_paths: paths to route on different edges.
97 """
98 if not isinstance(file_paths, list):
99 file_paths = [file_paths]
100
101 paths = [Path(path) for path in file_paths]
102
103 output = {"file_paths": paths}
104 extension = self._get_extension(paths)
105 try:
106 index = self.supported_types.index(extension) + 1
107 except ValueError:
108 raise ValueError(
109 f"Files of type '{extension}' ({paths[0]}) are not supported. "
110 f"The supported types are: {self.supported_types}. "
111 "Consider using the 'supported_types' parameter to "
112 "change the types accepted by this node."
113 )
114 return output, f"output_{index}"
115
116 def run_batch(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore
117 return self.run(file_paths=file_paths)
118
[end of haystack/nodes/file_classifier/file_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/nodes/file_classifier/file_type.py b/haystack/nodes/file_classifier/file_type.py
--- a/haystack/nodes/file_classifier/file_type.py
+++ b/haystack/nodes/file_classifier/file_type.py
@@ -14,7 +14,9 @@
import magic
-DEFAULT_TYPES = ["txt", "pdf", "md", "docx", "html"]
+DEFAULT_TYPES = ["txt", "pdf", "md", "docx", "html", "media"]
+
+DEFAULT_MEDIA_TYPES = ["mp3", "mp4", "mpeg", "m4a", "wav", "webm"]
class FileTypeClassifier(BaseComponent):
@@ -24,15 +26,20 @@
outgoing_edges = len(DEFAULT_TYPES)
- def __init__(self, supported_types: Optional[List[str]] = None):
+ def __init__(self, supported_types: Optional[List[str]] = None, full_analysis: bool = False):
"""
Node that sends out files on a different output edge depending on their extension.
- :param supported_types: The file types that this node can distinguish between.
- If no value is provided, the value created by default comprises: `txt`, `pdf`, `md`, `docx`, and `html`.
- Lists with duplicate elements are not allowed.
+ :param supported_types: The file types this node distinguishes. Optional.
+ If you don't provide any value, the default is: `txt`, `pdf`, `md`, `docx`, and `html`.
+ You can't use lists with duplicate elements.
+ :param full_analysis: If True, the whole file is analyzed to determine the file type.
+ If False, only the first 2049 bytes are analyzed.
"""
+ self.full_analysis = full_analysis
+ self._default_types = False
if supported_types is None:
+ self._default_types = True
supported_types = DEFAULT_TYPES
if len(set(supported_types)) != len(supported_types):
duplicates = supported_types
@@ -56,9 +63,17 @@
:param file_path: the path to extract the extension from
"""
try:
- magic_import.check()
- extension = magic.from_file(str(file_path), mime=True)
- return mimetypes.guess_extension(extension) or ""
+ with open(file_path, "rb") as f:
+ if self.full_analysis:
+ buffer = f.read()
+ else:
+ buffer = f.read(2049)
+ extension = magic.from_buffer(buffer, mime=True)
+ real_extension = mimetypes.guess_extension(extension) or ""
+ real_extension = real_extension.lstrip(".")
+ if self._default_types and real_extension in DEFAULT_MEDIA_TYPES:
+ return "media"
+ return real_extension or ""
except (NameError, ImportError):
logger.error(
"The type of '%s' could not be guessed, probably because 'python-magic' is not installed. Ignoring this error."
@@ -76,18 +91,19 @@
:param file_paths: the paths to extract the extension from
:return: a set of strings with all the extensions (without duplicates), the extension will be guessed if the file has none
"""
- extension = file_paths[0].suffix.lower()
- if extension == "":
+ extension = file_paths[0].suffix.lower().lstrip(".")
+
+ if extension == "" or (self._default_types and extension in DEFAULT_MEDIA_TYPES):
extension = self._estimate_extension(file_paths[0])
for path in file_paths:
- path_suffix = path.suffix.lower()
- if path_suffix == "":
+ path_suffix = path.suffix.lower().lstrip(".")
+ if path_suffix == "" or (self._default_types and path_suffix in DEFAULT_MEDIA_TYPES):
path_suffix = self._estimate_extension(path)
if path_suffix != extension:
- raise ValueError("Multiple file types are not allowed at once.")
+ raise ValueError("Multiple non-default file types are not allowed at once.")
- return extension.lstrip(".")
+ return extension
def run(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore
"""
| {"golden_diff": "diff --git a/haystack/nodes/file_classifier/file_type.py b/haystack/nodes/file_classifier/file_type.py\n--- a/haystack/nodes/file_classifier/file_type.py\n+++ b/haystack/nodes/file_classifier/file_type.py\n@@ -14,7 +14,9 @@\n import magic\n \n \n-DEFAULT_TYPES = [\"txt\", \"pdf\", \"md\", \"docx\", \"html\"]\n+DEFAULT_TYPES = [\"txt\", \"pdf\", \"md\", \"docx\", \"html\", \"media\"]\n+\n+DEFAULT_MEDIA_TYPES = [\"mp3\", \"mp4\", \"mpeg\", \"m4a\", \"wav\", \"webm\"]\n \n \n class FileTypeClassifier(BaseComponent):\n@@ -24,15 +26,20 @@\n \n outgoing_edges = len(DEFAULT_TYPES)\n \n- def __init__(self, supported_types: Optional[List[str]] = None):\n+ def __init__(self, supported_types: Optional[List[str]] = None, full_analysis: bool = False):\n \"\"\"\n Node that sends out files on a different output edge depending on their extension.\n \n- :param supported_types: The file types that this node can distinguish between.\n- If no value is provided, the value created by default comprises: `txt`, `pdf`, `md`, `docx`, and `html`.\n- Lists with duplicate elements are not allowed.\n+ :param supported_types: The file types this node distinguishes. Optional.\n+ If you don't provide any value, the default is: `txt`, `pdf`, `md`, `docx`, and `html`.\n+ You can't use lists with duplicate elements.\n+ :param full_analysis: If True, the whole file is analyzed to determine the file type.\n+ If False, only the first 2049 bytes are analyzed.\n \"\"\"\n+ self.full_analysis = full_analysis\n+ self._default_types = False\n if supported_types is None:\n+ self._default_types = True\n supported_types = DEFAULT_TYPES\n if len(set(supported_types)) != len(supported_types):\n duplicates = supported_types\n@@ -56,9 +63,17 @@\n :param file_path: the path to extract the extension from\n \"\"\"\n try:\n- magic_import.check()\n- extension = magic.from_file(str(file_path), mime=True)\n- return mimetypes.guess_extension(extension) or \"\"\n+ with open(file_path, \"rb\") as f:\n+ if self.full_analysis:\n+ buffer = f.read()\n+ else:\n+ buffer = f.read(2049)\n+ extension = magic.from_buffer(buffer, mime=True)\n+ real_extension = mimetypes.guess_extension(extension) or \"\"\n+ real_extension = real_extension.lstrip(\".\")\n+ if self._default_types and real_extension in DEFAULT_MEDIA_TYPES:\n+ return \"media\"\n+ return real_extension or \"\"\n except (NameError, ImportError):\n logger.error(\n \"The type of '%s' could not be guessed, probably because 'python-magic' is not installed. 
Ignoring this error.\"\n@@ -76,18 +91,19 @@\n :param file_paths: the paths to extract the extension from\n :return: a set of strings with all the extensions (without duplicates), the extension will be guessed if the file has none\n \"\"\"\n- extension = file_paths[0].suffix.lower()\n- if extension == \"\":\n+ extension = file_paths[0].suffix.lower().lstrip(\".\")\n+\n+ if extension == \"\" or (self._default_types and extension in DEFAULT_MEDIA_TYPES):\n extension = self._estimate_extension(file_paths[0])\n \n for path in file_paths:\n- path_suffix = path.suffix.lower()\n- if path_suffix == \"\":\n+ path_suffix = path.suffix.lower().lstrip(\".\")\n+ if path_suffix == \"\" or (self._default_types and path_suffix in DEFAULT_MEDIA_TYPES):\n path_suffix = self._estimate_extension(path)\n if path_suffix != extension:\n- raise ValueError(\"Multiple file types are not allowed at once.\")\n+ raise ValueError(\"Multiple non-default file types are not allowed at once.\")\n \n- return extension.lstrip(\".\")\n+ return extension\n \n def run(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore\n \"\"\"\n", "issue": "Add FileClassifier media support\n**Is your feature request related to a problem? Please describe.**\r\nAs a user I want to add WhisperTranscriber in my pipeline. I would like to use FileClassifier to classify my documents/media and direct to the correct node. \r\n\r\n**Describe the solution you'd like**\r\n- Add support to media files (that Whisper allows) into the FileClassifier\r\n\r\n**Describe alternatives you've considered**\r\nKeep as it's and don't integrate into the current pipelines\r\n\r\n**Additional context**\r\nThis feature request is supposed to be considered after the merge of the current Whisper PR #4335.\r\n\n", "before_files": [{"content": "import mimetypes\nfrom typing import Any, Dict, List, Union, Optional\n\nimport logging\nfrom pathlib import Path\n\nfrom haystack.nodes.base import BaseComponent\nfrom haystack.lazy_imports import LazyImport\n\n\nlogger = logging.getLogger(__name__)\n\nwith LazyImport() as magic_import:\n import magic\n\n\nDEFAULT_TYPES = [\"txt\", \"pdf\", \"md\", \"docx\", \"html\"]\n\n\nclass FileTypeClassifier(BaseComponent):\n \"\"\"\n Route files in an Indexing Pipeline to corresponding file converters.\n \"\"\"\n\n outgoing_edges = len(DEFAULT_TYPES)\n\n def __init__(self, supported_types: Optional[List[str]] = None):\n \"\"\"\n Node that sends out files on a different output edge depending on their extension.\n\n :param supported_types: The file types that this node can distinguish between.\n If no value is provided, the value created by default comprises: `txt`, `pdf`, `md`, `docx`, and `html`.\n Lists with duplicate elements are not allowed.\n \"\"\"\n if supported_types is None:\n supported_types = DEFAULT_TYPES\n if len(set(supported_types)) != len(supported_types):\n duplicates = supported_types\n for item in set(supported_types):\n duplicates.remove(item)\n raise ValueError(f\"supported_types can't contain duplicate values ({duplicates}).\")\n\n super().__init__()\n\n self.supported_types = supported_types\n\n @classmethod\n def _calculate_outgoing_edges(cls, component_params: Dict[str, Any]) -> int:\n supported_types = component_params.get(\"supported_types\", DEFAULT_TYPES)\n return len(supported_types)\n\n def _estimate_extension(self, file_path: Path) -> str:\n \"\"\"\n Return the extension found based on the contents of the given file\n\n :param file_path: the path to extract the 
extension from\n \"\"\"\n try:\n magic_import.check()\n extension = magic.from_file(str(file_path), mime=True)\n return mimetypes.guess_extension(extension) or \"\"\n except (NameError, ImportError):\n logger.error(\n \"The type of '%s' could not be guessed, probably because 'python-magic' is not installed. Ignoring this error.\"\n \"Please make sure the necessary OS libraries are installed if you need this functionality ('python-magic' or 'python-magic-bin' on Windows).\",\n file_path,\n )\n return \"\"\n\n def _get_extension(self, file_paths: List[Path]) -> str:\n \"\"\"\n Return the extension found in the given list of files.\n Also makes sure that all files have the same extension.\n If this is not true, it throws an exception.\n\n :param file_paths: the paths to extract the extension from\n :return: a set of strings with all the extensions (without duplicates), the extension will be guessed if the file has none\n \"\"\"\n extension = file_paths[0].suffix.lower()\n if extension == \"\":\n extension = self._estimate_extension(file_paths[0])\n\n for path in file_paths:\n path_suffix = path.suffix.lower()\n if path_suffix == \"\":\n path_suffix = self._estimate_extension(path)\n if path_suffix != extension:\n raise ValueError(\"Multiple file types are not allowed at once.\")\n\n return extension.lstrip(\".\")\n\n def run(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore\n \"\"\"\n Sends out files on a different output edge depending on their extension.\n\n :param file_paths: paths to route on different edges.\n \"\"\"\n if not isinstance(file_paths, list):\n file_paths = [file_paths]\n\n paths = [Path(path) for path in file_paths]\n\n output = {\"file_paths\": paths}\n extension = self._get_extension(paths)\n try:\n index = self.supported_types.index(extension) + 1\n except ValueError:\n raise ValueError(\n f\"Files of type '{extension}' ({paths[0]}) are not supported. \"\n f\"The supported types are: {self.supported_types}. \"\n \"Consider using the 'supported_types' parameter to \"\n \"change the types accepted by this node.\"\n )\n return output, f\"output_{index}\"\n\n def run_batch(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore\n return self.run(file_paths=file_paths)\n", "path": "haystack/nodes/file_classifier/file_type.py"}]} | 1,869 | 949 |
gh_patches_debug_6477 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`clean_stale_db --force` kills demo databases newer than 3 days
## Description
`clean_stale_db --force` is meant to kill only demo databases older than 3 days (by default), but it also removes databases newer than that.
## Additional context
https://github.com/centerofci/mathesar/blob/master/demo/management/commands/clean_stale_db.py
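For reference, a quick illustration of the `timedelta` keyword semantics involved:

```python
from datetime import timedelta

print(timedelta(minutes=3))  # 0:03:00 -- three minutes
print(timedelta(days=3))     # 3 days, 0:00:00 -- what --max-days is meant to express
```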
</issue>
<code>
[start of demo/management/commands/clean_stale_db.py]
1 from datetime import timedelta
2
3 from django.conf import settings
4 from django.core.management import BaseCommand
5 from django.utils.timezone import now
6 from sqlalchemy import text
7 from sqlalchemy.exc import OperationalError
8
9 from db import engine
10 from db.metadata import get_empty_metadata
11 from mathesar.models.base import Database
12 from mathesar.state.django import reflect_db_objects
13
14
15 class Command(BaseCommand):
16 help = 'Cleans up the stale database created during live demo'
17
18 def add_arguments(self, parser):
19 parser.add_argument(
20 '--force',
21 action='store_true',
22 help='Force delete a database even if it in use'
23 )
24 parser.add_argument(
25 '--max-days',
26 action='store',
27 type=int,
28 default=3,
29 help='A database is considered for deletion if it has existed for more than --max-days',
30 )
31
32 def handle(self, *args, **options):
33 drop_all_stale_databases(*args, **options)
34
35
36 def drop_all_stale_databases(force=False, max_days=3, *args, **kwargs):
37 excluded_databases = [
38 settings.DATABASES["default"]["NAME"],
39 settings.DATABASES["mathesar_tables"]["NAME"],
40 getattr(settings, "MATHESAR_DEMO_TEMPLATE", None),
41 # Exclude Postgres default databases
42 'postgres',
43 'template0',
44 'template1'
45 ]
46 stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days))
47 deleted_databases = []
48 for database in stale_databases:
49 if database.name not in excluded_databases and database.deleted is False:
50 dropped = drop_mathesar_database(
51 database.name,
52 username=settings.DATABASES["default"]["USER"],
53 password=settings.DATABASES["default"]["PASSWORD"],
54 hostname=settings.DATABASES["default"]["HOST"],
55 root_database=settings.DATABASES["default"]["NAME"],
56 port=settings.DATABASES["default"]["PORT"],
57 force=force
58 )
59 if dropped:
60 deleted_databases.append(database.name)
61 database.delete()
62 reflect_db_objects(get_empty_metadata())
63 return deleted_databases
64
65
66 def drop_mathesar_database(
67 user_database, username, password, hostname, root_database, port, force=False
68 ):
69 user_db_engine = engine.create_future_engine(
70 username, password, hostname, user_database, port
71 )
72 try:
73 user_db_engine.connect()
74 except OperationalError:
75 # Non existent db object
76 user_db_engine.dispose()
77 return True
78 else:
79 try:
80 root_db_engine = engine.create_future_engine(
81 username, password, hostname, root_database, port,
82 )
83 with root_db_engine.connect() as conn:
84 conn.execution_options(isolation_level="AUTOCOMMIT")
85 delete_stmt = f"DROP DATABASE {user_database} {'WITH (FORCE)' if force else ''}"
86 conn.execute(text(delete_stmt))
87 # This database is not created using a config file,
88 # so their objects can be safety deleted
89 # as they won't be created again during reflection
90 return True
91 except OperationalError:
92 # Database is in use, ignore
93 pass
94 return False
95
[end of demo/management/commands/clean_stale_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demo/management/commands/clean_stale_db.py b/demo/management/commands/clean_stale_db.py
--- a/demo/management/commands/clean_stale_db.py
+++ b/demo/management/commands/clean_stale_db.py
@@ -43,7 +43,7 @@
'template0',
'template1'
]
- stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days))
+ stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(days=max_days))
deleted_databases = []
for database in stale_databases:
if database.name not in excluded_databases and database.deleted is False:
| {"golden_diff": "diff --git a/demo/management/commands/clean_stale_db.py b/demo/management/commands/clean_stale_db.py\n--- a/demo/management/commands/clean_stale_db.py\n+++ b/demo/management/commands/clean_stale_db.py\n@@ -43,7 +43,7 @@\n 'template0',\n 'template1'\n ]\n- stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days))\n+ stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(days=max_days))\n deleted_databases = []\n for database in stale_databases:\n if database.name not in excluded_databases and database.deleted is False:\n", "issue": "`clean_stale_db --force` kills demo databases newer than 3 days\n## Description\r\n`clean_stale_db --force` is meant to only kill demo databases older than 3 days (by default), but that doesn't seem to be the case.\r\n\r\n## Additional context\r\n\r\nhttps://github.com/centerofci/mathesar/blob/master/demo/management/commands/clean_stale_db.py\n", "before_files": [{"content": "from datetime import timedelta\n\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\nfrom django.utils.timezone import now\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.metadata import get_empty_metadata\nfrom mathesar.models.base import Database\nfrom mathesar.state.django import reflect_db_objects\n\n\nclass Command(BaseCommand):\n help = 'Cleans up the stale database created during live demo'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--force',\n action='store_true',\n help='Force delete a database even if it in use'\n )\n parser.add_argument(\n '--max-days',\n action='store',\n type=int,\n default=3,\n help='A database is considered for deletion if it has existed for more than --max-days',\n )\n\n def handle(self, *args, **options):\n drop_all_stale_databases(*args, **options)\n\n\ndef drop_all_stale_databases(force=False, max_days=3, *args, **kwargs):\n excluded_databases = [\n settings.DATABASES[\"default\"][\"NAME\"],\n settings.DATABASES[\"mathesar_tables\"][\"NAME\"],\n getattr(settings, \"MATHESAR_DEMO_TEMPLATE\", None),\n # Exclude Postgres default databases\n 'postgres',\n 'template0',\n 'template1'\n ]\n stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days))\n deleted_databases = []\n for database in stale_databases:\n if database.name not in excluded_databases and database.deleted is False:\n dropped = drop_mathesar_database(\n database.name,\n username=settings.DATABASES[\"default\"][\"USER\"],\n password=settings.DATABASES[\"default\"][\"PASSWORD\"],\n hostname=settings.DATABASES[\"default\"][\"HOST\"],\n root_database=settings.DATABASES[\"default\"][\"NAME\"],\n port=settings.DATABASES[\"default\"][\"PORT\"],\n force=force\n )\n if dropped:\n deleted_databases.append(database.name)\n database.delete()\n reflect_db_objects(get_empty_metadata())\n return deleted_databases\n\n\ndef drop_mathesar_database(\n user_database, username, password, hostname, root_database, port, force=False\n):\n user_db_engine = engine.create_future_engine(\n username, password, hostname, user_database, port\n )\n try:\n user_db_engine.connect()\n except OperationalError:\n # Non existent db object\n user_db_engine.dispose()\n return True\n else:\n try:\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n delete_stmt = f\"DROP 
DATABASE {user_database} {'WITH (FORCE)' if force else ''}\"\n conn.execute(text(delete_stmt))\n # This database is not created using a config file,\n # so their objects can be safety deleted\n # as they won't be created again during reflection\n return True\n except OperationalError:\n # Database is in use, ignore\n pass\n return False\n", "path": "demo/management/commands/clean_stale_db.py"}]} | 1,484 | 154 |
gh_patches_debug_50802 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1481 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pubsub fails if data key is not present
If a message is published with a zero-length string (`topic.publish('', url=url, title=title)`), the received message has no data field, and a `KeyError` is raised when transforming the message from the Pub/Sub API representation.
https://github.com/GoogleCloudPlatform/gcloud-python/blob/master/gcloud/pubsub/message.py#L74
```
Traceback (most recent call last):
File "/en_notifications/en_notifications.py", line 51, in <module>
received = PS_SUBSCRIPTION.pull(max_messages=PULL_COUNT)
File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py", line 212, in pull
File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/message.py", line 74, in from_api_repr
for info in response.get('receivedMessages', ())]
data = base64.b64decode(api_repr['data'])
KeyError: 'data'
```
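For illustration, the API representation of such a message simply omits the `data` key (field values hypothetical):

```python
api_repr = {'messageId': '1', 'attributes': {'url': 'http://x', 'title': 't'}}  # no 'data' key
# api_repr['data'] raises KeyError: 'data', which is the crash above
data = api_repr.get('data', b'')  # a tolerant lookup returning an empty payload instead
```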
</issue>
<code>
[start of gcloud/pubsub/message.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define API Topics."""
16
17 import base64
18
19 from gcloud._helpers import _rfc3339_to_datetime
20
21
22 class Message(object):
23 """Messages can be published to a topic and received by subscribers.
24
25 See:
26 https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage
27
28 :type data: bytes
29 :param data: the payload of the message
30
31 :type message_id: string
32 :param message_id: An ID assigned to the message by the API.
33
34 :type attributes: dict or None
35 :param attributes: Extra metadata associated by the publisher with the
36 message.
37 """
38 def __init__(self, data, message_id, attributes=None):
39 self.data = data
40 self.message_id = message_id
41 self._attributes = attributes
42
43 @property
44 def attributes(self):
45 """Lazily-constructed attribute dictionary"""
46 if self._attributes is None:
47 self._attributes = {}
48 return self._attributes
49
50 @property
51 def timestamp(self):
52 """Return sortable timestamp from attributes, if passed.
53
54 Allows sorting messages in publication order (assuming consistent
55 clocks across all publishers).
56
57 :rtype: :class:`datetime.datetime`
58 :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp
59 :raises: ValueError if timestamp not in ``attributes``, or if it does
60 not match the RFC 3339 format.
61 """
62 stamp = self.attributes.get('timestamp')
63 if stamp is None:
64 raise ValueError('No timestamp')
65 return _rfc3339_to_datetime(stamp)
66
67 @classmethod
68 def from_api_repr(cls, api_repr):
69 """Factory: construct message from API representation.
70
71 :type api_repr: dict or None
72 :param api_repr: The API representation of the message
73 """
74 data = base64.b64decode(api_repr['data'])
75 return cls(data=data, message_id=api_repr['messageId'],
76 attributes=api_repr.get('attributes'))
77
[end of gcloud/pubsub/message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/pubsub/message.py b/gcloud/pubsub/message.py
--- a/gcloud/pubsub/message.py
+++ b/gcloud/pubsub/message.py
@@ -71,6 +71,6 @@
:type api_repr: dict or None
:param api_repr: The API representation of the message
"""
- data = base64.b64decode(api_repr['data'])
+ data = base64.b64decode(api_repr.get('data', b''))
return cls(data=data, message_id=api_repr['messageId'],
attributes=api_repr.get('attributes'))
| {"golden_diff": "diff --git a/gcloud/pubsub/message.py b/gcloud/pubsub/message.py\n--- a/gcloud/pubsub/message.py\n+++ b/gcloud/pubsub/message.py\n@@ -71,6 +71,6 @@\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n \"\"\"\n- data = base64.b64decode(api_repr['data'])\n+ data = base64.b64decode(api_repr.get('data', b''))\n return cls(data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n", "issue": "pubsub fails if data key is not present\nIf a message is published with a string of 0 length (`topic.publish( '', url=url, title=title)`) when the message is received there is no data field in the message and a key error is thrown when trying to transform the message from the PubSub API representation.\n\nhttps://github.com/GoogleCloudPlatform/gcloud-python/blob/master/gcloud/pubsub/message.py#L74\n\n```\nTraceback (most recent call last):\nFile \"/en_notifications/en_notifications.py\", line 51, in <module>\nreceived = PS_SUBSCRIPTION.pull(max_messages=PULL_COUNT)\nFile \"/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py\", line 212, in pull\nFile \"/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/message.py\", line 74, in from_api_repr\nfor info in response.get('receivedMessages', ())]\ndata = base64.b64decode(api_repr['data'])\nKeyError: 'data'\n```\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom gcloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API 
representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n \"\"\"\n data = base64.b64decode(api_repr['data'])\n return cls(data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n", "path": "gcloud/pubsub/message.py"}]} | 1,482 | 134 |
gh_patches_debug_10522 | rasdani/github-patches | git_diff | bokeh__bokeh-4437 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't serialize timedelta column
If you have a data frame with a column of timedeltas and make a ColumnDataSource out of it, the ColumnDataSource will be created successfully:
``` python
In [9]: source.data['delta']
Out[9]:
[Timedelta('0 days 00:33:00'),
Timedelta('0 days 00:35:00'),
Timedelta('0 days 03:01:00')]
```
But if you use that source in a plot, even if you never reference the column, serializing the plot (show/save/embed, etc.) fails:
``` python
Timedelta('0 days 00:33:00') is not JSON serializable
```
Maybe we can provide some validation on ColumnDataSource creation? Or, at least provide a more helpful message on failure, as it's not immediately obvious what went wrong.
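As a stopgap, the column can be converted to plain floats before building the source (sketch only; column names hypothetical):

```python
import pandas as pd
from bokeh.models import ColumnDataSource

df = pd.DataFrame({'delta': pd.to_timedelta(['00:33:00', '00:35:00', '03:01:00'])})
df['delta_s'] = df['delta'].dt.total_seconds()  # seconds as floats serialize fine
source = ColumnDataSource(df[['delta_s']])
```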
</issue>
<code>
[start of bokeh/core/json_encoder.py]
1 ''' Provide a custom JSON encoder for serializing Bokeh models.
2
3 '''
4 from __future__ import absolute_import
5
6 import logging
7 log = logging.getLogger(__name__)
8
9 import datetime as dt
10 import decimal
11 import json
12 import time
13
14 import numpy as np
15
16 from ..settings import settings
17 from ..util.dependencies import import_optional
18 from ..util.serialization import transform_series, transform_array
19
20 pd = import_optional('pandas')
21 rd = import_optional("dateutil.relativedelta")
22
23 class BokehJSONEncoder(json.JSONEncoder):
24 ''' Encode values to be used in Bokeh documents or communicated to
25 a Bokeh server.
26
27 '''
28 def transform_python_types(self, obj):
29 ''' Handle special scalars, use default json encoder otherwise
30
31 '''
32 # Pandas Timestamp
33 if pd and isinstance(obj, pd.tslib.Timestamp):
34 return obj.value / 10**6.0 #nanosecond to millisecond
35 elif np.issubdtype(type(obj), np.float):
36 return float(obj)
37 elif np.issubdtype(type(obj), np.int):
38 return int(obj)
39 elif np.issubdtype(type(obj), np.bool_):
40 return bool(obj)
41 # Datetime
42 # datetime is a subclass of date.
43 elif isinstance(obj, dt.datetime):
44 return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000.
45 # Date
46 elif isinstance(obj, dt.date):
47 return time.mktime(obj.timetuple()) * 1000.
48 # Numpy datetime64
49 elif isinstance(obj, np.datetime64):
50 epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
51 return (epoch_delta / np.timedelta64(1, 'ms'))
52 # Time
53 elif isinstance(obj, dt.time):
54 return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
55 elif rd and isinstance(obj, rd.relativedelta):
56 return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
57 minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
58 # Decimal
59 elif isinstance(obj, decimal.Decimal):
60 return float(obj)
61 else:
62 return super(BokehJSONEncoder, self).default(obj)
63
64 def default(self, obj):
65 #argh! local import!
66 from ..model import Model
67 from ..colors import Color
68 from .properties import HasProps
69 ## array types
70 if pd and isinstance(obj, (pd.Series, pd.Index)):
71 return transform_series(obj)
72 elif isinstance(obj, np.ndarray):
73 return transform_array(obj)
74 elif isinstance(obj, Model):
75 return obj.ref
76 elif isinstance(obj, HasProps):
77 return obj.properties_with_values(include_defaults=False)
78 elif isinstance(obj, Color):
79 return obj.to_css()
80 else:
81 return self.transform_python_types(obj)
82
83 def serialize_json(obj, encoder=BokehJSONEncoder, indent=None, **kwargs):
84 ''' Return a serialized JSON representation of a Bokeh model.
85
86 '''
87 pretty = settings.pretty(False)
88
89 if pretty:
90 separators=(",", ": ")
91 else:
92 separators=(",", ":")
93
94 if pretty and indent is None:
95 indent = 2
96
97 return json.dumps(obj, cls=encoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
98
[end of bokeh/core/json_encoder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/core/json_encoder.py b/bokeh/core/json_encoder.py
--- a/bokeh/core/json_encoder.py
+++ b/bokeh/core/json_encoder.py
@@ -42,6 +42,10 @@
# datetime is a subclass of date.
elif isinstance(obj, dt.datetime):
return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000.
+ # Timedelta
+ # timedelta is class in the datetime library
+ elif isinstance(obj, dt.timedelta):
+ return dict(days=obj.days, seconds=obj.seconds, microseconds=obj.microseconds)
# Date
elif isinstance(obj, dt.date):
return time.mktime(obj.timetuple()) * 1000.
| {"golden_diff": "diff --git a/bokeh/core/json_encoder.py b/bokeh/core/json_encoder.py\n--- a/bokeh/core/json_encoder.py\n+++ b/bokeh/core/json_encoder.py\n@@ -42,6 +42,10 @@\n # datetime is a subclass of date.\n elif isinstance(obj, dt.datetime):\n return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000.\n+ # Timedelta\n+ # timedelta is class in the datetime library\n+ elif isinstance(obj, dt.timedelta):\n+ return dict(days=obj.days, seconds=obj.seconds, microseconds=obj.microseconds)\n # Date\n elif isinstance(obj, dt.date):\n return time.mktime(obj.timetuple()) * 1000.\n", "issue": "Can't serialize timedelta column\nIf you have a data frame with a column of timedeltas, then make a ColumnDataSource out of it, the ColumnDataSource will be created:\n\n``` python\nIn [9]: source.data['delta']\nOut[9]:\n[Timedelta('0 days 00:33:00'),\n Timedelta('0 days 00:35:00'),\n Timedelta('0 days 03:01:00')]\n```\n\nBut if you use that source in a plot, even if you don't use the column, when it comes time to serialize (show/save/embed etc) the plot, it fails:\n\n``` python\nTimedelta('0 days 00:33:00') is not JSON serializable\n```\n\nMaybe we can provide some validation on ColumnDataSource creation? Or, at least provide a more helpful message on failure, as it's not immediately obvious what went wrong.\n\n", "before_files": [{"content": "''' Provide a custom JSON encoder for serializing Bokeh models.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport datetime as dt\nimport decimal\nimport json\nimport time\n\nimport numpy as np\n\nfrom ..settings import settings\nfrom ..util.dependencies import import_optional\nfrom ..util.serialization import transform_series, transform_array\n\npd = import_optional('pandas')\nrd = import_optional(\"dateutil.relativedelta\")\n\nclass BokehJSONEncoder(json.JSONEncoder):\n ''' Encode values to be used in Bokeh documents or communicated to\n a Bokeh server.\n\n '''\n def transform_python_types(self, obj):\n ''' Handle special scalars, use default json encoder otherwise\n\n '''\n # Pandas Timestamp\n if pd and isinstance(obj, pd.tslib.Timestamp):\n return obj.value / 10**6.0 #nanosecond to millisecond\n elif np.issubdtype(type(obj), np.float):\n return float(obj)\n elif np.issubdtype(type(obj), np.int):\n return int(obj)\n elif np.issubdtype(type(obj), np.bool_):\n return bool(obj)\n # Datetime\n # datetime is a subclass of date.\n elif isinstance(obj, dt.datetime):\n return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000.\n # Date\n elif isinstance(obj, dt.date):\n return time.mktime(obj.timetuple()) * 1000.\n # Numpy datetime64\n elif isinstance(obj, np.datetime64):\n epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')\n return (epoch_delta / np.timedelta64(1, 'ms'))\n # Time\n elif isinstance(obj, dt.time):\n return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.\n elif rd and isinstance(obj, rd.relativedelta):\n return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,\n minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)\n # Decimal\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n else:\n return super(BokehJSONEncoder, self).default(obj)\n\n def default(self, obj):\n #argh! 
local import!\n from ..model import Model\n from ..colors import Color\n from .properties import HasProps\n ## array types\n if pd and isinstance(obj, (pd.Series, pd.Index)):\n return transform_series(obj)\n elif isinstance(obj, np.ndarray):\n return transform_array(obj)\n elif isinstance(obj, Model):\n return obj.ref\n elif isinstance(obj, HasProps):\n return obj.properties_with_values(include_defaults=False)\n elif isinstance(obj, Color):\n return obj.to_css()\n else:\n return self.transform_python_types(obj)\n\ndef serialize_json(obj, encoder=BokehJSONEncoder, indent=None, **kwargs):\n ''' Return a serialized JSON representation of a Bokeh model.\n\n '''\n pretty = settings.pretty(False)\n\n if pretty:\n separators=(\",\", \": \")\n else:\n separators=(\",\", \":\")\n\n if pretty and indent is None:\n indent = 2\n\n return json.dumps(obj, cls=encoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)\n", "path": "bokeh/core/json_encoder.py"}]} | 1,694 | 171 |
gh_patches_debug_629 | rasdani/github-patches | git_diff | zulip__zulip-8684 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lint rules: Prevent `return undefined;`
We should sweep the code to replace `return undefined;` with `return;`, and then make a lint rule for it, either via eslint (if they support that) or by making a custom rule.
</issue>
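The sweep half of this task is mechanical. A rough sketch of such a one-off rewrite (the `static/js` glob is an assumption about the tree layout, not the project's actual tooling):

```python
import pathlib
import re

# Replace `return undefined;` with `return;` across the JS tree.
for path in pathlib.Path("static/js").rglob("*.js"):
    text = path.read_text()
    fixed = re.sub(r"\breturn\s+undefined;", "return;", text)
    if fixed != text:
        path.write_text(fixed)
        print(f"rewrote {path}")
```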
<code>
[start of version.py]
1 ZULIP_VERSION = "1.7.1+git"
2
3 # Bump the minor PROVISION_VERSION to indicate that folks should provision
4 # only when going from an old version of the code to a newer version. Bump
5 # the major version to indicate that folks should provision in both
6 # directions.
7
8 # Typically, adding a dependency only requires a minor version bump, and
9 # removing a dependency requires a major version bump.
10
11 PROVISION_VERSION = '15.9'
12
[end of version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -8,4 +8,4 @@
# Typically, adding a dependency only requires a minor version bump, and
# removing a dependency requires a major version bump.
-PROVISION_VERSION = '15.9'
+PROVISION_VERSION = '15.10'
| {"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -8,4 +8,4 @@\n # Typically, adding a dependency only requires a minor version bump, and\n # removing a dependency requires a major version bump.\n \n-PROVISION_VERSION = '15.9'\n+PROVISION_VERSION = '15.10'\n", "issue": "lint rules: Prevent `return undefined;`\nWe should sweep the code to replace `return undefined;` with `return;`, and then make a lint rule for it, either via eslint (if they support that) or by making a custom rule.\n", "before_files": [{"content": "ZULIP_VERSION = \"1.7.1+git\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically, adding a dependency only requires a minor version bump, and\n# removing a dependency requires a major version bump.\n\nPROVISION_VERSION = '15.9'\n", "path": "version.py"}]} | 698 | 80 |
gh_patches_debug_21806 | rasdani/github-patches | git_diff | psychopy__psychopy-4624 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 errors on some of the pages in the HELP menus
Version used
- psychopy: 2021.2.3
https://www.psychopy.org/builder/builder.html
https://www.psychopy.org/api/api.html
</issue>
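Both links can be checked programmatically to confirm the 404s. A quick reproduction sketch (the `requests` dependency is an assumption, not part of psychopy):

```python
import requests

for url in ("https://www.psychopy.org/builder/builder.html",
            "https://www.psychopy.org/api/api.html"):
    response = requests.head(url, allow_redirects=True)
    print(response.status_code, url)  # 404 for both at the time of the report
```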
<code>
[start of psychopy/app/urls.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """A central location to store information about urls
5 """
6 urls = dict()
7
8 # links based on string names
9 urls['builder'] = "https://www.psychopy.org/builder/builder.html"
10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
11 # NB. builder components get their urls defined by the component code
12 # (so a custom component can have a url)
13
14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases"
15 urls['changelog'] = "https://www.psychopy.org/changelog.html"
16
17 general = "https://www.psychopy.org/general/"
18 urls['prefs'] = general + "prefs.html"
19 urls['prefs.general'] = general + "prefs.html#general-settings"
20 urls['prefs.app'] = general + "prefs.html#application-settings"
21 urls['prefs.coder'] = general + "prefs.html#coder-settings"
22 urls['prefs.builder'] = general + "prefs.html#builder-settings"
23 urls['prefs.connections'] = general + "prefs.html#connection-settings"
24
25 # links keyed by wxIDs (e.g. menu item IDs)
26 urls['psychopyHome'] = "https://www.psychopy.org/"
27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
29 urls['builderHelp'] = urls['builder']
30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html"
32
[end of psychopy/app/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py
--- a/psychopy/app/urls.py
+++ b/psychopy/app/urls.py
@@ -6,7 +6,7 @@
urls = dict()
# links based on string names
-urls['builder'] = "https://www.psychopy.org/builder/builder.html"
+urls['builder'] = "https://www.psychopy.org/builder"
urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
# NB. builder components get their urls defined by the component code
# (so a custom component can have a url)
@@ -24,7 +24,7 @@
# links keyed by wxIDs (e.g. menu item IDs)
urls['psychopyHome'] = "https://www.psychopy.org/"
-urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
+urls['psychopyReference'] = "https://www.psychopy.org/api"
urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
urls['builderHelp'] = urls['builder']
urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
| {"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]} | 1,017 | 286 |
gh_patches_debug_43307 | rasdani/github-patches | git_diff | crytic__slither-447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "now" to the timestamp detector
https://github.com/crytic/slither/blob/7cb6cf4870036f780088fa7dfec83ae3220322e2/slither/detectors/operations/block_timestamp.py#L39-L44
This could also warn about the use of `now`.
</issue>
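Solidity's `now` is an alias for `block.timestamp`, so the detector needs a second dependence check. A minimal sketch of the extra condition (`SolidityVariable` is assumed to live alongside `SolidityVariableComposed`, as in the fix further down):

```python
from slither.analyses.data_dependency.data_dependency import is_dependent
from slither.core.declarations.solidity_variables import (
    SolidityVariable,
    SolidityVariableComposed,
)

def depends_on_block_time(var, func):
    # Treat `now` and `block.timestamp` as the same taint source.
    return (
        is_dependent(var, SolidityVariableComposed("block.timestamp"), func.contract)
        or is_dependent(var, SolidityVariable("now"), func.contract)
    )
```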
<code>
[start of slither/detectors/operations/block_timestamp.py]
1 """
2 Module detecting dangerous use of block.timestamp
3
4 """
5 from slither.core.declarations import Function
6 from slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent
7 from slither.core.declarations.solidity_variables import (SolidityFunction,
8 SolidityVariableComposed)
9 from slither.detectors.abstract_detector import (AbstractDetector,
10 DetectorClassification)
11 from slither.slithir.operations import Binary, BinaryType
12
13
14 class Timestamp(AbstractDetector):
15 """
16 """
17
18 ARGUMENT = 'timestamp'
19 HELP = 'Dangerous usage of `block.timestamp`'
20 IMPACT = DetectorClassification.LOW
21 CONFIDENCE = DetectorClassification.MEDIUM
22
23 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp'
24
25
26 WIKI_TITLE = 'Block timestamp'
27 WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.'
28     WIKI_EXPLOIT_SCENARIO = '''"Bob's contract relies on `block.timestamp` for its randomness. Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.'''
29 WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.'
30
31 def timestamp(self, func):
32 """
33 """
34
35 ret = set()
36 for node in func.nodes:
37 if node.contains_require_or_assert():
38 for var in node.variables_read:
39 if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):
40 ret.add(node)
41 for ir in node.irs:
42 if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):
43 for var in ir.read:
44 if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):
45 ret.add(node)
46 return list(ret)
47
48
49 def detect_dangerous_timestamp(self, contract):
50 """
51 Args:
52 contract (Contract)
53 Returns:
54 list((Function), (list (Node)))
55 """
56 ret = []
57 for f in [f for f in contract.functions if f.contract_declarer == contract]:
58 nodes = self.timestamp(f)
59 if nodes:
60 ret.append((f, nodes))
61 return ret
62
63 def _detect(self):
64 """
65 """
66 results = []
67
68 for c in self.contracts:
69 dangerous_timestamp = self.detect_dangerous_timestamp(c)
70 for (func, nodes) in dangerous_timestamp:
71
72 info = [func, " uses timestamp for comparisons\n"]
73
74 info += ['\tDangerous comparisons:\n']
75 for node in nodes:
76 info += ['\t- ', node, '\n']
77
78 res = self.generate_result(info)
79
80 results.append(res)
81
82 return results
83
[end of slither/detectors/operations/block_timestamp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/detectors/operations/block_timestamp.py b/slither/detectors/operations/block_timestamp.py
--- a/slither/detectors/operations/block_timestamp.py
+++ b/slither/detectors/operations/block_timestamp.py
@@ -2,15 +2,51 @@
Module detecting dangerous use of block.timestamp
"""
-from slither.core.declarations import Function
-from slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent
-from slither.core.declarations.solidity_variables import (SolidityFunction,
- SolidityVariableComposed)
+from typing import List, Tuple
+
+from slither.analyses.data_dependency.data_dependency import is_dependent
+from slither.core.cfg.node import Node
+from slither.core.declarations import Function, Contract
+from slither.core.declarations.solidity_variables import (SolidityVariableComposed, SolidityVariable)
from slither.detectors.abstract_detector import (AbstractDetector,
DetectorClassification)
from slither.slithir.operations import Binary, BinaryType
+def _timestamp(func: Function) -> List[Node]:
+ ret = set()
+ for node in func.nodes:
+ if node.contains_require_or_assert():
+ for var in node.variables_read:
+ if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):
+ ret.add(node)
+ if is_dependent(var, SolidityVariable('now'), func.contract):
+ ret.add(node)
+ for ir in node.irs:
+ if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):
+ for var in ir.read:
+ if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):
+ ret.add(node)
+ if is_dependent(var, SolidityVariable('now'), func.contract):
+ ret.add(node)
+ return list(ret)
+
+
+def _detect_dangerous_timestamp(contract: Contract) -> List[Tuple[Function, List[Node]]]:
+ """
+ Args:
+ contract (Contract)
+ Returns:
+ list((Function), (list (Node)))
+ """
+ ret = []
+ for f in [f for f in contract.functions if f.contract_declarer == contract]:
+ nodes = _timestamp(f)
+ if nodes:
+ ret.append((f, nodes))
+ return ret
+
+
class Timestamp(AbstractDetector):
"""
"""
@@ -22,51 +58,18 @@
WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp'
-
WIKI_TITLE = 'Block timestamp'
WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.'
WIKI_EXPLOIT_SCENARIO = '''"Bob's contract relies on `block.timestamp` for its randomness. Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.'''
WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.'
- def timestamp(self, func):
- """
- """
-
- ret = set()
- for node in func.nodes:
- if node.contains_require_or_assert():
- for var in node.variables_read:
- if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):
- ret.add(node)
- for ir in node.irs:
- if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):
- for var in ir.read:
- if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):
- ret.add(node)
- return list(ret)
-
-
- def detect_dangerous_timestamp(self, contract):
- """
- Args:
- contract (Contract)
- Returns:
- list((Function), (list (Node)))
- """
- ret = []
- for f in [f for f in contract.functions if f.contract_declarer == contract]:
- nodes = self.timestamp(f)
- if nodes:
- ret.append((f, nodes))
- return ret
-
def _detect(self):
"""
"""
results = []
for c in self.contracts:
- dangerous_timestamp = self.detect_dangerous_timestamp(c)
+ dangerous_timestamp = _detect_dangerous_timestamp(c)
for (func, nodes) in dangerous_timestamp:
info = [func, " uses timestamp for comparisons\n"]
| {"golden_diff": "diff --git a/slither/detectors/operations/block_timestamp.py b/slither/detectors/operations/block_timestamp.py\n--- a/slither/detectors/operations/block_timestamp.py\n+++ b/slither/detectors/operations/block_timestamp.py\n@@ -2,15 +2,51 @@\n Module detecting dangerous use of block.timestamp\n \n \"\"\"\n-from slither.core.declarations import Function\n-from slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent\n-from slither.core.declarations.solidity_variables import (SolidityFunction,\n- SolidityVariableComposed)\n+from typing import List, Tuple\n+\n+from slither.analyses.data_dependency.data_dependency import is_dependent\n+from slither.core.cfg.node import Node\n+from slither.core.declarations import Function, Contract\n+from slither.core.declarations.solidity_variables import (SolidityVariableComposed, SolidityVariable)\n from slither.detectors.abstract_detector import (AbstractDetector,\n DetectorClassification)\n from slither.slithir.operations import Binary, BinaryType\n \n \n+def _timestamp(func: Function) -> List[Node]:\n+ ret = set()\n+ for node in func.nodes:\n+ if node.contains_require_or_assert():\n+ for var in node.variables_read:\n+ if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n+ ret.add(node)\n+ if is_dependent(var, SolidityVariable('now'), func.contract):\n+ ret.add(node)\n+ for ir in node.irs:\n+ if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):\n+ for var in ir.read:\n+ if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n+ ret.add(node)\n+ if is_dependent(var, SolidityVariable('now'), func.contract):\n+ ret.add(node)\n+ return list(ret)\n+\n+\n+def _detect_dangerous_timestamp(contract: Contract) -> List[Tuple[Function, List[Node]]]:\n+ \"\"\"\n+ Args:\n+ contract (Contract)\n+ Returns:\n+ list((Function), (list (Node)))\n+ \"\"\"\n+ ret = []\n+ for f in [f for f in contract.functions if f.contract_declarer == contract]:\n+ nodes = _timestamp(f)\n+ if nodes:\n+ ret.append((f, nodes))\n+ return ret\n+\n+\n class Timestamp(AbstractDetector):\n \"\"\"\n \"\"\"\n@@ -22,51 +58,18 @@\n \n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp'\n \n-\n WIKI_TITLE = 'Block timestamp'\n WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.'\n WIKI_EXPLOIT_SCENARIO = '''\"Bob's contract relies on `block.timestamp` for its randomness. 
Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.'''\n WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.'\n \n- def timestamp(self, func):\n- \"\"\"\n- \"\"\"\n-\n- ret = set()\n- for node in func.nodes:\n- if node.contains_require_or_assert():\n- for var in node.variables_read:\n- if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n- ret.add(node)\n- for ir in node.irs:\n- if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):\n- for var in ir.read:\n- if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n- ret.add(node)\n- return list(ret)\n-\n-\n- def detect_dangerous_timestamp(self, contract):\n- \"\"\"\n- Args:\n- contract (Contract)\n- Returns:\n- list((Function), (list (Node)))\n- \"\"\"\n- ret = []\n- for f in [f for f in contract.functions if f.contract_declarer == contract]:\n- nodes = self.timestamp(f)\n- if nodes:\n- ret.append((f, nodes))\n- return ret\n-\n def _detect(self):\n \"\"\"\n \"\"\"\n results = []\n \n for c in self.contracts:\n- dangerous_timestamp = self.detect_dangerous_timestamp(c)\n+ dangerous_timestamp = _detect_dangerous_timestamp(c)\n for (func, nodes) in dangerous_timestamp:\n \n info = [func, \" uses timestamp for comparisons\\n\"]\n", "issue": "Add \"now\" to the timestamp detector\nhttps://github.com/crytic/slither/blob/7cb6cf4870036f780088fa7dfec83ae3220322e2/slither/detectors/operations/block_timestamp.py#L39-L44\r\n\r\nThis could also warns about the use of `now`\n", "before_files": [{"content": "\"\"\"\n Module detecting dangerous use of block.timestamp\n\n\"\"\"\nfrom slither.core.declarations import Function\nfrom slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent\nfrom slither.core.declarations.solidity_variables import (SolidityFunction,\n SolidityVariableComposed)\nfrom slither.detectors.abstract_detector import (AbstractDetector,\n DetectorClassification)\nfrom slither.slithir.operations import Binary, BinaryType\n\n\nclass Timestamp(AbstractDetector):\n \"\"\"\n \"\"\"\n\n ARGUMENT = 'timestamp'\n HELP = 'Dangerous usage of `block.timestamp`'\n IMPACT = DetectorClassification.LOW\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp'\n\n\n WIKI_TITLE = 'Block timestamp'\n WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.'\n WIKI_EXPLOIT_SCENARIO = '''\"Bob's contract relies on `block.timestamp` for its randomness. 
Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.'''\n WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.'\n\n def timestamp(self, func):\n \"\"\"\n \"\"\"\n\n ret = set()\n for node in func.nodes:\n if node.contains_require_or_assert():\n for var in node.variables_read:\n if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n ret.add(node)\n for ir in node.irs:\n if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):\n for var in ir.read:\n if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n ret.add(node)\n return list(ret)\n\n\n def detect_dangerous_timestamp(self, contract):\n \"\"\"\n Args:\n contract (Contract)\n Returns:\n list((Function), (list (Node)))\n \"\"\"\n ret = []\n for f in [f for f in contract.functions if f.contract_declarer == contract]:\n nodes = self.timestamp(f)\n if nodes:\n ret.append((f, nodes))\n return ret\n\n def _detect(self):\n \"\"\"\n \"\"\"\n results = []\n\n for c in self.contracts:\n dangerous_timestamp = self.detect_dangerous_timestamp(c)\n for (func, nodes) in dangerous_timestamp:\n\n info = [func, \" uses timestamp for comparisons\\n\"]\n\n info += ['\\tDangerous comparisons:\\n']\n for node in nodes:\n info += ['\\t- ', node, '\\n']\n\n res = self.generate_result(info)\n\n results.append(res)\n\n return results\n", "path": "slither/detectors/operations/block_timestamp.py"}]} | 1,375 | 980 |
gh_patches_debug_38581 | rasdani/github-patches | git_diff | kartoza__prj.app-217 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash on listing current sponsors
When opening the sponsors view (with some sponsors and sponsor periods created) we get a crash.
http://changelog.inasafe.org/en/qgis/sponsor/list/
Sentry info:
http://sentry.kartoza.com/kartoza/projecta-live/group/5848/
Relevant code.
```
def current_sponsor(self):
today = datetime.datetime.now().replace(tzinfo=utc)
end = self.end_date.replace(tzinfo=utc) # <-- offending line
if end < today:
return False
else:
return True
```
</issue>
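`end_date` is a `DateField`, so `self.end_date` comes back as a plain `datetime.date`, and `date.replace()` accepts no `tzinfo` argument, hence the crash. A minimal sketch of the failure and of a date-only comparison like the one in the fix:

```python
import datetime

end = datetime.date(2016, 1, 31)        # what a DateField returns
try:
    end.replace(tzinfo=None)            # the offending call pattern
except TypeError as exc:
    print(exc)                          # TypeError: tzinfo is not a valid argument here

today = datetime.datetime.now().date()  # naive date, same type as `end`
print(end < today)                      # comparable without tz juggling
```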
<code>
[start of django_project/changes/models/sponsorship_period.py]
1 __author__ = 'rischan'
2
3 import string
4 import random
5 from django.utils import timezone
6 from django.core.urlresolvers import reverse
7 from django.utils.text import slugify
8 from core.settings.contrib import STOP_WORDS
9 from django.db import models
10 from django.utils.translation import ugettext_lazy as _
11 from django.contrib.auth.models import User
12
13
14 class ApprovedSponsorshipPeriodManager(models.Manager):
15 """Custom sponsor manager that shows only approved records."""
16
17 def get_queryset(self):
18 """Query set generator"""
19 return super(
20 ApprovedSponsorshipPeriodManager, self).get_queryset().filter(
21 approved=True)
22
23
24 class UnapprovedSponsorshipPeriodManager(models.Manager):
25     """Custom sponsor manager that shows only unapproved records."""
26
27 def get_queryset(self):
28 """Query set generator"""
29 return super(
30 UnapprovedSponsorshipPeriodManager, self).get_queryset().filter(
31 approved=False)
32
33
34 class SponsorshipPeriod(models.Model):
35 """A sponsorship period model e.g. gui, backend, web site etc."""
36
37 start_date = models.DateField(
38 _("Start date"),
39 help_text='Start date of sponsorship period',
40 default=timezone.now)
41
42 end_date = models.DateField(
43 _("End date"),
44 help_text='End date of sponsorship period',
45 default=timezone.now)
46
47 approved = models.BooleanField(
48 help_text=_(
49 'Whether this sponsorship period has been approved for use by '
50 'the project owner.'),
51 default=False
52 )
53
54 author = models.ForeignKey(User)
55 slug = models.SlugField()
56 project = models.ForeignKey('base.Project')
57 objects = models.Manager()
58 approved_objects = ApprovedSponsorshipPeriodManager()
59 unapproved_objects = UnapprovedSponsorshipPeriodManager()
60 sponsor = models.ForeignKey(
61 'Sponsor',
62 help_text='Input the sponsor name',
63 )
64 sponsorshiplevel = models.ForeignKey(
65 'SponsorshipLevel',
66 help_text='This level take from Sponsorship Level, '
67 'you can add it by using Sponsorship Level menu',
68 )
69 # noinspection PyClassicStyleClass
70
71 class Meta:
72 """Meta options for the sponsor class."""
73 unique_together = (
74 ('project', 'slug')
75 )
76 app_label = 'changes'
77 ordering = ['start_date']
78
79 def save(self, *args, **kwargs):
80
81 if not self.pk:
82 name = self.slug_generator()
83 words = name.split()
84 filtered_words = [t for t in words if t.lower() not in STOP_WORDS]
85 new_list = ' '.join(filtered_words)
86 self.slug = slugify(new_list)[:50]
87 super(SponsorshipPeriod, self).save(*args, **kwargs)
88
89 def slug_generator(self, size=6, chars=string.ascii_lowercase):
90 return ''.join(random.choice(chars) for _ in range(size))
91
92 def __unicode__(self):
93 return u'%s - %s : %s' % (
94 self.start_date,
95 self.end_date
96 )
97
98 def get_absolute_url(self):
99 return reverse('sponsorshipperiod-detail', kwargs={
100 'slug': self.slug,
101 'project_slug': self.project.slug
102 })
103
104 def current_sponsor(self):
105 today = timezone.now()
106 end = self.end_date
107 if end < today:
108 return False
109 else:
110 return True
111
[end of django_project/changes/models/sponsorship_period.py]
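Note that `current_sponsor` above still mixes types: `timezone.now()` returns an aware `datetime` while `self.end_date` is a plain `date`, and Python refuses to order the two. A small sketch of the mismatch (django's `timezone` is assumed importable):

```python
import datetime
from django.utils import timezone  # assumed available in this project

end = datetime.date(2016, 1, 31)
try:
    end < timezone.now()           # date vs aware datetime
except TypeError as exc:
    print(exc)                     # TypeError: date and datetime cannot be ordered
```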
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/changes/models/sponsorship_period.py b/django_project/changes/models/sponsorship_period.py
--- a/django_project/changes/models/sponsorship_period.py
+++ b/django_project/changes/models/sponsorship_period.py
@@ -1,15 +1,19 @@
-__author__ = 'rischan'
+# coding=utf-8
import string
import random
+import datetime
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.utils.text import slugify
+# noinspection PyPackageRequirements
from core.settings.contrib import STOP_WORDS
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
+__author__ = 'rischan'
+
class ApprovedSponsorshipPeriodManager(models.Manager):
"""Custom sponsor manager that shows only approved records."""
@@ -57,14 +61,16 @@
objects = models.Manager()
approved_objects = ApprovedSponsorshipPeriodManager()
unapproved_objects = UnapprovedSponsorshipPeriodManager()
+ # noinspection PyUnresolvedReferences
sponsor = models.ForeignKey(
- 'Sponsor',
- help_text='Input the sponsor name',
+ 'Sponsor',
+ help_text='Input the sponsor name',
)
+ # noinspection PyUnresolvedReferences
sponsorshiplevel = models.ForeignKey(
- 'SponsorshipLevel',
- help_text='This level take from Sponsorship Level, '
- 'you can add it by using Sponsorship Level menu',
+ 'SponsorshipLevel',
+ help_text='This level take from Sponsorship Level, '
+ 'you can add it by using Sponsorship Level menu',
)
# noinspection PyClassicStyleClass
@@ -86,11 +92,13 @@
self.slug = slugify(new_list)[:50]
super(SponsorshipPeriod, self).save(*args, **kwargs)
- def slug_generator(self, size=6, chars=string.ascii_lowercase):
+ @staticmethod
+ def slug_generator(size=6, chars=string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
def __unicode__(self):
return u'%s - %s : %s' % (
+ self.sponsor.name,
self.start_date,
self.end_date
)
@@ -102,7 +110,7 @@
})
def current_sponsor(self):
- today = timezone.now()
+ today = datetime.datetime.now().date()
end = self.end_date
if end < today:
return False
| {"golden_diff": "diff --git a/django_project/changes/models/sponsorship_period.py b/django_project/changes/models/sponsorship_period.py\n--- a/django_project/changes/models/sponsorship_period.py\n+++ b/django_project/changes/models/sponsorship_period.py\n@@ -1,15 +1,19 @@\n-__author__ = 'rischan'\n+# coding=utf-8\n \n import string\n import random\n+import datetime\n from django.utils import timezone\n from django.core.urlresolvers import reverse\n from django.utils.text import slugify\n+# noinspection PyPackageRequirements\n from core.settings.contrib import STOP_WORDS\n from django.db import models\n from django.utils.translation import ugettext_lazy as _\n from django.contrib.auth.models import User\n \n+__author__ = 'rischan'\n+\n \n class ApprovedSponsorshipPeriodManager(models.Manager):\n \"\"\"Custom sponsor manager that shows only approved records.\"\"\"\n@@ -57,14 +61,16 @@\n objects = models.Manager()\n approved_objects = ApprovedSponsorshipPeriodManager()\n unapproved_objects = UnapprovedSponsorshipPeriodManager()\n+ # noinspection PyUnresolvedReferences\n sponsor = models.ForeignKey(\n- 'Sponsor',\n- help_text='Input the sponsor name',\n+ 'Sponsor',\n+ help_text='Input the sponsor name',\n )\n+ # noinspection PyUnresolvedReferences\n sponsorshiplevel = models.ForeignKey(\n- 'SponsorshipLevel',\n- help_text='This level take from Sponsorship Level, '\n- 'you can add it by using Sponsorship Level menu',\n+ 'SponsorshipLevel',\n+ help_text='This level take from Sponsorship Level, '\n+ 'you can add it by using Sponsorship Level menu',\n )\n # noinspection PyClassicStyleClass\n \n@@ -86,11 +92,13 @@\n self.slug = slugify(new_list)[:50]\n super(SponsorshipPeriod, self).save(*args, **kwargs)\n \n- def slug_generator(self, size=6, chars=string.ascii_lowercase):\n+ @staticmethod\n+ def slug_generator(size=6, chars=string.ascii_lowercase):\n return ''.join(random.choice(chars) for _ in range(size))\n \n def __unicode__(self):\n return u'%s - %s : %s' % (\n+ self.sponsor.name,\n self.start_date,\n self.end_date\n )\n@@ -102,7 +110,7 @@\n })\n \n def current_sponsor(self):\n- today = timezone.now()\n+ today = datetime.datetime.now().date()\n end = self.end_date\n if end < today:\n return False\n", "issue": "Crash on listing current sponsors\nWhen opening the sponsors view (with some sponsors and sponsor periods created) we get a crash.\n\nhttp://changelog.inasafe.org/en/qgis/sponsor/list/\n\nSentry info:\n\nhttp://sentry.kartoza.com/kartoza/projecta-live/group/5848/\n\nRelevant code.\n\n```\n def current_sponsor(self):\n today = datetime.datetime.now().replace(tzinfo=utc)\n end = self.end_date.replace(tzinfo=utc) # <-- offending line\n if end < today:\n return False\n else:\n return True\n```\n\n", "before_files": [{"content": "__author__ = 'rischan'\n\nimport string\nimport random\nfrom django.utils import timezone\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nfrom core.settings.contrib import STOP_WORDS\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\n\n\nclass ApprovedSponsorshipPeriodManager(models.Manager):\n \"\"\"Custom sponsor manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedSponsorshipPeriodManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedSponsorshipPeriodManager(models.Manager):\n \"\"\"Custom sponsor manager that shows only unapproved 
records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedSponsorshipPeriodManager, self).get_queryset().filter(\n approved=False)\n\n\nclass SponsorshipPeriod(models.Model):\n \"\"\"A sponsorship period model e.g. gui, backend, web site etc.\"\"\"\n\n start_date = models.DateField(\n _(\"Start date\"),\n help_text='Start date of sponsorship period',\n default=timezone.now)\n\n end_date = models.DateField(\n _(\"End date\"),\n help_text='End date of sponsorship period',\n default=timezone.now)\n\n approved = models.BooleanField(\n help_text=_(\n 'Whether this sponsorship period has been approved for use by '\n 'the project owner.'),\n default=False\n )\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedSponsorshipPeriodManager()\n unapproved_objects = UnapprovedSponsorshipPeriodManager()\n sponsor = models.ForeignKey(\n 'Sponsor',\n help_text='Input the sponsor name',\n )\n sponsorshiplevel = models.ForeignKey(\n 'SponsorshipLevel',\n help_text='This level take from Sponsorship Level, '\n 'you can add it by using Sponsorship Level menu',\n )\n # noinspection PyClassicStyleClass\n\n class Meta:\n \"\"\"Meta options for the sponsor class.\"\"\"\n unique_together = (\n ('project', 'slug')\n )\n app_label = 'changes'\n ordering = ['start_date']\n\n def save(self, *args, **kwargs):\n\n if not self.pk:\n name = self.slug_generator()\n words = name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = slugify(new_list)[:50]\n super(SponsorshipPeriod, self).save(*args, **kwargs)\n\n def slug_generator(self, size=6, chars=string.ascii_lowercase):\n return ''.join(random.choice(chars) for _ in range(size))\n\n def __unicode__(self):\n return u'%s - %s : %s' % (\n self.start_date,\n self.end_date\n )\n\n def get_absolute_url(self):\n return reverse('sponsorshipperiod-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def current_sponsor(self):\n today = timezone.now()\n end = self.end_date\n if end < today:\n return False\n else:\n return True\n", "path": "django_project/changes/models/sponsorship_period.py"}]} | 1,614 | 575 |
gh_patches_debug_29501 | rasdani/github-patches | git_diff | adap__flower-1347 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sklearn-logreg-mnist example is outdated
### Describe the bug
The sklearn-logreg-mnist example is outdated and does not work with flower 1.0.0. I will make a pull request to fix this.
### Steps/Code to Reproduce
sh ./run.sh
### Expected Results
The example should run with no errors, both on the client and on the server.
### Actual Results
A number of errors arise when the script runs.
</issue>
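Most of the breakage is signature changes in the Flower 1.0 API. A condensed sketch of the migrated calls, mirroring the patch further down (not a complete runnable example on its own):

```python
import flwr as fl

class MigratedClient(fl.client.NumPyClient):
    def get_parameters(self, config):  # 1.0 passes a config argument
        return []

# start_server now takes a keyword server_address and a ServerConfig
fl.server.start_server(
    server_address="0.0.0.0:8080",
    config=fl.server.ServerConfig(num_rounds=5),
)
```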
<code>
[start of examples/sklearn-logreg-mnist/server.py]
1 import flwr as fl
2 import utils
3 from sklearn.metrics import log_loss
4 from sklearn.linear_model import LogisticRegression
5 from typing import Dict
6
7
8 def fit_round(server_round: int) -> Dict:
9 """Send round number to client."""
10 return {"server_round": server_round}
11
12
13 def get_evaluate_fn(model: LogisticRegression):
14 """Return an evaluation function for server-side evaluation."""
15
16 # Load test data here to avoid the overhead of doing it in `evaluate` itself
17 _, (X_test, y_test) = utils.load_mnist()
18
19 # The `evaluate` function will be called after every round
20 def evaluate(parameters: fl.common.Weights):
21 # Update model with the latest parameters
22 utils.set_model_params(model, parameters)
23 loss = log_loss(y_test, model.predict_proba(X_test))
24 accuracy = model.score(X_test, y_test)
25 return loss, {"accuracy": accuracy}
26
27 return evaluate
28
29
30 # Start Flower server for five rounds of federated learning
31 if __name__ == "__main__":
32 model = LogisticRegression()
33 utils.set_initial_params(model)
34 strategy = fl.server.strategy.FedAvg(
35 min_available_clients=2,
36 evaluate_fn=get_evaluate_fn(model),
37 on_fit_config_fn=fit_round,
38 )
39 fl.server.start_server(
40 server_address="0.0.0.0:8080",
41 strategy=strategy,
42 config={"num_rounds": 5},
43 )
44
[end of examples/sklearn-logreg-mnist/server.py]
[start of examples/sklearn-logreg-mnist/client.py]
1 import warnings
2 import flwr as fl
3 import numpy as np
4
5 from sklearn.linear_model import LogisticRegression
6 from sklearn.metrics import log_loss
7
8 import utils
9
10 if __name__ == "__main__":
11     # Load MNIST dataset from https://www.openml.org/d/554
12 (X_train, y_train), (X_test, y_test) = utils.load_mnist()
13
14 # Split train set into 10 partitions and randomly use one for training.
15 partition_id = np.random.choice(10)
16 (X_train, y_train) = utils.partition(X_train, y_train, 10)[partition_id]
17
18 # Create LogisticRegression Model
19 model = LogisticRegression(
20 penalty="l2",
21 max_iter=1, # local epoch
22 warm_start=True, # prevent refreshing weights when fitting
23 )
24
25 # Setting initial parameters, akin to model.compile for keras models
26 utils.set_initial_params(model)
27
28 # Define Flower client
29 class MnistClient(fl.client.NumPyClient):
30 def get_parameters(self): # type: ignore
31 return utils.get_model_parameters(model)
32
33 def fit(self, parameters, config): # type: ignore
34 utils.set_model_params(model, parameters)
35 # Ignore convergence failure due to low local epochs
36 with warnings.catch_warnings():
37 warnings.simplefilter("ignore")
38 model.fit(X_train, y_train)
39 print(f"Training finished for round {config['server_round']}")
40 return utils.get_model_parameters(model), len(X_train), {}
41
42 def evaluate(self, parameters, config): # type: ignore
43 utils.set_model_params(model, parameters)
44 loss = log_loss(y_test, model.predict_proba(X_test))
45 accuracy = model.score(X_test, y_test)
46 return loss, len(X_test), {"accuracy": accuracy}
47
48 # Start Flower client
49 fl.client.start_numpy_client("0.0.0.0:8080", client=MnistClient())
50
[end of examples/sklearn-logreg-mnist/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/sklearn-logreg-mnist/client.py b/examples/sklearn-logreg-mnist/client.py
--- a/examples/sklearn-logreg-mnist/client.py
+++ b/examples/sklearn-logreg-mnist/client.py
@@ -27,7 +27,7 @@
# Define Flower client
class MnistClient(fl.client.NumPyClient):
- def get_parameters(self): # type: ignore
+ def get_parameters(self, config): # type: ignore
return utils.get_model_parameters(model)
def fit(self, parameters, config): # type: ignore
@@ -46,4 +46,4 @@
return loss, len(X_test), {"accuracy": accuracy}
# Start Flower client
- fl.client.start_numpy_client("0.0.0.0:8080", client=MnistClient())
+ fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MnistClient())
diff --git a/examples/sklearn-logreg-mnist/server.py b/examples/sklearn-logreg-mnist/server.py
--- a/examples/sklearn-logreg-mnist/server.py
+++ b/examples/sklearn-logreg-mnist/server.py
@@ -17,7 +17,7 @@
_, (X_test, y_test) = utils.load_mnist()
# The `evaluate` function will be called after every round
- def evaluate(parameters: fl.common.Weights):
+ def evaluate(server_round, parameters: fl.common.NDArrays, config):
# Update model with the latest parameters
utils.set_model_params(model, parameters)
loss = log_loss(y_test, model.predict_proba(X_test))
@@ -39,5 +39,5 @@
fl.server.start_server(
server_address="0.0.0.0:8080",
strategy=strategy,
- config={"num_rounds": 5},
+ config=fl.server.ServerConfig(num_rounds=5),
)
| {"golden_diff": "diff --git a/examples/sklearn-logreg-mnist/client.py b/examples/sklearn-logreg-mnist/client.py\n--- a/examples/sklearn-logreg-mnist/client.py\n+++ b/examples/sklearn-logreg-mnist/client.py\n@@ -27,7 +27,7 @@\n \n # Define Flower client\n class MnistClient(fl.client.NumPyClient):\n- def get_parameters(self): # type: ignore\n+ def get_parameters(self, config): # type: ignore\n return utils.get_model_parameters(model)\n \n def fit(self, parameters, config): # type: ignore\n@@ -46,4 +46,4 @@\n return loss, len(X_test), {\"accuracy\": accuracy}\n \n # Start Flower client\n- fl.client.start_numpy_client(\"0.0.0.0:8080\", client=MnistClient())\n+ fl.client.start_numpy_client(server_address=\"0.0.0.0:8080\", client=MnistClient())\ndiff --git a/examples/sklearn-logreg-mnist/server.py b/examples/sklearn-logreg-mnist/server.py\n--- a/examples/sklearn-logreg-mnist/server.py\n+++ b/examples/sklearn-logreg-mnist/server.py\n@@ -17,7 +17,7 @@\n _, (X_test, y_test) = utils.load_mnist()\n \n # The `evaluate` function will be called after every round\n- def evaluate(parameters: fl.common.Weights):\n+ def evaluate(server_round, parameters: fl.common.NDArrays, config):\n # Update model with the latest parameters\n utils.set_model_params(model, parameters)\n loss = log_loss(y_test, model.predict_proba(X_test))\n@@ -39,5 +39,5 @@\n fl.server.start_server(\n server_address=\"0.0.0.0:8080\",\n strategy=strategy,\n- config={\"num_rounds\": 5},\n+ config=fl.server.ServerConfig(num_rounds=5),\n )\n", "issue": "sklearn-logreg-mnist example is outdated\n### Describe the bug\n\nThe sklearn-logreg-mnist example is outdated and does not work with flower 1.0.0. I will make a pull request to fix this.\n\n### Steps/Code to Reproduce\n\nsh ./run.sh\n\n### Expected Results\n\nThe example should run with no errors, both on the client and on the server.\n\n### Actual Results\n\nNumber of errors arise when the script runs.\n", "before_files": [{"content": "import flwr as fl\nimport utils\nfrom sklearn.metrics import log_loss\nfrom sklearn.linear_model import LogisticRegression\nfrom typing import Dict\n\n\ndef fit_round(server_round: int) -> Dict:\n \"\"\"Send round number to client.\"\"\"\n return {\"server_round\": server_round}\n\n\ndef get_evaluate_fn(model: LogisticRegression):\n \"\"\"Return an evaluation function for server-side evaluation.\"\"\"\n\n # Load test data here to avoid the overhead of doing it in `evaluate` itself\n _, (X_test, y_test) = utils.load_mnist()\n\n # The `evaluate` function will be called after every round\n def evaluate(parameters: fl.common.Weights):\n # Update model with the latest parameters\n utils.set_model_params(model, parameters)\n loss = log_loss(y_test, model.predict_proba(X_test))\n accuracy = model.score(X_test, y_test)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\n# Start Flower server for five rounds of federated learning\nif __name__ == \"__main__\":\n model = LogisticRegression()\n utils.set_initial_params(model)\n strategy = fl.server.strategy.FedAvg(\n min_available_clients=2,\n evaluate_fn=get_evaluate_fn(model),\n on_fit_config_fn=fit_round,\n )\n fl.server.start_server(\n server_address=\"0.0.0.0:8080\",\n strategy=strategy,\n config={\"num_rounds\": 5},\n )\n", "path": "examples/sklearn-logreg-mnist/server.py"}, {"content": "import warnings\nimport flwr as fl\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import log_loss\n\nimport utils\n\nif __name__ == \"__main__\":\n # Load MNIST dataset 
from https://www.openml.org/d/554\n (X_train, y_train), (X_test, y_test) = utils.load_mnist()\n\n # Split train set into 10 partitions and randomly use one for training.\n partition_id = np.random.choice(10)\n (X_train, y_train) = utils.partition(X_train, y_train, 10)[partition_id]\n\n # Create LogisticRegression Model\n model = LogisticRegression(\n penalty=\"l2\",\n max_iter=1, # local epoch\n warm_start=True, # prevent refreshing weights when fitting\n )\n\n # Setting initial parameters, akin to model.compile for keras models\n utils.set_initial_params(model)\n\n # Define Flower client\n class MnistClient(fl.client.NumPyClient):\n def get_parameters(self): # type: ignore\n return utils.get_model_parameters(model)\n\n def fit(self, parameters, config): # type: ignore\n utils.set_model_params(model, parameters)\n # Ignore convergence failure due to low local epochs\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model.fit(X_train, y_train)\n print(f\"Training finished for round {config['server_round']}\")\n return utils.get_model_parameters(model), len(X_train), {}\n\n def evaluate(self, parameters, config): # type: ignore\n utils.set_model_params(model, parameters)\n loss = log_loss(y_test, model.predict_proba(X_test))\n accuracy = model.score(X_test, y_test)\n return loss, len(X_test), {\"accuracy\": accuracy}\n\n # Start Flower client\n fl.client.start_numpy_client(\"0.0.0.0:8080\", client=MnistClient())\n", "path": "examples/sklearn-logreg-mnist/client.py"}]} | 1,571 | 442 |
gh_patches_debug_13768 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Export-Button Problem in Modul "Brainstorming (with map)" on Prod, Stage and Dev
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/multimodul-test-merkmalkategorie/basic/
**user:** initiator, admin
**expected behaviour:** as a user I want to export all Ideas and Comments in all modules which have Ideas/Proposals
**behaviour:** In the Modules "Brainstorming/Brainstorming with Map" I cannot see the Excel-Export-Button and therefore cannot export the ideas/comments in my project.
**important screensize:** -
**device & browser:** -
**Comment/Question:** Every other module with proposals/ideas has the excel-export-button. There's a workaround: when recreating the right URL, I can get to the Excel-Export page. In this case: https://meinberlin-dev.liqd.net/dashboard/modules/brainstorming-mit-karte-7/export/mapidea/
<img width="311" alt="Bildschirmfoto 2023-02-03 um 10 50 25" src="https://user-images.githubusercontent.com/113608720/216568760-5075d601-eb68-44f1-9209-a3b547d994f9.png">
Screenshot?
</issue>
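The component never matches brainstorming modules because `is_effective` tests a single blueprint code. A minimal sketch of the widened check (the "MIC"/"MBS" codes are taken from the fix below):

```python
def export_is_effective(module, accepted=("MIC", "MBS")):
    # map-idea export should also cover brainstorming-with-map modules
    return (
        module.blueprint_type in accepted
        and not module.project.is_draft
        and not module.is_draft
    )
```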
<code>
[start of meinberlin/apps/mapideas/dashboard.py]
1 from django.urls import reverse
2 from django.utils.translation import gettext_lazy as _
3
4 from adhocracy4.dashboard import DashboardComponent
5 from adhocracy4.dashboard import components
6
7 from . import exports
8 from . import views
9
10
11 class ExportMapIdeaComponent(DashboardComponent):
12 identifier = "mapidea_export"
13 weight = 50
14 label = _("Export Excel")
15
16 def is_effective(self, module):
17 return (
18 module.blueprint_type == "MIC"
19 and not module.project.is_draft
20 and not module.is_draft
21 )
22
23 def get_progress(self, module):
24 return 0, 0
25
26 def get_base_url(self, module):
27 return reverse(
28 "a4dashboard:mapidea-export-module",
29 kwargs={
30 "module_slug": module.slug,
31 },
32 )
33
34 def get_urls(self):
35 return [
36 (
37 r"^modules/(?P<module_slug>[-\w_]+)/export/mapidea/$",
38 views.MapIdeaDashboardExportView.as_view(component=self),
39 "mapidea-export-module",
40 ),
41 (
42 r"^modules/(?P<module_slug>[-\w_]+)/export/mapidea/ideas/$",
43 exports.MapIdeaExportView.as_view(),
44 "mapidea-export",
45 ),
46 (
47 r"^modules/(?P<module_slug>[-\w_]+)/export/mapidea/comments/$",
48 exports.MapIdeaCommentExportView.as_view(),
49 "mapidea-comment-export",
50 ),
51 ]
52
53
54 components.register_module(ExportMapIdeaComponent())
55
[end of meinberlin/apps/mapideas/dashboard.py]
[start of meinberlin/apps/ideas/dashboard.py]
1 from django.urls import reverse
2 from django.utils.translation import gettext_lazy as _
3
4 from adhocracy4.dashboard import DashboardComponent
5 from adhocracy4.dashboard import components
6
7 from . import exports
8 from . import views
9
10
11 class ExportIdeaComponent(DashboardComponent):
12 identifier = "idea_export"
13 weight = 50
14 label = _("Export Excel")
15
16 def is_effective(self, module):
17 return (
18 module.blueprint_type == "IC"
19 and not module.project.is_draft
20 and not module.is_draft
21 )
22
23 def get_progress(self, module):
24 return 0, 0
25
26 def get_base_url(self, module):
27 return reverse(
28 "a4dashboard:idea-export-module",
29 kwargs={
30 "module_slug": module.slug,
31 },
32 )
33
34 def get_urls(self):
35 return [
36 (
37 r"^modules/(?P<module_slug>[-\w_]+)/export/idea/$",
38 views.IdeaDashboardExportView.as_view(component=self),
39 "idea-export-module",
40 ),
41 (
42 r"^modules/(?P<module_slug>[-\w_]+)/export/idea/ideas/$",
43 exports.IdeaExportView.as_view(),
44 "idea-export",
45 ),
46 (
47 r"^modules/(?P<module_slug>[-\w_]+)/export/idea/comments/$",
48 exports.IdeaCommentExportView.as_view(),
49 "idea-comment-export",
50 ),
51 ]
52
53
54 components.register_module(ExportIdeaComponent())
55
[end of meinberlin/apps/ideas/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/ideas/dashboard.py b/meinberlin/apps/ideas/dashboard.py
--- a/meinberlin/apps/ideas/dashboard.py
+++ b/meinberlin/apps/ideas/dashboard.py
@@ -15,7 +15,7 @@
def is_effective(self, module):
return (
- module.blueprint_type == "IC"
+ module.blueprint_type in ["IC", "BS"]
and not module.project.is_draft
and not module.is_draft
)
diff --git a/meinberlin/apps/mapideas/dashboard.py b/meinberlin/apps/mapideas/dashboard.py
--- a/meinberlin/apps/mapideas/dashboard.py
+++ b/meinberlin/apps/mapideas/dashboard.py
@@ -15,7 +15,7 @@
def is_effective(self, module):
return (
- module.blueprint_type == "MIC"
+ module.blueprint_type in ["MIC", "MBS"]
and not module.project.is_draft
and not module.is_draft
)
| {"golden_diff": "diff --git a/meinberlin/apps/ideas/dashboard.py b/meinberlin/apps/ideas/dashboard.py\n--- a/meinberlin/apps/ideas/dashboard.py\n+++ b/meinberlin/apps/ideas/dashboard.py\n@@ -15,7 +15,7 @@\n \n def is_effective(self, module):\n return (\n- module.blueprint_type == \"IC\"\n+ module.blueprint_type in [\"IC\", \"BS\"]\n and not module.project.is_draft\n and not module.is_draft\n )\ndiff --git a/meinberlin/apps/mapideas/dashboard.py b/meinberlin/apps/mapideas/dashboard.py\n--- a/meinberlin/apps/mapideas/dashboard.py\n+++ b/meinberlin/apps/mapideas/dashboard.py\n@@ -15,7 +15,7 @@\n \n def is_effective(self, module):\n return (\n- module.blueprint_type == \"MIC\"\n+ module.blueprint_type in [\"MIC\", \"MBS\"]\n and not module.project.is_draft\n and not module.is_draft\n )\n", "issue": "Export-Button Problem in Modul \"Brainstorming (with map)\" on Prod, Stage and Dev\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/multimodul-test-merkmalkategorie/basic/\r\n**user:** initiator, admin\r\n**expected behaviour:** as I user I want to export all Ideas and Comments in all moduls which have Ideas/Proposals\r\n**behaviour:** In the Modules \"Brainstorming/Brainstorming with Map\" I cannot see the Excel-Export-Button and therefore not export the ideas/comments in my project. \r\n**important screensize:** - \r\n**device & browser:** - \r\n**Comment/Question:** Every other modul with proposals/ideas has the excel-export-button. There's a workaround when recreating the right URL, I can get to the Excel-Export page. In this case: https://meinberlin-dev.liqd.net/dashboard/modules/brainstorming-mit-karte-7/export/mapidea/\r\n<img width=\"311\" alt=\"Bildschirm\u00adfoto 2023-02-03 um 10 50 25\" src=\"https://user-images.githubusercontent.com/113608720/216568760-5075d601-eb68-44f1-9209-a3b547d994f9.png\">\r\n\r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import exports\nfrom . import views\n\n\nclass ExportMapIdeaComponent(DashboardComponent):\n identifier = \"mapidea_export\"\n weight = 50\n label = _(\"Export Excel\")\n\n def is_effective(self, module):\n return (\n module.blueprint_type == \"MIC\"\n and not module.project.is_draft\n and not module.is_draft\n )\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse(\n \"a4dashboard:mapidea-export-module\",\n kwargs={\n \"module_slug\": module.slug,\n },\n )\n\n def get_urls(self):\n return [\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/mapidea/$\",\n views.MapIdeaDashboardExportView.as_view(component=self),\n \"mapidea-export-module\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/mapidea/ideas/$\",\n exports.MapIdeaExportView.as_view(),\n \"mapidea-export\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/mapidea/comments/$\",\n exports.MapIdeaCommentExportView.as_view(),\n \"mapidea-comment-export\",\n ),\n ]\n\n\ncomponents.register_module(ExportMapIdeaComponent())\n", "path": "meinberlin/apps/mapideas/dashboard.py"}, {"content": "from django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import exports\nfrom . 
import views\n\n\nclass ExportIdeaComponent(DashboardComponent):\n identifier = \"idea_export\"\n weight = 50\n label = _(\"Export Excel\")\n\n def is_effective(self, module):\n return (\n module.blueprint_type == \"IC\"\n and not module.project.is_draft\n and not module.is_draft\n )\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse(\n \"a4dashboard:idea-export-module\",\n kwargs={\n \"module_slug\": module.slug,\n },\n )\n\n def get_urls(self):\n return [\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/idea/$\",\n views.IdeaDashboardExportView.as_view(component=self),\n \"idea-export-module\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/idea/ideas/$\",\n exports.IdeaExportView.as_view(),\n \"idea-export\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/idea/comments/$\",\n exports.IdeaCommentExportView.as_view(),\n \"idea-comment-export\",\n ),\n ]\n\n\ncomponents.register_module(ExportIdeaComponent())\n", "path": "meinberlin/apps/ideas/dashboard.py"}]} | 1,728 | 228 |
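
Why the meinberlin patch above resolves the issue, as a hedged sketch: the dashboard export component's `is_effective` gate compared `blueprint_type` for strict equality ("IC" / "MIC"), so the brainstorming blueprints ("BS" / "MBS" per the golden diff) never matched and the Excel export button was never rendered. The stand-in class below is illustrative, not the real Django module model:

class FakeModule:
    def __init__(self, blueprint_type):
        self.blueprint_type = blueprint_type

def is_effective_old(module):
    return module.blueprint_type == "IC"          # equality: brainstorming never matches

def is_effective_new(module):
    return module.blueprint_type in ["IC", "BS"]  # membership: both blueprints match

assert not is_effective_old(FakeModule("BS"))  # export button hidden before the patch
assert is_effective_new(FakeModule("BS"))      # export button shown after the patch
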
gh_patches_debug_52881 | rasdani/github-patches | git_diff | getsentry__sentry-55707 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to edit WHEN conditions from issue alert
### Environment
SaaS (https://sentry.io/)
### Steps to Reproduce
1. Create an issue alert with a few WHEN conditions
2. Save it
3. Go to the Alert details page
4. Click on Edit rule
5. Delete all the WHEN conditions
6. Click on Save
7. When you're back to the Alert details page, the WHEN conditions are still there, and the "Updated alert rule" message appears
### Expected Result
The users should be able to edit the alert rules
### Actual Result
The alert rule stays the same after editing
### Product Area
Alerts
### Link
_No response_
### DSN
_No response_
### Version
_No response_
</issue>
<code>
[start of src/sentry/mediators/project_rules/updater.py]
1 from django.db import router
2 from rest_framework.request import Request
3
4 from sentry.mediators.mediator import Mediator
5 from sentry.mediators.param import Param
6 from sentry.models import Actor, Project, Rule
7
8
9 class Updater(Mediator):
10 rule = Param(Rule)
11 name = Param(str, required=False)
12 owner = Param(int, required=False)
13 environment = Param(int, required=False)
14 project = Param(Project)
15 action_match = Param(str, required=False)
16 filter_match = Param(str, required=False)
17 actions = Param(list, required=False)
18 conditions = Param(list, required=False)
19 frequency = Param(int, required=False)
20 request = Param(Request, required=False)
21 using = router.db_for_write(Project)
22
23 def call(self):
24 self._update_name()
25 self._update_owner()
26 self._update_environment()
27 self._update_project()
28 self._update_actions()
29 self._update_action_match()
30 self._update_filter_match()
31 self._update_conditions()
32 self._update_frequency()
33 self.rule.save()
34 return self.rule
35
36 def _update_name(self):
37 if self.name:
38 self.rule.label = self.name
39
40 def _update_owner(self) -> None:
41 self.rule.owner = Actor.objects.get(id=self.owner) if self.owner else None
42
43 def _update_environment(self):
44 self.rule.environment_id = self.environment
45
46 def _update_project(self):
47 if self.project:
48 self.rule.project = self.project
49
50 def _update_actions(self):
51 if self.actions:
52 self.rule.data["actions"] = self.actions
53
54 def _update_action_match(self):
55 if self.action_match:
56 self.rule.data["action_match"] = self.action_match
57
58 def _update_filter_match(self):
59 if self.filter_match:
60 self.rule.data["filter_match"] = self.filter_match
61
62 def _update_conditions(self):
63 if self.conditions:
64 self.rule.data["conditions"] = self.conditions
65
66 def _update_frequency(self):
67 if self.frequency:
68 self.rule.data["frequency"] = self.frequency
69
[end of src/sentry/mediators/project_rules/updater.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/mediators/project_rules/updater.py b/src/sentry/mediators/project_rules/updater.py
--- a/src/sentry/mediators/project_rules/updater.py
+++ b/src/sentry/mediators/project_rules/updater.py
@@ -60,8 +60,7 @@
self.rule.data["filter_match"] = self.filter_match
def _update_conditions(self):
- if self.conditions:
- self.rule.data["conditions"] = self.conditions
+ self.rule.data["conditions"] = self.conditions or []
def _update_frequency(self):
if self.frequency:
| {"golden_diff": "diff --git a/src/sentry/mediators/project_rules/updater.py b/src/sentry/mediators/project_rules/updater.py\n--- a/src/sentry/mediators/project_rules/updater.py\n+++ b/src/sentry/mediators/project_rules/updater.py\n@@ -60,8 +60,7 @@\n self.rule.data[\"filter_match\"] = self.filter_match\n \n def _update_conditions(self):\n- if self.conditions:\n- self.rule.data[\"conditions\"] = self.conditions\n+ self.rule.data[\"conditions\"] = self.conditions or []\n \n def _update_frequency(self):\n if self.frequency:\n", "issue": "Unable to edit WHEN conditions from issue alert\n### Environment\n\nSaaS (https://sentry.io/)\n\n### Steps to Reproduce\n\n1. Create an issue alert with a few WHEN conditions\r\n2. Save it\r\n3. Go to the Alert details page\r\n4. Click on Edit rule\r\n5. Delete all the WHEN conditions\r\n6. Click on Save\r\n7. When you're back to the Alert details page, the WHEN conditions are still there, and the \"Updated alert rule\" message appears\n\n### Expected Result\n\nThe users should be able to edit the alert rules\n\n### Actual Result\n\nThe alert rule stays the same after editing\n\n### Product Area\n\nAlerts\n\n### Link\n\n_No response_\n\n### DSN\n\n_No response_\n\n### Version\n\n_No response_\n", "before_files": [{"content": "from django.db import router\nfrom rest_framework.request import Request\n\nfrom sentry.mediators.mediator import Mediator\nfrom sentry.mediators.param import Param\nfrom sentry.models import Actor, Project, Rule\n\n\nclass Updater(Mediator):\n rule = Param(Rule)\n name = Param(str, required=False)\n owner = Param(int, required=False)\n environment = Param(int, required=False)\n project = Param(Project)\n action_match = Param(str, required=False)\n filter_match = Param(str, required=False)\n actions = Param(list, required=False)\n conditions = Param(list, required=False)\n frequency = Param(int, required=False)\n request = Param(Request, required=False)\n using = router.db_for_write(Project)\n\n def call(self):\n self._update_name()\n self._update_owner()\n self._update_environment()\n self._update_project()\n self._update_actions()\n self._update_action_match()\n self._update_filter_match()\n self._update_conditions()\n self._update_frequency()\n self.rule.save()\n return self.rule\n\n def _update_name(self):\n if self.name:\n self.rule.label = self.name\n\n def _update_owner(self) -> None:\n self.rule.owner = Actor.objects.get(id=self.owner) if self.owner else None\n\n def _update_environment(self):\n self.rule.environment_id = self.environment\n\n def _update_project(self):\n if self.project:\n self.rule.project = self.project\n\n def _update_actions(self):\n if self.actions:\n self.rule.data[\"actions\"] = self.actions\n\n def _update_action_match(self):\n if self.action_match:\n self.rule.data[\"action_match\"] = self.action_match\n\n def _update_filter_match(self):\n if self.filter_match:\n self.rule.data[\"filter_match\"] = self.filter_match\n\n def _update_conditions(self):\n if self.conditions:\n self.rule.data[\"conditions\"] = self.conditions\n\n def _update_frequency(self):\n if self.frequency:\n self.rule.data[\"frequency\"] = self.frequency\n", "path": "src/sentry/mediators/project_rules/updater.py"}]} | 1,283 | 133 |
gh_patches_debug_13410 | rasdani/github-patches | git_diff | plotly__plotly.py-958 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not all dependencies are listed
Would it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies.
Xref: https://github.com/deeptools/deepTools/issues/669
</issue>
<code>
[start of plotly/figure_factory/__init__.py]
1 from __future__ import absolute_import
2
3 # Require that numpy exists for figure_factory
4 import numpy
5
6 from plotly.figure_factory._2d_density import create_2d_density
7 from plotly.figure_factory._annotated_heatmap import create_annotated_heatmap
8 from plotly.figure_factory._bullet import create_bullet
9 from plotly.figure_factory._candlestick import create_candlestick
10 from plotly.figure_factory._dendrogram import create_dendrogram
11 from plotly.figure_factory._distplot import create_distplot
12 from plotly.figure_factory._facet_grid import create_facet_grid
13 from plotly.figure_factory._gantt import create_gantt
14 from plotly.figure_factory._ohlc import create_ohlc
15 from plotly.figure_factory._quiver import create_quiver
16 from plotly.figure_factory._scatterplot import create_scatterplotmatrix
17 from plotly.figure_factory._streamline import create_streamline
18 from plotly.figure_factory._table import create_table
19 from plotly.figure_factory._trisurf import create_trisurf
20 from plotly.figure_factory._violin import create_violin
21 from plotly.figure_factory._county_choropleth import create_choropleth
[end of plotly/figure_factory/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plotly/figure_factory/__init__.py b/plotly/figure_factory/__init__.py
--- a/plotly/figure_factory/__init__.py
+++ b/plotly/figure_factory/__init__.py
@@ -1,5 +1,7 @@
from __future__ import absolute_import
+from plotly import optional_imports
+
# Require that numpy exists for figure_factory
import numpy
@@ -18,4 +20,5 @@
from plotly.figure_factory._table import create_table
from plotly.figure_factory._trisurf import create_trisurf
from plotly.figure_factory._violin import create_violin
-from plotly.figure_factory._county_choropleth import create_choropleth
\ No newline at end of file
+if optional_imports.get_module('pandas') is not None:
+ from plotly.figure_factory._county_choropleth import create_choropleth
| {"golden_diff": "diff --git a/plotly/figure_factory/__init__.py b/plotly/figure_factory/__init__.py\n--- a/plotly/figure_factory/__init__.py\n+++ b/plotly/figure_factory/__init__.py\n@@ -1,5 +1,7 @@\n from __future__ import absolute_import\n \n+from plotly import optional_imports\n+\n # Require that numpy exists for figure_factory\n import numpy\n \n@@ -18,4 +20,5 @@\n from plotly.figure_factory._table import create_table\n from plotly.figure_factory._trisurf import create_trisurf\n from plotly.figure_factory._violin import create_violin\n-from plotly.figure_factory._county_choropleth import create_choropleth\n\\ No newline at end of file\n+if optional_imports.get_module('pandas') is not None:\n+ from plotly.figure_factory._county_choropleth import create_choropleth\n", "issue": "Not all dependecies are listed\nWould it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies.\r\n\r\nXref: https://github.com/deeptools/deepTools/issues/669\nNot all dependecies are listed\nWould it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies.\r\n\r\nXref: https://github.com/deeptools/deepTools/issues/669\n", "before_files": [{"content": "from __future__ import absolute_import\n\n# Require that numpy exists for figure_factory\nimport numpy\n\nfrom plotly.figure_factory._2d_density import create_2d_density\nfrom plotly.figure_factory._annotated_heatmap import create_annotated_heatmap\nfrom plotly.figure_factory._bullet import create_bullet\nfrom plotly.figure_factory._candlestick import create_candlestick\nfrom plotly.figure_factory._dendrogram import create_dendrogram\nfrom plotly.figure_factory._distplot import create_distplot\nfrom plotly.figure_factory._facet_grid import create_facet_grid\nfrom plotly.figure_factory._gantt import create_gantt\nfrom plotly.figure_factory._ohlc import create_ohlc\nfrom plotly.figure_factory._quiver import create_quiver\nfrom plotly.figure_factory._scatterplot import create_scatterplotmatrix\nfrom plotly.figure_factory._streamline import create_streamline\nfrom plotly.figure_factory._table import create_table\nfrom plotly.figure_factory._trisurf import create_trisurf\nfrom plotly.figure_factory._violin import create_violin\nfrom plotly.figure_factory._county_choropleth import create_choropleth", "path": "plotly/figure_factory/__init__.py"}]} | 978 | 206 |
gh_patches_debug_5509 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-788 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Install.py script assumes database to exist
## Description
The install.py script has a hard-coded hostname check to determine whether we're using the Docker-created Postgres container, and it only creates the databases if we are.
`if DATABASES[database_key]["HOST"] == "db":`
In https://github.com/centerofci/mathesar/pull/747, we gave our containers persistent names. The db container's name was set as `mathesar_db`.
This causes every fresh setup of Mathesar to fail.
## Expected behavior
Following the instructions in the README should work without any errors.
## To Reproduce
* Stop mathesar, if you have it running
* Remove your database
* Start Mathesar, run the migrations, and run the install.py script
</issue>
<code>
[start of install.py]
1 """
2 This script installs functions and types for Mathesar onto the configured DB.
3 """
4 from config.settings import DATABASES
5 from db import install
6 import sys
7 import getopt
8
9
10 def main():
11 skip_confirm = False
12 (opts, _) = getopt.getopt(sys.argv[1:], ":s", ["skip-confirm"])
13 for (opt, value) in opts:
14 if (opt == "-s") or (opt == "--skip-confirm"):
15 skip_confirm = True
16 for database_key in [key for key in DATABASES if key != "default"]:
17 install_on_db_with_key(database_key, skip_confirm)
18
19
20 def install_on_db_with_key(database_key, skip_confirm):
21 if DATABASES[database_key]["HOST"] == "db":
22 # if we're going to install on the docker-created Postgres, we'll
23 # create the DB
24 print("Creating Mathesar DB on docker-created PostgreSQL instance")
25 install.create_mathesar_database(
26 DATABASES[database_key]["NAME"],
27 DATABASES["default"]["USER"],
28 DATABASES["default"]["PASSWORD"],
29 DATABASES["default"]["HOST"],
30 DATABASES["default"]["NAME"],
31 DATABASES["default"]["PORT"],
32 )
33 print(f"Created DB is {DATABASES['mathesar_tables']['NAME']}")
34 else:
35 # if we're installing anywhere else, we require the DB to exist in
36 # advance.
37 username = DATABASES[database_key]["USER"]
38 password = DATABASES[database_key]["PASSWORD"]
39 host = DATABASES[database_key]["HOST"]
40 db_name = DATABASES[database_key]["NAME"]
41 port = DATABASES[database_key]["PORT"]
42 print("Installing Mathesar DB on preexisting PostgreSQL instance...")
43 if skip_confirm is False:
44 confirmation = input(
45 f"Mathesar will be installed on DB {db_name} at host {host}."
46 "Confirm? (y/n) > "
47 )
48 if (confirmation.lower() in ["y", "yes"]) or (skip_confirm is True):
49 print("Installing...")
50 install.install_mathesar_on_preexisting_database(
51 username,
52 password,
53 host,
54 db_name,
55 port,
56 )
57 else:
58 print("Skipping DB with key {database_key}.")
59
60
61 if __name__ == "__main__":
62 main()
63
[end of install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install.py b/install.py
--- a/install.py
+++ b/install.py
@@ -18,7 +18,7 @@
def install_on_db_with_key(database_key, skip_confirm):
- if DATABASES[database_key]["HOST"] == "db":
+ if DATABASES[database_key]["HOST"] == "mathesar_db":
# if we're going to install on the docker-created Postgres, we'll
# create the DB
print("Creating Mathesar DB on docker-created PostgreSQL instance")
| {"golden_diff": "diff --git a/install.py b/install.py\n--- a/install.py\n+++ b/install.py\n@@ -18,7 +18,7 @@\n \n \n def install_on_db_with_key(database_key, skip_confirm):\n- if DATABASES[database_key][\"HOST\"] == \"db\":\n+ if DATABASES[database_key][\"HOST\"] == \"mathesar_db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n", "issue": "Install.py script assumes database to exist\n## Description\r\nThe install.py script has a hard check on the hostname to check if we're using the docker created postgres container and only creates the databases if we do.\r\n\r\n`if DATABASES[database_key][\"HOST\"] == \"db\":`\r\n\r\nIn https://github.com/centerofci/mathesar/pull/747, we gave our containers persistent names. The db container's name was set as `mathesar_db`.\r\n\r\nThis leads to all fresh setup of mathesar to fail.\r\n\r\n## Expected behavior\r\nFollowing the instructions in readme should work without any error.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n* Stop mathesar, if you have it running\r\n* Remove your database\r\n* Start mathesar and run migrations and install.py script \n", "before_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nfrom config.settings import DATABASES\nfrom db import install\nimport sys\nimport getopt\n\n\ndef main():\n skip_confirm = False\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n for database_key in [key for key in DATABASES if key != \"default\"]:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n if DATABASES[database_key][\"HOST\"] == \"db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n install.create_mathesar_database(\n DATABASES[database_key][\"NAME\"],\n DATABASES[\"default\"][\"USER\"],\n DATABASES[\"default\"][\"PASSWORD\"],\n DATABASES[\"default\"][\"HOST\"],\n DATABASES[\"default\"][\"NAME\"],\n DATABASES[\"default\"][\"PORT\"],\n )\n print(f\"Created DB is {DATABASES['mathesar_tables']['NAME']}\")\n else:\n # if we're installing anywhere else, we require the DB to exist in\n # advance.\n username = DATABASES[database_key][\"USER\"]\n password = DATABASES[database_key][\"PASSWORD\"]\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n print(\"Installing Mathesar DB on preexisting PostgreSQL instance...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n \"Confirm? (y/n) > \"\n )\n if (confirmation.lower() in [\"y\", \"yes\"]) or (skip_confirm is True):\n print(\"Installing...\")\n install.install_mathesar_on_preexisting_database(\n username,\n password,\n host,\n db_name,\n port,\n )\n else:\n print(\"Skipping DB with key {database_key}.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}]} | 1,328 | 115 |
gh_patches_debug_1109 | rasdani/github-patches | git_diff | flairNLP__flair-435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot install allennlp due to matplotlib dependency conflict
Hello, thanks for the great package. I want to play with ELMoEmbeddings, which requires the allennlp package, which is not installed by default with Flair. However, installing the latest allennlp fails because it requires matplotlib==2.2.3, while Flair requires >=3.0.0. When I install allennlp==0.7.0 (without the matplotlib requirement), it still fails because of PyTorch conflicts. This makes me wonder which versions are actually compatible.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 setup(
4 name='flair',
5 version='0.4.0',
6 description='A very simple framework for state-of-the-art NLP',
7 long_description=open("README.md", encoding='utf-8').read(),
8 long_description_content_type="text/markdown",
9 author='Alan Akbik',
10 author_email='[email protected]',
11 url='https://github.com/zalandoresearch/flair',
12 packages=find_packages(exclude='test'), # same as name
13 license='MIT',
14 install_requires=[
15 'torch>=1.0.0',
16 'gensim>=3.4.0',
17 'tqdm>=4.26.0',
18 'segtok>=1.5.7',
19 'matplotlib>=3.0.0',
20 'mpld3>=0.3',
21 'sklearn',
22 'sqlitedict>=1.6.0',
23 'deprecated>=1.2.4',
24 'hyperopt>=0.1.1',
25 'pytorch-pretrained-bert>=0.3.0'
26 ],
27 include_package_data=True,
28 python_requires='>=3.6',
29 )
30
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
'gensim>=3.4.0',
'tqdm>=4.26.0',
'segtok>=1.5.7',
- 'matplotlib>=3.0.0',
+ 'matplotlib>=2.2.3',
'mpld3>=0.3',
'sklearn',
'sqlitedict>=1.6.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n 'gensim>=3.4.0',\n 'tqdm>=4.26.0',\n 'segtok>=1.5.7',\n- 'matplotlib>=3.0.0',\n+ 'matplotlib>=2.2.3',\n 'mpld3>=0.3',\n 'sklearn',\n 'sqlitedict>=1.6.0',\n", "issue": "Cannot install allennlp due to matplotlib dependency conflict\nHello, thanks for the great package. I want to play with ELMoEmbeddings, which requires package allennlp, not installed by default with Flair. However, installing latest allennlp fails because it requires matplotlib==2.2.3, while Flair requires >=3.0.0. When I install allennlp==0.7.0 (without the matplotlib requirement), it still fails because of PyTorch conflicts. Makes me wonder what versions I should use that are actually compatible?\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.4.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch>=1.0.0',\n 'gensim>=3.4.0',\n 'tqdm>=4.26.0',\n 'segtok>=1.5.7',\n 'matplotlib>=3.0.0',\n 'mpld3>=0.3',\n 'sklearn',\n 'sqlitedict>=1.6.0',\n 'deprecated>=1.2.4',\n 'hyperopt>=0.1.1',\n 'pytorch-pretrained-bert>=0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]} | 963 | 118 |
gh_patches_debug_17371 | rasdani/github-patches | git_diff | translate__pootle-4635 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Put `Review suggestions` action button before others
Let's change the order of the action buttons and put `Review suggestions` before `Needs translation` to encourage going through the suggestion backlog.
</issue>
<code>
[start of pootle/core/browser.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.utils.translation import ugettext_lazy as _
11
12
13 HEADING_CHOICES = [
14 {
15 'id': 'name',
16 'class': 'stats',
17 'display_name': _("Name"),
18 },
19 {
20 'id': 'priority',
21 'class': 'stats-number sorttable_numeric',
22 'display_name': _("Priority"),
23 },
24 {
25 'id': 'project',
26 'class': 'stats',
27 'display_name': _("Project"),
28 },
29 {
30 'id': 'language',
31 'class': 'stats',
32 'display_name': _("Language"),
33 },
34 {
35 'id': 'progress',
36 'class': 'stats',
37 # Translators: noun. The graphical representation of translation status
38 'display_name': _("Progress"),
39 },
40 {
41 'id': 'total',
42 'class': 'stats-number sorttable_numeric when-loaded',
43 # Translators: Heading representing the total number of words of a file
44 # or directory
45 'display_name': _("Total"),
46 },
47 {
48 'id': 'last-updated',
49 'class': 'stats sorttable_numeric when-loaded',
50 'display_name': _("Last updated"),
51 },
52 {
53 'id': 'need-translation',
54 'class': 'stats-number sorttable_numeric when-loaded',
55 'display_name': _("Need Translation"),
56 },
57 {
58 'id': 'suggestions',
59 'class': 'stats-number sorttable_numeric when-loaded',
60 # Translators: The number of suggestions pending review
61 'display_name': _("Suggestions"),
62 },
63 {
64 'id': 'critical',
65 'class': 'stats-number sorttable_numeric when-loaded',
66 'display_name': _("Critical"),
67 },
68 {
69 'id': 'activity',
70 'class': 'stats sorttable_numeric when-loaded',
71 'display_name': _("Last Activity"),
72 },
73 ]
74
75
76 def get_table_headings(choices):
77 """Filters the list of available table headings to the given `choices`."""
78 return filter(lambda x: x['id'] in choices, HEADING_CHOICES)
79
80
81 def make_generic_item(path_obj, **kwargs):
82 """Template variables for each row in the table."""
83 return {
84 'href': path_obj.get_absolute_url(),
85 'href_all': path_obj.get_translate_url(),
86 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),
87 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),
88 'href_critical': path_obj.get_critical_url(**kwargs),
89 'title': path_obj.name,
90 'code': path_obj.code,
91 'is_disabled': getattr(path_obj, 'disabled', False),
92 }
93
94
95 def make_directory_item(directory, **filters):
96 item = make_generic_item(directory, **filters)
97 item.update({
98 'icon': 'folder',
99 })
100 return item
101
102
103 def make_store_item(store):
104 item = make_generic_item(store)
105 item.update({
106 'icon': 'file',
107 })
108 return item
109
110
111 def get_parent(path_obj):
112 """Retrieves a representation of the parent object.
113
114 :param path_obj: either a `Directory` or Store` instance.
115 """
116 parent_dir = path_obj.parent
117
118 if parent_dir.is_project():
119 return None
120
121 if parent_dir.is_language():
122 label = _('Back to language')
123 else:
124 label = _('Back to parent folder')
125
126 return {
127 'title': label,
128 'href': parent_dir.get_absolute_url()
129 }
130
131
132 def make_project_item(translation_project):
133 item = make_generic_item(translation_project)
134 item.update({
135 'icon': 'project',
136 'title': translation_project.project.name,
137 })
138 return item
139
140
141 def make_language_item(translation_project):
142 item = make_generic_item(translation_project)
143 item.update({
144 'icon': 'language',
145 'title': translation_project.language.name,
146 })
147 return item
148
149
150 def make_xlanguage_item(resource_obj):
151 translation_project = resource_obj.translation_project
152 item = make_generic_item(resource_obj)
153 item.update({
154 'icon': 'language',
155 'code': translation_project.language.code,
156 'title': translation_project.language.name,
157 })
158 return item
159
160
161 def make_project_list_item(project):
162 item = make_generic_item(project)
163 item.update({
164 'icon': 'project',
165 'title': project.fullname,
166 })
167 return item
168
[end of pootle/core/browser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/core/browser.py b/pootle/core/browser.py
--- a/pootle/core/browser.py
+++ b/pootle/core/browser.py
@@ -50,9 +50,9 @@
'display_name': _("Last updated"),
},
{
- 'id': 'need-translation',
+ 'id': 'critical',
'class': 'stats-number sorttable_numeric when-loaded',
- 'display_name': _("Need Translation"),
+ 'display_name': _("Critical"),
},
{
'id': 'suggestions',
@@ -61,9 +61,9 @@
'display_name': _("Suggestions"),
},
{
- 'id': 'critical',
+ 'id': 'need-translation',
'class': 'stats-number sorttable_numeric when-loaded',
- 'display_name': _("Critical"),
+ 'display_name': _("Need Translation"),
},
{
'id': 'activity',
| {"golden_diff": "diff --git a/pootle/core/browser.py b/pootle/core/browser.py\n--- a/pootle/core/browser.py\n+++ b/pootle/core/browser.py\n@@ -50,9 +50,9 @@\n 'display_name': _(\"Last updated\"),\n },\n {\n- 'id': 'need-translation',\n+ 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n- 'display_name': _(\"Need Translation\"),\n+ 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'suggestions',\n@@ -61,9 +61,9 @@\n 'display_name': _(\"Suggestions\"),\n },\n {\n- 'id': 'critical',\n+ 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n- 'display_name': _(\"Critical\"),\n+ 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'activity',\n", "issue": "Put `Review suggestions` action button before others\nLet's change order of action buttons and put `Review suggestions` before `Needs translation` to encourage going through suggestion backlog.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nHEADING_CHOICES = [\n {\n 'id': 'name',\n 'class': 'stats',\n 'display_name': _(\"Name\"),\n },\n {\n 'id': 'priority',\n 'class': 'stats-number sorttable_numeric',\n 'display_name': _(\"Priority\"),\n },\n {\n 'id': 'project',\n 'class': 'stats',\n 'display_name': _(\"Project\"),\n },\n {\n 'id': 'language',\n 'class': 'stats',\n 'display_name': _(\"Language\"),\n },\n {\n 'id': 'progress',\n 'class': 'stats',\n # Translators: noun. The graphical representation of translation status\n 'display_name': _(\"Progress\"),\n },\n {\n 'id': 'total',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: Heading representing the total number of words of a file\n # or directory\n 'display_name': _(\"Total\"),\n },\n {\n 'id': 'last-updated',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last updated\"),\n },\n {\n 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'suggestions',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: The number of suggestions pending review\n 'display_name': _(\"Suggestions\"),\n },\n {\n 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'activity',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last Activity\"),\n },\n]\n\n\ndef get_table_headings(choices):\n \"\"\"Filters the list of available table headings to the given `choices`.\"\"\"\n return filter(lambda x: x['id'] in choices, HEADING_CHOICES)\n\n\ndef make_generic_item(path_obj, **kwargs):\n \"\"\"Template variables for each row in the table.\"\"\"\n return {\n 'href': path_obj.get_absolute_url(),\n 'href_all': path_obj.get_translate_url(),\n 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),\n 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),\n 'href_critical': path_obj.get_critical_url(**kwargs),\n 'title': path_obj.name,\n 'code': path_obj.code,\n 'is_disabled': getattr(path_obj, 'disabled', False),\n }\n\n\ndef make_directory_item(directory, **filters):\n item = make_generic_item(directory, **filters)\n 
item.update({\n 'icon': 'folder',\n })\n return item\n\n\ndef make_store_item(store):\n item = make_generic_item(store)\n item.update({\n 'icon': 'file',\n })\n return item\n\n\ndef get_parent(path_obj):\n \"\"\"Retrieves a representation of the parent object.\n\n :param path_obj: either a `Directory` or Store` instance.\n \"\"\"\n parent_dir = path_obj.parent\n\n if parent_dir.is_project():\n return None\n\n if parent_dir.is_language():\n label = _('Back to language')\n else:\n label = _('Back to parent folder')\n\n return {\n 'title': label,\n 'href': parent_dir.get_absolute_url()\n }\n\n\ndef make_project_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'project',\n 'title': translation_project.project.name,\n })\n return item\n\n\ndef make_language_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'language',\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_xlanguage_item(resource_obj):\n translation_project = resource_obj.translation_project\n item = make_generic_item(resource_obj)\n item.update({\n 'icon': 'language',\n 'code': translation_project.language.code,\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_project_list_item(project):\n item = make_generic_item(project)\n item.update({\n 'icon': 'project',\n 'title': project.fullname,\n })\n return item\n", "path": "pootle/core/browser.py"}]} | 1,995 | 209 |
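
The pootle patch works because the button order is inherited from list order: `get_table_headings` filters `HEADING_CHOICES` while preserving its sequence, so swapping the `critical` and `need-translation` dicts is the entire fix. A trimmed sketch (ids only; the real entries also carry classes and display names):

HEADING_CHOICES = [
    {"id": "critical"},
    {"id": "suggestions"},
    {"id": "need-translation"},
]

def get_table_headings(choices):
    return [h for h in HEADING_CHOICES if h["id"] in choices]

order = [h["id"] for h in get_table_headings({"critical", "suggestions", "need-translation"})]
assert order == ["critical", "suggestions", "need-translation"]
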
gh_patches_debug_23732 | rasdani/github-patches | git_diff | scrapy__scrapy-4814 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test the upcoming pip dependency resolver in CI
From [email protected], about pip 20.2:
> NOTICE: This release includes the beta of the next-generation dependency resolver. It is significantly stricter and more consistent when it receives incompatible instructions, and reduces support for certain kinds of constraints files, so some workarounds and workflows may break. Please test it with the `--use-feature=2020-resolver` flag. Please see our guide on how to test and migrate, and how to report issues
<https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020>.
>
> The new dependency resolver is *off by default* because it is *not yet
ready for everyday use*.
</issue>
<code>
[start of setup.py]
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 install_requires = [
22 'Twisted>=17.9.0',
23 'cryptography>=2.0',
24 'cssselect>=0.9.1',
25 'itemloaders>=1.0.1',
26 'parsel>=1.5.0',
27 'PyDispatcher>=2.0.5',
28 'pyOpenSSL>=16.2.0',
29 'queuelib>=1.4.2',
30 'service_identity>=16.0.0',
31 'w3lib>=1.17.0',
32 'zope.interface>=4.1.3',
33 'protego>=0.1.15',
34 'itemadapter>=0.1.0',
35 ]
36 extras_require = {}
37
38 if has_environment_marker_platform_impl_support():
39 extras_require[':platform_python_implementation == "CPython"'] = [
40 'lxml>=3.5.0',
41 ]
42 extras_require[':platform_python_implementation == "PyPy"'] = [
43 # Earlier lxml versions are affected by
44 # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
45 # which was fixed in Cython 0.26, released on 2017-06-19, and used to
46 # generate the C headers of lxml release tarballs published since then, the
47 # first of which was:
48 'lxml>=4.0.0',
49 'PyPyDispatcher>=2.1.0',
50 ]
51 else:
52 install_requires.append('lxml>=3.5.0')
53
54
55 setup(
56 name='Scrapy',
57 version=version,
58 url='https://scrapy.org',
59 project_urls={
60 'Documentation': 'https://docs.scrapy.org/',
61 'Source': 'https://github.com/scrapy/scrapy',
62 'Tracker': 'https://github.com/scrapy/scrapy/issues',
63 },
64 description='A high-level Web Crawling and Web Scraping framework',
65 long_description=open('README.rst').read(),
66 author='Scrapy developers',
67 maintainer='Pablo Hoffman',
68 maintainer_email='[email protected]',
69 license='BSD',
70 packages=find_packages(exclude=('tests', 'tests.*')),
71 include_package_data=True,
72 zip_safe=False,
73 entry_points={
74 'console_scripts': ['scrapy = scrapy.cmdline:execute']
75 },
76 classifiers=[
77 'Framework :: Scrapy',
78 'Development Status :: 5 - Production/Stable',
79 'Environment :: Console',
80 'Intended Audience :: Developers',
81 'License :: OSI Approved :: BSD License',
82 'Operating System :: OS Independent',
83 'Programming Language :: Python',
84 'Programming Language :: Python :: 3',
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Programming Language :: Python :: 3.8',
88 'Programming Language :: Python :: Implementation :: CPython',
89 'Programming Language :: Python :: Implementation :: PyPy',
90 'Topic :: Internet :: WWW/HTTP',
91 'Topic :: Software Development :: Libraries :: Application Frameworks',
92 'Topic :: Software Development :: Libraries :: Python Modules',
93 ],
94 python_requires='>=3.6',
95 install_requires=install_requires,
96 extras_require=extras_require,
97 )
98
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,6 @@
'cssselect>=0.9.1',
'itemloaders>=1.0.1',
'parsel>=1.5.0',
- 'PyDispatcher>=2.0.5',
'pyOpenSSL>=16.2.0',
'queuelib>=1.4.2',
'service_identity>=16.0.0',
@@ -34,11 +33,12 @@
'itemadapter>=0.1.0',
]
extras_require = {}
-
+cpython_dependencies = [
+ 'lxml>=3.5.0',
+ 'PyDispatcher>=2.0.5',
+]
if has_environment_marker_platform_impl_support():
- extras_require[':platform_python_implementation == "CPython"'] = [
- 'lxml>=3.5.0',
- ]
+ extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
extras_require[':platform_python_implementation == "PyPy"'] = [
# Earlier lxml versions are affected by
# https://foss.heptapod.net/pypy/pypy/-/issues/2498,
@@ -49,7 +49,7 @@
'PyPyDispatcher>=2.1.0',
]
else:
- install_requires.append('lxml>=3.5.0')
+ install_requires.extend(cpython_dependencies)
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,6 @@\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n- 'PyDispatcher>=2.0.5',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n@@ -34,11 +33,12 @@\n 'itemadapter>=0.1.0',\n ]\n extras_require = {}\n-\n+cpython_dependencies = [\n+ 'lxml>=3.5.0',\n+ 'PyDispatcher>=2.0.5',\n+]\n if has_environment_marker_platform_impl_support():\n- extras_require[':platform_python_implementation == \"CPython\"'] = [\n- 'lxml>=3.5.0',\n- ]\n+ extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n@@ -49,7 +49,7 @@\n 'PyPyDispatcher>=2.1.0',\n ]\n else:\n- install_requires.append('lxml>=3.5.0')\n+ install_requires.extend(cpython_dependencies)\n \n \n setup(\n", "issue": "Test the upcoming pip dependency resolver in CI\nFrom [email protected], about pip 20.2:\r\n\r\n> NOTICE: This release includes the beta of the next-generation dependency resolver. It is significantly stricter and more consistent when it receives incompatible instructions, and reduces support for certain kinds of constraints files, so some workarounds and workflows may break. Please test it with the `--use-feature=2020-resolver` flag. Please see our guide on how to test and migrate, and how to report issues\r\n<https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020>.\r\n> \r\n> The new dependency resolver is *off by default* because it is *not yet\r\nready for everyday use*.\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'PyDispatcher>=2.0.5',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n]\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = [\n 'lxml>=3.5.0',\n ]\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.append('lxml>=3.5.0')\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 
'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]} | 1,778 | 346 |
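
The scrapy change above moves `PyDispatcher` out of the unconditional install list and into the CPython-only extra, mirroring how `lxml` was already split, presumably so PyPy environments resolve only `PyPyDispatcher` under the stricter 2020 resolver. A simplified sketch of the resulting selection (not the actual setup.py machinery, which uses setuptools environment markers):

import platform

cpython_dependencies = ["lxml>=3.5.0", "PyDispatcher>=2.0.5"]
pypy_dependencies = ["lxml>=4.0.0", "PyPyDispatcher>=2.1.0"]

if platform.python_implementation() == "CPython":
    selected = cpython_dependencies
else:  # PyPy
    selected = pypy_dependencies

print(selected)
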
gh_patches_debug_32468 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pizza Ranch
Spider #29 is not collecting all locations; it only captures the first page of each state's list.
</issue>
<code>
[start of locations/spiders/pizzaranch.py]
1 import scrapy
2 import re
3 from locations.items import GeojsonPointItem
4 import json
5 class CVSSpider(scrapy.Spider):
6
7 name = "pizzaranch"
8 allowed_domains = ["pizzaranch.com"]
9 download_delay = 0.5
10 start_urls = (
11 'https://pizzaranch.com/locations',
12 )
13 def parse_times(self, times):
14 if times.strip() == 'Open 24 hours':
15 return '24/7'
16 hours_to = [x.strip() for x in times.split('-')]
17 cleaned_times = []
18
19 for hour in hours_to:
20 if re.search('PM$', hour):
21 hour = re.sub('PM', '', hour).strip()
22 hour_min = hour.split(":")
23 if int(hour_min[0]) < 12:
24 hour_min[0] = str(12 + int(hour_min[0]))
25 cleaned_times.append(":".join(hour_min))
26
27 if re.search('AM$', hour):
28 hour = re.sub('AM', '', hour).strip()
29 hour_min = hour.split(":")
30 if len(hour_min[0]) <2:
31 hour_min[0] = hour_min[0].zfill(2)
32 else:
33 hour_min[0] = str(int(hour_min[0]))
34
35 cleaned_times.append(":".join(hour_min))
36 return "-".join(cleaned_times)
37
38 def parse_hours(self, lis):
39 hours = []
40 for li in lis:
41 day = li.xpath('normalize-space(.//td[@class="c-location-hours-details-row-day"]/text())').extract_first()[:2]
42 times = li.xpath('.//td[@class="c-location-hours-details-row-intervals"]/span/span/text()').extract()
43 times = "".join(x for x in times)
44 if times and day:
45 parsed_time = self.parse_times(times)
46 hours.append(day + ' ' + parsed_time)
47
48 return "; ".join(hours)
49
50 def parse_stores(self, response):
51 map_data = response.xpath('normalize-space(//script[@id="js-map-config-dir-map-nap-map"]/text())').extract_first()
52 map_json = json.loads(map_data)
53 properties = {
54 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/span/text())').extract_first(),
55 'phone': response.xpath('normalize-space(//div[@class="c-phone-number c-phone-main-number"]/span[@class="c-phone-number-span c-phone-main-number-span"]/text())').extract_first(),
56 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(),
57 'state': response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(),
58 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
59 'ref': map_json['locs'][0]['id'],
60 'website': response.url,
61 'lat': float( map_json['locs'][0]['latitude']),
62 'lon': float( map_json['locs'][0]['longitude']),
63 }
64 hours = self.parse_hours(response.xpath('//div[@class="hours-primary hidden-xs"]/div[@class="c-location-hours"]/div[@class="c-location-hours-details-wrapper js-location-hours"]/table/tbody/tr'))
65
66 if hours:
67 properties['opening_hours'] = hours
68
69 yield GeojsonPointItem(**properties)
70
71 def parse_state_stores(self, response):
72 stores = response.xpath('//h3[@class="title"]/a/@href').extract()
73 for store in stores:
74 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)
75
76 def parse(self, response):
77 urls = response.xpath('//ol[@class="state-list"]/li/a/@href').extract()
78 for path in urls:
79 yield scrapy.Request(response.urljoin(path), callback=self.parse_state_stores)
80
[end of locations/spiders/pizzaranch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/locations/spiders/pizzaranch.py b/locations/spiders/pizzaranch.py
--- a/locations/spiders/pizzaranch.py
+++ b/locations/spiders/pizzaranch.py
@@ -51,6 +51,7 @@
map_data = response.xpath('normalize-space(//script[@id="js-map-config-dir-map-nap-map"]/text())').extract_first()
map_json = json.loads(map_data)
properties = {
+ 'name': response.xpath('//span[@class="location-name-geo"]/text()').extract_first(),
'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/span/text())').extract_first(),
'phone': response.xpath('normalize-space(//div[@class="c-phone-number c-phone-main-number"]/span[@class="c-phone-number-span c-phone-main-number-span"]/text())').extract_first(),
'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(),
@@ -68,11 +69,17 @@
yield GeojsonPointItem(**properties)
+
def parse_state_stores(self, response):
stores = response.xpath('//h3[@class="title"]/a/@href').extract()
for store in stores:
yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)
+ next_page_url = response.xpath('//div[@class="pagination"]//li[@class="next"]/a/@href').extract_first()
+
+ if next_page_url:
+ yield scrapy.Request(next_page_url, callback=self.parse_state_stores)
+
def parse(self, response):
urls = response.xpath('//ol[@class="state-list"]/li/a/@href').extract()
for path in urls:
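
The accepted patch above fixes the crawl by re-queuing the state listing page whenever a "next" link is present. As a standalone illustration, here is a minimal sketch of that pagination pattern; the spider name, start URL, and selectors below are placeholders rather than the real alltheplaces values:

```python
import scrapy


class StateListSpider(scrapy.Spider):
    """Illustrative spider showing the follow-the-next-link pattern."""

    name = "state_list_example"  # hypothetical name, not part of the repo
    start_urls = ["https://example.com/locations"]  # placeholder URL

    def parse(self, response):
        # Yield one request per store link found on the current page.
        for href in response.xpath('//h3[@class="title"]/a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_store)

        # Re-enter this callback for the next page until no "next" link remains.
        next_page = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)

    def parse_store(self, response):
        yield {"url": response.url}  # placeholder item extraction
```

Without the second block, only the first page of each listing is ever parsed, which is exactly the symptom reported above.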
| {"golden_diff": "diff --git a/locations/spiders/pizzaranch.py b/locations/spiders/pizzaranch.py\n--- a/locations/spiders/pizzaranch.py\n+++ b/locations/spiders/pizzaranch.py\n@@ -51,6 +51,7 @@\n map_data = response.xpath('normalize-space(//script[@id=\"js-map-config-dir-map-nap-map\"]/text())').extract_first()\n map_json = json.loads(map_data)\n properties = {\n+ 'name': response.xpath('//span[@class=\"location-name-geo\"]/text()').extract_first(),\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/span/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//div[@class=\"c-phone-number c-phone-main-number\"]/span[@class=\"c-phone-number-span c-phone-main-number-span\"]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n@@ -68,11 +69,17 @@\n \n yield GeojsonPointItem(**properties)\n \n+\n def parse_state_stores(self, response):\n stores = response.xpath('//h3[@class=\"title\"]/a/@href').extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n \n+ next_page_url = response.xpath('//div[@class=\"pagination\"]//li[@class=\"next\"]/a/@href').extract_first()\n+\n+ if next_page_url:\n+ yield scrapy.Request(next_page_url, callback=self.parse_state_stores)\n+\n def parse(self, response):\n urls = response.xpath('//ol[@class=\"state-list\"]/li/a/@href').extract()\n for path in urls:\n", "issue": "Pizza Ranch \nSpider #29 is not collecting all locations, it's only capturing the 1st page on each state list.\r\n\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nimport json\nclass CVSSpider(scrapy.Spider):\n\n name = \"pizzaranch\"\n allowed_domains = [\"pizzaranch.com\"]\n download_delay = 0.5\n start_urls = (\n 'https://pizzaranch.com/locations',\n )\n def parse_times(self, times):\n if times.strip() == 'Open 24 hours':\n return '24/7'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('PM$', hour):\n hour = re.sub('PM', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('AM$', hour):\n hour = re.sub('AM', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath('normalize-space(.//td[@class=\"c-location-hours-details-row-day\"]/text())').extract_first()[:2]\n times = li.xpath('.//td[@class=\"c-location-hours-details-row-intervals\"]/span/span/text()').extract()\n times = \"\".join(x for x in times)\n if times and day:\n parsed_time = self.parse_times(times)\n hours.append(day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n map_data = response.xpath('normalize-space(//script[@id=\"js-map-config-dir-map-nap-map\"]/text())').extract_first()\n map_json = json.loads(map_data)\n properties = {\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/span/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//div[@class=\"c-phone-number c-phone-main-number\"]/span[@class=\"c-phone-number-span 
c-phone-main-number-span\"]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'ref': map_json['locs'][0]['id'],\n 'website': response.url,\n 'lat': float( map_json['locs'][0]['latitude']),\n 'lon': float( map_json['locs'][0]['longitude']),\n }\n hours = self.parse_hours(response.xpath('//div[@class=\"hours-primary hidden-xs\"]/div[@class=\"c-location-hours\"]/div[@class=\"c-location-hours-details-wrapper js-location-hours\"]/table/tbody/tr'))\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_state_stores(self, response):\n stores = response.xpath('//h3[@class=\"title\"]/a/@href').extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n\n def parse(self, response):\n urls = response.xpath('//ol[@class=\"state-list\"]/li/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state_stores)\n", "path": "locations/spiders/pizzaranch.py"}]} | 1,563 | 387 |
gh_patches_debug_34766 | rasdani/github-patches | git_diff | bokeh__bokeh-9461 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bokeh.palettes refguide page header mis-renders
Adds lots of space and changes the font size, for some reason (just on this page).
https://docs.bokeh.org/en/latest/docs/reference/palettes.html
</issue>
<code>
[start of bokeh/sphinxext/bokeh_palette_group.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 ''' Generate visual representations of palettes in Bokeh palette groups.
8
9 The ``bokeh.palettes`` modules expose attributes such as ``mpl``, ``brewer``,
10 and ``d3`` that provide groups of palettes. The ``bokeh-palette-group``
11 directive accepts the name of one of these groups, and generates a visual
12 matrix of colors for every palette in the group.
13
14 As an example, the following usage of the the directive:
15
16 .. code-block:: rest
17
18 .. bokeh-palette-group:: mpl
19
20 Generates the output:
21
22 .. bokeh-palette-group:: mpl
23
24 '''
25
26 #-----------------------------------------------------------------------------
27 # Boilerplate
28 #-----------------------------------------------------------------------------
29 import logging # isort:skip
30 log = logging.getLogger(__name__)
31
32 #-----------------------------------------------------------------------------
33 # Imports
34 #-----------------------------------------------------------------------------
35
36 # External imports
37 from docutils import nodes
38 from docutils.parsers.rst import Directive
39 from sphinx.errors import SphinxError
40
41 # Bokeh imports
42 from .. import palettes as bp
43 from .templates import PALETTE_GROUP_DETAIL
44
45 #-----------------------------------------------------------------------------
46 # Globals and constants
47 #-----------------------------------------------------------------------------
48
49 __all__ = (
50 'bokeh_palette_group',
51 'BokehPaletteGroupDirective',
52 'html_visit_bokeh_palette_group',
53 'setup',
54 )
55
56 #-----------------------------------------------------------------------------
57 # General API
58 #-----------------------------------------------------------------------------
59
60 #-----------------------------------------------------------------------------
61 # Dev API
62 #-----------------------------------------------------------------------------
63
64 class bokeh_palette_group(nodes.General, nodes.Element):
65 pass
66
67
68 class BokehPaletteGroupDirective(Directive):
69
70 has_content = False
71 required_arguments = 1
72
73 def run(self):
74 node = bokeh_palette_group()
75 node['group'] = self.arguments[0]
76 return [node]
77
78 def html_visit_bokeh_palette_group(self, node):
79 self.body.append(_BOOTSTRAP_CSS)
80 self.body.append('<div class="container-fluid"><div class="row">"')
81 group = getattr(bp, node['group'], None)
82 if not isinstance(group, dict):
83 raise SphinxError("invalid palette group name %r" % node['group'])
84 names = sorted(group)
85 for name in names:
86 palettes = group[name]
87 # arbitrary cuttoff here, idea is to not show large (e.g 256 length) palettes
88 numbers = [x for x in sorted(palettes) if x < 30]
89 html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)
90 self.body.append(html)
91 self.body.append('</div></div>')
92 self.body.append(_BOOTSTRAP_JS)
93 raise nodes.SkipNode
94
95 def setup(app):
96 ''' Required Sphinx extension setup function. '''
97 app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None))
98 app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective)
99
100 #-----------------------------------------------------------------------------
101 # Private API
102 #-----------------------------------------------------------------------------
103
104 _BOOTSTRAP_CSS = """
105 <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">
106 """
107
108 _BOOTSTRAP_JS = """
109 <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
110 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
111 """
112
113 #-----------------------------------------------------------------------------
114 # Code
115 #-----------------------------------------------------------------------------
116
[end of bokeh/sphinxext/bokeh_palette_group.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/bokeh/sphinxext/bokeh_palette_group.py b/bokeh/sphinxext/bokeh_palette_group.py
--- a/bokeh/sphinxext/bokeh_palette_group.py
+++ b/bokeh/sphinxext/bokeh_palette_group.py
@@ -21,6 +21,11 @@
.. bokeh-palette-group:: mpl
+.. note::
+ This extension assumes both Bootstrap and JQuery are present (which is the
+ case for the Bokeh documentation theme). If using this theme outside the
+ Bokeh documentation, be sure to include those resources by hand.
+
'''
#-----------------------------------------------------------------------------
@@ -75,8 +80,9 @@
node['group'] = self.arguments[0]
return [node]
+# NOTE: This extension now *assumes* both Bootstrap and JQuery are present
+# (which is now the case for the Bokeh docs theme).
def html_visit_bokeh_palette_group(self, node):
- self.body.append(_BOOTSTRAP_CSS)
self.body.append('<div class="container-fluid"><div class="row">"')
group = getattr(bp, node['group'], None)
if not isinstance(group, dict):
@@ -89,7 +95,6 @@
html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)
self.body.append(html)
self.body.append('</div></div>')
- self.body.append(_BOOTSTRAP_JS)
raise nodes.SkipNode
def setup(app):
@@ -101,15 +106,6 @@
# Private API
#-----------------------------------------------------------------------------
-_BOOTSTRAP_CSS = """
-<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">
-"""
-
-_BOOTSTRAP_JS = """
-<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
-<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
-"""
-
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
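
The patch works because the Bokeh docs theme already bundles Bootstrap and jQuery, so injecting extra `<link>`/`<script>` tags from the node visitor only distorted the page. If an extension genuinely needs to ship assets of its own, a hedged sketch of the usual alternative in recent Sphinx versions is to register them once in `setup()` rather than appending raw HTML per page:

```python
def setup(app):
    """Sketch: register assets once at extension setup time."""
    # Only needed when the active theme does not already provide these;
    # the URLs are the same CDN resources the removed constants pointed at.
    app.add_css_file("https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css")
    app.add_js_file("https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js")
    return {"parallel_read_safe": True}
```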
| {"golden_diff": "diff --git a/bokeh/sphinxext/bokeh_palette_group.py b/bokeh/sphinxext/bokeh_palette_group.py\n--- a/bokeh/sphinxext/bokeh_palette_group.py\n+++ b/bokeh/sphinxext/bokeh_palette_group.py\n@@ -21,6 +21,11 @@\n \n .. bokeh-palette-group:: mpl\n \n+.. note::\n+ This extension assumes both Bootstrap and JQuery are present (which is the\n+ case for the Bokeh documentation theme). If using this theme outside the\n+ Bokeh documentation, be sure to include those resources by hand.\n+\n '''\n \n #-----------------------------------------------------------------------------\n@@ -75,8 +80,9 @@\n node['group'] = self.arguments[0]\n return [node]\n \n+# NOTE: This extension now *assumes* both Bootstrap and JQuery are present\n+# (which is now the case for the Bokeh docs theme).\n def html_visit_bokeh_palette_group(self, node):\n- self.body.append(_BOOTSTRAP_CSS)\n self.body.append('<div class=\"container-fluid\"><div class=\"row\">\"')\n group = getattr(bp, node['group'], None)\n if not isinstance(group, dict):\n@@ -89,7 +95,6 @@\n html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)\n self.body.append(html)\n self.body.append('</div></div>')\n- self.body.append(_BOOTSTRAP_JS)\n raise nodes.SkipNode\n \n def setup(app):\n@@ -101,15 +106,6 @@\n # Private API\n #-----------------------------------------------------------------------------\n \n-_BOOTSTRAP_CSS = \"\"\"\n-<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css\">\n-\"\"\"\n-\n-_BOOTSTRAP_JS = \"\"\"\n-<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js\"></script>\n-<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js\"></script>\n-\"\"\"\n-\n #-----------------------------------------------------------------------------\n # Code\n #-----------------------------------------------------------------------------\n", "issue": "bokeh.palettes refguide page header mis-renders\nAdd lots of space, changes font size, for some reason (just this page)\r\n\r\nhttps://docs.bokeh.org/en/latest/docs/reference/palettes.html\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Generate visual representations of palettes in Bokeh palette groups.\n\nThe ``bokeh.palettes`` modules expose attributes such as ``mpl``, ``brewer``,\nand ``d3`` that provide groups of palettes. The ``bokeh-palette-group``\ndirective accepts the name of one of these groups, and generates a visual\nmatrix of colors for every palette in the group.\n\nAs an example, the following usage of the the directive:\n\n.. code-block:: rest\n\n .. bokeh-palette-group:: mpl\n\nGenerates the output:\n\n .. 
bokeh-palette-group:: mpl\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom sphinx.errors import SphinxError\n\n# Bokeh imports\nfrom .. import palettes as bp\nfrom .templates import PALETTE_GROUP_DETAIL\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'bokeh_palette_group',\n 'BokehPaletteGroupDirective',\n 'html_visit_bokeh_palette_group',\n 'setup',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass bokeh_palette_group(nodes.General, nodes.Element):\n pass\n\n\nclass BokehPaletteGroupDirective(Directive):\n\n has_content = False\n required_arguments = 1\n\n def run(self):\n node = bokeh_palette_group()\n node['group'] = self.arguments[0]\n return [node]\n\ndef html_visit_bokeh_palette_group(self, node):\n self.body.append(_BOOTSTRAP_CSS)\n self.body.append('<div class=\"container-fluid\"><div class=\"row\">\"')\n group = getattr(bp, node['group'], None)\n if not isinstance(group, dict):\n raise SphinxError(\"invalid palette group name %r\" % node['group'])\n names = sorted(group)\n for name in names:\n palettes = group[name]\n # arbitrary cuttoff here, idea is to not show large (e.g 256 length) palettes\n numbers = [x for x in sorted(palettes) if x < 30]\n html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)\n self.body.append(html)\n self.body.append('</div></div>')\n self.body.append(_BOOTSTRAP_JS)\n raise nodes.SkipNode\n\ndef setup(app):\n ''' Required Sphinx extension setup function. '''\n app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None))\n app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective)\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n_BOOTSTRAP_CSS = \"\"\"\n<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css\">\n\"\"\"\n\n_BOOTSTRAP_JS = \"\"\"\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js\"></script>\n<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js\"></script>\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/sphinxext/bokeh_palette_group.py"}]} | 1,602 | 460 |
gh_patches_debug_10670 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update SQLAlchemy to 1.3.0
## Description
Multiple vulnerabilities were reported in SQLAlchemy <1.3.0:
- CVE-2019-7164 (moderate): SQLAlchemy through 1.2.17 and 1.3.x through 1.3.0b2 allows SQL Injection via the order_by parameter.
- CVE-2019-7548 (moderate): SQLAlchemy 1.2.17 has SQL Injection when the group_by parameter can be controlled.
We should update this dependency ASAP for the SecureDrop application.
</issue>
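
For context on what those CVEs mean in practice, here is a hedged, self-contained sketch; the `User` model and the in-memory database are invented for illustration. Roughly, before 1.3 a plain string reaching `order_by()` or `group_by()` could be coerced into raw textual SQL, so an attacker-controlled sort key became injectable; 1.3 requires such fragments to be opted into explicitly:

```python
from sqlalchemy import Column, Integer, String, create_engine, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class User(Base):  # hypothetical model, purely for illustration
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    username = Column(String)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# Risky pre-1.3 shape (simplified): session.query(User).order_by(user_input)
# Safe shapes: order by a mapped column, or opt in to textual SQL explicitly.
users = session.query(User).order_by(User.username).all()
users = session.query(User).order_by(text("username")).all()
```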
<code>
[start of install_files/ansible-base/callback_plugins/ansible_version_check.py]
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, \
3 unicode_literals
4
5 import sys
6
7 import ansible
8
9 try:
10 # Version 2.0+
11 from ansible.plugins.callback import CallbackBase
12 except ImportError:
13 CallbackBase = object
14
15
16 def print_red_bold(text):
17 print('\x1b[31;1m' + text + '\x1b[0m')
18
19
20 class CallbackModule(CallbackBase):
21 def __init__(self):
22 # Can't use `on_X` because this isn't forwards compatible
23 # with Ansible 2.0+
24 required_version = '2.6.8' # Keep synchronized with requirements files
25 if not ansible.__version__.startswith(required_version):
26 print_red_bold(
27 "SecureDrop restriction: only Ansible {version}.*"
28 "is supported."
29 .format(version=required_version)
30 )
31 sys.exit(1)
32
[end of install_files/ansible-base/callback_plugins/ansible_version_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -21,7 +21,7 @@
def __init__(self):
# Can't use `on_X` because this isn't forwards compatible
# with Ansible 2.0+
- required_version = '2.6.8' # Keep synchronized with requirements files
+ required_version = '2.6.14' # Keep synchronized with requirements files
if not ansible.__version__.startswith(required_version):
print_red_bold(
"SecureDrop restriction: only Ansible {version}.*"
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -21,7 +21,7 @@\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n- required_version = '2.6.8' # Keep synchronized with requirements files\n+ required_version = '2.6.14' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n", "issue": "Update SQLAlchemy to 1.3.0\n## Description\r\n\r\nMultiple vulnerabilities were reported in SQLAlchemy <1.3.0:\r\n- CVE-2019-7164 (moderate) : SQLAlchemy through 1.2.17 and 1.3.x through 1.3.0b2 allows SQL Injection via the order_by parameter.\r\n- CVE-2019-7548 (moderate) : SQLAlchemy 1.2.17 has SQL Injection when the group_by parameter can be controlled.\r\n\r\nWe should update this dependency ASAP for the SecureDrop application.\r\n\n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.6.8' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]} | 938 | 177 |
gh_patches_debug_29546 | rasdani/github-patches | git_diff | Mailu__Mailu-1014 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Should `antispam` be hardcoded?
I'm in the middle of setting up my own Mailu instance; however, I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front`, etc.). Everything works pretty well so far, but I have some problems with imap and antispam. After some investigation I noticed that the files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have the antispam host hardcoded instead of using `HOST_ANTISPAM`.
I'm happy to provide a PR, but first I want to be sure: is this in fact an oversight, or is it by design?
</issue>
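
For reference, `start.py` below already resolves other hosts through environment variables with defaults, so the hypothesized fix is to give the antispam host the same treatment. A minimal sketch of that pattern follows; the default address is an assumption, mirroring what the accepted patch ends up using:

```python
import os

# Look the host up from the environment, falling back to a default, exactly
# as start.py does for HOST_FRONT and HOST_REDIS.
antispam_address = os.environ.get("HOST_ANTISPAM", "antispam:11334")
print(antispam_address)
```

The `ham`/`spam` helper scripts would then consume the resolved value instead of a literal hostname.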
<code>
[start of core/dovecot/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import multiprocessing
6 import logging as log
7 import sys
8
9 from podop import run_server
10 from socrate import system, conf
11
12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
13
14 def start_podop():
15 os.setuid(8)
16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
17 run_server(0, "dovecot", "/tmp/podop.socket", [
18 ("quota", "url", url ),
19 ("auth", "url", url),
20 ("sieve", "url", url),
21 ])
22
23 # Actual startup script
24 os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
25 os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis"))
26 os.environ["ADMIN_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ADMIN", "admin"))
27 if os.environ["WEBMAIL"] != "none":
28 os.environ["WEBMAIL_ADDRESS"] = system.resolve_address(os.environ.get("HOST_WEBMAIL", "webmail"))
29
30 for dovecot_file in glob.glob("/conf/*.conf"):
31 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
32
33 # Run Podop, then postfix
34 multiprocessing.Process(target=start_podop).start()
35 os.system("chown mail:mail /mail")
36 os.system("chown -R mail:mail /var/lib/dovecot /conf")
37 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
38
[end of core/dovecot/start.py]
[start of services/rspamd/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import logging as log
6 import sys
7 from socrate import system, conf
8
9 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
10
11 # Actual startup script
12 os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
13
14 if "HOST_REDIS" not in os.environ:
15 os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis"))
16
17
18 for rspamd_file in glob.glob("/conf/*"):
19 conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
20
21 # Run rspamd
22 os.execv("/usr/sbin/rspamd", ["rspamd", "-i", "-f"])
23
[end of services/rspamd/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -24,6 +24,7 @@
os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis"))
os.environ["ADMIN_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ADMIN", "admin"))
+os.environ["ANTISPAM_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ANTISPAM", "antispam:11334"))
if os.environ["WEBMAIL"] != "none":
os.environ["WEBMAIL_ADDRESS"] = system.resolve_address(os.environ.get("HOST_WEBMAIL", "webmail"))
diff --git a/services/rspamd/start.py b/services/rspamd/start.py
--- a/services/rspamd/start.py
+++ b/services/rspamd/start.py
@@ -9,11 +9,14 @@
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
# Actual startup script
+
os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
if "HOST_REDIS" not in os.environ:
os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis"))
+if os.environ.get("ANTIVIRUS") == 'clamav':
+ os.environ["ANTIVIRUS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ANTIVIRUS", "antivirus:3310"))
for rspamd_file in glob.glob("/conf/*"):
conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
| {"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -24,6 +24,7 @@\n os.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\n os.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\n os.environ[\"ADMIN_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ADMIN\", \"admin\"))\n+os.environ[\"ANTISPAM_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\n if os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_WEBMAIL\", \"webmail\"))\n \ndiff --git a/services/rspamd/start.py b/services/rspamd/start.py\n--- a/services/rspamd/start.py\n+++ b/services/rspamd/start.py\n@@ -9,11 +9,14 @@\n log.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n \n # Actual startup script\n+\n os.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\n \n if \"HOST_REDIS\" not in os.environ:\n os.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\n \n+if os.environ.get(\"ANTIVIRUS\") == 'clamav':\n+ os.environ[\"ANTIVIRUS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ANTIVIRUS\", \"antivirus:3310\"))\n \n for rspamd_file in glob.glob(\"/conf/*\"):\n conf.jinja(rspamd_file, os.environ, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n", "issue": "Should `antispam` be hardcoded?\nI'm in a middle of setting up my own mailu instances, however I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front` etc.). Everything works pretty well so far, however I have some problems with imap and antispam. After some investigation I noticed, that files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have antispam host hardcoded instead of using `HOST_ANTISPAM`. \r\n\r\nI'm happy to provide PR, but firstly want to be sure - is it in fact oversight, or is this by design?\nShould `antispam` be hardcoded?\nI'm in a middle of setting up my own mailu instances, however I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front` etc.). Everything works pretty well so far, however I have some problems with imap and antispam. After some investigation I noticed, that files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have antispam host hardcoded instead of using `HOST_ANTISPAM`. 
\r\n\r\nI'm happy to provide PR, but firstly want to be sure - is it in fact oversight, or is this by design?\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\nos.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\nos.environ[\"ADMIN_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ADMIN\", \"admin\"))\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_WEBMAIL\", \"webmail\"))\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}, {"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport logging as log\nimport sys\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\n\nif \"HOST_REDIS\" not in os.environ:\n os.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\n\n\nfor rspamd_file in glob.glob(\"/conf/*\"):\n conf.jinja(rspamd_file, os.environ, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n\n# Run rspamd\nos.execv(\"/usr/sbin/rspamd\", [\"rspamd\", \"-i\", \"-f\"])\n", "path": "services/rspamd/start.py"}]} | 1,488 | 402 |
gh_patches_debug_15349 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4248 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
<code>
[start of applications/Chat/coati/models/base/actor.py]
1 from typing import Optional
2
3 import torch
4 import torch.nn as nn
5
6 from ..lora import LoRAModule
7
8
9 class Actor(LoRAModule):
10 """
11 Actor model base class.
12
13 Args:
14 model (nn.Module): Actor Model.
15 lora_rank (int): LoRA rank.
16 lora_train_bias (str): LoRA bias training mode.
17 """
18
19 def __init__(self, model: nn.Module, lora_rank: int = 0, lora_train_bias: str = 'none') -> None:
20 super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
21 self.model = model
22 self.convert_to_lora()
23
24 def forward(self,
25 input_ids: torch.LongTensor,
26 attention_mask: Optional[torch.Tensor] = None,
27 **model_kwargs, # HACK: `generate` method may pass more kwargs
28 ) -> torch.Tensor:
29 """Returns model output.
30 """
31 output = self.model(
32 input_ids,
33 attention_mask=attention_mask,
34 **model_kwargs
35 )
36 return output
37
[end of applications/Chat/coati/models/base/actor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/applications/Chat/coati/models/base/actor.py b/applications/Chat/coati/models/base/actor.py
--- a/applications/Chat/coati/models/base/actor.py
+++ b/applications/Chat/coati/models/base/actor.py
@@ -21,16 +21,13 @@
self.model = model
self.convert_to_lora()
- def forward(self,
- input_ids: torch.LongTensor,
- attention_mask: Optional[torch.Tensor] = None,
- **model_kwargs, # HACK: `generate` method may pass more kwargs
- ) -> torch.Tensor:
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ **model_kwargs, # HACK: `generate` method may pass more kwargs
+ ) -> torch.Tensor:
"""Returns model output.
"""
- output = self.model(
- input_ids,
- attention_mask=attention_mask,
- **model_kwargs
- )
+ output = self.model(input_ids, attention_mask=attention_mask, **model_kwargs)
return output
| {"golden_diff": "diff --git a/applications/Chat/coati/models/base/actor.py b/applications/Chat/coati/models/base/actor.py\n--- a/applications/Chat/coati/models/base/actor.py\n+++ b/applications/Chat/coati/models/base/actor.py\n@@ -21,16 +21,13 @@\n self.model = model\n self.convert_to_lora()\n \n- def forward(self,\n- input_ids: torch.LongTensor,\n- attention_mask: Optional[torch.Tensor] = None,\n- **model_kwargs, # HACK: `generate` method may pass more kwargs\n- ) -> torch.Tensor:\n+ def forward(\n+ self,\n+ input_ids: torch.LongTensor,\n+ attention_mask: Optional[torch.Tensor] = None,\n+ **model_kwargs, # HACK: `generate` method may pass more kwargs\n+ ) -> torch.Tensor:\n \"\"\"Returns model output.\n \"\"\"\n- output = self.model(\n- input_ids,\n- attention_mask=attention_mask,\n- **model_kwargs\n- )\n+ output = self.model(input_ids, attention_mask=attention_mask, **model_kwargs)\n return output\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom ..lora import LoRAModule\n\n\nclass Actor(LoRAModule):\n \"\"\"\n Actor model base class.\n\n Args:\n model (nn.Module): Actor Model.\n lora_rank (int): LoRA rank.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self, model: nn.Module, lora_rank: int = 0, lora_train_bias: str = 'none') -> None:\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n self.model = model\n self.convert_to_lora()\n\n def forward(self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n **model_kwargs, # HACK: `generate` method may pass more kwargs\n ) -> torch.Tensor:\n \"\"\"Returns model output.\n \"\"\"\n output = self.model(\n input_ids,\n attention_mask=attention_mask,\n **model_kwargs\n )\n return output\n", "path": "applications/Chat/coati/models/base/actor.py"}]} | 872 | 256 |
gh_patches_debug_31496 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Save a new domain application on form submission
_Outcome_: On "save" press, save any form changes to the Application model.
Relies on #221
</issue>
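
django-formtools calls the wizard's `done()` with the step forms once the final step validates, passing `form_dict` keyed by step name, so that is the natural hook for persisting the model. A minimal sketch of the pattern follows; the class name is hypothetical, the field and step names come from the forms below, and the accepted patch takes the same approach:

```python
from django.shortcuts import redirect
from formtools.wizard.views import NamedUrlSessionWizardView

from registrar.models import DomainApplication  # same import the patch relies on


class SavingWizardSketch(NamedUrlSessionWizardView):
    """Illustrative wizard showing the save-on-submit hook."""

    def done(self, form_list, form_dict, **kwargs):
        # `form_dict` is keyed by step name, so each step's cleaned_data
        # can be copied onto the model before saving.
        application = DomainApplication.objects.create(creator=self.request.user)
        organization_data = form_dict["organization"].cleaned_data
        application.organization_type = organization_data["organization_type"]
        application.save()
        return redirect("home")
```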
<code>
[start of src/registrar/forms/application_wizard.py]
1 """Forms Wizard for creating a new domain application."""
2
3 import logging
4
5 from django import forms
6
7 from django.contrib.auth.mixins import LoginRequiredMixin
8
9 from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class OrganizationForm(forms.Form):
16 organization_type = forms.ChoiceField(
17 required=True,
18 choices=[
19 ("Federal", "Federal: a federal agency"),
20 ("Interstate", "Interstate: an organization of two or more states"),
21 (
22 "State_or_Territory",
23 (
24 "State or Territory: One of the 50 U.S. states, the District of "
25 "Columbia, American Samoa, Guam, Northern Mariana Islands, "
26 "Puerto Rico, or the U.S. Virgin Islands"
27 ),
28 ),
29 (
30 "Tribal",
31 (
32 "Tribal: a tribal government recognized by the federal or "
33 "state government"
34 ),
35 ),
36 ("County", "County: a county, parish, or borough"),
37 ("City", "City: a city, town, township, village, etc."),
38 (
39 "Special_District",
40 "Special District: an independent organization within a single state",
41 ),
42 ],
43 widget=forms.RadioSelect,
44 )
45 federal_type = forms.ChoiceField(
46 required=False,
47 choices=[
48 ("Executive", "Executive"),
49 ("Judicial", "Judicial"),
50 ("Legislative", "Legislative"),
51 ],
52 widget=forms.RadioSelect,
53 )
54 is_election_board = forms.ChoiceField(
55 required=False,
56 choices=[
57 ("Yes", "Yes"),
58 ("No", "No"),
59 ],
60 widget=forms.RadioSelect,
61 )
62
63
64 class ContactForm(forms.Form):
65 organization_name = forms.CharField(label="Organization Name")
66 street_address = forms.CharField(label="Street address")
67
68
69 # List of forms in our wizard. Each entry is a tuple of a name and a form
70 # subclass
71 FORMS = [
72 ("organization", OrganizationForm),
73 ("contact", ContactForm),
74 ]
75
76 # Dict to match up the right template with the right step. Keys here must
77 # match the first elements of the tuples in FORMS
78 TEMPLATES = {
79 "organization": "application_organization.html",
80 "contact": "application_contact.html",
81 }
82
83 # We need to pass our page titles as context to the templates, indexed
84 # by the step names
85 TITLES = {
86 "organization": "About your organization",
87 "contact": "Your organization's contact information",
88 }
89
90
91 class ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView):
92
93 """Multi-page form ("wizard") for new domain applications.
94
95 This sets up a sequence of forms that gather information for new
96 domain applications. Each form in the sequence has its own URL and
97 the progress through the form is stored in the Django session (thus
98 "NamedUrlSessionWizardView").
99 """
100
101 form_list = FORMS
102
103 def get_template_names(self):
104 """Template for the current step.
105
106 The return is a singleton list.
107 """
108 return [TEMPLATES[self.steps.current]]
109
110 def get_context_data(self, form, **kwargs):
111 """Add title information to the context for all steps."""
112 context = super().get_context_data(form=form, **kwargs)
113 context["form_titles"] = TITLES
114 return context
115
116 def done(self, form_list, **kwargs):
117 logger.info("Application form submitted.")
118
[end of src/registrar/forms/application_wizard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py
--- a/src/registrar/forms/application_wizard.py
+++ b/src/registrar/forms/application_wizard.py
@@ -3,11 +3,14 @@
import logging
from django import forms
+from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore
+from registrar.models import DomainApplication, Website
+
logger = logging.getLogger(__name__)
@@ -113,5 +116,33 @@
context["form_titles"] = TITLES
return context
- def done(self, form_list, **kwargs):
- logger.info("Application form submitted.")
+ def forms_to_object(self, form_dict: dict) -> DomainApplication:
+ """Unpack the form responses onto the model object properties."""
+ application = DomainApplication.objects.create(creator=self.request.user)
+
+ # organization information
+ organization_data = form_dict["organization"].cleaned_data
+ application.organization_type = organization_data["organization_type"]
+ application.federal_branch = organization_data["federal_type"]
+ application.is_election_office = organization_data["is_election_board"]
+
+ # contact information
+ contact_data = form_dict["contact"].cleaned_data
+ application.organization_name = contact_data["organization_name"]
+ application.street_address = contact_data["street_address"]
+ # TODO: add the rest of these fields when they are created in the forms
+
+ # This isn't really the requested_domain field
+ # but we need something in this field to make the form submittable
+ requested_site, _ = Website.objects.get_or_create(
+ website=contact_data["organization_name"] + ".gov"
+ )
+ application.requested_domain = requested_site
+ return application
+
+ def done(self, form_list, form_dict, **kwargs):
+ application = self.forms_to_object(form_dict)
+ application.submit() # change the status to submitted
+ application.save()
+ logger.debug("Application object saved:", application.id)
+ return redirect("home")
| {"golden_diff": "diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py\n--- a/src/registrar/forms/application_wizard.py\n+++ b/src/registrar/forms/application_wizard.py\n@@ -3,11 +3,14 @@\n import logging\n \n from django import forms\n+from django.shortcuts import redirect\n \n from django.contrib.auth.mixins import LoginRequiredMixin\n \n from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore\n \n+from registrar.models import DomainApplication, Website\n+\n \n logger = logging.getLogger(__name__)\n \n@@ -113,5 +116,33 @@\n context[\"form_titles\"] = TITLES\n return context\n \n- def done(self, form_list, **kwargs):\n- logger.info(\"Application form submitted.\")\n+ def forms_to_object(self, form_dict: dict) -> DomainApplication:\n+ \"\"\"Unpack the form responses onto the model object properties.\"\"\"\n+ application = DomainApplication.objects.create(creator=self.request.user)\n+\n+ # organization information\n+ organization_data = form_dict[\"organization\"].cleaned_data\n+ application.organization_type = organization_data[\"organization_type\"]\n+ application.federal_branch = organization_data[\"federal_type\"]\n+ application.is_election_office = organization_data[\"is_election_board\"]\n+\n+ # contact information\n+ contact_data = form_dict[\"contact\"].cleaned_data\n+ application.organization_name = contact_data[\"organization_name\"]\n+ application.street_address = contact_data[\"street_address\"]\n+ # TODO: add the rest of these fields when they are created in the forms\n+\n+ # This isn't really the requested_domain field\n+ # but we need something in this field to make the form submittable\n+ requested_site, _ = Website.objects.get_or_create(\n+ website=contact_data[\"organization_name\"] + \".gov\"\n+ )\n+ application.requested_domain = requested_site\n+ return application\n+\n+ def done(self, form_list, form_dict, **kwargs):\n+ application = self.forms_to_object(form_dict)\n+ application.submit() # change the status to submitted\n+ application.save()\n+ logger.debug(\"Application object saved:\", application.id)\n+ return redirect(\"home\")\n", "issue": "Save a new domain application on form submission\n_Outcome_: On \"save\" press, save any form changes to the Application model. \n\nRelies on #221 \n", "before_files": [{"content": "\"\"\"Forms Wizard for creating a new domain application.\"\"\"\n\nimport logging\n\nfrom django import forms\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom formtools.wizard.views import NamedUrlSessionWizardView # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OrganizationForm(forms.Form):\n organization_type = forms.ChoiceField(\n required=True,\n choices=[\n (\"Federal\", \"Federal: a federal agency\"),\n (\"Interstate\", \"Interstate: an organization of two or more states\"),\n (\n \"State_or_Territory\",\n (\n \"State or Territory: One of the 50 U.S. states, the District of \"\n \"Columbia, American Samoa, Guam, Northern Mariana Islands, \"\n \"Puerto Rico, or the U.S. 
Virgin Islands\"\n ),\n ),\n (\n \"Tribal\",\n (\n \"Tribal: a tribal government recognized by the federal or \"\n \"state government\"\n ),\n ),\n (\"County\", \"County: a county, parish, or borough\"),\n (\"City\", \"City: a city, town, township, village, etc.\"),\n (\n \"Special_District\",\n \"Special District: an independent organization within a single state\",\n ),\n ],\n widget=forms.RadioSelect,\n )\n federal_type = forms.ChoiceField(\n required=False,\n choices=[\n (\"Executive\", \"Executive\"),\n (\"Judicial\", \"Judicial\"),\n (\"Legislative\", \"Legislative\"),\n ],\n widget=forms.RadioSelect,\n )\n is_election_board = forms.ChoiceField(\n required=False,\n choices=[\n (\"Yes\", \"Yes\"),\n (\"No\", \"No\"),\n ],\n widget=forms.RadioSelect,\n )\n\n\nclass ContactForm(forms.Form):\n organization_name = forms.CharField(label=\"Organization Name\")\n street_address = forms.CharField(label=\"Street address\")\n\n\n# List of forms in our wizard. Each entry is a tuple of a name and a form\n# subclass\nFORMS = [\n (\"organization\", OrganizationForm),\n (\"contact\", ContactForm),\n]\n\n# Dict to match up the right template with the right step. Keys here must\n# match the first elements of the tuples in FORMS\nTEMPLATES = {\n \"organization\": \"application_organization.html\",\n \"contact\": \"application_contact.html\",\n}\n\n# We need to pass our page titles as context to the templates, indexed\n# by the step names\nTITLES = {\n \"organization\": \"About your organization\",\n \"contact\": \"Your organization's contact information\",\n}\n\n\nclass ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView):\n\n \"\"\"Multi-page form (\"wizard\") for new domain applications.\n\n This sets up a sequence of forms that gather information for new\n domain applications. Each form in the sequence has its own URL and\n the progress through the form is stored in the Django session (thus\n \"NamedUrlSessionWizardView\").\n \"\"\"\n\n form_list = FORMS\n\n def get_template_names(self):\n \"\"\"Template for the current step.\n\n The return is a singleton list.\n \"\"\"\n return [TEMPLATES[self.steps.current]]\n\n def get_context_data(self, form, **kwargs):\n \"\"\"Add title information to the context for all steps.\"\"\"\n context = super().get_context_data(form=form, **kwargs)\n context[\"form_titles\"] = TITLES\n return context\n\n def done(self, form_list, **kwargs):\n logger.info(\"Application form submitted.\")\n", "path": "src/registrar/forms/application_wizard.py"}]} | 1,578 | 483 |
gh_patches_debug_20332 | rasdani/github-patches | git_diff | TheAlgorithms__Python-2467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Python type hints and doctests to other/two_sum.py
@tonydelanuez Would you be willing to add Python type hints and doctests to [`other/two_sum.py`](../blob/master/other/two_sum.py)? The function comments say that it will return a `list[int]` but instead it either returns a `list[list[int]]` or `False`.
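For illustration, a quick demonstration of that inconsistency using the `twoSum` listed below (the import assumes the snippet is run from the repository root):

```python
# Demonstrates the mixed return types described above.
from other.two_sum import twoSum

print(twoSum([2, 7, 11, 15], 9))  # prints [0, 1], then returns [[0, 1]]
print(twoSum([2, 7, 11, 15], 8))  # returns False -- no matching pair
```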
</issue>
<code>
[start of other/two_sum.py]
1 """
2 Given an array of integers, return indices of the two numbers such that they add up to
3 a specific target.
4
5 You may assume that each input would have exactly one solution, and you may not use the
6 same element twice.
7
8 Example:
9 Given nums = [2, 7, 11, 15], target = 9,
10
11 Because nums[0] + nums[1] = 2 + 7 = 9,
12 return [0, 1].
13 """
14
15
16 def twoSum(nums, target):
17 """
18 :type nums: List[int]
19 :type target: int
20 :rtype: List[int]
21 """
22 chk_map = {}
23 for index, val in enumerate(nums):
24 compl = target - val
25 if compl in chk_map:
26 indices = [chk_map[compl], index]
27 print(indices)
28 return [indices]
29 else:
30 chk_map[val] = index
31 return False
32
[end of other/two_sum.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/other/two_sum.py b/other/two_sum.py
--- a/other/two_sum.py
+++ b/other/two_sum.py
@@ -11,21 +11,37 @@
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
"""
+from __future__ import annotations
-def twoSum(nums, target):
+def two_sum(nums: list[int], target: int) -> list[int]:
"""
- :type nums: List[int]
- :type target: int
- :rtype: List[int]
+ >>> two_sum([2, 7, 11, 15], 9)
+ [0, 1]
+ >>> two_sum([15, 2, 11, 7], 13)
+ [1, 2]
+ >>> two_sum([2, 7, 11, 15], 17)
+ [0, 3]
+ >>> two_sum([7, 15, 11, 2], 18)
+ [0, 2]
+ >>> two_sum([2, 7, 11, 15], 26)
+ [2, 3]
+ >>> two_sum([2, 7, 11, 15], 8)
+ []
+ >>> two_sum([3 * i for i in range(10)], 19)
+ []
"""
chk_map = {}
for index, val in enumerate(nums):
compl = target - val
if compl in chk_map:
- indices = [chk_map[compl], index]
- print(indices)
- return [indices]
- else:
- chk_map[val] = index
- return False
+ return [chk_map[compl], index]
+ chk_map[val] = index
+ return []
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ print(f"{two_sum([2, 7, 11, 15], 9) = }")
| {"golden_diff": "diff --git a/other/two_sum.py b/other/two_sum.py\n--- a/other/two_sum.py\n+++ b/other/two_sum.py\n@@ -11,21 +11,37 @@\n Because nums[0] + nums[1] = 2 + 7 = 9,\n return [0, 1].\n \"\"\"\n+from __future__ import annotations\n \n \n-def twoSum(nums, target):\n+def two_sum(nums: list[int], target: int) -> list[int]:\n \"\"\"\n- :type nums: List[int]\n- :type target: int\n- :rtype: List[int]\n+ >>> two_sum([2, 7, 11, 15], 9)\n+ [0, 1]\n+ >>> two_sum([15, 2, 11, 7], 13)\n+ [1, 2]\n+ >>> two_sum([2, 7, 11, 15], 17)\n+ [0, 3]\n+ >>> two_sum([7, 15, 11, 2], 18)\n+ [0, 2]\n+ >>> two_sum([2, 7, 11, 15], 26)\n+ [2, 3]\n+ >>> two_sum([2, 7, 11, 15], 8)\n+ []\n+ >>> two_sum([3 * i for i in range(10)], 19)\n+ []\n \"\"\"\n chk_map = {}\n for index, val in enumerate(nums):\n compl = target - val\n if compl in chk_map:\n- indices = [chk_map[compl], index]\n- print(indices)\n- return [indices]\n- else:\n- chk_map[val] = index\n- return False\n+ return [chk_map[compl], index]\n+ chk_map[val] = index\n+ return []\n+\n+\n+if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n+ print(f\"{two_sum([2, 7, 11, 15], 9) = }\")\n", "issue": "Add Python type hints and doctests to other/two_sum.py\n@tonydelanuez Would you be willing to add Python type hints and doctests to [`other/two_sum.py`](../blob/master/other/two_sum.py)? The function comments say that it will return a `list[int]` but instead it either returns a `list[list[int]]` or `False`.\n", "before_files": [{"content": "\"\"\"\nGiven an array of integers, return indices of the two numbers such that they add up to\na specific target.\n\nYou may assume that each input would have exactly one solution, and you may not use the\nsame element twice.\n\nExample:\nGiven nums = [2, 7, 11, 15], target = 9,\n\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\"\"\"\n\n\ndef twoSum(nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n chk_map = {}\n for index, val in enumerate(nums):\n compl = target - val\n if compl in chk_map:\n indices = [chk_map[compl], index]\n print(indices)\n return [indices]\n else:\n chk_map[val] = index\n return False\n", "path": "other/two_sum.py"}]} | 875 | 491 |
gh_patches_debug_34623 | rasdani/github-patches | git_diff | chainer__chainer-505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a function for summation along axes
The `sum` function reduces over all elements, but there are many cases in which I want to take a summation over only some axes. This is analogous to `numpy.sum` with the `axis` argument.
The implementation should include a general GPU function that computes a reduction over the specified axes.
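For reference, a minimal sketch of the requested semantics in NumPy (the array values are arbitrary):

```python
import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
print(x.sum())        # 15.0 -- reduce over all elements (current behaviour)
print(x.sum(axis=0))  # [3. 5. 7.] -- reduce over the first axis only
```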
</issue>
<code>
[start of chainer/functions/math/sum.py]
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class Sum(function.Function):
9 """Sum of array elements over a given axis."""
10
11 def __init__(self, axis=None):
12 self.axis = axis
13
14 def check_type_forward(self, in_types):
15 type_check.expect(
16 in_types.size() == 1,
17 in_types[0].dtype == numpy.float32
18 )
19
20 if self.axis is not None:
21 type_check.expect(
22 self.axis < in_types[0].ndim,
23 )
24
25 def forward(self, x):
26 xp = cuda.get_array_module(*x)
27 return xp.asarray(x[0].sum(axis=self.axis)),
28
29 def backward(self, x, gy):
30 xp = cuda.get_array_module(*x)
31
32 gx = xp.empty_like(x[0])
33 if self.axis is None:
34 gx[:] = gy[0]
35 else:
36 gx[:] = xp.expand_dims(gy[0], axis=self.axis)
37
38 return gx,
39
40
41 def sum(x, axis=None):
42 """Sum of array elements over a given axis.
43
44 Args:
45 x (~chainer.Variable): Elements to sum.
46 axis (None or int): Axis which a sum is performed.
47 The default (axis = None) is perform a sum over all the dimensions
48 of the input array.
49
50 Returns:
51 ~chainer.Variable: Output variable.
52
53 """
54 return Sum(axis)(x)
55
[end of chainer/functions/math/sum.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/math/sum.py b/chainer/functions/math/sum.py
--- a/chainer/functions/math/sum.py
+++ b/chainer/functions/math/sum.py
@@ -9,7 +9,17 @@
"""Sum of array elements over a given axis."""
def __init__(self, axis=None):
- self.axis = axis
+ if axis is None:
+ self.axis = None
+ elif isinstance(axis, int):
+ self.axis = (axis,)
+ elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis):
+ if len(set(axis)) != len(axis):
+ raise ValueError('duplicate value in axis: ({})'.format(
+ ', '.join(map(str, axis))))
+ self.axis = axis
+ else:
+ raise TypeError('None, int or tuple of int are required')
def check_type_forward(self, in_types):
type_check.expect(
@@ -18,9 +28,15 @@
)
if self.axis is not None:
- type_check.expect(
- self.axis < in_types[0].ndim,
- )
+ for axis in self.axis:
+ if axis >= 0:
+ type_check.expect(
+ axis < in_types[0].ndim,
+ )
+ else:
+ type_check.expect(
+ -axis - 1 < in_types[0].ndim,
+ )
def forward(self, x):
xp = cuda.get_array_module(*x)
@@ -33,7 +49,15 @@
if self.axis is None:
gx[:] = gy[0]
else:
- gx[:] = xp.expand_dims(gy[0], axis=self.axis)
+ gy = gy[0]
+ actual_axis = []
+ for axis in self.axis:
+ if axis < 0:
+ axis = len(gx.shape) + axis
+ actual_axis.append(axis)
+ for axis in sorted(actual_axis):
+ gy = xp.expand_dims(gy, axis=axis)
+ gx[:] = gy
return gx,
@@ -43,7 +67,7 @@
Args:
x (~chainer.Variable): Elements to sum.
- axis (None or int): Axis which a sum is performed.
+ axis (None, int, or tuple of int): Axis which a sum is performed.
The default (axis = None) is perform a sum over all the dimensions
of the input array.
| {"golden_diff": "diff --git a/chainer/functions/math/sum.py b/chainer/functions/math/sum.py\n--- a/chainer/functions/math/sum.py\n+++ b/chainer/functions/math/sum.py\n@@ -9,7 +9,17 @@\n \"\"\"Sum of array elements over a given axis.\"\"\"\n \n def __init__(self, axis=None):\n- self.axis = axis\n+ if axis is None:\n+ self.axis = None\n+ elif isinstance(axis, int):\n+ self.axis = (axis,)\n+ elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis):\n+ if len(set(axis)) != len(axis):\n+ raise ValueError('duplicate value in axis: ({})'.format(\n+ ', '.join(map(str, axis))))\n+ self.axis = axis\n+ else:\n+ raise TypeError('None, int or tuple of int are required')\n \n def check_type_forward(self, in_types):\n type_check.expect(\n@@ -18,9 +28,15 @@\n )\n \n if self.axis is not None:\n- type_check.expect(\n- self.axis < in_types[0].ndim,\n- )\n+ for axis in self.axis:\n+ if axis >= 0:\n+ type_check.expect(\n+ axis < in_types[0].ndim,\n+ )\n+ else:\n+ type_check.expect(\n+ -axis - 1 < in_types[0].ndim,\n+ )\n \n def forward(self, x):\n xp = cuda.get_array_module(*x)\n@@ -33,7 +49,15 @@\n if self.axis is None:\n gx[:] = gy[0]\n else:\n- gx[:] = xp.expand_dims(gy[0], axis=self.axis)\n+ gy = gy[0]\n+ actual_axis = []\n+ for axis in self.axis:\n+ if axis < 0:\n+ axis = len(gx.shape) + axis\n+ actual_axis.append(axis)\n+ for axis in sorted(actual_axis):\n+ gy = xp.expand_dims(gy, axis=axis)\n+ gx[:] = gy\n \n return gx,\n \n@@ -43,7 +67,7 @@\n \n Args:\n x (~chainer.Variable): Elements to sum.\n- axis (None or int): Axis which a sum is performed.\n+ axis (None, int, or tuple of int): Axis which a sum is performed.\n The default (axis = None) is perform a sum over all the dimensions\n of the input array.\n", "issue": "Add a function for summation along axes\n`sum` function reduces all elements, while there are many cases that I want to take a summation over some axes. This is analogous to `numpy.sum` with the `axis` argument.\n\nThe implementation should contain a general function for GPU to compute a reduction over specified axes.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Sum(function.Function):\n \"\"\"Sum of array elements over a given axis.\"\"\"\n\n def __init__(self, axis=None):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype == numpy.float32\n )\n\n if self.axis is not None:\n type_check.expect(\n self.axis < in_types[0].ndim,\n )\n\n def forward(self, x):\n xp = cuda.get_array_module(*x)\n return xp.asarray(x[0].sum(axis=self.axis)),\n\n def backward(self, x, gy):\n xp = cuda.get_array_module(*x)\n\n gx = xp.empty_like(x[0])\n if self.axis is None:\n gx[:] = gy[0]\n else:\n gx[:] = xp.expand_dims(gy[0], axis=self.axis)\n\n return gx,\n\n\ndef sum(x, axis=None):\n \"\"\"Sum of array elements over a given axis.\n\n Args:\n x (~chainer.Variable): Elements to sum.\n axis (None or int): Axis which a sum is performed.\n The default (axis = None) is perform a sum over all the dimensions\n of the input array.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Sum(axis)(x)\n", "path": "chainer/functions/math/sum.py"}]} | 1,030 | 556 |
gh_patches_debug_3104 | rasdani/github-patches | git_diff | certbot__certbot-2004 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
don't add includeSubDomains for Strict-Transport-Security header
Hi,
I think the letsencrypt client shouldn’t add `includeSubDomains` to the Strict-Transport-Security header by default.
If you let the letsencrypt client modify the configuration of a webserver that hosts example.com, subdomain hosts (foo.example.com) become reachable only over https. If you forget even one webserver that serves a subdomain over http only, it can no longer be reached.
And since browsers don't actually offer an option to clear the HSTS cache, removing includeSubDomains from the webserver config later won't help.
includeSubDomains is something an admin should only add if they truly know that all subdomains are reachable over https. So I think this should never be added by an automatic tool like the letsencrypt client.
RFC 6797 section 11.4 [1] also describes some implications of includeSubDomains
Chris
[1] https://tools.ietf.org/html/rfc6797#section-11.4
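To make the distinction concrete, a minimal sketch of how argument lists such as the `HSTS_ARGS` constant below map onto Apache `Header` directives (the join here is illustrative only, not the plugin's actual rendering code):

```python
# Sketch: the current args vs. the proposed args, rendered as directives.
current = ["always", "set", "Strict-Transport-Security",
           '"max-age=31536000; includeSubDomains"']
proposed = ["always", "set", "Strict-Transport-Security",
            '"max-age=31536000"']
for args in (current, proposed):
    print("Header " + " ".join(args))
# Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
# Header always set Strict-Transport-Security "max-age=31536000"
```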
</issue>
<code>
[start of letsencrypt-apache/letsencrypt_apache/constants.py]
1 """Apache plugin constants."""
2 import pkg_resources
3
4
5 CLI_DEFAULTS = dict(
6 server_root="/etc/apache2",
7 ctl="apache2ctl",
8 enmod="a2enmod",
9 dismod="a2dismod",
10 le_vhost_ext="-le-ssl.conf",
11 )
12 """CLI defaults."""
13
14 MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
15 """Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
16
17 MOD_SSL_CONF_SRC = pkg_resources.resource_filename(
18 "letsencrypt_apache", "options-ssl-apache.conf")
19 """Path to the Apache mod_ssl config file found in the Let's Encrypt
20 distribution."""
21
22 AUGEAS_LENS_DIR = pkg_resources.resource_filename(
23 "letsencrypt_apache", "augeas_lens")
24 """Path to the Augeas lens directory"""
25
26 REWRITE_HTTPS_ARGS = [
27 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"]
28 """Apache version<2.3.9 rewrite rule arguments used for redirections to https vhost"""
29
30 REWRITE_HTTPS_ARGS_WITH_END = [
31 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]
32 """Apache version >= 2.3.9 rewrite rule arguments used for redirections to
33 https vhost"""
34
35 HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
36 "\"max-age=31536000; includeSubDomains\""]
37 """Apache header arguments for HSTS"""
38
39 UIR_ARGS = ["always", "set", "Content-Security-Policy",
40 "upgrade-insecure-requests"]
41
42 HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
43 "Upgrade-Insecure-Requests": UIR_ARGS}
44
45
[end of letsencrypt-apache/letsencrypt_apache/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/letsencrypt-apache/letsencrypt_apache/constants.py b/letsencrypt-apache/letsencrypt_apache/constants.py
--- a/letsencrypt-apache/letsencrypt_apache/constants.py
+++ b/letsencrypt-apache/letsencrypt_apache/constants.py
@@ -33,7 +33,7 @@
https vhost"""
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
- "\"max-age=31536000; includeSubDomains\""]
+ "\"max-age=31536000\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
| {"golden_diff": "diff --git a/letsencrypt-apache/letsencrypt_apache/constants.py b/letsencrypt-apache/letsencrypt_apache/constants.py\n--- a/letsencrypt-apache/letsencrypt_apache/constants.py\n+++ b/letsencrypt-apache/letsencrypt_apache/constants.py\n@@ -33,7 +33,7 @@\n https vhost\"\"\"\n \n HSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n- \"\\\"max-age=31536000; includeSubDomains\\\"\"]\n+ \"\\\"max-age=31536000\\\"\"]\n \"\"\"Apache header arguments for HSTS\"\"\"\n \n UIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n", "issue": "don't add includeSubDomains for Strict-Transport-Security header\nHi,\n\nI think the letsencrypt client shouldn\u2019t add `includeSubDomains` to the Strict-Transport-Security header by default.\n\nIf you let the letsencrypt client modify the configuration for a webserver which hosts example.com, this would make it only possible to reach subdomain hosts (foo.example.com) with https. If you forgot only one webserver which serves a subdomain by http only, you can't reach this one any more.\nAnd as browsers actually don't have an option to clear the HSTS cache, It won't help you to remove the includeSubDomains later from the webserver config.\n\nincludeSubDomains is something, that an admin should only add if he truly knows, that all subdomains are reachable by https. So I think this should never be added by an automatic tool, like the letsencrypt client.\n\nRFC 6797 section 11.1 [1] also describes some implications of includeSubDomains\n\nChris\n\n[1] https://tools.ietf.org/html/rfc6797#section-11.4\n\n", "before_files": [{"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\n\n\nCLI_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n ctl=\"apache2ctl\",\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n)\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nMOD_SSL_CONF_SRC = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"options-ssl-apache.conf\")\n\"\"\"Path to the Apache mod_ssl config file found in the Let's Encrypt\ndistribution.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to https vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000; includeSubDomains\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n", "path": "letsencrypt-apache/letsencrypt_apache/constants.py"}]} | 1,247 | 158 |
gh_patches_debug_21326 | rasdani/github-patches | git_diff | hylang__hy-1266 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Single quote crashes hy2py
## x.hy
```
'
```
## `hy2py x.hy`
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "hy/cmdline.py", line 406, in hy2py_main
if stdin_text is None
File "hy/cmdline.py", line 186, in pretty_error
print(e, file=sys.stderr)
File "hy/lex/exceptions.py", line 43, in __str__
source = self.source.split("\n")
AttributeError: 'NoneType' object has no attribute 'split'
```
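The crash comes down to `LexException.__str__` assuming `self.source` is always set, while the constructor (see `hy/lex/exceptions.py` below) initialises it to `None`. A minimal sketch of the failure mode, assuming hy is importable:

```python
# LexException.__str__ splits self.source unconditionally, but the
# constructor defaults it to None, so stringifying the exception crashes.
from hy.lex.exceptions import LexException

e = LexException("Could not identify the next token.", 1, 1)
str(e)  # AttributeError: 'NoneType' object has no attribute 'split'
```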
</issue>
<code>
[start of hy/lex/exceptions.py]
1 # Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
2 # Copyright (c) 2013 Bob Tolbert <[email protected]>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the "Software"),
6 # to deal in the Software without restriction, including without limitation
7 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 # and/or sell copies of the Software, and to permit persons to whom the
9 # Software is furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 # DEALINGS IN THE SOFTWARE.
21
22 from hy.errors import HyError
23
24
25 class LexException(HyError):
26 """Error during the Lexing of a Hython expression."""
27 def __init__(self, message, lineno, colno):
28 super(LexException, self).__init__(message)
29 self.message = message
30 self.lineno = lineno
31 self.colno = colno
32 self.source = None
33 self.filename = '<stdin>'
34
35 def __str__(self):
36 from hy.errors import colored
37
38 line = self.lineno
39 start = self.colno
40
41 result = ""
42
43 source = self.source.split("\n")
44
45 if line > 0 and start > 0:
46 result += ' File "%s", line %d, column %d\n\n' % (self.filename,
47 line,
48 start)
49
50 if len(self.source) > 0:
51 source_line = source[line-1]
52 else:
53 source_line = ""
54
55 result += ' %s\n' % colored.red(source_line)
56 result += ' %s%s\n' % (' '*(start-1), colored.green('^'))
57
58 result += colored.yellow("LexException: %s\n\n" % self.message)
59
60 return result
61
62
63 class PrematureEndOfInput(LexException):
64 """We got a premature end of input"""
65 def __init__(self, message):
66 super(PrematureEndOfInput, self).__init__(message, -1, -1)
67
[end of hy/lex/exceptions.py]
[start of hy/lex/__init__.py]
1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from rply.errors import LexingError
22
23 from hy.lex.exceptions import LexException, PrematureEndOfInput # NOQA
24 from hy.lex.lexer import lexer
25 from hy.lex.parser import parser
26
27
28 def tokenize(buf):
29 """
30 Tokenize a Lisp file or string buffer into internal Hy objects.
31 """
32 try:
33 return parser.parse(lexer.lex(buf))
34 except LexingError as e:
35 pos = e.getsourcepos()
36 raise LexException("Could not identify the next token.",
37 pos.lineno, pos.colno)
38 except LexException as e:
39 if e.source is None:
40 e.source = buf
41 raise
42
[end of hy/lex/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/lex/__init__.py b/hy/lex/__init__.py
--- a/hy/lex/__init__.py
+++ b/hy/lex/__init__.py
@@ -34,7 +34,7 @@
except LexingError as e:
pos = e.getsourcepos()
raise LexException("Could not identify the next token.",
- pos.lineno, pos.colno)
+ pos.lineno, pos.colno, buf)
except LexException as e:
if e.source is None:
e.source = buf
diff --git a/hy/lex/exceptions.py b/hy/lex/exceptions.py
--- a/hy/lex/exceptions.py
+++ b/hy/lex/exceptions.py
@@ -24,12 +24,12 @@
class LexException(HyError):
"""Error during the Lexing of a Hython expression."""
- def __init__(self, message, lineno, colno):
+ def __init__(self, message, lineno, colno, source=None):
super(LexException, self).__init__(message)
self.message = message
self.lineno = lineno
self.colno = colno
- self.source = None
+ self.source = source
self.filename = '<stdin>'
def __str__(self):
| {"golden_diff": "diff --git a/hy/lex/__init__.py b/hy/lex/__init__.py\n--- a/hy/lex/__init__.py\n+++ b/hy/lex/__init__.py\n@@ -34,7 +34,7 @@\n except LexingError as e:\n pos = e.getsourcepos()\n raise LexException(\"Could not identify the next token.\",\n- pos.lineno, pos.colno)\n+ pos.lineno, pos.colno, buf)\n except LexException as e:\n if e.source is None:\n e.source = buf\ndiff --git a/hy/lex/exceptions.py b/hy/lex/exceptions.py\n--- a/hy/lex/exceptions.py\n+++ b/hy/lex/exceptions.py\n@@ -24,12 +24,12 @@\n \n class LexException(HyError):\n \"\"\"Error during the Lexing of a Hython expression.\"\"\"\n- def __init__(self, message, lineno, colno):\n+ def __init__(self, message, lineno, colno, source=None):\n super(LexException, self).__init__(message)\n self.message = message\n self.lineno = lineno\n self.colno = colno\n- self.source = None\n+ self.source = source\n self.filename = '<stdin>'\n \n def __str__(self):\n", "issue": "Single quote crashes hy2py\n## x.hy\r\n\r\n```\r\n'\r\n```\r\n\r\n## `hy2py x.hy`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"hy/cmdline.py\", line 406, in hy2py_main\r\n if stdin_text is None\r\n File \"hy/cmdline.py\", line 186, in pretty_error\r\n print(e, file=sys.stderr)\r\n File \"hy/lex/exceptions.py\", line 43, in __str__\r\n source = self.source.split(\"\\n\")\r\nAttributeError: 'NoneType' object has no attribute 'split'\r\n```\n", "before_files": [{"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n# Copyright (c) 2013 Bob Tolbert <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.errors import HyError\n\n\nclass LexException(HyError):\n \"\"\"Error during the Lexing of a Hython expression.\"\"\"\n def __init__(self, message, lineno, colno):\n super(LexException, self).__init__(message)\n self.message = message\n self.lineno = lineno\n self.colno = colno\n self.source = None\n self.filename = '<stdin>'\n\n def __str__(self):\n from hy.errors import colored\n\n line = self.lineno\n start = self.colno\n\n result = \"\"\n\n source = self.source.split(\"\\n\")\n\n if line > 0 and start > 0:\n result += ' File \"%s\", line %d, column %d\\n\\n' % (self.filename,\n line,\n start)\n\n if len(self.source) > 0:\n source_line = source[line-1]\n else:\n source_line = \"\"\n\n result += ' %s\\n' % colored.red(source_line)\n result += ' %s%s\\n' % (' '*(start-1), colored.green('^'))\n\n result += colored.yellow(\"LexException: %s\\n\\n\" % self.message)\n\n return result\n\n\nclass PrematureEndOfInput(LexException):\n \"\"\"We got a premature end of input\"\"\"\n def __init__(self, message):\n super(PrematureEndOfInput, self).__init__(message, -1, -1)\n", "path": "hy/lex/exceptions.py"}, {"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom rply.errors import LexingError\n\nfrom hy.lex.exceptions import LexException, PrematureEndOfInput # NOQA\nfrom hy.lex.lexer import lexer\nfrom hy.lex.parser import parser\n\n\ndef tokenize(buf):\n \"\"\"\n Tokenize a Lisp file or string buffer into internal Hy objects.\n \"\"\"\n try:\n return parser.parse(lexer.lex(buf))\n except LexingError as e:\n pos = e.getsourcepos()\n raise LexException(\"Could not identify the next token.\",\n pos.lineno, pos.colno)\n except LexException as e:\n if e.source is None:\n e.source = buf\n raise\n", "path": "hy/lex/__init__.py"}]} | 1,874 | 297 |
gh_patches_debug_25452 | rasdani/github-patches | git_diff | encode__httpx-566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scripts as a single top-level point of project automation.
Generally across the `encode` projects I've been using [GitHub's "Scripts to Rule Them All" pattern](https://github.blog/2015-06-30-scripts-to-rule-them-all/) as a consistent style for project testing, linting, publishing, etc.
It's beneficial from my POV, because we can always just run `scripts/install` or `scripts/test` from *any* project and expect it to do the same kind of thing, even if we're using different tooling between projects. (e.g. we've started adopting `nox` in some places, but aren't there across the board yet.)
I'd like to do the same with `httpx`, while still keeping the niceness of `nox` (so e.g. reintroduce `scripts/test`, `scripts/lint` and `scripts/publish`, sketched below), but figure it's worth raising this for discussion first.
I'd also be quite keen on having a fast-feedback test suite be the default, with nox runs against multiple environments as an explicit case?
Any general thoughts here or should I just dive in and take a crack at this?
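As a purely hypothetical sketch (the real encode scripts are thin shell wrappers; the names and tooling here are assumptions), a `scripts/test` entry point could be as small as:

```python
#!/usr/bin/env python
# Hypothetical scripts/test: one stable entry point, whatever test
# runner the project happens to use underneath.
import subprocess
import sys

if __name__ == "__main__":
    sys.exit(subprocess.call(["python", "-m", "pytest", *sys.argv[1:]]))
```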
</issue>
<code>
[start of noxfile.py]
1 import nox
2
3 nox.options.stop_on_first_error = True
4 nox.options.reuse_existing_virtualenvs = True
5 nox.options.keywords = "not serve"
6
7 source_files = ("httpx", "tools", "tests", "setup.py", "noxfile.py")
8
9
10 @nox.session
11 def lint(session):
12 session.install(
13 "--upgrade", "autoflake", "black", "flake8", "isort", "seed-isort-config"
14 )
15
16 session.run("autoflake", "--in-place", "--recursive", *source_files)
17 session.run("seed-isort-config", "--application-directories=httpx")
18 session.run("isort", "--project=httpx", "--recursive", "--apply", *source_files)
19 session.run("black", "--target-version=py36", *source_files)
20
21 check(session)
22
23
24 @nox.session
25 def check(session):
26 session.install(
27 "--upgrade", "black", "flake8", "flake8-bugbear", "flake8-pie", "isort", "mypy"
28 )
29
30 session.run("black", "--check", "--diff", "--target-version=py36", *source_files)
31 session.run("flake8", *source_files)
32 session.run("mypy", "httpx")
33 session.run(
34 "isort", "--check", "--diff", "--project=httpx", "--recursive", *source_files
35 )
36
37
38 @nox.session
39 def docs(session):
40 session.install("--upgrade", "mkdocs", "mkdocs-material", "mkautodoc>=0.1.0")
41 session.install("-e", ".")
42 session.run("mkdocs", "build")
43
44
45 @nox.session(reuse_venv=True)
46 def serve(session):
47 session.install("--upgrade", "mkdocs", "mkdocs-material")
48
49 session.run("mkdocs", "serve")
50
51
52 @nox.session(python=["3.6", "3.7", "3.8"])
53 def test(session):
54 session.install("--upgrade", "-r", "test-requirements.txt")
55 session.run("python", "-m", "pytest", *session.posargs)
56
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
deleted file mode 100644
--- a/noxfile.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import nox
-
-nox.options.stop_on_first_error = True
-nox.options.reuse_existing_virtualenvs = True
-nox.options.keywords = "not serve"
-
-source_files = ("httpx", "tools", "tests", "setup.py", "noxfile.py")
-
-
[email protected]
-def lint(session):
- session.install(
- "--upgrade", "autoflake", "black", "flake8", "isort", "seed-isort-config"
- )
-
- session.run("autoflake", "--in-place", "--recursive", *source_files)
- session.run("seed-isort-config", "--application-directories=httpx")
- session.run("isort", "--project=httpx", "--recursive", "--apply", *source_files)
- session.run("black", "--target-version=py36", *source_files)
-
- check(session)
-
-
[email protected]
-def check(session):
- session.install(
- "--upgrade", "black", "flake8", "flake8-bugbear", "flake8-pie", "isort", "mypy"
- )
-
- session.run("black", "--check", "--diff", "--target-version=py36", *source_files)
- session.run("flake8", *source_files)
- session.run("mypy", "httpx")
- session.run(
- "isort", "--check", "--diff", "--project=httpx", "--recursive", *source_files
- )
-
-
[email protected]
-def docs(session):
- session.install("--upgrade", "mkdocs", "mkdocs-material", "mkautodoc>=0.1.0")
- session.install("-e", ".")
- session.run("mkdocs", "build")
-
-
[email protected](reuse_venv=True)
-def serve(session):
- session.install("--upgrade", "mkdocs", "mkdocs-material")
-
- session.run("mkdocs", "serve")
-
-
[email protected](python=["3.6", "3.7", "3.8"])
-def test(session):
- session.install("--upgrade", "-r", "test-requirements.txt")
- session.run("python", "-m", "pytest", *session.posargs)
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\ndeleted file mode 100644\n--- a/noxfile.py\n+++ /dev/null\n@@ -1,55 +0,0 @@\n-import nox\n-\n-nox.options.stop_on_first_error = True\n-nox.options.reuse_existing_virtualenvs = True\n-nox.options.keywords = \"not serve\"\n-\n-source_files = (\"httpx\", \"tools\", \"tests\", \"setup.py\", \"noxfile.py\")\n-\n-\[email protected]\n-def lint(session):\n- session.install(\n- \"--upgrade\", \"autoflake\", \"black\", \"flake8\", \"isort\", \"seed-isort-config\"\n- )\n-\n- session.run(\"autoflake\", \"--in-place\", \"--recursive\", *source_files)\n- session.run(\"seed-isort-config\", \"--application-directories=httpx\")\n- session.run(\"isort\", \"--project=httpx\", \"--recursive\", \"--apply\", *source_files)\n- session.run(\"black\", \"--target-version=py36\", *source_files)\n-\n- check(session)\n-\n-\[email protected]\n-def check(session):\n- session.install(\n- \"--upgrade\", \"black\", \"flake8\", \"flake8-bugbear\", \"flake8-pie\", \"isort\", \"mypy\"\n- )\n-\n- session.run(\"black\", \"--check\", \"--diff\", \"--target-version=py36\", *source_files)\n- session.run(\"flake8\", *source_files)\n- session.run(\"mypy\", \"httpx\")\n- session.run(\n- \"isort\", \"--check\", \"--diff\", \"--project=httpx\", \"--recursive\", *source_files\n- )\n-\n-\[email protected]\n-def docs(session):\n- session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\", \"mkautodoc>=0.1.0\")\n- session.install(\"-e\", \".\")\n- session.run(\"mkdocs\", \"build\")\n-\n-\[email protected](reuse_venv=True)\n-def serve(session):\n- session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\")\n-\n- session.run(\"mkdocs\", \"serve\")\n-\n-\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\n-def test(session):\n- session.install(\"--upgrade\", \"-r\", \"test-requirements.txt\")\n- session.run(\"python\", \"-m\", \"pytest\", *session.posargs)\n", "issue": "scripts as a single top-level point of project automation.\nGenerally across the `encode` projects I've been using [GitHub's \"Scripts to Rule Them All\" pattern](https://github.blog/2015-06-30-scripts-to-rule-them-all/) as a consistent style for project testing, linting, publishing etc...\r\n\r\nIt's benenficial from my POV, because it ends up that we can always just run `scripts/install`, or `scripts/test` from *any* project and expect that to perform the same kind of thing, even if we're using different tooling between projects. (eg. started adopting `nox` in some places, but not yet there accross the board.)\r\n\r\nI'd like to do the same with `httpx`, while still keeping the niceness of `nox` (So eg. reintroduce a `scripts/test`, `scripts/lint` and `scripts/publish`.) 
but figure it's worth raising this for discussion first.\r\n\r\nI'd also be quite keen on having a fast-feedback test suite being the default, with nox's run against multiple environments as an explicit case?\r\n\r\nAny general thoughts here or should I just dive in and take a crack at this?\n", "before_files": [{"content": "import nox\n\nnox.options.stop_on_first_error = True\nnox.options.reuse_existing_virtualenvs = True\nnox.options.keywords = \"not serve\"\n\nsource_files = (\"httpx\", \"tools\", \"tests\", \"setup.py\", \"noxfile.py\")\n\n\[email protected]\ndef lint(session):\n session.install(\n \"--upgrade\", \"autoflake\", \"black\", \"flake8\", \"isort\", \"seed-isort-config\"\n )\n\n session.run(\"autoflake\", \"--in-place\", \"--recursive\", *source_files)\n session.run(\"seed-isort-config\", \"--application-directories=httpx\")\n session.run(\"isort\", \"--project=httpx\", \"--recursive\", \"--apply\", *source_files)\n session.run(\"black\", \"--target-version=py36\", *source_files)\n\n check(session)\n\n\[email protected]\ndef check(session):\n session.install(\n \"--upgrade\", \"black\", \"flake8\", \"flake8-bugbear\", \"flake8-pie\", \"isort\", \"mypy\"\n )\n\n session.run(\"black\", \"--check\", \"--diff\", \"--target-version=py36\", *source_files)\n session.run(\"flake8\", *source_files)\n session.run(\"mypy\", \"httpx\")\n session.run(\n \"isort\", \"--check\", \"--diff\", \"--project=httpx\", \"--recursive\", *source_files\n )\n\n\[email protected]\ndef docs(session):\n session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\", \"mkautodoc>=0.1.0\")\n session.install(\"-e\", \".\")\n session.run(\"mkdocs\", \"build\")\n\n\[email protected](reuse_venv=True)\ndef serve(session):\n session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\")\n\n session.run(\"mkdocs\", \"serve\")\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef test(session):\n session.install(\"--upgrade\", \"-r\", \"test-requirements.txt\")\n session.run(\"python\", \"-m\", \"pytest\", *session.posargs)\n", "path": "noxfile.py"}]} | 1,351 | 539 |
gh_patches_debug_2584 | rasdani/github-patches | git_diff | kivy__python-for-android-2180 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issues introduced by PR #2113 (SDL2)
As said on the Discord #dev channel yesterday, PR #2113 introduces a lot of blocking issues.
These are the results of the tests done by me, @AndreMiras and @opacam:
- `sdl2==2.0.10` has issues that have since been solved by the SDL2 team, so it needs to be bumped to `2.0.12` (see the URL sketch below).
- `sdl2==2.0.12` works but creates freezes at runtime.
- These freezes are definitely related to the new `SDL_LockMutex` / `SDL_UnlockMutex` mechanism they added for concurrency issues.
- Commenting out `SDL_LockMutex` on `Touch`-related events fixes the freeze issue for non-fullscreen apps.
- For fullscreen apps, the patch is also needed for `Resize` etc. events.
I'm providing an attached patch that fixes the issues on top of `2.0.12`; that said, carrying such a patch doesn't seem like a good idea, so it needs some more investigation:
[disable_mutex.txt](https://github.com/kivy/python-for-android/files/4569870/disable_mutex.txt)
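For context on the version bump discussed above, a small illustration of how the recipe's `version` field (see `__init__.py` below) expands into the download URL:

```python
# Illustration only: how the recipe's version/url fields combine.
version = "2.0.10"  # the value currently pinned in the recipe below
url = "https://www.libsdl.org/release/SDL2-{version}.zip"
print(url.format(version=version))
# https://www.libsdl.org/release/SDL2-2.0.10.zip
```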
</issue>
<code>
[start of pythonforandroid/recipes/sdl2/__init__.py]
1 from pythonforandroid.recipe import BootstrapNDKRecipe
2 from pythonforandroid.toolchain import current_directory, shprint
3 import sh
4
5
6 class LibSDL2Recipe(BootstrapNDKRecipe):
7 version = "2.0.10"
8 url = "https://www.libsdl.org/release/SDL2-{version}.zip"
9 md5sum = "6b2e9a4a2faba4ff277062cf669724f4"
10
11 dir_name = 'SDL'
12
13 depends = ['sdl2_image', 'sdl2_mixer', 'sdl2_ttf']
14
15 def get_recipe_env(self, arch=None, with_flags_in_cc=True, with_python=True):
16 env = super().get_recipe_env(
17 arch=arch, with_flags_in_cc=with_flags_in_cc, with_python=with_python)
18 env['APP_ALLOW_MISSING_DEPS'] = 'true'
19 return env
20
21 def build_arch(self, arch):
22 env = self.get_recipe_env(arch)
23
24 with current_directory(self.get_jni_dir()):
25 shprint(
26 sh.ndk_build,
27 "V=1",
28 "NDK_DEBUG=" + ("1" if self.ctx.build_as_debuggable else "0"),
29 _env=env
30 )
31
32
33 recipe = LibSDL2Recipe()
34
[end of pythonforandroid/recipes/sdl2/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/sdl2/__init__.py b/pythonforandroid/recipes/sdl2/__init__.py
--- a/pythonforandroid/recipes/sdl2/__init__.py
+++ b/pythonforandroid/recipes/sdl2/__init__.py
@@ -4,9 +4,9 @@
class LibSDL2Recipe(BootstrapNDKRecipe):
- version = "2.0.10"
- url = "https://www.libsdl.org/release/SDL2-{version}.zip"
- md5sum = "6b2e9a4a2faba4ff277062cf669724f4"
+ version = "2.0.9"
+ url = "https://www.libsdl.org/release/SDL2-{version}.tar.gz"
+ md5sum = 'f2ecfba915c54f7200f504d8b48a5dfe'
dir_name = 'SDL'
| {"golden_diff": "diff --git a/pythonforandroid/recipes/sdl2/__init__.py b/pythonforandroid/recipes/sdl2/__init__.py\n--- a/pythonforandroid/recipes/sdl2/__init__.py\n+++ b/pythonforandroid/recipes/sdl2/__init__.py\n@@ -4,9 +4,9 @@\n \n \n class LibSDL2Recipe(BootstrapNDKRecipe):\n- version = \"2.0.10\"\n- url = \"https://www.libsdl.org/release/SDL2-{version}.zip\"\n- md5sum = \"6b2e9a4a2faba4ff277062cf669724f4\"\n+ version = \"2.0.9\"\n+ url = \"https://www.libsdl.org/release/SDL2-{version}.tar.gz\"\n+ md5sum = 'f2ecfba915c54f7200f504d8b48a5dfe'\n \n dir_name = 'SDL'\n", "issue": "Issues introduced by PR #2113 (SDL2)\nAs said on Discord #dev channel yesterday, PR #2113 introduces a lot of blocking issues.\r\n\r\nThese are the results of the tests done by me, @AndreMiras and @opacam :\r\n\r\n- `sdl2==2.0.10` have issues that have been solved by the SDL2 team, so it needs to be bumped to `2.0.12`.\r\n\r\n- `sdl2==2.0.12` works but create freezes during runtime.\r\n\r\n- These freezes are definitely related to the new `SDL_LockMutex` / `SDL_UnlockMutex` mechanism they added for concurrency issues.\r\n\r\n- Commenting `SDL_LockMutex` on `Touch` related events fixes the freeze issue for non-fullscreen apps.\r\n\r\n- On fullscreen apps, the patch it's also needed on `Resize, .. etc` events.\r\n\r\nI'm providing an attached patch that fixes the issues on top of `2.0.12`, btw seems not a good idea to do that, so it needs some more investigation:\r\n\r\n[disable_mutex.txt](https://github.com/kivy/python-for-android/files/4569870/disable_mutex.txt)\r\n\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import BootstrapNDKRecipe\nfrom pythonforandroid.toolchain import current_directory, shprint\nimport sh\n\n\nclass LibSDL2Recipe(BootstrapNDKRecipe):\n version = \"2.0.10\"\n url = \"https://www.libsdl.org/release/SDL2-{version}.zip\"\n md5sum = \"6b2e9a4a2faba4ff277062cf669724f4\"\n\n dir_name = 'SDL'\n\n depends = ['sdl2_image', 'sdl2_mixer', 'sdl2_ttf']\n\n def get_recipe_env(self, arch=None, with_flags_in_cc=True, with_python=True):\n env = super().get_recipe_env(\n arch=arch, with_flags_in_cc=with_flags_in_cc, with_python=with_python)\n env['APP_ALLOW_MISSING_DEPS'] = 'true'\n return env\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n\n with current_directory(self.get_jni_dir()):\n shprint(\n sh.ndk_build,\n \"V=1\",\n \"NDK_DEBUG=\" + (\"1\" if self.ctx.build_as_debuggable else \"0\"),\n _env=env\n )\n\n\nrecipe = LibSDL2Recipe()\n", "path": "pythonforandroid/recipes/sdl2/__init__.py"}]} | 1,157 | 228 |
gh_patches_debug_16086 | rasdani/github-patches | git_diff | nvaccess__nvda-13382 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Recent alpha versions crash Notepad++ versions 8.2.x and below
### Steps to reproduce:
1. Use NVDA alpha version nvda_snapshot_alpha-24771,a1a6598b.exe or above.
2. Open Notepad++ v8.1.x or v8.2.x 64-bit.
3. Press up/down a few times.
### Actual behavior:
Notepad++ crashes. No error message is printed in NVDA log.
### Expected behavior:
Notepad++ should not crash.
### System configuration
#### NVDA installed/portable/running from source:
Portable alpha nvda_snapshot_alpha-24771,a1a6598b.exe
#### NVDA version:
nvda_snapshot_alpha-24771,a1a6598b.exe
#### Windows version:
Reproduced on both Windows 10 and 11 64-bit.
#### Name and version of other software in use when reproducing the issue:
Reproduced using either of these two versions (64-bit in both cases):
- Notepad++ v8.1.9.3: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.1.9.3/npp.8.1.9.3.portable.x64.zip
- Notepad++ v8.2.1: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.2.1/npp.8.2.1.portable.x64.zip
#### Other information about your system:
N/A
### Other questions
#### Does the issue still occur after restarting your computer?
Yes
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
I identified that commit a1a6598b19f71bd108bef64373dd35c89c14cd4a is causing this; therefore cc: @lukaszgo1.
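For reference, a sketch of why the version check introduced by that commit misfires. Per the version-reporting behaviour noted in the eventual fix, Notepad++ concatenates minor/patch/build, so "8.2.1" arrives as productVersion "8.21":

```python
# Sketch of the check from _get_TextInfo (listed below) run against the
# productVersion strings Notepad++ actually reports.
for product_version in ("8.3", "8.21", "8.192"):
    major, minor, *_ = product_version.split(".")
    print(product_version, int(major) >= 8 and int(minor) >= 3)
# 8.3 True
# 8.21 True   <- 8.2.1 wrongly treated as >= 8.3
# 8.192 True  <- 8.1.9.2 wrongly treated as >= 8.3
```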
#### If NVDA add-ons are disabled, is your problem still occurring?
Yes
#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?
Yes
</issue>
<code>
[start of source/appModules/notepadPlusPlus.py]
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2022 NV Access Limited, Łukasz Golonka
3 # This file may be used under the terms of the GNU General Public License, version 2 or later.
4 # For more details see: https://www.gnu.org/licenses/gpl-2.0.html
5
6 """AppModule for Notepad++.
7 Do not rename! The executable file for Notepad++ is named `notepad++` and `+` is not a valid character
8 in Python's import statements.
9 This module is mapped to the right binary separately
10 and the current name makes it possible to expose it from `nvdaBuiltin` for add-on developers.
11 """
12
13 import ctypes
14
15 import appModuleHandler
16 import NVDAObjects.window.scintilla as ScintillaBase
17
18
19 class CharacterRangeStructLongLong(ctypes.Structure):
20 """By default character ranges in Scintilla are represented by longs.
21 However long is not big enough for files over 2 GB,
22 therefore in 64-bit builds of Notepad++ 8.3 and later
23 these ranges are represented by longlong.
24 """
25 _fields_ = [
26 ('cpMin', ctypes.c_longlong),
27 ('cpMax', ctypes.c_longlong),
28 ]
29
30
31 class ScintillaTextInfoNpp83(ScintillaBase.ScintillaTextInfo):
32 """Text info for 64-bit builds of Notepad++ 8.3 and later.
33 """
34
35 class TextRangeStruct(ctypes.Structure):
36 _fields_ = [
37 ('chrg', CharacterRangeStructLongLong),
38 ('lpstrText', ctypes.c_char_p),
39 ]
40
41
42 class NppEdit(ScintillaBase.Scintilla):
43
44 name = None # The name of the editor is not useful.
45
46 def _get_TextInfo(self):
47 if self.appModule.is64BitProcess:
48 appVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(".")
49 if int(appVerMajor) >= 8 and int(appVerMinor) >= 3:
50 return ScintillaTextInfoNpp83
51 return super().TextInfo
52
53
54 class AppModule(appModuleHandler.AppModule):
55
56 def chooseNVDAObjectOverlayClasses(self, obj, clsList):
57 if obj.windowClassName == "Scintilla" and obj.windowControlID == 0:
58 clsList.insert(0, NppEdit)
59
[end of source/appModules/notepadPlusPlus.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/appModules/notepadPlusPlus.py b/source/appModules/notepadPlusPlus.py
--- a/source/appModules/notepadPlusPlus.py
+++ b/source/appModules/notepadPlusPlus.py
@@ -46,7 +46,12 @@
def _get_TextInfo(self):
if self.appModule.is64BitProcess:
appVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(".")
- if int(appVerMajor) >= 8 and int(appVerMinor) >= 3:
+ # When retrieving the version, Notepad++ concatenates
+ # minor, patch, build in major.minor.patch.build to the form of major.minor
+ # https://github.com/notepad-plus-plus/npp-usermanual/blob/master/content/docs/plugin-communication.md#nppm_getnppversion
+ # e.g. '8.3' for '8.3', '8.21' for '8.2.1' and '8.192' for '8.1.9.2'.
+ # Therefore, only use the first digit of the minor version to match against version 8.3 or later.
+ if int(appVerMajor) >= 8 and int(appVerMinor[0]) >= 3:
return ScintillaTextInfoNpp83
return super().TextInfo
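A minimal sketch of the comparison this one-character change alters, using the version strings quoted in the diff's comment (the loop is illustrative only, not repository code):

```python
# Notepad++ reports "8.2.1" as "8.21" and "8.1.9.3" as "8.193", so the minor
# component can carry patch/build digits. Comparing the whole component made
# pre-8.3 builds look like 8.3 or newer.
for reported in ("8.3", "8.21", "8.193"):
    major, minor, *_ = reported.split(".")
    buggy = int(major) >= 8 and int(minor) >= 3      # "193" -> 193 >= 3, wrongly True
    fixed = int(major) >= 8 and int(minor[0]) >= 3   # "1"   -> 1 >= 3, correctly False
    print(reported, buggy, fixed)
```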
| {"golden_diff": "diff --git a/source/appModules/notepadPlusPlus.py b/source/appModules/notepadPlusPlus.py\n--- a/source/appModules/notepadPlusPlus.py\n+++ b/source/appModules/notepadPlusPlus.py\n@@ -46,7 +46,12 @@\n \tdef _get_TextInfo(self):\r\n \t\tif self.appModule.is64BitProcess:\r\n \t\t\tappVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(\".\")\r\n-\t\t\tif int(appVerMajor) >= 8 and int(appVerMinor) >= 3:\r\n+\t\t\t# When retrieving the version, Notepad++ concatenates\r\n+\t\t\t# minor, patch, build in major.minor.patch.build to the form of major.minor\r\n+\t\t\t# https://github.com/notepad-plus-plus/npp-usermanual/blob/master/content/docs/plugin-communication.md#nppm_getnppversion\r\n+\t\t\t# e.g. '8.3' for '8.3', '8.21' for '8.2.1' and '8.192' for '8.1.9.2'.\r\n+\t\t\t# Therefore, only use the first digit of the minor version to match against version 8.3 or later.\r\n+\t\t\tif int(appVerMajor) >= 8 and int(appVerMinor[0]) >= 3:\r\n \t\t\t\treturn ScintillaTextInfoNpp83\r\n \t\treturn super().TextInfo\n", "issue": "Recent alpha versions crash Notepad++ versions 8.2.x and below\n<!-- Please read the text in this edit field before filling it in.\r\nPlease thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.\r\nIssues may be closed if the required information is not present.\r\nhttps://github.com/nvaccess/nvda/blob/master/devDocs/githubIssueTemplateExplanationAndExamples.md\r\nPlease also note that the NVDA project has a Citizen and Contributor Code of Conduct which can be found at https://github.com/nvaccess/nvda/blob/master/CODE_OF_CONDUCT.md. NV Access expects that all contributors and other community members read and abide by the rules set out in this document while participating or contributing to this project. This includes creating or commenting on issues and pull requests. \r\n\r\nEach of the questions and sections below start with multiple hash symbols (#). Place your answers and information on the blank line below each question.\r\n-->\r\n\r\n### Steps to reproduce:\r\n1. Use NVDA alpha version nvda_snapshot_alpha-24771,a1a6598b.exe or above.\r\n2. Open Notepad++ v8.1.x or v8.2.x 64-bit.\r\n3.Press up/down a few times.\r\n\r\n### Actual behavior:\r\nNotepad++ crashes. No error message is printed in NVDA log.\r\n### Expected behavior:\r\nNotepad++ should not crash.\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nPortable alpha nvda_snapshot_alpha-24771,a1a6598b.exe\r\n#### NVDA version:\r\nnvda_snapshot_alpha-24771,a1a6598b.exe\r\n\r\n#### Windows version:\r\nReproduced on both Windows 10 and 11 64-bit.\r\n#### Name and version of other software in use when reproducing the issue:\r\nReproduced using either of these two versions (64-bit in both cases):\r\n- Notepad++ v8.1.9.3: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.1.9.3/npp.8.1.9.3.portable.x64.zip\r\n- Notepad++ v8.2.1: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.2.1/npp.8.2.1.portable.x64.zip\r\n#### Other information about your system:\r\nN/A\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nYes\r\n#### Have you tried any other versions of NVDA? 
If so, please report their behaviors.\r\nIdentified that commit a1a6598b19f71bd108bef64373dd35c89c14cd4a is causing this, therefore cc: @lukaszgo1.\r\n#### If NVDA add-ons are disabled, is your problem still occurring?\r\nYes\r\n#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?\r\nYes\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2022 NV Access Limited, \u0141ukasz Golonka\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\n\"\"\"AppModule for Notepad++.\r\nDo not rename! The executable file for Notepad++ is named `notepad++` and `+` is not a valid character\r\nin Python's import statements.\r\nThis module is mapped to the right binary separately\r\nand the current name makes it possible to expose it from `nvdaBuiltin` for add-on developers.\r\n\"\"\"\r\n\r\nimport ctypes\r\n\r\nimport appModuleHandler\r\nimport NVDAObjects.window.scintilla as ScintillaBase\r\n\r\n\r\nclass CharacterRangeStructLongLong(ctypes.Structure):\r\n\t\"\"\"By default character ranges in Scintilla are represented by longs.\r\n\tHowever long is not big enough for files over 2 GB,\r\n\ttherefore in 64-bit builds of Notepad++ 8.3 and later\r\n\tthese ranges are represented by longlong.\r\n\t\"\"\"\r\n\t_fields_ = [\r\n\t\t('cpMin', ctypes.c_longlong),\r\n\t\t('cpMax', ctypes.c_longlong),\r\n\t]\r\n\r\n\r\nclass ScintillaTextInfoNpp83(ScintillaBase.ScintillaTextInfo):\r\n\t\"\"\"Text info for 64-bit builds of Notepad++ 8.3 and later.\r\n\t\"\"\"\r\n\r\n\tclass TextRangeStruct(ctypes.Structure):\r\n\t\t_fields_ = [\r\n\t\t\t('chrg', CharacterRangeStructLongLong),\r\n\t\t\t('lpstrText', ctypes.c_char_p),\r\n\t\t]\r\n\r\n\r\nclass NppEdit(ScintillaBase.Scintilla):\r\n\r\n\tname = None # The name of the editor is not useful.\r\n\r\n\tdef _get_TextInfo(self):\r\n\t\tif self.appModule.is64BitProcess:\r\n\t\t\tappVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(\".\")\r\n\t\t\tif int(appVerMajor) >= 8 and int(appVerMinor) >= 3:\r\n\t\t\t\treturn ScintillaTextInfoNpp83\r\n\t\treturn super().TextInfo\r\n\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n\t\tif obj.windowClassName == \"Scintilla\" and obj.windowControlID == 0:\r\n\t\t\tclsList.insert(0, NppEdit)\r\n", "path": "source/appModules/notepadPlusPlus.py"}]} | 1,841 | 307 |
gh_patches_debug_3407 | rasdani/github-patches | git_diff | vllm-project__vllm-1855 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
benchmark_latency.py will hang when --batchsize=1 and --n=2
benchmark_latency.py will hang if setting --batchsize=1 and --n=2or4or8.
should max_num_seqs been set as args.batch_size * args.n ?
https://github.com/vllm-project/vllm/blob/main/benchmarks/benchmark_latency.py#L23
</issue>
<code>
[start of benchmarks/benchmark_latency.py]
1 """Benchmark the latency of processing a single batch of requests."""
2 import argparse
3 import time
4
5 import numpy as np
6 import torch
7 from tqdm import tqdm
8
9 from vllm import LLM, SamplingParams
10
11
12 def main(args: argparse.Namespace):
13 print(args)
14
15 # NOTE(woosuk): If the request cannot be processed in a single batch,
16 # the engine will automatically process the request in multiple batches.
17 llm = LLM(
18 model=args.model,
19 tokenizer=args.tokenizer,
20 quantization=args.quantization,
21 tensor_parallel_size=args.tensor_parallel_size,
22 max_num_seqs=args.batch_size,
23 trust_remote_code=args.trust_remote_code,
24 dtype=args.dtype,
25 )
26
27 sampling_params = SamplingParams(
28 n=args.n,
29 temperature=0.0 if args.use_beam_search else 1.0,
30 top_p=1.0,
31 use_beam_search=args.use_beam_search,
32 ignore_eos=True,
33 max_tokens=args.output_len,
34 )
35 print(sampling_params)
36 dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size
37
38 def run_to_completion(profile: bool = False):
39 if profile:
40 with torch.profiler.profile(activities=[
41 torch.profiler.ProfilerActivity.CPU,
42 torch.profiler.ProfilerActivity.CUDA,
43 ]) as p:
44 llm.generate(prompt_token_ids=dummy_prompt_token_ids,
45 sampling_params=sampling_params,
46 use_tqdm=False)
47 print(p.key_averages())
48 else:
49 start_time = time.perf_counter()
50 llm.generate(prompt_token_ids=dummy_prompt_token_ids,
51 sampling_params=sampling_params,
52 use_tqdm=False)
53 end_time = time.perf_counter()
54 latency = end_time - start_time
55 return latency
56
57 print("Warming up...")
58 run_to_completion(profile=False)
59
60 if args.profile:
61 print("Profiling...")
62 run_to_completion(profile=True)
63 return
64
65 # Benchmark.
66 latencies = []
67 for _ in tqdm(range(args.num_iters), desc="Profiling iterations"):
68 latencies.append(run_to_completion(profile=False))
69 print(f'Avg latency: {np.mean(latencies)} seconds')
70
71
72 if __name__ == '__main__':
73 parser = argparse.ArgumentParser(
74 description='Benchmark the latency of processing a single batch of '
75 'requests till completion.')
76 parser.add_argument('--model', type=str, default='facebook/opt-125m')
77 parser.add_argument('--tokenizer', type=str, default=None)
78 parser.add_argument('--quantization',
79 '-q',
80 choices=['awq', 'squeezellm', None],
81 default=None)
82 parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)
83 parser.add_argument('--input-len', type=int, default=32)
84 parser.add_argument('--output-len', type=int, default=128)
85 parser.add_argument('--batch-size', type=int, default=8)
86 parser.add_argument('--n',
87 type=int,
88 default=1,
89 help='Number of generated sequences per prompt.')
90 parser.add_argument('--use-beam-search', action='store_true')
91 parser.add_argument('--num-iters',
92 type=int,
93 default=3,
94 help='Number of iterations to run.')
95 parser.add_argument('--trust-remote-code',
96 action='store_true',
97 help='trust remote code from huggingface')
98 parser.add_argument(
99 '--dtype',
100 type=str,
101 default='auto',
102 choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
103 help='data type for model weights and activations. '
104 'The "auto" option will use FP16 precision '
105 'for FP32 and FP16 models, and BF16 precision '
106 'for BF16 models.')
107 parser.add_argument(
108 '--profile',
109 action='store_true',
110 help='profile the generation process of a single batch')
111 args = parser.parse_args()
112 main(args)
113
[end of benchmarks/benchmark_latency.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py
--- a/benchmarks/benchmark_latency.py
+++ b/benchmarks/benchmark_latency.py
@@ -19,7 +19,6 @@
tokenizer=args.tokenizer,
quantization=args.quantization,
tensor_parallel_size=args.tensor_parallel_size,
- max_num_seqs=args.batch_size,
trust_remote_code=args.trust_remote_code,
dtype=args.dtype,
)
| {"golden_diff": "diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py\n--- a/benchmarks/benchmark_latency.py\n+++ b/benchmarks/benchmark_latency.py\n@@ -19,7 +19,6 @@\n tokenizer=args.tokenizer,\n quantization=args.quantization,\n tensor_parallel_size=args.tensor_parallel_size,\n- max_num_seqs=args.batch_size,\n trust_remote_code=args.trust_remote_code,\n dtype=args.dtype,\n )\n", "issue": "benchmark_latency.py will hang when --batchsize=1 and --n=2\nbenchmark_latency.py will hang if setting --batchsize=1 and --n=2or4or8.\r\nshould max_num_seqs been set as args.batch_size * args.n \uff1f\r\nhttps://github.com/vllm-project/vllm/blob/main/benchmarks/benchmark_latency.py#L23\n", "before_files": [{"content": "\"\"\"Benchmark the latency of processing a single batch of requests.\"\"\"\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom vllm import LLM, SamplingParams\n\n\ndef main(args: argparse.Namespace):\n print(args)\n\n # NOTE(woosuk): If the request cannot be processed in a single batch,\n # the engine will automatically process the request in multiple batches.\n llm = LLM(\n model=args.model,\n tokenizer=args.tokenizer,\n quantization=args.quantization,\n tensor_parallel_size=args.tensor_parallel_size,\n max_num_seqs=args.batch_size,\n trust_remote_code=args.trust_remote_code,\n dtype=args.dtype,\n )\n\n sampling_params = SamplingParams(\n n=args.n,\n temperature=0.0 if args.use_beam_search else 1.0,\n top_p=1.0,\n use_beam_search=args.use_beam_search,\n ignore_eos=True,\n max_tokens=args.output_len,\n )\n print(sampling_params)\n dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size\n\n def run_to_completion(profile: bool = False):\n if profile:\n with torch.profiler.profile(activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ]) as p:\n llm.generate(prompt_token_ids=dummy_prompt_token_ids,\n sampling_params=sampling_params,\n use_tqdm=False)\n print(p.key_averages())\n else:\n start_time = time.perf_counter()\n llm.generate(prompt_token_ids=dummy_prompt_token_ids,\n sampling_params=sampling_params,\n use_tqdm=False)\n end_time = time.perf_counter()\n latency = end_time - start_time\n return latency\n\n print(\"Warming up...\")\n run_to_completion(profile=False)\n\n if args.profile:\n print(\"Profiling...\")\n run_to_completion(profile=True)\n return\n\n # Benchmark.\n latencies = []\n for _ in tqdm(range(args.num_iters), desc=\"Profiling iterations\"):\n latencies.append(run_to_completion(profile=False))\n print(f'Avg latency: {np.mean(latencies)} seconds')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Benchmark the latency of processing a single batch of '\n 'requests till completion.')\n parser.add_argument('--model', type=str, default='facebook/opt-125m')\n parser.add_argument('--tokenizer', type=str, default=None)\n parser.add_argument('--quantization',\n '-q',\n choices=['awq', 'squeezellm', None],\n default=None)\n parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)\n parser.add_argument('--input-len', type=int, default=32)\n parser.add_argument('--output-len', type=int, default=128)\n parser.add_argument('--batch-size', type=int, default=8)\n parser.add_argument('--n',\n type=int,\n default=1,\n help='Number of generated sequences per prompt.')\n parser.add_argument('--use-beam-search', action='store_true')\n parser.add_argument('--num-iters',\n type=int,\n default=3,\n help='Number of iterations 
to run.')\n parser.add_argument('--trust-remote-code',\n action='store_true',\n help='trust remote code from huggingface')\n parser.add_argument(\n '--dtype',\n type=str,\n default='auto',\n choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],\n help='data type for model weights and activations. '\n 'The \"auto\" option will use FP16 precision '\n 'for FP32 and FP16 models, and BF16 precision '\n 'for BF16 models.')\n parser.add_argument(\n '--profile',\n action='store_true',\n help='profile the generation process of a single batch')\n args = parser.parse_args()\n main(args)\n", "path": "benchmarks/benchmark_latency.py"}]} | 1,715 | 101 |
gh_patches_debug_2566 | rasdani/github-patches | git_diff | getpelican__pelican-2632 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Markdown as an (optional) dependency
Since its inception, this project has taken the collective position that since not everyone uses Markdown, the `markdown` package should not be a dependency of the project and should instead be manually installed by users who want to use Markdown.
On the other hand, the `docutils` package that parses reStructuredText markup has always been a dependency. Particularly given Markdown's widespread popularity, I cannot come up with a compelling argument to support the continuation of this discrepancy.
This project is littered with issues caused by environments with missing `markdown` packages. Many folks have encountered frustration when trying to build their Markdown-content sites, only to be met with an (often inscrutable) error message. And that's just the folks who have taken the time to file issues! Imagine how many threw up their hands and just moved on to another solution…
Adding Markdown as a dependency would have significant benefits, including simplifying installation instructions, reducing frustration, and minimizing support request load. So to take another approach at this topic… What are the reasons for *not* adding Markdown as a dependency, and do you think they are sufficiently important to outweigh the concerns raised above?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import sys
3 from io import open
4 from os import walk
5 from os.path import join, relpath
6
7 from setuptools import setup
8
9
10 version = "4.1.2"
11
12 requires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments', 'docutils',
13 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4',
14 'python-dateutil']
15
16 entry_points = {
17 'console_scripts': [
18 'pelican = pelican.__main__:main',
19 'pelican-import = pelican.tools.pelican_import:main',
20 'pelican-quickstart = pelican.tools.pelican_quickstart:main',
21 'pelican-themes = pelican.tools.pelican_themes:main'
22 ]
23 }
24
25 README = open('README.rst', encoding='utf-8').read()
26 CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
27
28 description = u'\n'.join([README, CHANGELOG])
29 if sys.version_info.major < 3:
30 description = description.encode('utf-8')
31
32 setup(
33 name='pelican',
34 version=version,
35 url='https://getpelican.com/',
36 author='Justin Mayer',
37 author_email='[email protected]',
38 description="Static site generator supporting reStructuredText and "
39 "Markdown source content.",
40 project_urls={
41 'Documentation': 'https://docs.getpelican.com/',
42 'Funding': 'https://donate.getpelican.com/',
43 'Source': 'https://github.com/getpelican/pelican',
44 'Tracker': 'https://github.com/getpelican/pelican/issues',
45 },
46 keywords='static web site generator SSG reStructuredText Markdown',
47 license='AGPLv3',
48 long_description=description,
49 packages=['pelican', 'pelican.tools'],
50 package_data={
51 # we manually collect the package data, as opposed to using,
52 # include_package_data=True because we don't want the tests to be
53 # included automatically as package data (MANIFEST.in is too greedy)
54 'pelican': [relpath(join(root, name), 'pelican')
55 for root, _, names in walk(join('pelican', 'themes'))
56 for name in names],
57 'pelican.tools': [relpath(join(root, name), join('pelican', 'tools'))
58 for root, _, names in walk(join('pelican',
59 'tools',
60 'templates'))
61 for name in names],
62 },
63 install_requires=requires,
64 entry_points=entry_points,
65 classifiers=[
66 'Development Status :: 5 - Production/Stable',
67 'Environment :: Console',
68 'Framework :: Pelican',
69 'License :: OSI Approved :: GNU Affero General Public License v3',
70 'Operating System :: OS Independent',
71 'Programming Language :: Python :: 2',
72 'Programming Language :: Python :: 2.7',
73 'Programming Language :: Python :: 3',
74 'Programming Language :: Python :: 3.5',
75 'Programming Language :: Python :: 3.6',
76 'Programming Language :: Python :: 3.7',
77 'Programming Language :: Python :: Implementation :: CPython',
78 'Topic :: Internet :: WWW/HTTP',
79 'Topic :: Software Development :: Libraries :: Python Modules',
80 ],
81 test_suite='pelican.tests',
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,9 @@
for name in names],
},
install_requires=requires,
+ extras_require={
+ 'Markdown': ['markdown~=3.1.1']
+ },
entry_points=entry_points,
classifiers=[
'Development Status :: 5 - Production/Stable',
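A brief, hedged illustration of what the `extras_require` addition means at install time (standard setuptools/pip extras behaviour; the guarded import below is a generic pattern, not pelican code):

```python
# pip install pelican               -> core only; reST works, Markdown does not
# pip install "pelican[Markdown]"   -> additionally pulls in markdown~=3.1.1
#
# Code that wants Markdown to stay optional typically degrades like this:
try:
    import markdown  # present only if the extra (or markdown itself) was installed
except ImportError:
    markdown = None
print("Markdown support available:", markdown is not None)
```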
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,9 @@\n for name in names],\n },\n install_requires=requires,\n+ extras_require={\n+ 'Markdown': ['markdown~=3.1.1']\n+ },\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n", "issue": "Add Markdown as an (optional) dependency\nSince its inception, this project has taken the collective position that since not everyone uses Markdown, the `markdown` package should not be a dependency of the project and should instead be manually installed by users who want to use Markdown.\r\n\r\nOn the other hand, the `docutils` package that parses reStructuredText markup has always been a dependency. Particularly given Markdown's widespread popularity, I cannot come up with a compelling argument to support the continuation of this discrepancy.\r\n\r\nThis project is littered with issues caused by environments with missing `markdown` packages. Many folks have encountered frustration when trying to build their Markdown-content sites, only to be met with an (often inscrutable) error message. And that's just the folks who have taken the time to file issues! Imagine how many threw up their hands and just moved on to another solution\u2026\r\n\r\nAdding Markdown as a dependency would have significant benefits, including simplifying installation instructions, reducing frustration, and minimizing support request load. So to take another approach at this topic\u2026\u00a0What are the reasons for *not* adding Markdown as a dependency, and do you think they are sufficiently important to out-weigh the concerns raised above?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport sys\nfrom io import open\nfrom os import walk\nfrom os.path import join, relpath\n\nfrom setuptools import setup\n\n\nversion = \"4.1.2\"\n\nrequires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments', 'docutils',\n 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4',\n 'python-dateutil']\n\nentry_points = {\n 'console_scripts': [\n 'pelican = pelican.__main__:main',\n 'pelican-import = pelican.tools.pelican_import:main',\n 'pelican-quickstart = pelican.tools.pelican_quickstart:main',\n 'pelican-themes = pelican.tools.pelican_themes:main'\n ]\n}\n\nREADME = open('README.rst', encoding='utf-8').read()\nCHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n\ndescription = u'\\n'.join([README, CHANGELOG])\nif sys.version_info.major < 3:\n description = description.encode('utf-8')\n\nsetup(\n name='pelican',\n version=version,\n url='https://getpelican.com/',\n author='Justin Mayer',\n author_email='[email protected]',\n description=\"Static site generator supporting reStructuredText and \"\n \"Markdown source content.\",\n project_urls={\n 'Documentation': 'https://docs.getpelican.com/',\n 'Funding': 'https://donate.getpelican.com/',\n 'Source': 'https://github.com/getpelican/pelican',\n 'Tracker': 'https://github.com/getpelican/pelican/issues',\n },\n keywords='static web site generator SSG reStructuredText Markdown',\n license='AGPLv3',\n long_description=description,\n packages=['pelican', 'pelican.tools'],\n package_data={\n # we manually collect the package data, as opposed to using,\n # include_package_data=True because we don't want the tests to be\n # included automatically as package data (MANIFEST.in is too greedy)\n 'pelican': [relpath(join(root, name), 'pelican')\n for root, _, names in walk(join('pelican', 'themes'))\n for name in names],\n 'pelican.tools': 
[relpath(join(root, name), join('pelican', 'tools'))\n for root, _, names in walk(join('pelican',\n 'tools',\n 'templates'))\n for name in names],\n },\n install_requires=requires,\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Pelican',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n test_suite='pelican.tests',\n)\n", "path": "setup.py"}]} | 1,667 | 91 |
gh_patches_debug_26678 | rasdani/github-patches | git_diff | archlinux__archinstall-2071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Localizations revert to defaults on entry of "Locales" menu
If localizations are changed from the defaults, they revert to the defaults upon entry of the _Locales_ menu.
</issue>
<code>
[start of archinstall/lib/locale/locale_menu.py]
1 from dataclasses import dataclass
2 from typing import Dict, Any, TYPE_CHECKING, Optional
3
4 from .locale import set_kb_layout, list_keyboard_languages, list_locales
5 from ..menu import Selector, AbstractSubMenu, MenuSelectionType, Menu
6
7 if TYPE_CHECKING:
8 _: Any
9
10
11 @dataclass
12 class LocaleConfiguration:
13 kb_layout: str
14 sys_lang: str
15 sys_enc: str
16
17 @staticmethod
18 def default() -> 'LocaleConfiguration':
19 return LocaleConfiguration('us', 'en_US', 'UTF-8')
20
21 def json(self) -> Dict[str, str]:
22 return {
23 'kb_layout': self.kb_layout,
24 'sys_lang': self.sys_lang,
25 'sys_enc': self.sys_enc
26 }
27
28 @classmethod
29 def _load_config(cls, config: 'LocaleConfiguration', args: Dict[str, Any]) -> 'LocaleConfiguration':
30 if 'sys_lang' in args:
31 config.sys_lang = args['sys_lang']
32 if 'sys_enc' in args:
33 config.sys_enc = args['sys_enc']
34 if 'kb_layout' in args:
35 config.kb_layout = args['kb_layout']
36
37 return config
38
39 @classmethod
40 def parse_arg(cls, args: Dict[str, Any]) -> 'LocaleConfiguration':
41 default = cls.default()
42
43 if 'locale_config' in args:
44 default = cls._load_config(default, args['locale_config'])
45 else:
46 default = cls._load_config(default, args)
47
48 return default
49
50
51 class LocaleMenu(AbstractSubMenu):
52 def __init__(
53 self,
54 data_store: Dict[str, Any],
55 locele_conf: LocaleConfiguration
56 ):
57 self._preset = locele_conf
58 super().__init__(data_store=data_store)
59
60 def setup_selection_menu_options(self):
61 self._menu_options['keyboard-layout'] = \
62 Selector(
63 _('Keyboard layout'),
64 lambda preset: self._select_kb_layout(preset),
65 default='us',
66 enabled=True)
67 self._menu_options['sys-language'] = \
68 Selector(
69 _('Locale language'),
70 lambda preset: select_locale_lang(preset),
71 default='en_US',
72 enabled=True)
73 self._menu_options['sys-encoding'] = \
74 Selector(
75 _('Locale encoding'),
76 lambda preset: select_locale_enc(preset),
77 default='UTF-8',
78 enabled=True)
79
80 def run(self, allow_reset: bool = True) -> LocaleConfiguration:
81 super().run(allow_reset=allow_reset)
82
83 return LocaleConfiguration(
84 self._data_store['keyboard-layout'],
85 self._data_store['sys-language'],
86 self._data_store['sys-encoding']
87 )
88
89 def _select_kb_layout(self, preset: Optional[str]) -> Optional[str]:
90 kb_lang = select_kb_layout(preset)
91 if kb_lang:
92 set_kb_layout(kb_lang)
93 return kb_lang
94
95
96 def select_locale_lang(preset: Optional[str] = None) -> Optional[str]:
97 locales = list_locales()
98 locale_lang = set([locale.split()[0] for locale in locales])
99
100 choice = Menu(
101 _('Choose which locale language to use'),
102 list(locale_lang),
103 sort=True,
104 preset_values=preset
105 ).run()
106
107 match choice.type_:
108 case MenuSelectionType.Selection: return choice.single_value
109 case MenuSelectionType.Skip: return preset
110
111 return None
112
113
114 def select_locale_enc(preset: Optional[str] = None) -> Optional[str]:
115 locales = list_locales()
116 locale_enc = set([locale.split()[1] for locale in locales])
117
118 choice = Menu(
119 _('Choose which locale encoding to use'),
120 list(locale_enc),
121 sort=True,
122 preset_values=preset
123 ).run()
124
125 match choice.type_:
126 case MenuSelectionType.Selection: return choice.single_value
127 case MenuSelectionType.Skip: return preset
128
129 return None
130
131
132 def select_kb_layout(preset: Optional[str] = None) -> Optional[str]:
133 """
134 Asks the user to select a language
135 Usually this is combined with :ref:`archinstall.list_keyboard_languages`.
136
137 :return: The language/dictionary key of the selected language
138 :rtype: str
139 """
140 kb_lang = list_keyboard_languages()
141 # sort alphabetically and then by length
142 sorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)
143
144 choice = Menu(
145 _('Select keyboard layout'),
146 sorted_kb_lang,
147 preset_values=preset,
148 sort=False
149 ).run()
150
151 match choice.type_:
152 case MenuSelectionType.Skip: return preset
153 case MenuSelectionType.Selection: return choice.single_value
154
155 return None
156
[end of archinstall/lib/locale/locale_menu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/locale/locale_menu.py b/archinstall/lib/locale/locale_menu.py
--- a/archinstall/lib/locale/locale_menu.py
+++ b/archinstall/lib/locale/locale_menu.py
@@ -52,9 +52,9 @@
def __init__(
self,
data_store: Dict[str, Any],
- locele_conf: LocaleConfiguration
+ locale_conf: LocaleConfiguration
):
- self._preset = locele_conf
+ self._preset = locale_conf
super().__init__(data_store=data_store)
def setup_selection_menu_options(self):
@@ -62,19 +62,19 @@
Selector(
_('Keyboard layout'),
lambda preset: self._select_kb_layout(preset),
- default='us',
+ default=self._preset.kb_layout,
enabled=True)
self._menu_options['sys-language'] = \
Selector(
_('Locale language'),
lambda preset: select_locale_lang(preset),
- default='en_US',
+ default=self._preset.sys_lang,
enabled=True)
self._menu_options['sys-encoding'] = \
Selector(
_('Locale encoding'),
lambda preset: select_locale_enc(preset),
- default='UTF-8',
+ default=self._preset.sys_enc,
enabled=True)
def run(self, allow_reset: bool = True) -> LocaleConfiguration:
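A toy sketch (an assumption for illustration, not archinstall code) of the pattern the diff fixes: a selector seeded with a literal default discards earlier edits when the menu is re-entered, while one seeded from the preset keeps them:

```python
class Selector:
    def __init__(self, default):
        self.value = default

preset_kb_layout = "de"                    # layout the user already changed to
old = Selector(default="us")               # before: hardcoded, the edit is lost
new = Selector(default=preset_kb_layout)   # after: default follows the preset
print(old.value, new.value)                # -> us de
```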
| {"golden_diff": "diff --git a/archinstall/lib/locale/locale_menu.py b/archinstall/lib/locale/locale_menu.py\n--- a/archinstall/lib/locale/locale_menu.py\n+++ b/archinstall/lib/locale/locale_menu.py\n@@ -52,9 +52,9 @@\n \tdef __init__(\n \t\tself,\n \t\tdata_store: Dict[str, Any],\n-\t\tlocele_conf: LocaleConfiguration\n+\t\tlocale_conf: LocaleConfiguration\n \t):\n-\t\tself._preset = locele_conf\n+\t\tself._preset = locale_conf\n \t\tsuper().__init__(data_store=data_store)\n \n \tdef setup_selection_menu_options(self):\n@@ -62,19 +62,19 @@\n \t\t\tSelector(\n \t\t\t\t_('Keyboard layout'),\n \t\t\t\tlambda preset: self._select_kb_layout(preset),\n-\t\t\t\tdefault='us',\n+\t\t\t\tdefault=self._preset.kb_layout,\n \t\t\t\tenabled=True)\n \t\tself._menu_options['sys-language'] = \\\n \t\t\tSelector(\n \t\t\t\t_('Locale language'),\n \t\t\t\tlambda preset: select_locale_lang(preset),\n-\t\t\t\tdefault='en_US',\n+\t\t\t\tdefault=self._preset.sys_lang,\n \t\t\t\tenabled=True)\n \t\tself._menu_options['sys-encoding'] = \\\n \t\t\tSelector(\n \t\t\t\t_('Locale encoding'),\n \t\t\t\tlambda preset: select_locale_enc(preset),\n-\t\t\t\tdefault='UTF-8',\n+\t\t\t\tdefault=self._preset.sys_enc,\n \t\t\t\tenabled=True)\n \n \tdef run(self, allow_reset: bool = True) -> LocaleConfiguration:\n", "issue": "Localizations revert to defaults on entry of \"Locales\" menu\nIf localizations are changed from the defaults they will revert back to the defaults upon entry of the _Locales_ menu.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import Dict, Any, TYPE_CHECKING, Optional\n\nfrom .locale import set_kb_layout, list_keyboard_languages, list_locales\nfrom ..menu import Selector, AbstractSubMenu, MenuSelectionType, Menu\n\nif TYPE_CHECKING:\n\t_: Any\n\n\n@dataclass\nclass LocaleConfiguration:\n\tkb_layout: str\n\tsys_lang: str\n\tsys_enc: str\n\n\t@staticmethod\n\tdef default() -> 'LocaleConfiguration':\n\t\treturn LocaleConfiguration('us', 'en_US', 'UTF-8')\n\n\tdef json(self) -> Dict[str, str]:\n\t\treturn {\n\t\t\t'kb_layout': self.kb_layout,\n\t\t\t'sys_lang': self.sys_lang,\n\t\t\t'sys_enc': self.sys_enc\n\t\t}\n\n\t@classmethod\n\tdef _load_config(cls, config: 'LocaleConfiguration', args: Dict[str, Any]) -> 'LocaleConfiguration':\n\t\tif 'sys_lang' in args:\n\t\t\tconfig.sys_lang = args['sys_lang']\n\t\tif 'sys_enc' in args:\n\t\t\tconfig.sys_enc = args['sys_enc']\n\t\tif 'kb_layout' in args:\n\t\t\tconfig.kb_layout = args['kb_layout']\n\n\t\treturn config\n\n\t@classmethod\n\tdef parse_arg(cls, args: Dict[str, Any]) -> 'LocaleConfiguration':\n\t\tdefault = cls.default()\n\n\t\tif 'locale_config' in args:\n\t\t\tdefault = cls._load_config(default, args['locale_config'])\n\t\telse:\n\t\t\tdefault = cls._load_config(default, args)\n\n\t\treturn default\n\n\nclass LocaleMenu(AbstractSubMenu):\n\tdef __init__(\n\t\tself,\n\t\tdata_store: Dict[str, Any],\n\t\tlocele_conf: LocaleConfiguration\n\t):\n\t\tself._preset = locele_conf\n\t\tsuper().__init__(data_store=data_store)\n\n\tdef setup_selection_menu_options(self):\n\t\tself._menu_options['keyboard-layout'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Keyboard layout'),\n\t\t\t\tlambda preset: self._select_kb_layout(preset),\n\t\t\t\tdefault='us',\n\t\t\t\tenabled=True)\n\t\tself._menu_options['sys-language'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Locale language'),\n\t\t\t\tlambda preset: select_locale_lang(preset),\n\t\t\t\tdefault='en_US',\n\t\t\t\tenabled=True)\n\t\tself._menu_options['sys-encoding'] = 
\\\n\t\t\tSelector(\n\t\t\t\t_('Locale encoding'),\n\t\t\t\tlambda preset: select_locale_enc(preset),\n\t\t\t\tdefault='UTF-8',\n\t\t\t\tenabled=True)\n\n\tdef run(self, allow_reset: bool = True) -> LocaleConfiguration:\n\t\tsuper().run(allow_reset=allow_reset)\n\n\t\treturn LocaleConfiguration(\n\t\t\tself._data_store['keyboard-layout'],\n\t\t\tself._data_store['sys-language'],\n\t\t\tself._data_store['sys-encoding']\n\t\t)\n\n\tdef _select_kb_layout(self, preset: Optional[str]) -> Optional[str]:\n\t\tkb_lang = select_kb_layout(preset)\n\t\tif kb_lang:\n\t\t\tset_kb_layout(kb_lang)\n\t\treturn kb_lang\n\n\ndef select_locale_lang(preset: Optional[str] = None) -> Optional[str]:\n\tlocales = list_locales()\n\tlocale_lang = set([locale.split()[0] for locale in locales])\n\n\tchoice = Menu(\n\t\t_('Choose which locale language to use'),\n\t\tlist(locale_lang),\n\t\tsort=True,\n\t\tpreset_values=preset\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\t\tcase MenuSelectionType.Skip: return preset\n\n\treturn None\n\n\ndef select_locale_enc(preset: Optional[str] = None) -> Optional[str]:\n\tlocales = list_locales()\n\tlocale_enc = set([locale.split()[1] for locale in locales])\n\n\tchoice = Menu(\n\t\t_('Choose which locale encoding to use'),\n\t\tlist(locale_enc),\n\t\tsort=True,\n\t\tpreset_values=preset\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\t\tcase MenuSelectionType.Skip: return preset\n\n\treturn None\n\n\ndef select_kb_layout(preset: Optional[str] = None) -> Optional[str]:\n\t\"\"\"\n\tAsks the user to select a language\n\tUsually this is combined with :ref:`archinstall.list_keyboard_languages`.\n\n\t:return: The language/dictionary key of the selected language\n\t:rtype: str\n\t\"\"\"\n\tkb_lang = list_keyboard_languages()\n\t# sort alphabetically and then by length\n\tsorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)\n\n\tchoice = Menu(\n\t\t_('Select keyboard layout'),\n\t\tsorted_kb_lang,\n\t\tpreset_values=preset,\n\t\tsort=False\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n", "path": "archinstall/lib/locale/locale_menu.py"}]} | 1,999 | 313 |
gh_patches_debug_93 | rasdani/github-patches | git_diff | statsmodels__statsmodels-1374 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
move graphics.tsa to tsa.graphics
Makes more sense to me to keep the tsa stuff under the tsa namespace. Might need to deprecate functions that aren't new.
</issue>
<code>
[start of statsmodels/tsa/api.py]
1 from .ar_model import AR
2 from .arima_model import ARMA, ARIMA
3 import vector_ar as var
4 from .vector_ar.var_model import VAR
5 from .vector_ar.svar_model import SVAR
6 from .vector_ar.dynamic import DynamicVAR
7 import filters
8 import tsatools
9 from .tsatools import (add_trend, detrend, lagmat, lagmat2ds, add_lag)
10 import interp
11 import stattools
12 from .stattools import *
13 from .base import datetools
14
[end of statsmodels/tsa/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/tsa/api.py b/statsmodels/tsa/api.py
--- a/statsmodels/tsa/api.py
+++ b/statsmodels/tsa/api.py
@@ -11,3 +11,4 @@
import stattools
from .stattools import *
from .base import datetools
+from ..graphics import tsaplots as graphics
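Hypothetical usage once the alias is in place (the `plot_acf` call is an assumption about what a user might reach for; the alias itself comes straight from the diff):

```python
import statsmodels.tsa.api as tsa

# tsa.graphics is now the statsmodels.graphics.tsaplots module, so e.g.:
# fig = tsa.graphics.plot_acf(some_series)
print(tsa.graphics.__name__)  # statsmodels.graphics.tsaplots
```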
| {"golden_diff": "diff --git a/statsmodels/tsa/api.py b/statsmodels/tsa/api.py\n--- a/statsmodels/tsa/api.py\n+++ b/statsmodels/tsa/api.py\n@@ -11,3 +11,4 @@\n import stattools\n from .stattools import *\n from .base import datetools\n+from ..graphics import tsaplots as graphics\n", "issue": "move graphics.tsa to tsa.graphics\nMakes more sense to me to keep the tsa stuff under the tsa namespace. Might need to deprecate functions that aren't new. \n\n", "before_files": [{"content": "from .ar_model import AR\nfrom .arima_model import ARMA, ARIMA\nimport vector_ar as var\nfrom .vector_ar.var_model import VAR\nfrom .vector_ar.svar_model import SVAR\nfrom .vector_ar.dynamic import DynamicVAR\nimport filters\nimport tsatools\nfrom .tsatools import (add_trend, detrend, lagmat, lagmat2ds, add_lag)\nimport interp\nimport stattools\nfrom .stattools import *\nfrom .base import datetools\n", "path": "statsmodels/tsa/api.py"}]} | 705 | 78 |
gh_patches_debug_12989 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AZURE_23 - Reports incorrect fail when using azurerm_mssql_server_extended_auditing_policy
Check: CKV_AZURE_23: "Ensure that 'Auditing' is set to 'On' for SQL servers"
FAILED for resource: azurerm_mssql_server.server
File: /sql_server.tf:19-37
Guide: https://docs.bridgecrew.io/docs/bc_azr_logging_2
19 | resource "azurerm_mssql_server" "server" {
20 | name = "${var.org_short}-${var.env_short}-${var.loc_short}-${var.service}-mssql"
21 | location = var.location
22 | resource_group_name = azurerm_resource_group.server.name
23 | version = var.sql_server_server_version
24 | administrator_login = "${var.org_short}-${var.env_short}-${var.loc_short}-${var.service}-mssql-admin"
25 | administrator_login_password = random_password.server.result
26 | minimum_tls_version = var.min_tls_version
27 | public_network_access_enabled = false
28 | azuread_administrator {
29 | login_username = "AzureAD Admin"
30 | object_id = data.azuread_group.sqldba.object_id
31 | tenant_id = data.azurerm_client_config.current.tenant_id
32 | }
33 | identity {
34 | type = "SystemAssigned"
35 | }
36 | tags = local.default_tags
37 | }
When using Checkov v1.0.861
However, the auditing policy is set by the separate resource azurerm_mssql_server_extended_auditing_policy:
```
resource "azurerm_mssql_server_extended_auditing_policy" "server" {
server_id = azurerm_mssql_server.server.id
log_monitoring_enabled = true
storage_endpoint = azurerm_storage_account.server.primary_blob_endpoint
storage_account_access_key = azurerm_storage_account.server.primary_access_key
storage_account_access_key_is_secondary = true
retention_in_days = var.log_rention_policy_days
}
```
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py]
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3 from checkov.common.models.consts import ANY_VALUE
4
5
6 class SQLServerAuditingEnabled(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that 'Auditing' is set to 'On' for SQL servers"
9 id = "CKV_AZURE_23"
10 supported_resources = ['azurerm_sql_server', 'azurerm_mssql_server']
11 categories = [CheckCategories.LOGGING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return 'extended_auditing_policy'
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = SQLServerAuditingEnabled()
22
[end of checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py b/checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.consts import ANY_VALUE
-
-
-class SQLServerAuditingEnabled(BaseResourceValueCheck):
- def __init__(self):
- name = "Ensure that 'Auditing' is set to 'On' for SQL servers"
- id = "CKV_AZURE_23"
- supported_resources = ['azurerm_sql_server', 'azurerm_mssql_server']
- categories = [CheckCategories.LOGGING]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return 'extended_auditing_policy'
-
- def get_expected_value(self):
- return ANY_VALUE
-
-
-check = SQLServerAuditingEnabled()
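A rough sketch (an assumption about the mechanism, not checkov internals) of why the deleted check misfired: it inspected only the server resource's own `extended_auditing_policy` key, so auditing configured through the separate `azurerm_mssql_server_extended_auditing_policy` resource was invisible to it:

```python
# Parsed attributes of an azurerm_mssql_server block with no inline auditing:
server_block = {"name": "example", "minimum_tls_version": "1.2"}
inspected_key = "extended_auditing_policy"   # what the check looked for
print("PASSED" if inspected_key in server_block else "FAILED")  # -> FAILED
```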
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py b/checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.consts import ANY_VALUE\n-\n-\n-class SQLServerAuditingEnabled(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that 'Auditing' is set to 'On' for SQL servers\"\n- id = \"CKV_AZURE_23\"\n- supported_resources = ['azurerm_sql_server', 'azurerm_mssql_server']\n- categories = [CheckCategories.LOGGING]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return 'extended_auditing_policy'\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n-\n-\n-check = SQLServerAuditingEnabled()\n", "issue": "CKV_AZURE_23 - Reports incorrect fail when using azurerm_mssql_server_extended_auditing_policy\nCheck: CKV_AZURE_23: \"Ensure that 'Auditing' is set to 'On' for SQL servers\"\r\n FAILED for resource: azurerm_mssql_server.server\r\n File: /sql_server.tf:19-37\r\n Guide: https://docs.bridgecrew.io/docs/bc_azr_logging_2\r\n\r\n 19 | resource \"azurerm_mssql_server\" \"server\" {\r\n 20 | name = \"${var.org_short}-${var.env_short}-${var.loc_short}-${var.service}-mssql\"\r\n 21 | location = var.location\r\n 22 | resource_group_name = azurerm_resource_group.server.name\r\n 23 | version = var.sql_server_server_version\r\n 24 | administrator_login = \"${var.org_short}-${var.env_short}-${var.loc_short}-${var.service}-mssql-admin\"\r\n 25 | administrator_login_password = random_password.server.result\r\n 26 | minimum_tls_version = var.min_tls_version\r\n 27 | public_network_access_enabled = false\r\n 28 | azuread_administrator {\r\n 29 | login_username = \"AzureAD Admin\"\r\n 30 | object_id = data.azuread_group.sqldba.object_id\r\n 31 | tenant_id = data.azurerm_client_config.current.tenant_id\r\n 32 | }\r\n 33 | identity {\r\n 34 | type = \"SystemAssigned\"\r\n 35 | }\r\n 36 | tags = local.default_tags\r\n 37 | }\r\n\r\nWhen using Checkov v1.0.861\r\n\r\nHowever\r\nThe auditing policy is set by the separate resource azurerm_mssql_server_extended_auditing_policy\r\n\r\n```\r\nresource \"azurerm_mssql_server_extended_auditing_policy\" \"server\" {\r\n server_id = azurerm_mssql_server.server.id\r\n log_monitoring_enabled = true\r\n storage_endpoint = azurerm_storage_account.server.primary_blob_endpoint\r\n storage_account_access_key = azurerm_storage_account.server.primary_access_key\r\n storage_account_access_key_is_secondary = true\r\n retention_in_days = var.log_rention_policy_days\r\n}\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass SQLServerAuditingEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that 'Auditing' is set to 'On' for SQL servers\"\n id = \"CKV_AZURE_23\"\n supported_resources = ['azurerm_sql_server', 'azurerm_mssql_server']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n 
def get_inspected_key(self):\n return 'extended_auditing_policy'\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = SQLServerAuditingEnabled()\n", "path": "checkov/terraform/checks/resource/azure/SQLServerAuditingEnabled.py"}]} | 1,300 | 277 |
gh_patches_debug_33997 | rasdani/github-patches | git_diff | pytorch__ignite-1318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve R2Score metric for DDP
## 🚀 Feature
The current implementation of the `R2Score` metric is not compatible with DDP. The idea is to improve this metric to work in parallel.
Related to #1284
</issue>
<code>
[start of ignite/contrib/metrics/regression/r2_score.py]
1 import torch
2
3 from ignite.contrib.metrics.regression._base import _BaseRegression
4 from ignite.exceptions import NotComputableError
5
6
7 class R2Score(_BaseRegression):
8 r"""
9 Calculates the R-Squared, the
10 `coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_:
11
12 :math:`R^2 = 1 - \frac{\sum_{j=1}^n(A_j - P_j)^2}{\sum_{j=1}^n(A_j - \bar{A})^2}`,
13
14 where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and
15 :math:`\bar{A}` is the mean of the ground truth.
16
17 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
18 - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
19 """
20
21 def reset(self):
22 self._num_examples = 0
23 self._sum_of_errors = 0
24 self._y_sq_sum = 0
25 self._y_sum = 0
26
27 def _update(self, output):
28 y_pred, y = output
29 self._num_examples += y.shape[0]
30 self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).item()
31
32 self._y_sum += torch.sum(y).item()
33 self._y_sq_sum += torch.sum(torch.pow(y, 2)).item()
34
35 def compute(self):
36 if self._num_examples == 0:
37 raise NotComputableError("R2Score must have at least one example before it can be computed.")
38 return 1 - self._sum_of_errors / (self._y_sq_sum - (self._y_sum ** 2) / self._num_examples)
39
[end of ignite/contrib/metrics/regression/r2_score.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/contrib/metrics/regression/r2_score.py b/ignite/contrib/metrics/regression/r2_score.py
--- a/ignite/contrib/metrics/regression/r2_score.py
+++ b/ignite/contrib/metrics/regression/r2_score.py
@@ -1,7 +1,10 @@
+from typing import Callable, Union
+
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class R2Score(_BaseRegression):
@@ -18,21 +21,32 @@
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
"""
+ def __init__(
+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
+ ):
+ self._num_examples = None
+ self._sum_of_errors = None
+ self._y_sq_sum = None
+ self._y_sum = None
+ super(R2Score, self).__init__(output_transform, device)
+
+ @reinit__is_reduced
def reset(self):
self._num_examples = 0
- self._sum_of_errors = 0
- self._y_sq_sum = 0
- self._y_sum = 0
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+ self._y_sq_sum = torch.tensor(0.0, device=self._device)
+ self._y_sum = torch.tensor(0.0, device=self._device)
def _update(self, output):
y_pred, y = output
self._num_examples += y.shape[0]
- self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).item()
+ self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)
- self._y_sum += torch.sum(y).item()
- self._y_sq_sum += torch.sum(torch.pow(y, 2)).item()
+ self._y_sum += torch.sum(y).to(self._device)
+ self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)
+ @sync_all_reduce("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
def compute(self):
if self._num_examples == 0:
raise NotComputableError("R2Score must have at least one example before it can be computed.")
- return 1 - self._sum_of_errors / (self._y_sq_sum - (self._y_sum ** 2) / self._num_examples)
+ return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)
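A simplified stand-in (an assumption, not ignite's actual implementation) for what the `sync_all_reduce` decorator contributes here: each accumulator tensor is summed across ranks before `compute()` runs, so the final R^2 is derived from global totals rather than one process's shard:

```python
import torch
import torch.distributed as dist

def all_reduce_if_distributed(t: torch.Tensor) -> torch.Tensor:
    # No-op outside a distributed run; SUM-reduce in place otherwise.
    if dist.is_available() and dist.is_initialized():
        dist.all_reduce(t, op=dist.ReduceOp.SUM)
    return t
```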
| {"golden_diff": "diff --git a/ignite/contrib/metrics/regression/r2_score.py b/ignite/contrib/metrics/regression/r2_score.py\n--- a/ignite/contrib/metrics/regression/r2_score.py\n+++ b/ignite/contrib/metrics/regression/r2_score.py\n@@ -1,7 +1,10 @@\n+from typing import Callable, Union\n+\n import torch\n \n from ignite.contrib.metrics.regression._base import _BaseRegression\n from ignite.exceptions import NotComputableError\n+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n \n \n class R2Score(_BaseRegression):\n@@ -18,21 +21,32 @@\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.\n \"\"\"\n \n+ def __init__(\n+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n+ ):\n+ self._num_examples = None\n+ self._sum_of_errors = None\n+ self._y_sq_sum = None\n+ self._y_sum = None\n+ super(R2Score, self).__init__(output_transform, device)\n+\n+ @reinit__is_reduced\n def reset(self):\n self._num_examples = 0\n- self._sum_of_errors = 0\n- self._y_sq_sum = 0\n- self._y_sum = 0\n+ self._sum_of_errors = torch.tensor(0.0, device=self._device)\n+ self._y_sq_sum = torch.tensor(0.0, device=self._device)\n+ self._y_sum = torch.tensor(0.0, device=self._device)\n \n def _update(self, output):\n y_pred, y = output\n self._num_examples += y.shape[0]\n- self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).item()\n+ self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)\n \n- self._y_sum += torch.sum(y).item()\n- self._y_sq_sum += torch.sum(torch.pow(y, 2)).item()\n+ self._y_sum += torch.sum(y).to(self._device)\n+ self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)\n \n+ @sync_all_reduce(\"_num_examples\", \"_sum_of_errors\", \"_y_sq_sum\", \"_y_sum\")\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\"R2Score must have at least one example before it can be computed.\")\n- return 1 - self._sum_of_errors / (self._y_sq_sum - (self._y_sum ** 2) / self._num_examples)\n+ return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)\n", "issue": "Improve R2Score metric for DDP\n## \ud83d\ude80 Feature\r\n\r\nCurrent implementation of `R2Score` metric is not compatible with ddp. 
The idea is to improve this metric to work in parallel.\r\n\r\nRelated to #1284\n", "before_files": [{"content": "import torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.exceptions import NotComputableError\n\n\nclass R2Score(_BaseRegression):\n r\"\"\"\n Calculates the R-Squared, the\n `coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_:\n\n :math:`R^2 = 1 - \\frac{\\sum_{j=1}^n(A_j - P_j)^2}{\\sum_{j=1}^n(A_j - \\bar{A})^2}`,\n\n where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and\n :math:`\\bar{A}` is the mean of the ground truth.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.\n \"\"\"\n\n def reset(self):\n self._num_examples = 0\n self._sum_of_errors = 0\n self._y_sq_sum = 0\n self._y_sum = 0\n\n def _update(self, output):\n y_pred, y = output\n self._num_examples += y.shape[0]\n self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).item()\n\n self._y_sum += torch.sum(y).item()\n self._y_sq_sum += torch.sum(torch.pow(y, 2)).item()\n\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\"R2Score must have at least one example before it can be computed.\")\n return 1 - self._sum_of_errors / (self._y_sq_sum - (self._y_sum ** 2) / self._num_examples)\n", "path": "ignite/contrib/metrics/regression/r2_score.py"}]} | 1,099 | 673 |
gh_patches_debug_27900 | rasdani/github-patches | git_diff | conan-io__conan-center-index-1707 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] zstd/1.4.5
### Package Details
* Package Name/Version: **zstd/1.4.5**
* Changelog: **https://github.com/facebook/zstd/releases/tag/v1.4.5**
The above-mentioned version has been newly released by the upstream project and is not yet available as a recipe. Please add this version.
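
For context, the recipe below already resolves its source archive from `conandata.yml` at runtime, so the bump is mostly a data change. A minimal sketch of that lookup with a hypothetical `1.4.5` entry — the URL pattern and checksum placeholder are assumptions, not taken from the repo:

```python
# Hypothetical sketch of how source() resolves the tarball for a new version.
conan_data = {
    "sources": {
        "1.4.5": {
            "url": "https://github.com/facebook/zstd/archive/v1.4.5.tar.gz",
            "sha256": "<checksum of the v1.4.5 tarball>",  # placeholder, must be filled in
        }
    }
}
version = "1.4.5"
print(conan_data["sources"][version])  # this mapping is what tools.get(**...) receives
```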
</issue>
<code>
[start of recipes/zstd/all/conanfile.py]
1 import os
2 from conans import ConanFile, CMake, tools
3
4
5 class ZstdConan(ConanFile):
6 name = "zstd"
7 url = "https://github.com/conan-io/conan-center-index"
8 homepage = "https://github.com/facebook/zstd"
9 description = "Zstandard - Fast real-time compression algorithm"
10 topics = ("conan", "zstd", "compression", "algorithm", "decoder")
11 license = "BSD-3-Clause"
12 exports_sources = ['CMakeLists.txt']
13 generators = 'cmake'
14 settings = "os", "arch", "compiler", "build_type"
15 options = {"shared": [True, False], "fPIC": [True, False]}
16 default_options = {"shared": False, "fPIC": True}
17
18 @property
19 def _source_subfolder(self):
20 return "source_subfolder"
21
22 def source(self):
23 tools.get(**self.conan_data["sources"][self.version])
24 extracted_dir = self.name + "-" + self.version
25 os.rename(extracted_dir, self._source_subfolder)
26
27 def config_options(self):
28 if self.settings.os == "Windows":
29 del self.options.fPIC
30
31 def configure(self):
32 del self.settings.compiler.libcxx
33 del self.settings.compiler.cppstd
34
35 def _configure_cmake(self):
36 cmake = CMake(self)
37 cmake.definitions["ZSTD_BUILD_PROGRAMS"] = False
38 cmake.definitions["ZSTD_BUILD_STATIC"] = not self.options.shared
39 cmake.definitions["ZSTD_BUILD_SHARED"] = self.options.shared
40 cmake.configure()
41 return cmake
42
43 def build(self):
44 cmake = self._configure_cmake()
45 cmake.build()
46
47 def package(self):
48 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
49 cmake = self._configure_cmake()
50 cmake.install()
51 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
52
53 def package_info(self):
54 self.cpp_info.libs = tools.collect_libs(self)
55 if self.settings.os == "Linux":
56 self.cpp_info.system_libs.append("pthread")
57
[end of recipes/zstd/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/zstd/all/conanfile.py b/recipes/zstd/all/conanfile.py
--- a/recipes/zstd/all/conanfile.py
+++ b/recipes/zstd/all/conanfile.py
@@ -9,7 +9,7 @@
description = "Zstandard - Fast real-time compression algorithm"
topics = ("conan", "zstd", "compression", "algorithm", "decoder")
license = "BSD-3-Clause"
- exports_sources = ['CMakeLists.txt']
+ exports_sources = ['CMakeLists.txt', "patches/**"]
generators = 'cmake'
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
@@ -40,7 +40,12 @@
cmake.configure()
return cmake
+ def _patch_sources(self):
+ for patch in self.conan_data.get("patches", {}).get(self.version, []):
+ tools.patch(**patch)
+
def build(self):
+ self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
@@ -48,6 +53,7 @@
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
+ tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
| {"golden_diff": "diff --git a/recipes/zstd/all/conanfile.py b/recipes/zstd/all/conanfile.py\n--- a/recipes/zstd/all/conanfile.py\n+++ b/recipes/zstd/all/conanfile.py\n@@ -9,7 +9,7 @@\n description = \"Zstandard - Fast real-time compression algorithm\"\n topics = (\"conan\", \"zstd\", \"compression\", \"algorithm\", \"decoder\")\n license = \"BSD-3-Clause\"\n- exports_sources = ['CMakeLists.txt']\n+ exports_sources = ['CMakeLists.txt', \"patches/**\"]\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n@@ -40,7 +40,12 @@\n cmake.configure()\n return cmake\n \n+ def _patch_sources(self):\n+ for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n+ tools.patch(**patch)\n+\n def build(self):\n+ self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n \n@@ -48,6 +53,7 @@\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n+ tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n \n def package_info(self):\n", "issue": "[request] zstd/1.4.5\n### Package Details\r\n * Package Name/Version: **zstd/1.4.5**\r\n * Changelog: **https://github.com/facebook/zstd/releases/tag/v1.4.5**\r\n\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.\r\n\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, CMake, tools\n\n\nclass ZstdConan(ConanFile):\n name = \"zstd\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/facebook/zstd\"\n description = \"Zstandard - Fast real-time compression algorithm\"\n topics = (\"conan\", \"zstd\", \"compression\", \"algorithm\", \"decoder\")\n license = \"BSD-3-Clause\"\n exports_sources = ['CMakeLists.txt']\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"ZSTD_BUILD_PROGRAMS\"] = False\n cmake.definitions[\"ZSTD_BUILD_STATIC\"] = not self.options.shared\n cmake.definitions[\"ZSTD_BUILD_SHARED\"] = self.options.shared\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n", "path": "recipes/zstd/all/conanfile.py"}]} | 1,193 | 340 |
gh_patches_debug_2664 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicated shell prompt
# Description
When I run the hello skill from the shell, the shell prompt is printed twice. This looks like an issue with the shell connector.
## Steps to Reproduce
```
qidong@ubuntu:~/Documents/opsdroid$ opsdroid start
mybot> hello
Hello qidong
mybot> mybot>
```
## Expected Functionality
There should be only one prompt printed after the skill response.
## Experienced Functionality
One extra prompt is printed.
## Configuration File
```yaml
logging:
console: false
connectors:
websocket:
bot-name: "mybot"
max-connections: 10
connection-timeout: 10
shell:
bot-name: "mybot"
skills:
## Hello (https://github.com/opsdroid/skill-hello)
hello: {}
```
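
Having skimmed the connector code below, my guess at the root cause (an assumption, not verified): both `respond()` and the next `parseloop()` iteration call `draw_prompt()`, so every reply is followed by two prompts. A minimal standalone sketch of that sequence:

```python
# Hypothetical reduction of the double-prompt behaviour.
def draw_prompt():
    print("mybot> ", end="", flush=True)

def respond(text):
    print(text)
    draw_prompt()  # prompt drawn once by respond()

respond("Hello qidong")
draw_prompt()      # ...and again by the next parseloop() iteration -> "mybot> mybot> "
```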
</issue>
<code>
[start of opsdroid/connector/shell/__init__.py]
1 """A connector to send messages using the command line."""
2 import logging
3 import os
4 import sys
5 import platform
6 import asyncio
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message
10
11 _LOGGER = logging.getLogger(__name__)
12 CONFIG_SCHEMA = {"bot-name": str}
13
14
15 class ConnectorShell(Connector):
16 """A connector to send messages using the command line."""
17
18 def __init__(self, config, opsdroid=None):
19 """Create the connector."""
20 _LOGGER.debug(_("Loaded shell Connector."))
21 super().__init__(config, opsdroid=opsdroid)
22 self.name = "shell"
23 self.config = config
24 self.bot_name = config.get("bot-name", "opsdroid")
25 self.prompt_length = None
26 self.listening = True
27 self.reader = None
28 self._closing = asyncio.Event()
29 self.loop = asyncio.get_event_loop()
30
31 for name in ("LOGNAME", "USER", "LNAME", "USERNAME"):
32 user = os.environ.get(name)
33 if user:
34 self.user = user
35
36 @property
37 def is_listening(self):
38 """Get listening status."""
39 return self.listening
40
41 @is_listening.setter
42 def is_listening(self, val):
43 """Set listening status."""
44 self.listening = val
45
46 async def read_stdin(self):
47 """Create a stream reader to read stdin asynchronously.
48
49 Returns:
50 class: asyncio.streams.StreamReader
51
52 """
53 self.reader = asyncio.StreamReader(loop=self.loop)
54 reader_protocol = asyncio.StreamReaderProtocol(self.reader)
55
56 await self.loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
57
58 return self.reader
59
60 async def async_input(self):
61 """Read user input asynchronously from stdin.
62
63 Returns:
64 string: A decoded string from user input.
65
66 """
67 if not self.reader:
68 self.reader = await self.read_stdin()
69 line = await self.reader.readline()
70
71 return line.decode("utf8").replace("\r", "").replace("\n", "")
72
73 def draw_prompt(self):
74 """Draw the user input prompt."""
75 prompt = self.bot_name + "> "
76 self.prompt_length = len(prompt)
77 print(prompt, end="", flush=True)
78
79 def clear_prompt(self):
80 """Clear the prompt."""
81 print("\r" + (" " * self.prompt_length) + "\r", end="", flush=True)
82
83 async def parseloop(self):
84 """Parseloop moved out for testing."""
85 self.draw_prompt()
86 user_input = await self.async_input()
87 message = Message(text=user_input, user=self.user, target=None, connector=self)
88 await self.opsdroid.parse(message)
89
90 async def _parse_message(self):
91 """Parse user input."""
92 while self.is_listening:
93 await self.parseloop()
94
95 async def connect(self):
96 """Connect to the shell.
97
98 There is nothing to do here since stdin is already available.
99
100 Since this is the first method called when opsdroid starts, a logging
101 message is shown if the user is using windows.
102
103 """
104 if platform.system() == "Windows":
105 _LOGGER.warning(
106 "The shell connector does not work on windows. Please install the Opsdroid Desktop App."
107 )
108 pass
109
110 async def listen(self):
111 """Listen for and parse new user input."""
112 _LOGGER.debug(_("Connecting to shell."))
113 message_processor = self.loop.create_task(self._parse_message())
114 await self._closing.wait()
115 message_processor.cancel()
116
117 @register_event(Message)
118 async def respond(self, message):
119 """Respond with a message.
120
121 Args:
122 message (object): An instance of Message
123
124 """
125 _LOGGER.debug(_("Responding with: %s."), message.text)
126 self.clear_prompt()
127 print(message.text)
128 self.draw_prompt()
129
130 async def disconnect(self):
131 """Disconnects the connector."""
132 self._closing.set()
133
[end of opsdroid/connector/shell/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/connector/shell/__init__.py b/opsdroid/connector/shell/__init__.py
--- a/opsdroid/connector/shell/__init__.py
+++ b/opsdroid/connector/shell/__init__.py
@@ -125,7 +125,6 @@
_LOGGER.debug(_("Responding with: %s."), message.text)
self.clear_prompt()
print(message.text)
- self.draw_prompt()
async def disconnect(self):
"""Disconnects the connector."""
| {"golden_diff": "diff --git a/opsdroid/connector/shell/__init__.py b/opsdroid/connector/shell/__init__.py\n--- a/opsdroid/connector/shell/__init__.py\n+++ b/opsdroid/connector/shell/__init__.py\n@@ -125,7 +125,6 @@\n _LOGGER.debug(_(\"Responding with: %s.\"), message.text)\n self.clear_prompt()\n print(message.text)\n- self.draw_prompt()\n \n async def disconnect(self):\n \"\"\"Disconnects the connector.\"\"\"\n", "issue": "Duplicated shell prompt \n# Description\r\nWhen I run the hello skill from the shell, I found duplicated shell prompt output. I think there's some issue with the shell connector. \r\n\r\n\r\n## Steps to Reproduce\r\n```\r\nqidong@ubuntu:~/Documents/opsdroid$ opsdroid start\r\nmybot> hello\r\nHello qidong\r\nmybot> mybot>\r\n```\r\n\r\n\r\n## Expected Functionality\r\nThere should be only one prompt printed after the skill response. \r\n\r\n## Experienced Functionality\r\nOne extra prompt is printed. \r\n\r\n\r\n## Configuration File\r\n\r\n```yaml\r\nlogging:\r\n console: false\r\n\r\nconnectors:\r\n websocket:\r\n bot-name: \"mybot\"\r\n max-connections: 10\r\n connection-timeout: 10\r\n \r\n shell:\r\n bot-name: \"mybot\"\r\n\r\nskills:\r\n ## Hello (https://github.com/opsdroid/skill-hello)\r\n hello: {}\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"A connector to send messages using the command line.\"\"\"\nimport logging\nimport os\nimport sys\nimport platform\nimport asyncio\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\"bot-name\": str}\n\n\nclass ConnectorShell(Connector):\n \"\"\"A connector to send messages using the command line.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n _LOGGER.debug(_(\"Loaded shell Connector.\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"shell\"\n self.config = config\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.prompt_length = None\n self.listening = True\n self.reader = None\n self._closing = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n\n for name in (\"LOGNAME\", \"USER\", \"LNAME\", \"USERNAME\"):\n user = os.environ.get(name)\n if user:\n self.user = user\n\n @property\n def is_listening(self):\n \"\"\"Get listening status.\"\"\"\n return self.listening\n\n @is_listening.setter\n def is_listening(self, val):\n \"\"\"Set listening status.\"\"\"\n self.listening = val\n\n async def read_stdin(self):\n \"\"\"Create a stream reader to read stdin asynchronously.\n\n Returns:\n class: asyncio.streams.StreamReader\n\n \"\"\"\n self.reader = asyncio.StreamReader(loop=self.loop)\n reader_protocol = asyncio.StreamReaderProtocol(self.reader)\n\n await self.loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)\n\n return self.reader\n\n async def async_input(self):\n \"\"\"Read user input asynchronously from stdin.\n\n Returns:\n string: A decoded string from user input.\n\n \"\"\"\n if not self.reader:\n self.reader = await self.read_stdin()\n line = await self.reader.readline()\n\n return line.decode(\"utf8\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n\n def draw_prompt(self):\n \"\"\"Draw the user input prompt.\"\"\"\n prompt = self.bot_name + \"> \"\n self.prompt_length = len(prompt)\n print(prompt, end=\"\", flush=True)\n\n def clear_prompt(self):\n \"\"\"Clear the prompt.\"\"\"\n print(\"\\r\" + (\" \" * self.prompt_length) + \"\\r\", end=\"\", flush=True)\n\n async def parseloop(self):\n \"\"\"Parseloop 
moved out for testing.\"\"\"\n self.draw_prompt()\n user_input = await self.async_input()\n message = Message(text=user_input, user=self.user, target=None, connector=self)\n await self.opsdroid.parse(message)\n\n async def _parse_message(self):\n \"\"\"Parse user input.\"\"\"\n while self.is_listening:\n await self.parseloop()\n\n async def connect(self):\n \"\"\"Connect to the shell.\n\n There is nothing to do here since stdin is already available.\n\n Since this is the first method called when opsdroid starts, a logging\n message is shown if the user is using windows.\n\n \"\"\"\n if platform.system() == \"Windows\":\n _LOGGER.warning(\n \"The shell connector does not work on windows. Please install the Opsdroid Desktop App.\"\n )\n pass\n\n async def listen(self):\n \"\"\"Listen for and parse new user input.\"\"\"\n _LOGGER.debug(_(\"Connecting to shell.\"))\n message_processor = self.loop.create_task(self._parse_message())\n await self._closing.wait()\n message_processor.cancel()\n\n @register_event(Message)\n async def respond(self, message):\n \"\"\"Respond with a message.\n\n Args:\n message (object): An instance of Message\n\n \"\"\"\n _LOGGER.debug(_(\"Responding with: %s.\"), message.text)\n self.clear_prompt()\n print(message.text)\n self.draw_prompt()\n\n async def disconnect(self):\n \"\"\"Disconnects the connector.\"\"\"\n self._closing.set()\n", "path": "opsdroid/connector/shell/__init__.py"}]} | 1,887 | 120 |
gh_patches_debug_10776 | rasdani/github-patches | git_diff | marshmallow-code__webargs-123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValidationError causes HTTP/500 in Flask instead of HTTP/422 in Python 2
Here are the symptoms: http://stackoverflow.com/questions/37321835/flask-error-handler-not-able-to-handle-assertion-error/39624893#39624893
Here is a case where I personally hit the issue, with Python 2.x and with PyPy in Python 2.x mode only: https://travis-ci.org/frol/flask-restplus-server-example/builds/161685419
After troubleshooting, I discovered that this issue stems from `FlaskParser.handle_error`, which calls the `abort` function with an `exc=` argument. That argument [is set as "data"](https://github.com/sloria/webargs/blob/dev/webargs/flaskparser.py#L38) on the raised `HTTPException`; the data is later JSON-dumped and fails with a `TypeError`, leaving the new exception in `sys.exc_info()`, which causes Flask to [assert here](https://github.com/pallets/flask/blob/01081dbe6cdfa3fc43d8e1fff708d4ed95e1be7e/flask/app.py#L1515).
Searching git history for the introduced `exc=` in `FlaskParser`, I found it here: https://github.com/sloria/webargs/commit/6f8088c7c85c05d2891e81ee97d38f2ae801159d
Is there a reason for `exc=` to be passed? How can we fix this?
Python 3 seems to have changed the `sys.exc_info()` behaviour: even after the `TypeError` caused by `json.dumps()`, `sys.exc_info()` still reports the `HTTPException`, while in Python 2 it returns the `TypeError`.
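
A minimal standalone sketch of the mechanism described above (hypothetical payload, no Flask involved): exception instances are not JSON-serializable, so dumping the attached data raises a `TypeError`, and the two Python lineages differ in how `sys.exc_info()` recovers afterwards:

```python
import json
import sys

def dump_error_data():
    payload = {"messages": {"name": ["Missing data."]}, "exc": ValueError("boom")}
    try:
        json.dumps(payload)  # the attached exception instance is not serializable
    except TypeError:
        pass  # swallowed, roughly as Flask's error-handling path does

try:
    raise RuntimeError("stand-in for the raised HTTPException")
except RuntimeError:
    dump_error_data()
    # Python 3 restores the outer exception here (RuntimeError); Python 2 still
    # reports the leaked TypeError, which is what trips Flask's assertion.
    print(sys.exc_info()[0])
```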
</issue>
<code>
[start of webargs/flaskparser.py]
1 # -*- coding: utf-8 -*-
2 """Flask request argument parsing module.
3
4 Example: ::
5
6 from flask import Flask
7
8 from webargs import fields
9 from webargs.flaskparser import use_args
10
11 app = Flask(__name__)
12
13 hello_args = {
14 'name': fields.Str(required=True)
15 }
16
17 @app.route('/')
18 @use_args(hello_args)
19 def index(args):
20 return 'Hello ' + args['name']
21 """
22 import flask
23 from werkzeug.exceptions import HTTPException
24
25 from webargs import core
26
27
28 def abort(http_status_code, **kwargs):
29 """Raise a HTTPException for the given http_status_code. Attach any keyword
30 arguments to the exception for later processing.
31
32 From Flask-Restful. See NOTICE file for license information.
33 """
34 try:
35 flask.abort(http_status_code)
36 except HTTPException as err:
37 if len(kwargs):
38 err.data = kwargs
39 raise err
40
41
42 def is_json_request(req):
43 return core.is_json(req.mimetype)
44
45
46 class FlaskParser(core.Parser):
47 """Flask request argument parser."""
48
49 __location_map__ = dict(
50 view_args='parse_view_args',
51 **core.Parser.__location_map__
52 )
53
54 def parse_view_args(self, req, name, field):
55 """Pull a value from the request's ``view_args``."""
56 return core.get_value(req.view_args, name, field)
57
58 def parse_json(self, req, name, field):
59 """Pull a json value from the request."""
60 # Pass force in order to handle vendor media types,
61 # e.g. applications/vnd.json+api
62 # this should be unnecessary in Flask 1.0
63 force = is_json_request(req)
64 # Fail silently so that the webargs parser can handle the error
65 if hasattr(req, 'get_json'):
66 # Flask >= 0.10.x
67 json_data = req.get_json(force=force, silent=True)
68 else:
69 # Flask <= 0.9.x
70 json_data = req.json
71 if json_data is None:
72 return core.missing
73 return core.get_value(json_data, name, field, allow_many_nested=True)
74
75 def parse_querystring(self, req, name, field):
76 """Pull a querystring value from the request."""
77 return core.get_value(req.args, name, field)
78
79 def parse_form(self, req, name, field):
80 """Pull a form value from the request."""
81 try:
82 return core.get_value(req.form, name, field)
83 except AttributeError:
84 pass
85 return core.missing
86
87 def parse_headers(self, req, name, field):
88 """Pull a value from the header data."""
89 return core.get_value(req.headers, name, field)
90
91 def parse_cookies(self, req, name, field):
92 """Pull a value from the cookiejar."""
93 return core.get_value(req.cookies, name, field)
94
95 def parse_files(self, req, name, field):
96 """Pull a file from the request."""
97 return core.get_value(req.files, name, field)
98
99 def handle_error(self, error):
100 """Handles errors during parsing. Aborts the current HTTP request and
101 responds with a 422 error.
102 """
103 status_code = getattr(error, 'status_code', self.DEFAULT_VALIDATION_STATUS)
104 abort(status_code, messages=error.messages, exc=error)
105
106 def get_default_request(self):
107 """Override to use Flask's thread-local request objec by default"""
108 return flask.request
109
110 parser = FlaskParser()
111 use_args = parser.use_args
112 use_kwargs = parser.use_kwargs
113
[end of webargs/flaskparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/webargs/flaskparser.py b/webargs/flaskparser.py
--- a/webargs/flaskparser.py
+++ b/webargs/flaskparser.py
@@ -25,7 +25,7 @@
from webargs import core
-def abort(http_status_code, **kwargs):
+def abort(http_status_code, exc=None, **kwargs):
"""Raise a HTTPException for the given http_status_code. Attach any keyword
arguments to the exception for later processing.
@@ -36,6 +36,7 @@
except HTTPException as err:
if len(kwargs):
err.data = kwargs
+ err.exc = exc
raise err
| {"golden_diff": "diff --git a/webargs/flaskparser.py b/webargs/flaskparser.py\n--- a/webargs/flaskparser.py\n+++ b/webargs/flaskparser.py\n@@ -25,7 +25,7 @@\n from webargs import core\n \n \n-def abort(http_status_code, **kwargs):\n+def abort(http_status_code, exc=None, **kwargs):\n \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n arguments to the exception for later processing.\n \n@@ -36,6 +36,7 @@\n except HTTPException as err:\n if len(kwargs):\n err.data = kwargs\n+ err.exc = exc\n raise err\n", "issue": "ValidationError causes HTTP/500 in Flask instead of HTTP/422 in Python 2\nHere are the symptoms: http://stackoverflow.com/questions/37321835/flask-error-handler-not-able-to-handle-assertion-error/39624893#39624893\n\nHere is the case where I personally hit the issue with Python 2.x and PyPy in Python 2.x mode only: https://travis-ci.org/frol/flask-restplus-server-example/builds/161685419\n\nAfter troubleshooting, I discovered that this issue stems from the `FlaskParser.handle_error` which calls `abort` function with `exc=` argument, which then [is set as \"data\"](https://github.com/sloria/webargs/blob/dev/webargs/flaskparser.py#L38) on the raised `HTTPException`, which in its turn is tried to be json-dumped and fails with `TypeError`, and now the new exception is in `sys.exc_info()`, which causes Flask to [assert here](https://github.com/pallets/flask/blob/01081dbe6cdfa3fc43d8e1fff708d4ed95e1be7e/flask/app.py#L1515).\n\nSearching git history for the introduced `exc=` in `FlaskParser`, I found it here: https://github.com/sloria/webargs/commit/6f8088c7c85c05d2891e81ee97d38f2ae801159d\n\nIs there a reason for `exc=` to be passed? How can we fix this?\n\nPython 3 seems to have changed something with `sys.exc_info()` behaviour since even after the `TypeError` caused by `json.dumps()`, `sys.exc_info()` still reports `HTTPException` while in Python 2 it returns `TypeError`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Flask request argument parsing module.\n\nExample: ::\n\n from flask import Flask\n\n from webargs import fields\n from webargs.flaskparser import use_args\n\n app = Flask(__name__)\n\n hello_args = {\n 'name': fields.Str(required=True)\n }\n\n @app.route('/')\n @use_args(hello_args)\n def index(args):\n return 'Hello ' + args['name']\n\"\"\"\nimport flask\nfrom werkzeug.exceptions import HTTPException\n\nfrom webargs import core\n\n\ndef abort(http_status_code, **kwargs):\n \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n arguments to the exception for later processing.\n\n From Flask-Restful. See NOTICE file for license information.\n \"\"\"\n try:\n flask.abort(http_status_code)\n except HTTPException as err:\n if len(kwargs):\n err.data = kwargs\n raise err\n\n\ndef is_json_request(req):\n return core.is_json(req.mimetype)\n\n\nclass FlaskParser(core.Parser):\n \"\"\"Flask request argument parser.\"\"\"\n\n __location_map__ = dict(\n view_args='parse_view_args',\n **core.Parser.__location_map__\n )\n\n def parse_view_args(self, req, name, field):\n \"\"\"Pull a value from the request's ``view_args``.\"\"\"\n return core.get_value(req.view_args, name, field)\n\n def parse_json(self, req, name, field):\n \"\"\"Pull a json value from the request.\"\"\"\n # Pass force in order to handle vendor media types,\n # e.g. 
applications/vnd.json+api\n # this should be unnecessary in Flask 1.0\n force = is_json_request(req)\n # Fail silently so that the webargs parser can handle the error\n if hasattr(req, 'get_json'):\n # Flask >= 0.10.x\n json_data = req.get_json(force=force, silent=True)\n else:\n # Flask <= 0.9.x\n json_data = req.json\n if json_data is None:\n return core.missing\n return core.get_value(json_data, name, field, allow_many_nested=True)\n\n def parse_querystring(self, req, name, field):\n \"\"\"Pull a querystring value from the request.\"\"\"\n return core.get_value(req.args, name, field)\n\n def parse_form(self, req, name, field):\n \"\"\"Pull a form value from the request.\"\"\"\n try:\n return core.get_value(req.form, name, field)\n except AttributeError:\n pass\n return core.missing\n\n def parse_headers(self, req, name, field):\n \"\"\"Pull a value from the header data.\"\"\"\n return core.get_value(req.headers, name, field)\n\n def parse_cookies(self, req, name, field):\n \"\"\"Pull a value from the cookiejar.\"\"\"\n return core.get_value(req.cookies, name, field)\n\n def parse_files(self, req, name, field):\n \"\"\"Pull a file from the request.\"\"\"\n return core.get_value(req.files, name, field)\n\n def handle_error(self, error):\n \"\"\"Handles errors during parsing. Aborts the current HTTP request and\n responds with a 422 error.\n \"\"\"\n status_code = getattr(error, 'status_code', self.DEFAULT_VALIDATION_STATUS)\n abort(status_code, messages=error.messages, exc=error)\n\n def get_default_request(self):\n \"\"\"Override to use Flask's thread-local request objec by default\"\"\"\n return flask.request\n\nparser = FlaskParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "webargs/flaskparser.py"}]} | 1,972 | 146 |
gh_patches_debug_24134 | rasdani/github-patches | git_diff | meltano__meltano-6320 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`meltano select tap-gitlab tags "*"` fails on Windows
Example: https://gitlab.com/meltano/legacy-ci/meltano/-/jobs/2653845782
This occurs with Meltano built from `main`, i.e. `fb6ed89799eb341bea295350ea809e5049ee7ca6`.
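
For context, Click 8 glob-expands `argv` on Windows by default (`BaseCommand.main(..., windows_expand_args=True)`), and since the shell strips the quotes before Python sees the argument, even a quoted `"*"` would be expanded into matching file names. A hedged sketch of opting out — this mirrors one plausible fix, not necessarily the change the maintainers want:

```python
# Minimal sketch: keep a quoted "*" literal on Windows by disabling argv expansion.
import click

@click.command()
@click.argument("pattern")
def select(pattern):
    click.echo(pattern)  # run as e.g.: python demo.py "*"

if __name__ == "__main__":
    select.main(windows_expand_args=False)  # parameter available in Click 8+
```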
</issue>
<code>
[start of src/meltano/cli/cli.py]
1 import logging # noqa: D100
2 import sys
3 import warnings # noqa: F401
4
5 import click
6
7 import meltano
8 from meltano.core.behavior.versioned import IncompatibleVersionError
9 from meltano.core.logging import LEVELS, setup_logging
10 from meltano.core.project import Project, ProjectNotFound
11 from meltano.core.project_settings_service import ProjectSettingsService
12
13 logger = logging.getLogger(__name__)
14
15
16 @click.group(invoke_without_command=True, no_args_is_help=True)
17 @click.option("--log-level", type=click.Choice(LEVELS.keys()))
18 @click.option(
19 "--log-config", type=str, help="Path to a python logging yaml config file."
20 )
21 @click.option("-v", "--verbose", count=True, help="Not used.")
22 @click.option(
23 "--environment",
24 envvar="MELTANO_ENVIRONMENT",
25 help="Meltano environment name.",
26 )
27 @click.option(
28 "--no-environment", is_flag=True, default=False, help="Don't use any environment."
29 )
30 @click.version_option(version=meltano.__version__, prog_name="meltano")
31 @click.pass_context
32 def cli( # noqa: WPS231
33 ctx,
34 log_level: str,
35 log_config: str,
36 verbose: int,
37 environment: str,
38 no_environment: bool,
39 ): # noqa: WPS231
40 """
41 ELT for the DataOps era.
42
43 \b\nRead more at https://www.meltano.com/docs/command-line-interface.html
44 """
45 if log_level:
46 ProjectSettingsService.config_override["cli.log_level"] = log_level
47
48 if log_config:
49 ProjectSettingsService.config_override["cli.log_config"] = log_config
50
51 ctx.ensure_object(dict)
52 ctx.obj["verbosity"] = verbose
53 try: # noqa: WPS229
54 project = Project.find()
55 setup_logging(project)
56
57 readonly = ProjectSettingsService(project).get("project_readonly")
58 if readonly:
59 project.readonly = True
60 if project.readonly:
61 logger.debug("Project is read-only.")
62
63 # detect active environment
64 selected_environment = None
65 if no_environment or (environment and environment.lower() == "null"):
66 logger.info("No environment is active")
67 elif environment:
68 selected_environment = environment
69 elif project.meltano.default_environment:
70 selected_environment = project.meltano.default_environment
71 # activate environment
72 if selected_environment:
73 project.activate_environment(selected_environment)
74 logger.info(
75 "Environment '%s' is active", selected_environment # noqa: WPS323
76 )
77
78 ctx.obj["project"] = project
79 except ProjectNotFound:
80 ctx.obj["project"] = None
81 except IncompatibleVersionError:
82 click.secho(
83 "This Meltano project is incompatible with this version of `meltano`.",
84 fg="yellow",
85 )
86 click.echo(
87 "For more details, visit http://meltano.com/docs/installation.html#upgrading-meltano-version"
88 )
89 sys.exit(3)
90
[end of src/meltano/cli/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/meltano/cli/cli.py b/src/meltano/cli/cli.py
--- a/src/meltano/cli/cli.py
+++ b/src/meltano/cli/cli.py
@@ -1,6 +1,6 @@
import logging # noqa: D100
import sys
-import warnings # noqa: F401
+from typing import NoReturn
import click
@@ -13,7 +13,28 @@
logger = logging.getLogger(__name__)
[email protected](invoke_without_command=True, no_args_is_help=True)
+class NoWindowsGlobbingGroup(click.Group):
+ """A Click group that does not perform glob expansion on Windows.
+
+ This restores the behaviour of Click's globbing to how it was before v8.
+ Click (as of version 8.1.3) ignores quotes around an asterisk, which makes
+ it behave differently than most shells that support globbing, and make some
+ typical Meltano commands fail, e.g. `meltano select tap-gitlab tags "*"`.
+ """
+
+ def main(self, *args, **kwargs) -> NoReturn:
+ """Invoke the Click CLI with Windows globbing disabled.
+
+ Parameters:
+ args: Positional arguments for the Click group.
+ kwargs: Keyword arguments for the Click group.
+ """
+ return super().main(*args, windows_expand_args=False, **kwargs)
+
+
[email protected](
+ cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True
+)
@click.option("--log-level", type=click.Choice(LEVELS.keys()))
@click.option(
"--log-config", type=str, help="Path to a python logging yaml config file."
| {"golden_diff": "diff --git a/src/meltano/cli/cli.py b/src/meltano/cli/cli.py\n--- a/src/meltano/cli/cli.py\n+++ b/src/meltano/cli/cli.py\n@@ -1,6 +1,6 @@\n import logging # noqa: D100\n import sys\n-import warnings # noqa: F401\n+from typing import NoReturn\n \n import click\n \n@@ -13,7 +13,28 @@\n logger = logging.getLogger(__name__)\n \n \[email protected](invoke_without_command=True, no_args_is_help=True)\n+class NoWindowsGlobbingGroup(click.Group):\n+ \"\"\"A Click group that does not perform glob expansion on Windows.\n+\n+ This restores the behaviour of Click's globbing to how it was before v8.\n+ Click (as of version 8.1.3) ignores quotes around an asterisk, which makes\n+ it behave differently than most shells that support globbing, and make some\n+ typical Meltano commands fail, e.g. `meltano select tap-gitlab tags \"*\"`.\n+ \"\"\"\n+\n+ def main(self, *args, **kwargs) -> NoReturn:\n+ \"\"\"Invoke the Click CLI with Windows globbing disabled.\n+\n+ Parameters:\n+ args: Positional arguments for the Click group.\n+ kwargs: Keyword arguments for the Click group.\n+ \"\"\"\n+ return super().main(*args, windows_expand_args=False, **kwargs)\n+\n+\[email protected](\n+ cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True\n+)\n @click.option(\"--log-level\", type=click.Choice(LEVELS.keys()))\n @click.option(\n \"--log-config\", type=str, help=\"Path to a python logging yaml config file.\"\n", "issue": "`meltano select tap-gitlab tags \"*\"` fails on Windows\nExample: https://gitlab.com/meltano/legacy-ci/meltano/-/jobs/2653845782\r\n\r\nThis occurs with Meltano built from `main`, i.e. `fb6ed89799eb341bea295350ea809e5049ee7ca6`.\n", "before_files": [{"content": "import logging # noqa: D100\nimport sys\nimport warnings # noqa: F401\n\nimport click\n\nimport meltano\nfrom meltano.core.behavior.versioned import IncompatibleVersionError\nfrom meltano.core.logging import LEVELS, setup_logging\nfrom meltano.core.project import Project, ProjectNotFound\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](invoke_without_command=True, no_args_is_help=True)\[email protected](\"--log-level\", type=click.Choice(LEVELS.keys()))\[email protected](\n \"--log-config\", type=str, help=\"Path to a python logging yaml config file.\"\n)\[email protected](\"-v\", \"--verbose\", count=True, help=\"Not used.\")\[email protected](\n \"--environment\",\n envvar=\"MELTANO_ENVIRONMENT\",\n help=\"Meltano environment name.\",\n)\[email protected](\n \"--no-environment\", is_flag=True, default=False, help=\"Don't use any environment.\"\n)\[email protected]_option(version=meltano.__version__, prog_name=\"meltano\")\[email protected]_context\ndef cli( # noqa: WPS231\n ctx,\n log_level: str,\n log_config: str,\n verbose: int,\n environment: str,\n no_environment: bool,\n): # noqa: WPS231\n \"\"\"\n ELT for the DataOps era.\n\n \\b\\nRead more at https://www.meltano.com/docs/command-line-interface.html\n \"\"\"\n if log_level:\n ProjectSettingsService.config_override[\"cli.log_level\"] = log_level\n\n if log_config:\n ProjectSettingsService.config_override[\"cli.log_config\"] = log_config\n\n ctx.ensure_object(dict)\n ctx.obj[\"verbosity\"] = verbose\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n\n readonly = ProjectSettingsService(project).get(\"project_readonly\")\n if readonly:\n project.readonly = True\n if project.readonly:\n logger.debug(\"Project is read-only.\")\n\n # detect 
active environment\n selected_environment = None\n if no_environment or (environment and environment.lower() == \"null\"):\n logger.info(\"No environment is active\")\n elif environment:\n selected_environment = environment\n elif project.meltano.default_environment:\n selected_environment = project.meltano.default_environment\n # activate environment\n if selected_environment:\n project.activate_environment(selected_environment)\n logger.info(\n \"Environment '%s' is active\", selected_environment # noqa: WPS323\n )\n\n ctx.obj[\"project\"] = project\n except ProjectNotFound:\n ctx.obj[\"project\"] = None\n except IncompatibleVersionError:\n click.secho(\n \"This Meltano project is incompatible with this version of `meltano`.\",\n fg=\"yellow\",\n )\n click.echo(\n \"For more details, visit http://meltano.com/docs/installation.html#upgrading-meltano-version\"\n )\n sys.exit(3)\n", "path": "src/meltano/cli/cli.py"}]} | 1,471 | 385 |
gh_patches_debug_30452 | rasdani/github-patches | git_diff | mozilla__kitsune-4541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing ABC directly from collections has been deprecated and will be removed in Python 3.10
Use `collections` for Python 2 and `collections.abc` for Python 3.4+
https://github.com/mozilla/kitsune/blob/db42cf7841b75ff0d4b6d3b21ac2524636110429/kitsune/wiki/widgets.py#L34
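
For reference, the usual compatibility shim looks like this (a common pattern, not necessarily the exact change this codebase wants):

```python
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:
    from collections import Iterable  # Python 2 fallback; alias removed in Python 3.10

print(isinstance([1, 2, 3], Iterable))  # True under both import paths
```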
</issue>
<code>
[start of kitsune/wiki/widgets.py]
1 import collections
2
3 from django import forms
4 from django.template.loader import render_to_string
5
6 from kitsune.products.models import Topic
7 from kitsune.wiki.models import Document
8
9
10 class ProductTopicsAndSubtopicsWidget(forms.widgets.SelectMultiple):
11 """A widget to render topics organized by product and with subtopics."""
12
13 def render(self, name, value, attrs=None, renderer=None):
14 topics_and_subtopics = Topic.objects.all()
15 topics = [t for t in topics_and_subtopics if t.parent_id is None]
16
17 for topic in topics:
18 self.process_topic(value, topic)
19
20 topic.my_subtopics = [t for t in topics_and_subtopics if t.parent_id == topic.id]
21
22 for subtopic in topic.my_subtopics:
23 self.process_topic(value, subtopic)
24
25 return render_to_string(
26 "wiki/includes/product_topics_widget.html",
27 {
28 "topics": topics,
29 "name": name,
30 },
31 )
32
33 def process_topic(self, value, topic):
34 if isinstance(value, int) and topic.id == value:
35 topic.checked = True
36 elif (
37 not isinstance(value, str)
38 and isinstance(value, collections.Iterable)
39 and topic.id in value
40 ):
41 topic.checked = True
42 else:
43 topic.checked = False
44
45
46 class RelatedDocumentsWidget(forms.widgets.SelectMultiple):
47 """A widget to render the related documents list and search field."""
48
49 def render(self, name, value, attrs=None, renderer=None):
50 if isinstance(value, int):
51 related_documents = Document.objects.filter(id__in=[value])
52 elif not isinstance(value, str) and isinstance(value, collections.Iterable):
53 related_documents = Document.objects.filter(id__in=value)
54 else:
55 related_documents = Document.objects.none()
56
57 return render_to_string(
58 "wiki/includes/related_docs_widget.html",
59 {"related_documents": related_documents, "name": name},
60 )
61
[end of kitsune/wiki/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kitsune/wiki/widgets.py b/kitsune/wiki/widgets.py
--- a/kitsune/wiki/widgets.py
+++ b/kitsune/wiki/widgets.py
@@ -1,4 +1,3 @@
-import collections
from django import forms
from django.template.loader import render_to_string
@@ -6,6 +5,11 @@
from kitsune.products.models import Topic
from kitsune.wiki.models import Document
+try:
+ from collections.abc import Iterable
+except ImportError:
+ from collections import Iterable
+
class ProductTopicsAndSubtopicsWidget(forms.widgets.SelectMultiple):
"""A widget to render topics organized by product and with subtopics."""
@@ -35,7 +39,7 @@
topic.checked = True
elif (
not isinstance(value, str)
- and isinstance(value, collections.Iterable)
+ and isinstance(value, Iterable)
and topic.id in value
):
topic.checked = True
@@ -49,7 +53,7 @@
def render(self, name, value, attrs=None, renderer=None):
if isinstance(value, int):
related_documents = Document.objects.filter(id__in=[value])
- elif not isinstance(value, str) and isinstance(value, collections.Iterable):
+ elif not isinstance(value, str) and isinstance(value, Iterable):
related_documents = Document.objects.filter(id__in=value)
else:
related_documents = Document.objects.none()
| {"golden_diff": "diff --git a/kitsune/wiki/widgets.py b/kitsune/wiki/widgets.py\n--- a/kitsune/wiki/widgets.py\n+++ b/kitsune/wiki/widgets.py\n@@ -1,4 +1,3 @@\n-import collections\n \n from django import forms\n from django.template.loader import render_to_string\n@@ -6,6 +5,11 @@\n from kitsune.products.models import Topic\n from kitsune.wiki.models import Document\n \n+try:\n+ from collections.abc import Iterable\n+except ImportError:\n+ from collections import Iterable\n+\n \n class ProductTopicsAndSubtopicsWidget(forms.widgets.SelectMultiple):\n \"\"\"A widget to render topics organized by product and with subtopics.\"\"\"\n@@ -35,7 +39,7 @@\n topic.checked = True\n elif (\n not isinstance(value, str)\n- and isinstance(value, collections.Iterable)\n+ and isinstance(value, Iterable)\n and topic.id in value\n ):\n topic.checked = True\n@@ -49,7 +53,7 @@\n def render(self, name, value, attrs=None, renderer=None):\n if isinstance(value, int):\n related_documents = Document.objects.filter(id__in=[value])\n- elif not isinstance(value, str) and isinstance(value, collections.Iterable):\n+ elif not isinstance(value, str) and isinstance(value, Iterable):\n related_documents = Document.objects.filter(id__in=value)\n else:\n related_documents = Document.objects.none()\n", "issue": "Importing ABC directly from collections has been deprecated and will be removed in Python 3.10\nUse `collections` for Python 2 and `collections.abc` for Python 3.4+\r\n\r\nhttps://github.com/mozilla/kitsune/blob/db42cf7841b75ff0d4b6d3b21ac2524636110429/kitsune/wiki/widgets.py#L34\n", "before_files": [{"content": "import collections\n\nfrom django import forms\nfrom django.template.loader import render_to_string\n\nfrom kitsune.products.models import Topic\nfrom kitsune.wiki.models import Document\n\n\nclass ProductTopicsAndSubtopicsWidget(forms.widgets.SelectMultiple):\n \"\"\"A widget to render topics organized by product and with subtopics.\"\"\"\n\n def render(self, name, value, attrs=None, renderer=None):\n topics_and_subtopics = Topic.objects.all()\n topics = [t for t in topics_and_subtopics if t.parent_id is None]\n\n for topic in topics:\n self.process_topic(value, topic)\n\n topic.my_subtopics = [t for t in topics_and_subtopics if t.parent_id == topic.id]\n\n for subtopic in topic.my_subtopics:\n self.process_topic(value, subtopic)\n\n return render_to_string(\n \"wiki/includes/product_topics_widget.html\",\n {\n \"topics\": topics,\n \"name\": name,\n },\n )\n\n def process_topic(self, value, topic):\n if isinstance(value, int) and topic.id == value:\n topic.checked = True\n elif (\n not isinstance(value, str)\n and isinstance(value, collections.Iterable)\n and topic.id in value\n ):\n topic.checked = True\n else:\n topic.checked = False\n\n\nclass RelatedDocumentsWidget(forms.widgets.SelectMultiple):\n \"\"\"A widget to render the related documents list and search field.\"\"\"\n\n def render(self, name, value, attrs=None, renderer=None):\n if isinstance(value, int):\n related_documents = Document.objects.filter(id__in=[value])\n elif not isinstance(value, str) and isinstance(value, collections.Iterable):\n related_documents = Document.objects.filter(id__in=value)\n else:\n related_documents = Document.objects.none()\n\n return render_to_string(\n \"wiki/includes/related_docs_widget.html\",\n {\"related_documents\": related_documents, \"name\": name},\n )\n", "path": "kitsune/wiki/widgets.py"}]} | 1,157 | 307 |
gh_patches_debug_59577 | rasdani/github-patches | git_diff | sotetsuk__pgx-186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
With pgx v0.0.9, the `images` folder is not downloaded.
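
The symptom suggests the built distribution simply omits the image assets. For reference, a hedged sketch of the usual setuptools fix — the globs below are the standard mechanism, but whether they match this repo's layout is an assumption:

```python
# Hypothetical sketch: ship non-Python files (e.g. board-image SVGs) with the package.
from setuptools import find_packages, setup

setup(
    name="pgx",
    packages=find_packages(),
    package_data={"": ["*.svg"]},  # bundle SVG files found inside the packages
    include_package_data=True,     # also honour MANIFEST.in / tracked data files
)
```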
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 setup(
4 name="pgx",
5 version="0.0.9",
6 long_description_content_type="text/markdown",
7 description="",
8 url="",
9 author="Sotetsu KOYAMADA",
10 author_email="[email protected]",
11 keywords="",
12 packages=find_packages(),
13 install_requires=["jax", "flax", "svgwrite"],
14 classifiers=[
15 "Programming Language :: Python :: 3.8",
16 ],
17 )
18
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,6 +10,8 @@
author_email="[email protected]",
keywords="",
packages=find_packages(),
+ package_data={'': ['LICENSE', '*.svg']},
+ include_package_data=True,
install_requires=["jax", "flax", "svgwrite"],
classifiers=[
"Programming Language :: Python :: 3.8",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,6 +10,8 @@\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n+ package_data={'': ['LICENSE', '*.svg']},\n+ include_package_data=True,\n install_requires=[\"jax\", \"flax\", \"svgwrite\"],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n", "issue": "pgx v 0.0.9 \u3067images \u30d5\u30a9\u30eb\u30c0\u304c\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3055\u308c\u306a\u3044.\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nsetup(\n name=\"pgx\",\n version=\"0.0.9\",\n long_description_content_type=\"text/markdown\",\n description=\"\",\n url=\"\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n install_requires=[\"jax\", \"flax\", \"svgwrite\"],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]} | 693 | 105 |
gh_patches_debug_1857 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4676 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve Intro and Getting Started documentation
I think the [introduction](https://docs.readthedocs.io/en/latest/) and [getting started guide](https://docs.readthedocs.io/en/latest/getting_started.html) could use a few improvements to make it easier for brand new users who may not already know about Sphinx/MkDocs/Markdown/RestructuredText and are just looking for a guide on how to write some docs.
I also think our introduction could stand some improvements to point users in the right direction. We have a lot of docs, but a few layout and explanation improvements will help users find the right section for them.
Here are some specific goals and improvements:
- Make it easier to start a brand new docs project
* Have a getting started guide for Sphinx
* Have a getting started guide for Sphinx with commonmark
* Have a getting started guide for MkDocs
* Explain the why between the above technologies
- Improve the intro paragraphs ("Read the Docs hosts documentation for...") on the index page to explain RTD's value proposition and why somebody should choose Read the Docs.
- Full sentence/paragraph descriptions on different sections (eg. User documentation) rather than just a big toctree.
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2
3 from __future__ import division, print_function, unicode_literals
4
5 import os
6 import sys
7
8 import sphinx_rtd_theme
9 from recommonmark.parser import CommonMarkParser
10
11 sys.path.insert(0, os.path.abspath('..'))
12 sys.path.append(os.path.dirname(__file__))
13 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
14
15 from django.conf import settings
16 from django.utils import timezone
17
18 import django
19 django.setup()
20
21
22 sys.path.append(os.path.abspath('_ext'))
23 extensions = [
24 'sphinx.ext.autosectionlabel',
25 'sphinx.ext.autodoc',
26 'sphinx.ext.intersphinx',
27 'sphinxcontrib.httpdomain',
28 'djangodocs',
29 'doc_extensions',
30 ]
31 templates_path = ['_templates']
32
33 source_suffix = ['.rst', '.md']
34 source_parsers = {
35 '.md': CommonMarkParser,
36 }
37
38 master_doc = 'index'
39 project = u'Read the Docs'
40 copyright = '2010-{}, Read the Docs, Inc & contributors'.format(
41 timezone.now().year
42 )
43 version = '1.0'
44 release = '1.0'
45 exclude_patterns = ['_build']
46 default_role = 'obj'
47 intersphinx_mapping = {
48 'python': ('http://python.readthedocs.io/en/latest/', None),
49 'django': ('http://django.readthedocs.io/en/1.9.x/', None),
50 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),
51 }
52 htmlhelp_basename = 'ReadTheDocsdoc'
53 latex_documents = [
54 ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',
55 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
56 ]
57 man_pages = [
58 ('index', 'read-the-docs', u'Read the Docs Documentation',
59 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
60 ]
61
62 exclude_patterns = [
63 # 'api' # needed for ``make gettext`` to not die.
64 ]
65
66 language = 'en'
67
68 locale_dirs = [
69 'locale/',
70 ]
71 gettext_compact = False
72
73 html_theme = 'sphinx_rtd_theme'
74 # html_static_path = ['_static']
75 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
76 html_logo = 'img/logo.svg'
77 html_theme_options = {
78 'logo_only': True,
79 'display_version': False,
80 }
81
82 # Activate autosectionlabel plugin
83 autosectionlabel_prefix_document = True
84
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -71,7 +71,7 @@
gettext_compact = False
html_theme = 'sphinx_rtd_theme'
-# html_static_path = ['_static']
+html_static_path = ['_static']
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = 'img/logo.svg'
html_theme_options = {
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -71,7 +71,7 @@\n gettext_compact = False\n \n html_theme = 'sphinx_rtd_theme'\n-# html_static_path = ['_static']\n+html_static_path = ['_static']\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n html_logo = 'img/logo.svg'\n html_theme_options = {\n", "issue": "Improve Intro and Getting Started documentation\nI think the [introduction](https://docs.readthedocs.io/en/latest/) and [getting started guide](https://docs.readthedocs.io/en/latest/getting_started.html) could use a few improvements to make it easier for brand new users who may not already know about Sphinx/MkDocs/Markdown/RestructuredText and are just looking for a guide on how to write some docs.\r\n\r\nI also think our introduction could stand some improvements to point users in the right direction. We have a lot of docs, but a few layout and explanation improvements will help users find the right section for them.\r\n\r\nHere are some specific goals and improvements:\r\n\r\n- Make it easier to start a brand new docs project\r\n * Have a getting started guide for Sphinx\r\n * Have a getting started guide for Sphinx with commonmark\r\n * Have a getting started guide for MkDocs\r\n * Explain the why between the above technologies\r\n- Improve the intro paragraphs (\"Read the Docs hosts documentation for...\") on the index page to explain RTD's value proposition and why somebody should choose Read the Docs.\r\n- Full sentence/paragraph descriptions on different sections (eg. User documentation) rather than just a big toctree.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.9.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read the Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\n# html_static_path = ['_static']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 
'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n", "path": "docs/conf.py"}]} | 1,489 | 100 |
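The readthedocs diff in the row above simply re-enables `html_static_path` in a Sphinx `conf.py`. For context, a minimal sketch of what that setting is for; the `custom.css` filename is an assumption for illustration, and `add_css_file` assumes Sphinx 1.8 or newer:

# conf.py: files under docs/_static/ are copied next to the built HTML.
html_static_path = ["_static"]

def setup(app):
    # Load a hypothetical override stylesheet from _static/custom.css.
    app.add_css_file("custom.css")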
gh_patches_debug_21685 | rasdani/github-patches | git_diff | getsentry__sentry-1683 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing 'org_results' data in production but has some in staging
In file 'web/frontend/help_platform_base.py', method 'group_project_list' the org_results appears to be blank in our production environment.
This manifests when we try to load the 'setup & installation' page
<img width="734" alt="prod" src="https://cloud.githubusercontent.com/assets/4432296/9042832/2c773b94-39c6-11e5-9cc8-fa49f0a6f3ef.png">
However, the same setup is in staging and appears to work, for certain random projects:
<img width="737" alt="staging" src="https://cloud.githubusercontent.com/assets/4432296/9042839/37822d32-39c6-11e5-8c65-ddf6d700de75.png">
In staging I ran 'update sentry_organizationmember_teams set is_active = 1;' and the projects all showed up in the dropdown.
We _did_ add a custom SSO implementation, so perhaps we missed a step on account creation?
Since there is a "client keys" page, this isn't a show stopper but I'm worried that whatever is wrong could manifest in other ways that we haven't found yet.
</issue>
<code>
[start of src/sentry/web/frontend/help_platform_base.py]
1 from __future__ import absolute_import
2
3 from django.db.models import Q
4 from itertools import groupby
5
6 from sentry.models import Project, ProjectKey
7 from sentry.web.frontend.base import BaseView
8
9
10 class HelpPlatformBaseView(BaseView):
11 auth_required = False
12
13 def get_project_list(self, user):
14 return list(Project.objects.filter(
15 Q(organization__member_set__has_global_access=True, organization__member_set__user=user)
16 | Q(team__organizationmember__user=user)
17 ).exclude(
18 team__organizationmemberteam__is_active=False,
19 ).select_related('team', 'organization').order_by('organization', 'team').distinct())
20
21 def group_project_list(self, project_list):
22 results = []
23 for org, org_project_list in groupby(project_list, key=lambda x: x.organization):
24 org_results = []
25 for team, team_project_list in groupby(list(org_project_list), key=lambda x: x.team):
26 org_results.append((team, list(team_project_list)))
27 results.append((org, org_results))
28 return results
29
30 def get_key(self, project):
31 try:
32 key = ProjectKey.objects.filter(project=project)[0]
33 except IndexError:
34 key = None
35 return key
36
37 def convert_args(self, request, *args, **kwargs):
38 try:
39 pid = int(request.GET.get('pid', request.session.get('pid', 0)))
40 except (TypeError, ValueError):
41 pid = None
42
43 if request.user.is_authenticated():
44 project_list = self.get_project_list(request.user)
45 else:
46 project_list = []
47
48 if pid:
49 for project in project_list:
50 if pid == project.id:
51 selected_project = project
52 break
53 else:
54 selected_project = None
55 else:
56 selected_project = None
57
58 if selected_project:
59 request.session['pid'] = selected_project.id
60
61 kwargs['project_list'] = project_list
62 kwargs['selected_project'] = selected_project
63
64 return (args, kwargs)
65
66 def get_context_data(self, request, project_list, selected_project, **kwargs):
67 context = super(HelpPlatformBaseView, self).get_context_data(request, **kwargs)
68
69 if selected_project:
70 key = self.get_key(selected_project)
71 else:
72 key = None
73
74 if key:
75 dsn_private = key.dsn_private
76 dsn_public = key.dsn_public
77 else:
78 dsn_private = None
79 dsn_public = None
80
81 context = {
82 'selected_project': selected_project,
83 'org_results': self.group_project_list(project_list),
84 'dsn': dsn_private,
85 'dsn_public': dsn_public,
86
87 }
88 return context
89
[end of src/sentry/web/frontend/help_platform_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/web/frontend/help_platform_base.py b/src/sentry/web/frontend/help_platform_base.py
--- a/src/sentry/web/frontend/help_platform_base.py
+++ b/src/sentry/web/frontend/help_platform_base.py
@@ -3,7 +3,7 @@
from django.db.models import Q
from itertools import groupby
-from sentry.models import Project, ProjectKey
+from sentry.models import OrganizationMemberTeam, Project, ProjectKey
from sentry.web.frontend.base import BaseView
@@ -15,8 +15,11 @@
Q(organization__member_set__has_global_access=True, organization__member_set__user=user)
| Q(team__organizationmember__user=user)
).exclude(
- team__organizationmemberteam__is_active=False,
- ).select_related('team', 'organization').order_by('organization', 'team').distinct())
+ team__id__in=OrganizationMemberTeam.objects.filter(
+ organizationmember__user=user,
+ is_active=False
+ ).values('team_id')
+ ).select_related('team', 'organization').order_by('organization__name', 'team__name', 'name').distinct())
def group_project_list(self, project_list):
results = []
| {"golden_diff": "diff --git a/src/sentry/web/frontend/help_platform_base.py b/src/sentry/web/frontend/help_platform_base.py\n--- a/src/sentry/web/frontend/help_platform_base.py\n+++ b/src/sentry/web/frontend/help_platform_base.py\n@@ -3,7 +3,7 @@\n from django.db.models import Q\n from itertools import groupby\n \n-from sentry.models import Project, ProjectKey\n+from sentry.models import OrganizationMemberTeam, Project, ProjectKey\n from sentry.web.frontend.base import BaseView\n \n \n@@ -15,8 +15,11 @@\n Q(organization__member_set__has_global_access=True, organization__member_set__user=user)\n | Q(team__organizationmember__user=user)\n ).exclude(\n- team__organizationmemberteam__is_active=False,\n- ).select_related('team', 'organization').order_by('organization', 'team').distinct())\n+ team__id__in=OrganizationMemberTeam.objects.filter(\n+ organizationmember__user=user,\n+ is_active=False\n+ ).values('team_id')\n+ ).select_related('team', 'organization').order_by('organization__name', 'team__name', 'name').distinct())\n \n def group_project_list(self, project_list):\n results = []\n", "issue": "Missing 'org_results' data in production but has some in staging\nIn file 'web/frontend/help_platform_base.py', method 'group_project_list' the org_results appears to be blank in our production environment.\n\nThis manifests when we try to load the 'setup & installation' page\n<img width=\"734\" alt=\"prod\" src=\"https://cloud.githubusercontent.com/assets/4432296/9042832/2c773b94-39c6-11e5-9cc8-fa49f0a6f3ef.png\">\n\nHowever, the same setup is in staging and appears to work, for certain random projects:\n<img width=\"737\" alt=\"staging\" src=\"https://cloud.githubusercontent.com/assets/4432296/9042839/37822d32-39c6-11e5-8c65-ddf6d700de75.png\">\n\nIn staging I ran 'update sentry_organizationmember_teams set is_active = 1;' and the projects all showed up in the dropdown. 
\n\nWe _did_ add a custom SSO implementation, so perhaps we missed a step on account creation?\n\nSince there is a \"client keys\" page, this isn't a show stopper but I'm worried that whatever is wrong could manifest in other ways that we haven't found yet.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom django.db.models import Q\nfrom itertools import groupby\n\nfrom sentry.models import Project, ProjectKey\nfrom sentry.web.frontend.base import BaseView\n\n\nclass HelpPlatformBaseView(BaseView):\n auth_required = False\n\n def get_project_list(self, user):\n return list(Project.objects.filter(\n Q(organization__member_set__has_global_access=True, organization__member_set__user=user)\n | Q(team__organizationmember__user=user)\n ).exclude(\n team__organizationmemberteam__is_active=False,\n ).select_related('team', 'organization').order_by('organization', 'team').distinct())\n\n def group_project_list(self, project_list):\n results = []\n for org, org_project_list in groupby(project_list, key=lambda x: x.organization):\n org_results = []\n for team, team_project_list in groupby(list(org_project_list), key=lambda x: x.team):\n org_results.append((team, list(team_project_list)))\n results.append((org, org_results))\n return results\n\n def get_key(self, project):\n try:\n key = ProjectKey.objects.filter(project=project)[0]\n except IndexError:\n key = None\n return key\n\n def convert_args(self, request, *args, **kwargs):\n try:\n pid = int(request.GET.get('pid', request.session.get('pid', 0)))\n except (TypeError, ValueError):\n pid = None\n\n if request.user.is_authenticated():\n project_list = self.get_project_list(request.user)\n else:\n project_list = []\n\n if pid:\n for project in project_list:\n if pid == project.id:\n selected_project = project\n break\n else:\n selected_project = None\n else:\n selected_project = None\n\n if selected_project:\n request.session['pid'] = selected_project.id\n\n kwargs['project_list'] = project_list\n kwargs['selected_project'] = selected_project\n\n return (args, kwargs)\n\n def get_context_data(self, request, project_list, selected_project, **kwargs):\n context = super(HelpPlatformBaseView, self).get_context_data(request, **kwargs)\n\n if selected_project:\n key = self.get_key(selected_project)\n else:\n key = None\n\n if key:\n dsn_private = key.dsn_private\n dsn_public = key.dsn_public\n else:\n dsn_private = None\n dsn_public = None\n\n context = {\n 'selected_project': selected_project,\n 'org_results': self.group_project_list(project_list),\n 'dsn': dsn_private,\n 'dsn_public': dsn_public,\n\n }\n return context\n", "path": "src/sentry/web/frontend/help_platform_base.py"}]} | 1,610 | 272 |
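The core of the sentry fix above: `.exclude(team__organizationmemberteam__is_active=False)` joins across *all* members' team links, so one inactive membership belonging to any user can hide a project for everyone. The corrected query scopes the exclusion to the requesting user via a subquery, and it also orders by names so that `itertools.groupby` in `group_project_list` sees stable, sorted keys. A sketch of that shape, assuming the models named in the diff:

from django.db.models import Q

from sentry.models import OrganizationMemberTeam, Project


def get_visible_projects(user):
    # Team ids where *this user's* membership was deactivated.
    inactive_team_ids = OrganizationMemberTeam.objects.filter(
        organizationmember__user=user,
        is_active=False,
    ).values('team_id')
    return (
        Project.objects.filter(
            Q(organization__member_set__has_global_access=True,
              organization__member_set__user=user)
            | Q(team__organizationmember__user=user)
        )
        .exclude(team__id__in=inactive_team_ids)
        .select_related('team', 'organization')
        # Name-based ordering keeps groupby(org) / groupby(team) coherent.
        .order_by('organization__name', 'team__name', 'name')
        .distinct()
    )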
gh_patches_debug_30138 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1988 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Most Beloved Book on Instance has zero local reviews
**Describe the bug**
On the About this Instance page there's a cool section that shows various books, like Most Beloved, Most Contentious, Most Wanted to Read. The Most Beloved category, however, appears to be driven by federated statistics and not local statistics. A book with no local ratings is listed as the most Beloved Book because *other* instances have rated it highly.
**To Reproduce**
Steps to reproduce the behavior:
1. Don't have a lot of ratings on your instance.
2. Import a book that is highly rated from another (in this case 3 reviews, all 5 stars)
3. Go to instancedomain/about and view the Most Beloved book
I'm really only guessing on these steps to reproduce, it's just something I stumbled across.
**Expected behavior**
I expect the most beloved book to be beloved by my instance, not other instances. Federated statistics would also be cool, but it doesn't seem like this section is meant to represent the network.
**Instance**
https://tankie.ml/
---
**Smartphone (please complete the following information):**
- Device: Samsung Galaxy A52 5G
- OS: Android 12
- Browser: Firefox
- Version: 97.2.0
</issue>
<code>
[start of bookwyrm/templatetags/landing_page_tags.py]
1 """ template filters """
2 from django import template
3 from django.db.models import Avg, StdDev, Count, F, Q
4
5 from bookwyrm import models
6
7 register = template.Library()
8
9
10 @register.simple_tag(takes_context=False)
11 def get_book_superlatives():
12 """get book stats for the about page"""
13 total_ratings = models.Review.objects.filter(local=True, deleted=False).count()
14 data = {}
15 data["top_rated"] = (
16 models.Work.objects.annotate(
17 rating=Avg(
18 "editions__review__rating",
19 filter=Q(editions__review__local=True, editions__review__deleted=False),
20 ),
21 rating_count=Count(
22 "editions__review",
23 filter=Q(editions__review__local=True, editions__review__deleted=False),
24 ),
25 )
26 .annotate(weighted=F("rating") * F("rating_count") / total_ratings)
27 .filter(rating__gt=4, weighted__gt=0)
28 .order_by("-weighted")
29 .first()
30 )
31
32 data["controversial"] = (
33 models.Work.objects.annotate(
34 deviation=StdDev(
35 "editions__review__rating",
36 filter=Q(editions__review__local=True, editions__review__deleted=False),
37 ),
38 rating_count=Count(
39 "editions__review",
40 filter=Q(editions__review__local=True, editions__review__deleted=False),
41 ),
42 )
43 .annotate(weighted=F("deviation") * F("rating_count") / total_ratings)
44 .filter(weighted__gt=0)
45 .order_by("-weighted")
46 .first()
47 )
48
49 data["wanted"] = (
50 models.Work.objects.annotate(
51 shelf_count=Count(
52 "editions__shelves", filter=Q(editions__shelves__identifier="to-read")
53 )
54 )
55 .order_by("-shelf_count")
56 .first()
57 )
58 return data
59
60
61 @register.simple_tag(takes_context=False)
62 def get_landing_books():
63 """list of books for the landing page"""
64 return list(
65 set(
66 models.Edition.objects.filter(
67 review__published_date__isnull=False,
68 review__deleted=False,
69 review__user__local=True,
70 review__privacy__in=["public", "unlisted"],
71 )
72 .exclude(cover__exact="")
73 .distinct()
74 .order_by("-review__published_date")[:6]
75 )
76 )
77
[end of bookwyrm/templatetags/landing_page_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/templatetags/landing_page_tags.py b/bookwyrm/templatetags/landing_page_tags.py
--- a/bookwyrm/templatetags/landing_page_tags.py
+++ b/bookwyrm/templatetags/landing_page_tags.py
@@ -16,11 +16,15 @@
models.Work.objects.annotate(
rating=Avg(
"editions__review__rating",
- filter=Q(editions__review__local=True, editions__review__deleted=False),
+ filter=Q(
+ editions__review__user__local=True, editions__review__deleted=False
+ ),
),
rating_count=Count(
"editions__review",
- filter=Q(editions__review__local=True, editions__review__deleted=False),
+ filter=Q(
+ editions__review__user__local=True, editions__review__deleted=False
+ ),
),
)
.annotate(weighted=F("rating") * F("rating_count") / total_ratings)
@@ -33,11 +37,15 @@
models.Work.objects.annotate(
deviation=StdDev(
"editions__review__rating",
- filter=Q(editions__review__local=True, editions__review__deleted=False),
+ filter=Q(
+ editions__review__user__local=True, editions__review__deleted=False
+ ),
),
rating_count=Count(
"editions__review",
- filter=Q(editions__review__local=True, editions__review__deleted=False),
+ filter=Q(
+ editions__review__user__local=True, editions__review__deleted=False
+ ),
),
)
.annotate(weighted=F("deviation") * F("rating_count") / total_ratings)
| {"golden_diff": "diff --git a/bookwyrm/templatetags/landing_page_tags.py b/bookwyrm/templatetags/landing_page_tags.py\n--- a/bookwyrm/templatetags/landing_page_tags.py\n+++ b/bookwyrm/templatetags/landing_page_tags.py\n@@ -16,11 +16,15 @@\n models.Work.objects.annotate(\n rating=Avg(\n \"editions__review__rating\",\n- filter=Q(editions__review__local=True, editions__review__deleted=False),\n+ filter=Q(\n+ editions__review__user__local=True, editions__review__deleted=False\n+ ),\n ),\n rating_count=Count(\n \"editions__review\",\n- filter=Q(editions__review__local=True, editions__review__deleted=False),\n+ filter=Q(\n+ editions__review__user__local=True, editions__review__deleted=False\n+ ),\n ),\n )\n .annotate(weighted=F(\"rating\") * F(\"rating_count\") / total_ratings)\n@@ -33,11 +37,15 @@\n models.Work.objects.annotate(\n deviation=StdDev(\n \"editions__review__rating\",\n- filter=Q(editions__review__local=True, editions__review__deleted=False),\n+ filter=Q(\n+ editions__review__user__local=True, editions__review__deleted=False\n+ ),\n ),\n rating_count=Count(\n \"editions__review\",\n- filter=Q(editions__review__local=True, editions__review__deleted=False),\n+ filter=Q(\n+ editions__review__user__local=True, editions__review__deleted=False\n+ ),\n ),\n )\n .annotate(weighted=F(\"deviation\") * F(\"rating_count\") / total_ratings)\n", "issue": "Most Beloved Book on Instance has zero local reviews\n**Describe the bug**\nOn the About this Instance page there's a cool section that shows various books, like Most Beloved, Most Contentious, Most Wanted to Read. The Most Beloved category, however, appears to be driven by federated statistics and not local statistics. A book with no local ratings is listed as the most Beloved Book because *other* instances have rated it highly. \n\n**To Reproduce**\nSteps to reproduce the behavior:\n1. Don't have a lot of ratings on your instance.\n2. Import a book that is highly rated from another (in this case 3 reviews, all 5 stars) \n3. Go to instancedomain/about and view the Most Beloved book\n\nI'm really only guessing on these steps to reproduce, it's just something I stumbled across.\n\n**Expected behavior**\nI expect the most beloved book to be beloved by my instance, not other instances. Federated statistics would also be cool, but it doesn't seem like this section is meant to represent the network. 
\n\n**Instance**\nhttps://tankie.ml/\n\n---\n\n**Smartphone (please complete the following information):**\n - Device: Samsung Galaxy A52 5G\n - OS: Android 12\n - Browser: Firefox\n - Version: 97.2.0\n \n", "before_files": [{"content": "\"\"\" template filters \"\"\"\nfrom django import template\nfrom django.db.models import Avg, StdDev, Count, F, Q\n\nfrom bookwyrm import models\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=False)\ndef get_book_superlatives():\n \"\"\"get book stats for the about page\"\"\"\n total_ratings = models.Review.objects.filter(local=True, deleted=False).count()\n data = {}\n data[\"top_rated\"] = (\n models.Work.objects.annotate(\n rating=Avg(\n \"editions__review__rating\",\n filter=Q(editions__review__local=True, editions__review__deleted=False),\n ),\n rating_count=Count(\n \"editions__review\",\n filter=Q(editions__review__local=True, editions__review__deleted=False),\n ),\n )\n .annotate(weighted=F(\"rating\") * F(\"rating_count\") / total_ratings)\n .filter(rating__gt=4, weighted__gt=0)\n .order_by(\"-weighted\")\n .first()\n )\n\n data[\"controversial\"] = (\n models.Work.objects.annotate(\n deviation=StdDev(\n \"editions__review__rating\",\n filter=Q(editions__review__local=True, editions__review__deleted=False),\n ),\n rating_count=Count(\n \"editions__review\",\n filter=Q(editions__review__local=True, editions__review__deleted=False),\n ),\n )\n .annotate(weighted=F(\"deviation\") * F(\"rating_count\") / total_ratings)\n .filter(weighted__gt=0)\n .order_by(\"-weighted\")\n .first()\n )\n\n data[\"wanted\"] = (\n models.Work.objects.annotate(\n shelf_count=Count(\n \"editions__shelves\", filter=Q(editions__shelves__identifier=\"to-read\")\n )\n )\n .order_by(\"-shelf_count\")\n .first()\n )\n return data\n\n\[email protected]_tag(takes_context=False)\ndef get_landing_books():\n \"\"\"list of books for the landing page\"\"\"\n return list(\n set(\n models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__deleted=False,\n review__user__local=True,\n review__privacy__in=[\"public\", \"unlisted\"],\n )\n .exclude(cover__exact=\"\")\n .distinct()\n .order_by(\"-review__published_date\")[:6]\n )\n )\n", "path": "bookwyrm/templatetags/landing_page_tags.py"}]} | 1,505 | 399 |
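The bookwyrm fix above swaps `editions__review__local` for `editions__review__user__local`, i.e. "beloved" is computed from reviews whose *author* belongs to this instance. A trimmed sketch of the corrected annotation, with the repeated filter factored into one `Q` object; `models` and `total_ratings` are assumed to be as in the original template tag:

from django.db.models import Avg, Count, F, Q

local_review = Q(
    editions__review__user__local=True,  # review author is on this instance
    editions__review__deleted=False,
)
top_rated = (
    models.Work.objects.annotate(
        rating=Avg("editions__review__rating", filter=local_review),
        rating_count=Count("editions__review", filter=local_review),
    )
    .annotate(weighted=F("rating") * F("rating_count") / total_ratings)
    .filter(rating__gt=4, weighted__gt=0)
    .order_by("-weighted")
    .first()
)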
gh_patches_debug_11215 | rasdani/github-patches | git_diff | bentoml__BentoML-992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add console log to prediction
**Is your feature request related to a problem? Please describe.**
When running bentoml in a docker container, request logs are not printed to standard out.
**Describe the solution you'd like**
In a [slack discussion](https://bentoml.slack.com/archives/CKRANBHPH/p1597270160005700), the consensus is to add the console log to prediction by default. One potential solution is to add `console` to the prediction's [handler list](https://github.com/bentoml/BentoML/blob/master/bentoml/utils/log.py#L90).
**Describe alternatives you've considered**
It was suggested to us to use the existing log files from the workers. But, this introduces complication where we have to mount the log folder in container to the host so that datadog agent may have access to the log and forward it to datadog. Also, we will need to take care of log rotation so that it doesn't grow big. If we have many bentoml services, we have to do this for each of them.
**Additional context**
The standard out logging is important for our application monitoring such as Datadog. The standard out logging allows the datadog agent to pick up the log messages and forward them to datadog.
</issue>
<code>
[start of bentoml/utils/log.py]
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import sys
17 import logging.config
18 from pathlib import Path
19
20 from bentoml import config
21 from bentoml.configuration import get_debug_mode
22
23
24 def get_logging_config_dict(logging_level, base_log_directory):
25 conf = config("logging") # proxy to logging section in bentoml config file
26
27 LOG_FORMAT = conf.get("LOG_FORMAT")
28 DEV_LOG_FORMAT = conf.get("DEV_LOG_FORMAT")
29
30 PREDICTION_LOG_FILENAME = conf.get("prediction_log_filename")
31 PREDICTION_LOG_JSON_FORMAT = conf.get("prediction_log_json_format")
32
33 FEEDBACK_LOG_FILENAME = conf.get("feedback_log_filename")
34 FEEDBACK_LOG_JSON_FORMAT = conf.get("feedback_log_json_format")
35
36 return {
37 "version": 1,
38 "disable_existing_loggers": False,
39 "formatters": {
40 "console": {"format": LOG_FORMAT},
41 "dev": {"format": DEV_LOG_FORMAT},
42 "prediction": {
43 "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
44 "fmt": PREDICTION_LOG_JSON_FORMAT,
45 },
46 "feedback": {
47 "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
48 "fmt": FEEDBACK_LOG_JSON_FORMAT,
49 },
50 },
51 "handlers": {
52 "console": {
53 "level": logging_level,
54 "formatter": "console",
55 "class": "logging.StreamHandler",
56 "stream": sys.stdout,
57 },
58 "local": {
59 "level": logging_level,
60 "formatter": "dev",
61 "class": "logging.handlers.RotatingFileHandler",
62 "filename": os.path.join(base_log_directory, "active.log"),
63 "maxBytes": 100 * 1000 * 1000,
64 "backupCount": 2,
65 },
66 "prediction": {
67 "class": "logging.handlers.RotatingFileHandler",
68 "formatter": "prediction",
69 "level": "INFO",
70 "filename": os.path.join(base_log_directory, PREDICTION_LOG_FILENAME),
71 "maxBytes": 100 * 1000 * 1000,
72 "backupCount": 10,
73 },
74 "feedback": {
75 "class": "logging.handlers.RotatingFileHandler",
76 "formatter": "feedback",
77 "level": "INFO",
78 "filename": os.path.join(base_log_directory, FEEDBACK_LOG_FILENAME),
79 "maxBytes": 100 * 1000 * 1000,
80 "backupCount": 10,
81 },
82 },
83 "loggers": {
84 "bentoml": {
85 "handlers": ["console", "local"],
86 "level": logging_level,
87 "propagate": False,
88 },
89 "bentoml.prediction": {
90 "handlers": ["prediction"],
91 "level": "INFO",
92 "propagate": False,
93 },
94 "bentoml.feedback": {
95 "handlers": ["feedback"],
96 "level": "INFO",
97 "propagate": False,
98 },
99 },
100 }
101
102
103 def configure_logging(logging_level=None):
104 if logging_level is None:
105 logging_level = config("logging").get("LOGGING_LEVEL").upper()
106 if get_debug_mode():
107 logging_level = logging.getLevelName(logging.DEBUG)
108
109 base_log_dir = os.path.expanduser(config("logging").get("BASE_LOG_DIR"))
110 Path(base_log_dir).mkdir(parents=True, exist_ok=True)
111 logging_config = get_logging_config_dict(logging_level, base_log_dir)
112 logging.config.dictConfig(logging_config)
113
[end of bentoml/utils/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bentoml/utils/log.py b/bentoml/utils/log.py
--- a/bentoml/utils/log.py
+++ b/bentoml/utils/log.py
@@ -87,12 +87,12 @@
"propagate": False,
},
"bentoml.prediction": {
- "handlers": ["prediction"],
+ "handlers": ["prediction", "console"],
"level": "INFO",
"propagate": False,
},
"bentoml.feedback": {
- "handlers": ["feedback"],
+ "handlers": ["feedback", "console"],
"level": "INFO",
"propagate": False,
},
| {"golden_diff": "diff --git a/bentoml/utils/log.py b/bentoml/utils/log.py\n--- a/bentoml/utils/log.py\n+++ b/bentoml/utils/log.py\n@@ -87,12 +87,12 @@\n \"propagate\": False,\n },\n \"bentoml.prediction\": {\n- \"handlers\": [\"prediction\"],\n+ \"handlers\": [\"prediction\", \"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n \"bentoml.feedback\": {\n- \"handlers\": [\"feedback\"],\n+ \"handlers\": [\"feedback\", \"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n", "issue": "Add console log to prediction\n**Is your feature request related to a problem? Please describe.**\r\nWhen running bentoml in a docker container, request logs are not printed to standard out.\r\n\r\n**Describe the solution you'd like**\r\nIn a [slack discussion](https://bentoml.slack.com/archives/CKRANBHPH/p1597270160005700), the consensus is to add the console log to prediction by default. One potential solution is to add `console` to the prediction's [handler list](https://github.com/bentoml/BentoML/blob/master/bentoml/utils/log.py#L90).\r\n\r\n**Describe alternatives you've considered**\r\nIt was suggested to us to use the existing log files from the workers. But, this introduces complication where we have to mount the log folder in container to the host so that datadog agent may have access to the log and forward it to datadog. Also, we will need to take care of log rotation so that it doesn't grow big. If we have many bentoml services, we have to do this for each of them. \r\n\r\n**Additional context**\r\nThe standard out logging is important for our application monitoring such as Datadog. The standard out logging allows the datadog agent to pick up the log messages and forward them to datadog.\r\n\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport logging.config\nfrom pathlib import Path\n\nfrom bentoml import config\nfrom bentoml.configuration import get_debug_mode\n\n\ndef get_logging_config_dict(logging_level, base_log_directory):\n conf = config(\"logging\") # proxy to logging section in bentoml config file\n\n LOG_FORMAT = conf.get(\"LOG_FORMAT\")\n DEV_LOG_FORMAT = conf.get(\"DEV_LOG_FORMAT\")\n\n PREDICTION_LOG_FILENAME = conf.get(\"prediction_log_filename\")\n PREDICTION_LOG_JSON_FORMAT = conf.get(\"prediction_log_json_format\")\n\n FEEDBACK_LOG_FILENAME = conf.get(\"feedback_log_filename\")\n FEEDBACK_LOG_JSON_FORMAT = conf.get(\"feedback_log_json_format\")\n\n return {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"console\": {\"format\": LOG_FORMAT},\n \"dev\": {\"format\": DEV_LOG_FORMAT},\n \"prediction\": {\n \"()\": \"pythonjsonlogger.jsonlogger.JsonFormatter\",\n \"fmt\": PREDICTION_LOG_JSON_FORMAT,\n },\n \"feedback\": {\n \"()\": \"pythonjsonlogger.jsonlogger.JsonFormatter\",\n \"fmt\": FEEDBACK_LOG_JSON_FORMAT,\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": logging_level,\n \"formatter\": \"console\",\n \"class\": 
\"logging.StreamHandler\",\n \"stream\": sys.stdout,\n },\n \"local\": {\n \"level\": logging_level,\n \"formatter\": \"dev\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": os.path.join(base_log_directory, \"active.log\"),\n \"maxBytes\": 100 * 1000 * 1000,\n \"backupCount\": 2,\n },\n \"prediction\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"prediction\",\n \"level\": \"INFO\",\n \"filename\": os.path.join(base_log_directory, PREDICTION_LOG_FILENAME),\n \"maxBytes\": 100 * 1000 * 1000,\n \"backupCount\": 10,\n },\n \"feedback\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"feedback\",\n \"level\": \"INFO\",\n \"filename\": os.path.join(base_log_directory, FEEDBACK_LOG_FILENAME),\n \"maxBytes\": 100 * 1000 * 1000,\n \"backupCount\": 10,\n },\n },\n \"loggers\": {\n \"bentoml\": {\n \"handlers\": [\"console\", \"local\"],\n \"level\": logging_level,\n \"propagate\": False,\n },\n \"bentoml.prediction\": {\n \"handlers\": [\"prediction\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n \"bentoml.feedback\": {\n \"handlers\": [\"feedback\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n }\n\n\ndef configure_logging(logging_level=None):\n if logging_level is None:\n logging_level = config(\"logging\").get(\"LOGGING_LEVEL\").upper()\n if get_debug_mode():\n logging_level = logging.getLevelName(logging.DEBUG)\n\n base_log_dir = os.path.expanduser(config(\"logging\").get(\"BASE_LOG_DIR\"))\n Path(base_log_dir).mkdir(parents=True, exist_ok=True)\n logging_config = get_logging_config_dict(logging_level, base_log_dir)\n logging.config.dictConfig(logging_config)\n", "path": "bentoml/utils/log.py"}]} | 1,961 | 153 |
gh_patches_debug_14556 | rasdani/github-patches | git_diff | hedyorg__hedy-173 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include button to go back to previous level
Thanks for the great project! While trying the website I wished I could go back one level, but there is only a button to go to the next level.
</issue>
<code>
[start of hedyweb.py]
1 import collections
2 import attr
3 import glob
4 from os import path
5
6 from flask import jsonify, render_template, abort, redirect
7 import courses
8 from auth import current_user
9
10 class Translations:
11 def __init__(self):
12 self.data = {}
13 translations = glob.glob('coursedata/texts/*.yaml')
14 for trans_file in translations:
15 lang = path.splitext(path.basename(trans_file))[0]
16 self.data[lang] = courses.load_yaml(trans_file)
17
18 def get_translations(self, language, section):
19 # Merge with English when lacking translations
20 # Start from a defaultdict
21 d = collections.defaultdict(lambda: '???')
22 d.update(**self.data.get('en', {}).get(section, {}))
23 d.update(**self.data.get(language, {}).get(section, {}))
24 return d
25
26
27 def render_assignment_editor(request, course, level_number, assignment_number, menu, translations, version, loaded_program):
28 assignment = course.get_assignment(level_number, assignment_number)
29 if not assignment:
30 abort(404)
31
32 arguments_dict = {}
33
34 # Meta stuff
35 arguments_dict['course'] = course
36 arguments_dict['level_nr'] = str(level_number)
37 arguments_dict['assignment_nr'] = assignment.step # Give this a chance to be 'None'
38 arguments_dict['lang'] = course.language
39 arguments_dict['level'] = assignment.level
40 arguments_dict['next_level'] = int(level_number) + 1 if int(level_number) < course.max_level() else None
41 arguments_dict['next_assignment'] = int(assignment_number) + 1 if int(assignment_number) < course.max_step(level_number) else None
42 arguments_dict['menu'] = menu
43 arguments_dict['latest'] = version
44 arguments_dict['selected_page'] = 'code'
45 arguments_dict['page_title'] = f'Level {level_number} – Hedy'
46 arguments_dict['docs'] = [attr.asdict(d) for d in assignment.docs]
47 arguments_dict['auth'] = translations.data [course.language] ['Auth']
48 arguments_dict['username'] = current_user(request) ['username']
49 arguments_dict['loaded_program'] = loaded_program or ''
50
51 # Translations
52 arguments_dict.update(**translations.get_translations(course.language, 'ui'))
53
54 # Actual assignment
55 arguments_dict.update(**attr.asdict(assignment))
56
57 return render_template("code-page.html", **arguments_dict)
58
59
60 def render_assignment_docs(doc_type, course, level_number, assignment_number, menu, translations):
61 assignment = course.get_assignment(level_number, assignment_number)
62 if not assignment:
63 abort(404)
64
65 arguments_dict = {}
66
67 # Meta stuff
68 arguments_dict['course'] = course
69 arguments_dict['level_nr'] = str(level_number)
70 arguments_dict['assignment_nr'] = assignment.step # Give this a chance to be 'None'
71 arguments_dict['pagetitle'] = f'Level {level_number} - Hedy'
72 arguments_dict['lang'] = course.language
73 arguments_dict['selected_page'] = doc_type
74 arguments_dict['docs'] = [attr.asdict(d) for d in assignment.docs]
75 arguments_dict['auth'] = translations.data [course.language] ['Auth']
76
77 # Translations
78 arguments_dict.update(**translations.get_translations(course.language, 'ui'))
79
80 doc = course.docs.get(int(level_number), doc_type)
81 if not doc:
82 # Redirect to code page. Nasty 'import' here to work around
83 # cyclic imports.
84 import app
85 return redirect(app.hedy_link(level_number, assignment_number))
86
87 arguments_dict['mkd'] = doc.markdown
88 arguments_dict['menu'] = menu
89 arguments_dict['page_title'] = f'Level {level_number}.{assignment_number} – ' + doc.front_matter.get('title', '') + ' – Hedy'
90
91 return render_template("per-level-text.html", **arguments_dict)
92
[end of hedyweb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hedyweb.py b/hedyweb.py
--- a/hedyweb.py
+++ b/hedyweb.py
@@ -37,6 +37,7 @@
arguments_dict['assignment_nr'] = assignment.step # Give this a chance to be 'None'
arguments_dict['lang'] = course.language
arguments_dict['level'] = assignment.level
+ arguments_dict['prev_level'] = int(level_number) - 1 if int(level_number) > 1 else None
arguments_dict['next_level'] = int(level_number) + 1 if int(level_number) < course.max_level() else None
arguments_dict['next_assignment'] = int(assignment_number) + 1 if int(assignment_number) < course.max_step(level_number) else None
arguments_dict['menu'] = menu
| {"golden_diff": "diff --git a/hedyweb.py b/hedyweb.py\n--- a/hedyweb.py\n+++ b/hedyweb.py\n@@ -37,6 +37,7 @@\n arguments_dict['assignment_nr'] = assignment.step # Give this a chance to be 'None'\n arguments_dict['lang'] = course.language\n arguments_dict['level'] = assignment.level\n+ arguments_dict['prev_level'] = int(level_number) - 1 if int(level_number) > 1 else None\n arguments_dict['next_level'] = int(level_number) + 1 if int(level_number) < course.max_level() else None\n arguments_dict['next_assignment'] = int(assignment_number) + 1 if int(assignment_number) < course.max_step(level_number) else None\n arguments_dict['menu'] = menu\n", "issue": "Include button to go back to previous level\nThanks for the great project! While trying the website I wished I could go back one level, but there is only a button to go to the next level.\n", "before_files": [{"content": "import collections\nimport attr\nimport glob\nfrom os import path\n\nfrom flask import jsonify, render_template, abort, redirect\nimport courses\nfrom auth import current_user\n\nclass Translations:\n def __init__(self):\n self.data = {}\n translations = glob.glob('coursedata/texts/*.yaml')\n for trans_file in translations:\n lang = path.splitext(path.basename(trans_file))[0]\n self.data[lang] = courses.load_yaml(trans_file)\n\n def get_translations(self, language, section):\n # Merge with English when lacking translations\n # Start from a defaultdict\n d = collections.defaultdict(lambda: '???')\n d.update(**self.data.get('en', {}).get(section, {}))\n d.update(**self.data.get(language, {}).get(section, {}))\n return d\n\n\ndef render_assignment_editor(request, course, level_number, assignment_number, menu, translations, version, loaded_program):\n assignment = course.get_assignment(level_number, assignment_number)\n if not assignment:\n abort(404)\n\n arguments_dict = {}\n\n # Meta stuff\n arguments_dict['course'] = course\n arguments_dict['level_nr'] = str(level_number)\n arguments_dict['assignment_nr'] = assignment.step # Give this a chance to be 'None'\n arguments_dict['lang'] = course.language\n arguments_dict['level'] = assignment.level\n arguments_dict['next_level'] = int(level_number) + 1 if int(level_number) < course.max_level() else None\n arguments_dict['next_assignment'] = int(assignment_number) + 1 if int(assignment_number) < course.max_step(level_number) else None\n arguments_dict['menu'] = menu\n arguments_dict['latest'] = version\n arguments_dict['selected_page'] = 'code'\n arguments_dict['page_title'] = f'Level {level_number} \u2013 Hedy'\n arguments_dict['docs'] = [attr.asdict(d) for d in assignment.docs]\n arguments_dict['auth'] = translations.data [course.language] ['Auth']\n arguments_dict['username'] = current_user(request) ['username']\n arguments_dict['loaded_program'] = loaded_program or ''\n\n # Translations\n arguments_dict.update(**translations.get_translations(course.language, 'ui'))\n\n # Actual assignment\n arguments_dict.update(**attr.asdict(assignment))\n\n return render_template(\"code-page.html\", **arguments_dict)\n\n\ndef render_assignment_docs(doc_type, course, level_number, assignment_number, menu, translations):\n assignment = course.get_assignment(level_number, assignment_number)\n if not assignment:\n abort(404)\n\n arguments_dict = {}\n\n # Meta stuff\n arguments_dict['course'] = course\n arguments_dict['level_nr'] = str(level_number)\n arguments_dict['assignment_nr'] = assignment.step # Give this a chance to be 'None'\n arguments_dict['pagetitle'] = f'Level {level_number} - 
Hedy'\n arguments_dict['lang'] = course.language\n arguments_dict['selected_page'] = doc_type\n arguments_dict['docs'] = [attr.asdict(d) for d in assignment.docs]\n arguments_dict['auth'] = translations.data [course.language] ['Auth']\n\n # Translations\n arguments_dict.update(**translations.get_translations(course.language, 'ui'))\n\n doc = course.docs.get(int(level_number), doc_type)\n if not doc:\n # Redirect to code page. Nasty 'import' here to work around\n # cyclic imports.\n import app\n return redirect(app.hedy_link(level_number, assignment_number))\n\n arguments_dict['mkd'] = doc.markdown\n arguments_dict['menu'] = menu\n arguments_dict['page_title'] = f'Level {level_number}.{assignment_number} \u2013 ' + doc.front_matter.get('title', '') + ' \u2013 Hedy'\n\n return render_template(\"per-level-text.html\", **arguments_dict)\n", "path": "hedyweb.py"}]} | 1,594 | 178 |
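The hedy diff above is a one-liner: compute `prev_level` symmetrically to `next_level` so the template can render a back button only when one exists. The pattern in isolation, assuming `level_number`, `arguments_dict`, and `course` as in the original view function (the template-side guard on `prev_level` is implied, not shown):

level = int(level_number)
arguments_dict['prev_level'] = level - 1 if level > 1 else None
arguments_dict['next_level'] = level + 1 if level < course.max_level() else None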
gh_patches_debug_3499 | rasdani/github-patches | git_diff | speechbrain__speechbrain-1052 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation into user site directory with editable source fails
Currently, the installation of speechbrain fails for a user site directory with an editable source if the user does not have write access to the base-Python site-packages directory.
Both the problem and the solution are also described in https://github.com/pypa/pip/issues/7953
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 import os
3 import setuptools
4 from distutils.core import setup
5
6 with open("README.md") as f:
7 long_description = f.read()
8
9 with open(os.path.join("speechbrain", "version.txt")) as f:
10 version = f.read().strip()
11
12 setup(
13 name="speechbrain",
14 version=version,
15 description="All-in-one speech toolkit in pure Python and Pytorch",
16 long_description=long_description,
17 long_description_content_type="text/markdown",
18 author="Mirco Ravanelli & Others",
19 author_email="[email protected]",
20 packages=setuptools.find_packages(),
21 package_data={"speechbrain": ["version.txt", "log-config.yaml"]},
22 install_requires=[
23 "hyperpyyaml",
24 "joblib",
25 "numpy",
26 "packaging",
27 "scipy",
28 "sentencepiece",
29 "torch",
30 "torchaudio",
31 "tqdm",
32 "huggingface_hub",
33 ],
34 python_requires=">=3.7",
35 url="https://speechbrain.github.io/",
36 )
37
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,15 @@
#!/usr/bin/env python3
import os
+import sys
+import site
import setuptools
from distutils.core import setup
+
+# Editable install in user site directory can be allowed with this hack:
+# https://github.com/pypa/pip/issues/7953.
+site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
+
with open("README.md") as f:
long_description = f.read()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,15 @@\n #!/usr/bin/env python3\n import os\n+import sys\n+import site\n import setuptools\n from distutils.core import setup\n \n+\n+# Editable install in user site directory can be allowed with this hack:\n+# https://github.com/pypa/pip/issues/7953.\n+site.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n+\n with open(\"README.md\") as f:\n long_description = f.read()\n", "issue": "Installation into user site directory with editable source fails\nCurrently, the installation of speechbrain fails for a user site directory with an editable source if the user does not have write access to the base-Python site-packages directory.\r\nBoth the problem and the solution are also described in https://github.com/pypa/pip/issues/7953 \n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport setuptools\nfrom distutils.core import setup\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nwith open(os.path.join(\"speechbrain\", \"version.txt\")) as f:\n version = f.read().strip()\n\nsetup(\n name=\"speechbrain\",\n version=version,\n description=\"All-in-one speech toolkit in pure Python and Pytorch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Mirco Ravanelli & Others\",\n author_email=\"[email protected]\",\n packages=setuptools.find_packages(),\n package_data={\"speechbrain\": [\"version.txt\", \"log-config.yaml\"]},\n install_requires=[\n \"hyperpyyaml\",\n \"joblib\",\n \"numpy\",\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n \"torch\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n ],\n python_requires=\">=3.7\",\n url=\"https://speechbrain.github.io/\",\n)\n", "path": "setup.py"}]} | 897 | 127 |
gh_patches_debug_8602 | rasdani/github-patches | git_diff | Nitrate__Nitrate-613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't change case run status from the Status dropdown menu
Steps:
* Open a test run page
* Select a few of case runs
* Choose one status to change from Status dropdown menu
* Server side responses 500, the error looks like:
</issue>
<code>
[start of src/tcms/logs/views.py]
1 # -*- coding: utf-8 -*-
2
3 from django.conf import settings
4 from django.contrib.contenttypes.models import ContentType
5
6 from .models import TCMSLogModel
7
8 from django.utils.encoding import smart_text as smart_unicode
9
10
11 class TCMSLog:
12 """TCMS Log"""
13
14 def __init__(self, model):
15 super().__init__()
16 self.model = model
17
18 def get_new_log_object(self):
19 elements = ['who', 'field', 'original_value', 'new_value']
20
21 for element in elements:
22 if not hasattr(self, element):
23 raise NotImplementedError(
24 f'Log does not have attribute {element}')
25
26 model = self.get_log_model()
27 new = model(**self.get_log_create_data())
28
29 return new
30
31 def get_log_model(self):
32 """
33 Get the log model to create with this class.
34 """
35 return TCMSLogModel
36
37 def get_log_create_data(self):
38 return {
39 'content_object': self.model,
40 'site_id': settings.SITE_ID,
41 'who': self.who,
42 'field': self.field,
43 'original_value': self.original_value,
44 'new_value': self.new_value,
45 }
46
47 def make(self, who, new_value, field='', original_value=''):
48 """Create new log"""
49 self.who = who
50 self.field = field
51 self.original_value = original_value
52 self.new_value = new_value
53
54 model = self.get_new_log_object()
55 model.save()
56
57 def lookup_content_type(self):
58 return ContentType.objects.get_for_model(self.model)
59
60 def get_query_set(self):
61 ctype = self.lookup_content_type()
62 model = self.get_log_model()
63
64 qs = model.objects.filter(content_type=ctype,
65 object_pk=smart_unicode(self.model.pk),
66 site=settings.SITE_ID)
67 qs = qs.select_related('who')
68 return qs
69
70 def list(self):
71 """List the logs"""
72 return self.get_query_set().all()
73
[end of src/tcms/logs/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/tcms/logs/views.py b/src/tcms/logs/views.py
--- a/src/tcms/logs/views.py
+++ b/src/tcms/logs/views.py
@@ -44,11 +44,11 @@
'new_value': self.new_value,
}
- def make(self, who, new_value, field='', original_value=''):
+ def make(self, who, new_value, field=None, original_value=None):
"""Create new log"""
self.who = who
- self.field = field
- self.original_value = original_value
+ self.field = field or ''
+ self.original_value = original_value or ''
self.new_value = new_value
model = self.get_new_log_object()
| {"golden_diff": "diff --git a/src/tcms/logs/views.py b/src/tcms/logs/views.py\n--- a/src/tcms/logs/views.py\n+++ b/src/tcms/logs/views.py\n@@ -44,11 +44,11 @@\n 'new_value': self.new_value,\n }\n \n- def make(self, who, new_value, field='', original_value=''):\n+ def make(self, who, new_value, field=None, original_value=None):\n \"\"\"Create new log\"\"\"\n self.who = who\n- self.field = field\n- self.original_value = original_value\n+ self.field = field or ''\n+ self.original_value = original_value or ''\n self.new_value = new_value\n \n model = self.get_new_log_object()\n", "issue": "Can't change case run status from the Status dropdown menu\nSteps:\r\n\r\n* Open a test run page\r\n* Select a few of case runs\r\n* Choose one status to change from Status dropdown menu\r\n* Server side responses 500, the error looks like:\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .models import TCMSLogModel\n\nfrom django.utils.encoding import smart_text as smart_unicode\n\n\nclass TCMSLog:\n \"\"\"TCMS Log\"\"\"\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def get_new_log_object(self):\n elements = ['who', 'field', 'original_value', 'new_value']\n\n for element in elements:\n if not hasattr(self, element):\n raise NotImplementedError(\n f'Log does not have attribute {element}')\n\n model = self.get_log_model()\n new = model(**self.get_log_create_data())\n\n return new\n\n def get_log_model(self):\n \"\"\"\n Get the log model to create with this class.\n \"\"\"\n return TCMSLogModel\n\n def get_log_create_data(self):\n return {\n 'content_object': self.model,\n 'site_id': settings.SITE_ID,\n 'who': self.who,\n 'field': self.field,\n 'original_value': self.original_value,\n 'new_value': self.new_value,\n }\n\n def make(self, who, new_value, field='', original_value=''):\n \"\"\"Create new log\"\"\"\n self.who = who\n self.field = field\n self.original_value = original_value\n self.new_value = new_value\n\n model = self.get_new_log_object()\n model.save()\n\n def lookup_content_type(self):\n return ContentType.objects.get_for_model(self.model)\n\n def get_query_set(self):\n ctype = self.lookup_content_type()\n model = self.get_log_model()\n\n qs = model.objects.filter(content_type=ctype,\n object_pk=smart_unicode(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n\n def list(self):\n \"\"\"List the logs\"\"\"\n return self.get_query_set().all()\n", "path": "src/tcms/logs/views.py"}]} | 1,157 | 161 |
gh_patches_debug_8434 | rasdani/github-patches | git_diff | google__mobly-323 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`signals.TestAbortClass` does not work in `setup_test` and `on_xxxx` methods
It is valid to abort the class at any stage of a test.
E.g., if we find out in `on_fail` that a test failed due to a fatal error like the snippet_client losing its socket connection, it is reasonable to abort all subsequent tests in the class instead of continuing to run, and fail, tests we already know will fail (see the sketch below).
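
A minimal sketch of the intended usage, assuming the fix makes the signal honored inside `on_fail` (the `on_fail` signature and the `record.details` check follow recent Mobly conventions and are illustrative rather than taken from this codebase):

```
from mobly import base_test, signals


class SnippetSanityTest(base_test.BaseTestClass):

    def on_fail(self, record):
        # Hypothetical guard: if the snippet client dropped its socket
        # connection, every remaining test in this class will fail too,
        # so stop the class early instead of piling up failures.
        if 'socket' in (record.details or ''):
            raise signals.TestAbortClass(
                'Snippet connection lost; aborting remaining tests.')
```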
</issue>
<code>
[start of mobly/signals.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """This module is where all the test signal classes and related utilities live.
15 """
16
17 import functools
18 import json
19
20
21 class TestSignalError(Exception):
22 """Raised when an error occurs inside a test signal."""
23
24
25 class TestSignal(Exception):
26 """Base class for all test result control signals. This is used to signal
27 the result of a test.
28
29 Attribute:
30 details: A string that describes the reason for raising this signal.
31 extras: A json-serializable data type to convey extra information about
32 a test result.
33 """
34
35 def __init__(self, details, extras=None):
36 super(TestSignal, self).__init__(details)
37 self.details = str(details)
38 try:
39 json.dumps(extras)
40 self.extras = extras
41 except TypeError:
42 raise TestSignalError('Extras must be json serializable. %s '
43 'is not.' % extras)
44
45 def __str__(self):
46 return 'Details=%s, Extras=%s' % (self.details, self.extras)
47
48
49 class TestError(TestSignal):
50 """Raised when a test has an unexpected error."""
51
52
53 class TestFailure(TestSignal):
54 """Raised when a test has failed."""
55
56
57 class TestPass(TestSignal):
58 """Raised when a test has passed."""
59
60
61 class TestSkip(TestSignal):
62 """Raised when a test has been skipped."""
63
64
65 class TestAbortClass(TestSignal):
66 """Raised when all subsequent tests within the same test class should
67 be aborted.
68 """
69
70
71 class TestAbortAll(TestSignal):
72 """Raised when all subsequent tests should be aborted."""
73
74
75 class ControllerError(Exception):
76 """Raised when an error occured in controller classes."""
77
[end of mobly/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mobly/signals.py b/mobly/signals.py
--- a/mobly/signals.py
+++ b/mobly/signals.py
@@ -62,13 +62,18 @@
"""Raised when a test has been skipped."""
-class TestAbortClass(TestSignal):
+class TestAbortSignal(TestSignal):
+ """Base class for abort signals.
+ """
+
+
+class TestAbortClass(TestAbortSignal):
"""Raised when all subsequent tests within the same test class should
be aborted.
"""
-class TestAbortAll(TestSignal):
+class TestAbortAll(TestAbortSignal):
"""Raised when all subsequent tests should be aborted."""
| {"golden_diff": "diff --git a/mobly/signals.py b/mobly/signals.py\n--- a/mobly/signals.py\n+++ b/mobly/signals.py\n@@ -62,13 +62,18 @@\n \"\"\"Raised when a test has been skipped.\"\"\"\n \n \n-class TestAbortClass(TestSignal):\n+class TestAbortSignal(TestSignal):\n+ \"\"\"Base class for abort signals.\n+ \"\"\"\n+\n+\n+class TestAbortClass(TestAbortSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n \n \n-class TestAbortAll(TestSignal):\n+class TestAbortAll(TestAbortSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n", "issue": "`signals.TestAbortClass` does not work in `setup_test` and `on_xxxx` methods\nIt is valid to abort the class at any stage of a test.\r\n\r\nE.g. If we find out in `on_fail` that a test failed due to a fatal error like snippet_client losing socket connection, it is reasonable for the test to abort all subsequent tests in the class instead of continue running and failing more tests when we know they will fail.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is where all the test signal classes and related utilities live.\n\"\"\"\n\nimport functools\nimport json\n\n\nclass TestSignalError(Exception):\n \"\"\"Raised when an error occurs inside a test signal.\"\"\"\n\n\nclass TestSignal(Exception):\n \"\"\"Base class for all test result control signals. This is used to signal\n the result of a test.\n\n Attribute:\n details: A string that describes the reason for raising this signal.\n extras: A json-serializable data type to convey extra information about\n a test result.\n \"\"\"\n\n def __init__(self, details, extras=None):\n super(TestSignal, self).__init__(details)\n self.details = str(details)\n try:\n json.dumps(extras)\n self.extras = extras\n except TypeError:\n raise TestSignalError('Extras must be json serializable. %s '\n 'is not.' % extras)\n\n def __str__(self):\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n\n\nclass TestError(TestSignal):\n \"\"\"Raised when a test has an unexpected error.\"\"\"\n\n\nclass TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n\n\nclass TestPass(TestSignal):\n \"\"\"Raised when a test has passed.\"\"\"\n\n\nclass TestSkip(TestSignal):\n \"\"\"Raised when a test has been skipped.\"\"\"\n\n\nclass TestAbortClass(TestSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n\n\nclass TestAbortAll(TestSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n\n\nclass ControllerError(Exception):\n \"\"\"Raised when an error occured in controller classes.\"\"\"\n", "path": "mobly/signals.py"}]} | 1,260 | 148 |
gh_patches_debug_9763 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OpenAPI Specification for /tables/ endpoint
## Problem
In order to ensure the accuracy of the specs generated by drf-spectacular for the /tables/ endpoint, we will generate an OpenAPI specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.
## Proposed solution
* Implement a custom preprocessing hook function to filter out all endpoints except for the /tables/ endpoint. The hook function selectively retains only the endpoint paths that match /tables/ (sketched below)
* Finally, generate the spec file using the `manage.py spectacular --color --file schema.yml` command
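
A hedged sketch of the hook described above, using the same shape as the project's existing `custom_preprocessing_hook` but keeping only the /tables/ paths, per the description in the first bullet:

```
def tables_only_preprocessing_hook(endpoints):
    filtered = []
    for (path, path_regex, method, callback) in endpoints:
        # Keep only the /tables/ endpoint paths for this spec run.
        if path.startswith("/api/db/v0/tables/"):
            filtered.append((path, path_regex, method, callback))
    return filtered
```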
</issue>
<code>
[start of config/settings/openapi.py]
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
[end of config/settings/openapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -2,7 +2,7 @@
filtered = []
for (path, path_regex, method, callback) in endpoints:
# Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/"):
+ if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
filtered.append((path, path_regex, method, callback))
return filtered
| {"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /tables/ endpoint\n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /tables/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /tables/ endpoint. The hook function selectively retains only the endpoint paths that match /tabels/ \r\n* Finally, generate the spec file using the `manage.py spectacular --color --file schema.yml` command\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]} | 922 | 172 |
gh_patches_debug_13035 | rasdani/github-patches | git_diff | bokeh__bokeh-1495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Method to update data source contents in the notebook
There is a reference implementation by @akloster in https://github.com/akloster/nb-bokeh-animations
```
def update_data_source(ds):
id = ds.ref['id']
model = ds.ref['type']
js = """
var ds = Bokeh.Collections('{model}').get('{id}');
var data = {json};
ds.set(data);
""".format(
model=model,
id=id,
json= serialize_json(ds.vm_serialize()))
display.display_javascript(js, raw=True)
```
The plan is to use this as a starting place for a method on data sources that updates the BokehJS data source and triggers a redraw from Python. I propose `ds.notebook_update()` as the method name (sketched below).
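
A hedged sketch of that method, adapted from the reference implementation above and meant to live on the `DataSource` class shown below (the name and the exact imports are provisional):

```
def notebook_update(self):
    """Push this source's current data to BokehJS and trigger a redraw."""
    from IPython.core import display
    from bokeh.protocol import serialize_json
    js = """
    var ds = Bokeh.Collections('{model}').get('{id}');
    var data = {json};
    ds.set(data);
    """.format(model=self.ref['type'], id=self.ref['id'],
               json=serialize_json(self.vm_serialize()))
    display.display_javascript(js, raw=True)
```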
</issue>
<code>
[start of bokeh/models/sources.py]
1 from __future__ import absolute_import
2
3 from ..plot_object import PlotObject
4 from ..properties import HasProps
5 from ..properties import Any, Int, String, Instance, List, Dict, Either
6
7 class DataSource(PlotObject):
8 """ Base class for data sources """
9 # List of names of the fields of each tuple in self.data
10 # ordering is incoporated here
11 column_names = List(String)
12 selected = List(Int) # index of selected points
13
14 def columns(self, *columns):
15 """ Returns a ColumnsRef object that points to a column or set of
16 columns on this data source
17 """
18 return ColumnsRef(source=self, columns=list(columns))
19
20 class ColumnsRef(HasProps):
21 source = Instance(DataSource)
22 columns = List(String)
23
24 class ColumnDataSource(DataSource):
25 # Maps names of columns to sequences or arrays
26 data = Dict(String, Any)
27
28 # Maps field/column name to a DataRange or FactorRange object. If the
29 # field is not in the dict, then a range is created automatically.
30 cont_ranges = Dict(String, Instance(".models.Range"))
31 discrete_ranges = Dict(String, Instance(".models.Range"))
32
33 def __init__(self, *args, **kw):
34 """ Modify the basic DataSource/PlotObj constructor so that if we
35 are called with a single argument that is a dict, then we treat
36 that implicitly as our "data" attribute.
37 """
38 if len(args) == 1 and "data" not in kw:
39 kw["data"] = args[0]
40 raw_data = kw.pop("data", {})
41 if not isinstance(raw_data, dict):
42 import pandas as pd
43 if isinstance(raw_data, pd.DataFrame):
44 raw_data = self.from_df(raw_data)
45 else:
46 raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
47 for name, data in raw_data.items():
48 self.add(data, name)
49 super(ColumnDataSource, self).__init__(**kw)
50
51 @classmethod
52 def from_df(cls, raw_data):
53 dfindex = raw_data.index
54 new_data = {}
55 for colname in raw_data:
56 new_data[colname] = raw_data[colname].tolist()
57 if dfindex.name:
58 new_data[dfindex.name] = dfindex.tolist()
59 elif dfindex.names and not all([x is None for x in dfindex.names]):
60 new_data["_".join(dfindex.names)] = dfindex.tolist()
61 else:
62 new_data["index"] = dfindex.tolist()
63 return new_data
64
65 def to_df(self):
66 """convert data source to pandas dataframe
67 local import of pandas because of possible compatability issues (pypy?)
68 if we have column_names set, we use those, otherwise we let
69 pandas infer the column names. column_names can be used to
70 either order or filter the columns here.
71 """
72 import pandas as pd
73 if self.column_names:
74 return pd.DataFrame(self.data, columns=self.column_names)
75 else:
76 return pd.DataFrame(self.data)
77
78 def add(self, data, name=None):
79 """ Appends the data to the list of columns. Returns the name
80 that was inserted.
81 """
82 if name is None:
83 n = len(self.data)
84 while "Series %d"%n in self.data:
85 n += 1
86 name = "Series %d"%n
87 self.column_names.append(name)
88 self.data[name] = data
89 return name
90
91 def remove(self, name):
92 try:
93 self.column_names.remove(name)
94 del self.data[name]
95 except (ValueError, KeyError):
96 import warnings
97 warnings.warn("Unable to find column '%s' in datasource" % name)
98
99 class ServerDataSource(DataSource):
100 data_url = String()
101 owner_username = String()
102
103 # allow us to add some data that isn't on the remote source
104 # and join it to the remote data
105 data = Dict(String, Any)
106
107 # Paramters of data transformation operations
108 # The 'Any' is used to pass primtives around. Minimally, a tag to say which downsample routine to use. In some downsamplers, parameters are passed this way too.
109 # TODO: Find/create a property type for 'any primitive/atomic value'
110 transform = Dict(String,Either(Instance(PlotObject), Any))
111
[end of bokeh/models/sources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/sources.py b/bokeh/models/sources.py
--- a/bokeh/models/sources.py
+++ b/bokeh/models/sources.py
@@ -96,6 +96,19 @@
import warnings
warnings.warn("Unable to find column '%s' in datasource" % name)
+ def push_notebook(self):
+ from IPython.core import display
+ from bokeh.protocol import serialize_json
+ id = self.ref['id']
+ model = self.ref['type']
+ json = serialize_json(self.vm_serialize())
+ js = """
+ var ds = Bokeh.Collections('{model}').get('{id}');
+ var data = {json};
+ ds.set(data);
+ """.format(model=model, id=id, json=json)
+ display.display_javascript(js, raw=True)
+
class ServerDataSource(DataSource):
data_url = String()
owner_username = String()
| {"golden_diff": "diff --git a/bokeh/models/sources.py b/bokeh/models/sources.py\n--- a/bokeh/models/sources.py\n+++ b/bokeh/models/sources.py\n@@ -96,6 +96,19 @@\n import warnings\n warnings.warn(\"Unable to find column '%s' in datasource\" % name)\n \n+ def push_notebook(self):\n+ from IPython.core import display\n+ from bokeh.protocol import serialize_json\n+ id = self.ref['id']\n+ model = self.ref['type']\n+ json = serialize_json(self.vm_serialize())\n+ js = \"\"\"\n+ var ds = Bokeh.Collections('{model}').get('{id}');\n+ var data = {json};\n+ ds.set(data);\n+ \"\"\".format(model=model, id=id, json=json)\n+ display.display_javascript(js, raw=True)\n+\n class ServerDataSource(DataSource):\n data_url = String()\n owner_username = String()\n", "issue": "Method to update data source contents in the notebook\nThere is a reference implementation by @akloster in https://github.com/akloster/nb-bokeh-animations\n\n```\ndef update_data_source(ds):\n id = ds.ref['id']\n model = ds.ref['type']\n js = \"\"\"\n var ds = Bokeh.Collections('{model}').get('{id}');\n var data = {json};\n ds.set(data);\n \"\"\".format(\n model=model, \n id=id, \n json= serialize_json(ds.vm_serialize()))\n display.display_javascript(js, raw=True)\n```\n\nPlan to use this as a starting place for a method on data sources that will update the BokehJS data source and trigger a re-draw, from python. Propose `ds.notebook_update()` for method name. \n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom ..plot_object import PlotObject\nfrom ..properties import HasProps\nfrom ..properties import Any, Int, String, Instance, List, Dict, Either\n\nclass DataSource(PlotObject):\n \"\"\" Base class for data sources \"\"\"\n # List of names of the fields of each tuple in self.data\n # ordering is incoporated here\n column_names = List(String)\n selected = List(Int) # index of selected points\n\n def columns(self, *columns):\n \"\"\" Returns a ColumnsRef object that points to a column or set of\n columns on this data source\n \"\"\"\n return ColumnsRef(source=self, columns=list(columns))\n\nclass ColumnsRef(HasProps):\n source = Instance(DataSource)\n columns = List(String)\n\nclass ColumnDataSource(DataSource):\n # Maps names of columns to sequences or arrays\n data = Dict(String, Any)\n\n # Maps field/column name to a DataRange or FactorRange object. 
If the\n # field is not in the dict, then a range is created automatically.\n cont_ranges = Dict(String, Instance(\".models.Range\"))\n discrete_ranges = Dict(String, Instance(\".models.Range\"))\n\n def __init__(self, *args, **kw):\n \"\"\" Modify the basic DataSource/PlotObj constructor so that if we\n are called with a single argument that is a dict, then we treat\n that implicitly as our \"data\" attribute.\n \"\"\"\n if len(args) == 1 and \"data\" not in kw:\n kw[\"data\"] = args[0]\n raw_data = kw.pop(\"data\", {})\n if not isinstance(raw_data, dict):\n import pandas as pd\n if isinstance(raw_data, pd.DataFrame):\n raw_data = self.from_df(raw_data)\n else:\n raise ValueError(\"expected a dict or pandas.DataFrame, got %s\" % raw_data)\n for name, data in raw_data.items():\n self.add(data, name)\n super(ColumnDataSource, self).__init__(**kw)\n\n @classmethod\n def from_df(cls, raw_data):\n dfindex = raw_data.index\n new_data = {}\n for colname in raw_data:\n new_data[colname] = raw_data[colname].tolist()\n if dfindex.name:\n new_data[dfindex.name] = dfindex.tolist()\n elif dfindex.names and not all([x is None for x in dfindex.names]):\n new_data[\"_\".join(dfindex.names)] = dfindex.tolist()\n else:\n new_data[\"index\"] = dfindex.tolist()\n return new_data\n\n def to_df(self):\n \"\"\"convert data source to pandas dataframe\n local import of pandas because of possible compatability issues (pypy?)\n if we have column_names set, we use those, otherwise we let\n pandas infer the column names. column_names can be used to\n either order or filter the columns here.\n \"\"\"\n import pandas as pd\n if self.column_names:\n return pd.DataFrame(self.data, columns=self.column_names)\n else:\n return pd.DataFrame(self.data)\n\n def add(self, data, name=None):\n \"\"\" Appends the data to the list of columns. Returns the name\n that was inserted.\n \"\"\"\n if name is None:\n n = len(self.data)\n while \"Series %d\"%n in self.data:\n n += 1\n name = \"Series %d\"%n\n self.column_names.append(name)\n self.data[name] = data\n return name\n\n def remove(self, name):\n try:\n self.column_names.remove(name)\n del self.data[name]\n except (ValueError, KeyError):\n import warnings\n warnings.warn(\"Unable to find column '%s' in datasource\" % name)\n\nclass ServerDataSource(DataSource):\n data_url = String()\n owner_username = String()\n\n # allow us to add some data that isn't on the remote source\n # and join it to the remote data\n data = Dict(String, Any)\n\n # Paramters of data transformation operations\n # The 'Any' is used to pass primtives around. Minimally, a tag to say which downsample routine to use. In some downsamplers, parameters are passed this way too.\n # TODO: Find/create a property type for 'any primitive/atomic value'\n transform = Dict(String,Either(Instance(PlotObject), Any))\n", "path": "bokeh/models/sources.py"}]} | 1,879 | 205 |
gh_patches_debug_40784 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-5093 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ContrastiveLoss with dynamic batchsize
**Is your feature request related to a problem? Please describe.**
When we instantiate the `ContrastiveLoss` class, we need to pass in the `batch_size` parameter. This assumes that the batch size remains constant throughout training (which is not always the case).
**Describe the solution you'd like**
As we know, and have fixed in the [docs](https://docs.monai.io/en/latest/losses.html#monai.losses.ContrastiveLoss), the forward pass accepts inputs of shape `B[F]`, so why not pick up the batch size dynamically in `forward` (see the sketch below)? Is there a reason it is not designed that way?
I would be happy to put up a pull request if that is okay.
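
A tiny runnable sketch of the idea: since `forward` receives tensors of shape `B[F]`, the batch size is just the first dimension of the input and can be read on each call instead of being fixed in `__init__` (the names below are illustrative):

```
import torch

def infer_batch_size(input: torch.Tensor) -> int:
    # Inputs are B[F], so the batch dimension is dimension 0.
    return input.shape[0]

embeddings = torch.randn(7, 128)  # a batch of 7 feature vectors
assert infer_batch_size(embeddings) == 7
```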
</issue>
<code>
[start of monai/losses/contrastive.py]
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import torch
13 from torch.nn import functional as F
14 from torch.nn.modules.loss import _Loss
15
16 from monai.utils import deprecated_arg
17
18
19 class ContrastiveLoss(_Loss):
20
21 """
22 Compute the Contrastive loss defined in:
23
24 Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International
25 conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)
26
27 Adapted from:
28 https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5
29
30 """
31
32 @deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.")
33 def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None:
34 """
35 Args:
36 temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
37 batch_size: The number of samples.
38
39 Raises:
40 ValueError: When an input of dimension length > 2 is passed
41 ValueError: When input and target are of different shapes
42
43 .. deprecated:: 0.8.0
44
45 `reduction` is no longer supported.
46
47 """
48 super().__init__()
49
50 self.batch_size = batch_size
51 self.temperature = temperature
52
53 def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
54 """
55 Args:
56 input: the shape should be B[F].
57 target: the shape should be B[F].
58 """
59 if len(target.shape) > 2 or len(input.shape) > 2:
60 raise ValueError(
61 f"Either target or input has dimensions greater than 2 where target "
62 f"shape is ({target.shape}) and input shape is ({input.shape})"
63 )
64
65 if target.shape != input.shape:
66 raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
67
68 temperature_tensor = torch.as_tensor(self.temperature).to(input.device)
69
70 norm_i = F.normalize(input, dim=1)
71 norm_j = F.normalize(target, dim=1)
72
73 negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
74 negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)
75
76 repr = torch.cat([norm_i, norm_j], dim=0)
77 sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
78 sim_ij = torch.diag(sim_matrix, self.batch_size)
79 sim_ji = torch.diag(sim_matrix, -self.batch_size)
80
81 positives = torch.cat([sim_ij, sim_ji], dim=0)
82 nominator = torch.exp(positives / temperature_tensor)
83 denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)
84
85 loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
86
87 return torch.sum(loss_partial) / (2 * self.batch_size)
88
[end of monai/losses/contrastive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py
--- a/monai/losses/contrastive.py
+++ b/monai/losses/contrastive.py
@@ -9,6 +9,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from distutils.log import warn
+
import torch
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
@@ -30,11 +32,10 @@
"""
@deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.")
- def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None:
+ def __init__(self, temperature: float = 0.5, batch_size: int = -1, reduction="sum") -> None:
"""
Args:
temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
- batch_size: The number of samples.
Raises:
ValueError: When an input of dimension length > 2 is passed
@@ -46,10 +47,11 @@
"""
super().__init__()
-
- self.batch_size = batch_size
self.temperature = temperature
+ if batch_size != -1:
+ warn("batch_size is no longer required to be set. It will be estimated dynamically in the forward call")
+
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""
Args:
@@ -66,17 +68,18 @@
raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
temperature_tensor = torch.as_tensor(self.temperature).to(input.device)
+ batch_size = input.shape[0]
norm_i = F.normalize(input, dim=1)
norm_j = F.normalize(target, dim=1)
- negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
+ negatives_mask = ~torch.eye(batch_size * 2, batch_size * 2, dtype=torch.bool)
negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)
repr = torch.cat([norm_i, norm_j], dim=0)
sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
- sim_ij = torch.diag(sim_matrix, self.batch_size)
- sim_ji = torch.diag(sim_matrix, -self.batch_size)
+ sim_ij = torch.diag(sim_matrix, batch_size)
+ sim_ji = torch.diag(sim_matrix, -batch_size)
positives = torch.cat([sim_ij, sim_ji], dim=0)
nominator = torch.exp(positives / temperature_tensor)
@@ -84,4 +87,4 @@
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
- return torch.sum(loss_partial) / (2 * self.batch_size)
+ return torch.sum(loss_partial) / (2 * batch_size)
| {"golden_diff": "diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py\n--- a/monai/losses/contrastive.py\n+++ b/monai/losses/contrastive.py\n@@ -9,6 +9,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from distutils.log import warn\n+\n import torch\n from torch.nn import functional as F\n from torch.nn.modules.loss import _Loss\n@@ -30,11 +32,10 @@\n \"\"\"\n \n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n- def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n+ def __init__(self, temperature: float = 0.5, batch_size: int = -1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n- batch_size: The number of samples.\n \n Raises:\n ValueError: When an input of dimension length > 2 is passed\n@@ -46,10 +47,11 @@\n \n \"\"\"\n super().__init__()\n-\n- self.batch_size = batch_size\n self.temperature = temperature\n \n+ if batch_size != -1:\n+ warn(\"batch_size is no longer required to be set. It will be estimated dynamically in the forward call\")\n+\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n@@ -66,17 +68,18 @@\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n \n temperature_tensor = torch.as_tensor(self.temperature).to(input.device)\n+ batch_size = input.shape[0]\n \n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n \n- negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n+ negatives_mask = ~torch.eye(batch_size * 2, batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)\n \n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n- sim_ij = torch.diag(sim_matrix, self.batch_size)\n- sim_ji = torch.diag(sim_matrix, -self.batch_size)\n+ sim_ij = torch.diag(sim_matrix, batch_size)\n+ sim_ji = torch.diag(sim_matrix, -batch_size)\n \n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n@@ -84,4 +87,4 @@\n \n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n \n- return torch.sum(loss_partial) / (2 * self.batch_size)\n+ return torch.sum(loss_partial) / (2 * batch_size)\n", "issue": "ContrastiveLoss with dynamic batchsize\n**Is your feature request related to a problem? Please describe.**\r\nWhen we instantiate an instance of type class `ContrastiveLoss` we need to pass in the parameter for `batch_size`. This assumes that the batch size remains constant all through the training(which is not always the case). \r\n\r\n**Describe the solution you'd like**\r\nAs we know and have fixed in the [docs](https://docs.monai.io/en/latest/losses.html#monai.losses.ContrastiveLoss) that the forward accepts inputs of shape `B[F]`, why not pick up the Batch size dynamically in the forward. 
Is there a reason why it is not designed in that fashion?\r\n\r\nI would be happy to put a pull request if it is okay.\r\n\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import deprecated_arg\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n batch_size: The number of samples.\n\n Raises:\n ValueError: When an input of dimension length > 2 is passed\n ValueError: When input and target are of different shapes\n\n .. deprecated:: 0.8.0\n\n `reduction` is no longer supported.\n\n \"\"\"\n super().__init__()\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.as_tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n return torch.sum(loss_partial) / (2 * self.batch_size)\n", "path": "monai/losses/contrastive.py"}]} | 1,720 | 723 |
gh_patches_debug_639 | rasdani/github-patches | git_diff | pex-tool__pex-2226 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.144
On the docket:
+ [x] Traverse directories in stable order when building a PEX #2220
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.143"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.143"
+__version__ = "2.1.144"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.143\"\n+__version__ = \"2.1.144\"\n", "issue": "Release 2.1.144\nOn the docket:\r\n+ [x] Traverse directories in stable order when building a PEX #2220\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.143\"\n", "path": "pex/version.py"}]} | 619 | 99 |
gh_patches_debug_6979 | rasdani/github-patches | git_diff | sopel-irc__sopel-1875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add ping/pong response to ping.py
### The problem
The default ping module doesn't include a ping/pong command despite its name, and I'd rather not replace the module just for that (I could move it out of core hacks, but why not upstream it)!
### The solution
Add it; I will, after I write this (a sketch of the handler follows the example).
### Example
09:54:36 <+RhinosF1> .ping
09:54:37 <@ZppixBot-test> Pong!
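
A hedged sketch of the handler, following the decorator style the module already uses (whether it should `reply` or `say`, and the wording of the docstring, are open choices):

```
from sopel.module import commands


@commands('ping')
def ping(bot, trigger):
    """Reply with a pong to confirm the bot is alive."""
    bot.reply('Pong!')
```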
</issue>
<code>
[start of sopel/modules/ping.py]
1 # coding=utf-8
2 """
3 ping.py - Sopel Ping Module
4 Copyright 2008 (?), Sean B. Palmer, inamidst.com
5
6 https://sopel.chat
7 """
8 from __future__ import absolute_import, division, print_function, unicode_literals
9
10 import random
11
12 from sopel.module import priority, rule, thread
13
14
15 @rule(r'(?i)(hi|hello|hey),? $nickname[ \t]*$')
16 def hello(bot, trigger):
17 greeting = random.choice(('Hi', 'Hey', 'Hello'))
18 punctuation = random.choice(('', '.', '…', '!'))
19 bot.say(greeting + ' ' + trigger.nick + punctuation)
20
21
22 @rule(r'(?i)(Fuck|Screw) you,? $nickname[ \t]*$')
23 def rude(bot, trigger):
24 bot.say('Watch your mouth, ' + trigger.nick + ', or I\'ll tell your mother!')
25
26
27 @rule('$nickname!')
28 @priority('high')
29 @thread(False)
30 def interjection(bot, trigger):
31 bot.say(trigger.nick + '!')
32
[end of sopel/modules/ping.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/ping.py b/sopel/modules/ping.py
--- a/sopel/modules/ping.py
+++ b/sopel/modules/ping.py
@@ -9,7 +9,7 @@
import random
-from sopel.module import priority, rule, thread
+from sopel.module import commands, priority, rule, thread
@rule(r'(?i)(hi|hello|hey),? $nickname[ \t]*$')
@@ -29,3 +29,9 @@
@thread(False)
def interjection(bot, trigger):
bot.say(trigger.nick + '!')
+
+
+@commands('ping')
+def ping(bot, trigger):
+ """Reply to ping command."""
+ bot.reply('Pong!')
| {"golden_diff": "diff --git a/sopel/modules/ping.py b/sopel/modules/ping.py\n--- a/sopel/modules/ping.py\n+++ b/sopel/modules/ping.py\n@@ -9,7 +9,7 @@\n \n import random\n \n-from sopel.module import priority, rule, thread\n+from sopel.module import commands, priority, rule, thread\n \n \n @rule(r'(?i)(hi|hello|hey),? $nickname[ \\t]*$')\n@@ -29,3 +29,9 @@\n @thread(False)\n def interjection(bot, trigger):\n bot.say(trigger.nick + '!')\n+\n+\n+@commands('ping')\n+def ping(bot, trigger):\n+ \"\"\"Reply to ping command.\"\"\"\n+ bot.reply('Pong!')\n", "issue": "add ping/pong response to ping.py\n### The problem\r\nThe default ping module doesn't include ping/pong despite it's name and I'd rather not replace it just for that (I could move it out of core hacks but why not upstream)!\r\n\r\n### The solution\r\nAdd it, I will after I write this.\r\n\r\n### Example\r\n\r\n09:54:36 <+RhinosF1> .ping\r\n09:54:37 <@ZppixBot-test> Pong!\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nping.py - Sopel Ping Module\nCopyright 2008 (?), Sean B. Palmer, inamidst.com\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport random\n\nfrom sopel.module import priority, rule, thread\n\n\n@rule(r'(?i)(hi|hello|hey),? $nickname[ \\t]*$')\ndef hello(bot, trigger):\n greeting = random.choice(('Hi', 'Hey', 'Hello'))\n punctuation = random.choice(('', '.', '\u2026', '!'))\n bot.say(greeting + ' ' + trigger.nick + punctuation)\n\n\n@rule(r'(?i)(Fuck|Screw) you,? $nickname[ \\t]*$')\ndef rude(bot, trigger):\n bot.say('Watch your mouth, ' + trigger.nick + ', or I\\'ll tell your mother!')\n\n\n@rule('$nickname!')\n@priority('high')\n@thread(False)\ndef interjection(bot, trigger):\n bot.say(trigger.nick + '!')\n", "path": "sopel/modules/ping.py"}]} | 931 | 165 |
gh_patches_debug_27278 | rasdani/github-patches | git_diff | Pylons__pyramid-2621 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pcreate -s shows wrong link to tutorials
After running
```
pcreate -s alchemy scaffold-alchemy
```
I see a link to the tutorials, but the link is a 404:
```
Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials
```
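For reference, the live pages sit under a versioned path, so the corrected links (as applied in the diff further down) would be:
```
Tutorials:     http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
```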
</issue>
<code>
[start of pyramid/scaffolds/__init__.py]
1 import binascii
2 import os
3 from textwrap import dedent
4
5 from pyramid.compat import native_
6
7 from pyramid.scaffolds.template import Template # API
8
9 class PyramidTemplate(Template):
10 """
11 A class that can be used as a base class for Pyramid scaffolding
12 templates.
13 """
14 def pre(self, command, output_dir, vars):
15 """ Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding
16 several variables to the default variables list (including
17 ``random_string``, and ``package_logger``). It also prevents common
18 misnamings (such as naming a package "site" or naming a package
19 logger "root".
20 """
21 vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))
22 package_logger = vars['package']
23 if package_logger == 'root':
24 # Rename the app logger in the rare case a project is named 'root'
25 package_logger = 'app'
26 vars['package_logger'] = package_logger
27 return Template.pre(self, command, output_dir, vars)
28
29 def post(self, command, output_dir, vars): # pragma: no cover
30 """ Overrides :meth:`pyramid.scaffolds.template.Template.post`, to
31 print "Welcome to Pyramid. Sorry for the convenience." after a
32 successful scaffolding rendering."""
33
34 separator = "=" * 79
35 msg = dedent(
36 """
37 %(separator)s
38 Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials
39 Documentation: http://docs.pylonsproject.org/projects/pyramid
40
41 Twitter (tips & updates): http://twitter.com/pylons
42 Mailing List: http://groups.google.com/group/pylons-discuss
43
44 Welcome to Pyramid. Sorry for the convenience.
45 %(separator)s
46 """ % {'separator': separator})
47
48 self.out(msg)
49 return Template.post(self, command, output_dir, vars)
50
51 def out(self, msg): # pragma: no cover (replaceable testing hook)
52 print(msg)
53
54 class StarterProjectTemplate(PyramidTemplate):
55 _template_dir = 'starter'
56 summary = 'Pyramid starter project'
57
58 class ZODBProjectTemplate(PyramidTemplate):
59 _template_dir = 'zodb'
60 summary = 'Pyramid ZODB project using traversal'
61
62 class AlchemyProjectTemplate(PyramidTemplate):
63 _template_dir = 'alchemy'
64 summary = 'Pyramid SQLAlchemy project using url dispatch'
65
[end of pyramid/scaffolds/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py
--- a/pyramid/scaffolds/__init__.py
+++ b/pyramid/scaffolds/__init__.py
@@ -35,11 +35,10 @@
msg = dedent(
"""
%(separator)s
- Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials
- Documentation: http://docs.pylonsproject.org/projects/pyramid
-
- Twitter (tips & updates): http://twitter.com/pylons
- Mailing List: http://groups.google.com/group/pylons-discuss
+ Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
+ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
+ Twitter: https://twitter.com/trypyramid
+ Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss
Welcome to Pyramid. Sorry for the convenience.
%(separator)s
@@ -53,12 +52,13 @@
class StarterProjectTemplate(PyramidTemplate):
_template_dir = 'starter'
- summary = 'Pyramid starter project'
+ summary = 'Pyramid starter project using URL dispatch and Chameleon'
class ZODBProjectTemplate(PyramidTemplate):
_template_dir = 'zodb'
- summary = 'Pyramid ZODB project using traversal'
+ summary = 'Pyramid project using ZODB, traversal, and Chameleon'
class AlchemyProjectTemplate(PyramidTemplate):
_template_dir = 'alchemy'
- summary = 'Pyramid SQLAlchemy project using url dispatch'
+ summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'
+ ' Chameleon'
| {"golden_diff": "diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py\n--- a/pyramid/scaffolds/__init__.py\n+++ b/pyramid/scaffolds/__init__.py\n@@ -35,11 +35,10 @@\n msg = dedent(\n \"\"\"\n %(separator)s\n- Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n- Documentation: http://docs.pylonsproject.org/projects/pyramid\n-\n- Twitter (tips & updates): http://twitter.com/pylons\n- Mailing List: http://groups.google.com/group/pylons-discuss\n+ Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/\n+ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/\n+ Twitter: https://twitter.com/trypyramid\n+ Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss\n \n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n@@ -53,12 +52,13 @@\n \n class StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n- summary = 'Pyramid starter project'\n+ summary = 'Pyramid starter project using URL dispatch and Chameleon'\n \n class ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n- summary = 'Pyramid ZODB project using traversal'\n+ summary = 'Pyramid project using ZODB, traversal, and Chameleon'\n \n class AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n- summary = 'Pyramid SQLAlchemy project using url dispatch'\n+ summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'\n+ ' Chameleon'\n", "issue": "pcreate -s shows wrong link to tutorials\nafter a \n\n```\npcreate -s alchemy scaffold-alchemy\n```\n\nI see a link to tutorials, but this link is a 404: \n\n```\nTutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n```\n\n", "before_files": [{"content": "import binascii\nimport os\nfrom textwrap import dedent\n\nfrom pyramid.compat import native_\n\nfrom pyramid.scaffolds.template import Template # API\n\nclass PyramidTemplate(Template):\n \"\"\"\n A class that can be used as a base class for Pyramid scaffolding\n templates.\n \"\"\"\n def pre(self, command, output_dir, vars):\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding\n several variables to the default variables list (including\n ``random_string``, and ``package_logger``). It also prevents common\n misnamings (such as naming a package \"site\" or naming a package\n logger \"root\".\n \"\"\"\n vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))\n package_logger = vars['package']\n if package_logger == 'root':\n # Rename the app logger in the rare case a project is named 'root'\n package_logger = 'app'\n vars['package_logger'] = package_logger\n return Template.pre(self, command, output_dir, vars)\n\n def post(self, command, output_dir, vars): # pragma: no cover\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to\n print \"Welcome to Pyramid. Sorry for the convenience.\" after a\n successful scaffolding rendering.\"\"\"\n\n separator = \"=\" * 79\n msg = dedent(\n \"\"\"\n %(separator)s\n Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n Documentation: http://docs.pylonsproject.org/projects/pyramid\n\n Twitter (tips & updates): http://twitter.com/pylons\n Mailing List: http://groups.google.com/group/pylons-discuss\n\n Welcome to Pyramid. 
Sorry for the convenience.\n %(separator)s\n \"\"\" % {'separator': separator})\n\n self.out(msg)\n return Template.post(self, command, output_dir, vars)\n\n def out(self, msg): # pragma: no cover (replaceable testing hook)\n print(msg)\n\nclass StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n summary = 'Pyramid starter project'\n\nclass ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n summary = 'Pyramid ZODB project using traversal'\n\nclass AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n summary = 'Pyramid SQLAlchemy project using url dispatch'\n", "path": "pyramid/scaffolds/__init__.py"}]} | 1,258 | 399 |
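
For quick reference, the fix in the row above boils down to new URLs and summaries in the scaffold welcome message. The sketch below is a minimal, self-contained restatement; `build_welcome_message` is a hypothetical helper invented for this illustration and is not part of Pyramid's API, but the message text mirrors the golden diff.

```python
from textwrap import dedent


def build_welcome_message(separator="=" * 79):
    # Hypothetical stand-in for the corrected PyramidTemplate.post() output:
    # the docs links now end in /en/latest/ and Twitter points at @trypyramid.
    return dedent(
        """
        %(separator)s
        Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
        Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
        Twitter: https://twitter.com/trypyramid
        Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss

        Welcome to Pyramid. Sorry for the convenience.
        %(separator)s
        """ % {"separator": separator}
    )


if __name__ == "__main__":
    msg = build_welcome_message()
    # The 404 from the issue is gone: the tutorials link now carries /en/latest/.
    assert "pyramid_tutorials/en/latest/" in msg
    print(msg)
```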
gh_patches_debug_57899 | rasdani/github-patches | git_diff | chanzuckerberg__single-cell-curation-428 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feat(curation notebooks): display the x-request-id in the error response
When an error occurs especially 500 errors, the only piece of debug information the user has is the x-request-id. Making this value visible to the users will help developer trouble shoot errors seen by the users in the future.
</issue>
<code>
[start of notebooks/curation_api/python/src/utils/logger.py]
1 import logging
2 import os
3 import sys
4
5
6 def set_log_level(log_level: str):
7 """
8 :param log_level: the logging level ("NOTSET", "DEBUG", "INFO", "WARN", "ERROR", "FATAL")
9 """
10 all_levels = logging._nameToLevel.keys()
11 if log_level not in all_levels:
12 raise Exception(f"The log_level arg must be one of {list(all_levels)}")
13 os.environ["LOG_LEVEL"] = log_level
14 logger = logging.getLogger()
15 logger.setLevel(log_level)
16 for h in logger.handlers:
17 h.setLevel(log_level)
18 print(f"Set logging level to {log_level}")
19
20
21 def get_custom_logger() -> logging.Logger:
22 """
23 Get a custom logger that will still print to stdout in notebooks.
24 :return: the logger object
25 """
26 log_level = os.getenv("LOG_LEVEL", "INFO")
27 logging.basicConfig(level=log_level)
28 logger = logging.getLogger()
29 logger.removeHandler(logger.handlers[0])
30 ch = logging.StreamHandler(stream=sys.stdout)
31 ch.setLevel(level=log_level)
32 level_printout = f"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}"
33 formatter = logging.Formatter(f"{level_printout}%(message)s")
34 ch.setFormatter(formatter)
35 logger.addHandler(ch)
36 return logger
37
38
39 def failure(logger: logging.Logger, e, *messages):
40 logger.error("\n\033[1m\033[38;5;9mFAILED\033[0m\n") # 'FAILED' in bold red
41 logger.error(e.response.reason + "\n")
42 logger.error(e.response.text + "\n")
43 if messages:
44 [logger.error(m) for m in messages]
45
46
47 def success(logger: logging.Logger, *messages):
48 logger.info("\n\033[1m\033[38;5;10mSUCCESS\033[0m\n") # 'SUCCESS' in bold green
49 if messages:
50 [logger.info(m) for m in messages]
51
[end of notebooks/curation_api/python/src/utils/logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/notebooks/curation_api/python/src/utils/logger.py b/notebooks/curation_api/python/src/utils/logger.py
--- a/notebooks/curation_api/python/src/utils/logger.py
+++ b/notebooks/curation_api/python/src/utils/logger.py
@@ -40,6 +40,8 @@
logger.error("\n\033[1m\033[38;5;9mFAILED\033[0m\n") # 'FAILED' in bold red
logger.error(e.response.reason + "\n")
logger.error(e.response.text + "\n")
+ logger.error(f"x-request-id: {e.response.headers.get('x-request-id')}")
+
if messages:
[logger.error(m) for m in messages]
| {"golden_diff": "diff --git a/notebooks/curation_api/python/src/utils/logger.py b/notebooks/curation_api/python/src/utils/logger.py\n--- a/notebooks/curation_api/python/src/utils/logger.py\n+++ b/notebooks/curation_api/python/src/utils/logger.py\n@@ -40,6 +40,8 @@\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n+ logger.error(f\"x-request-id: {e.response.headers.get('x-request-id')}\")\n+\n if messages:\n [logger.error(m) for m in messages]\n", "issue": "Feat(curation notebooks): display the x-request-id in the error response\nWhen an error occurs especially 500 errors, the only piece of debug information the user has is the x-request-id. Making this value visible to the users will help developer trouble shoot errors seen by the users in the future.\n", "before_files": [{"content": "import logging\nimport os\nimport sys\n\n\ndef set_log_level(log_level: str):\n \"\"\"\n :param log_level: the logging level (\"NOTSET\", \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\")\n \"\"\"\n all_levels = logging._nameToLevel.keys()\n if log_level not in all_levels:\n raise Exception(f\"The log_level arg must be one of {list(all_levels)}\")\n os.environ[\"LOG_LEVEL\"] = log_level\n logger = logging.getLogger()\n logger.setLevel(log_level)\n for h in logger.handlers:\n h.setLevel(log_level)\n print(f\"Set logging level to {log_level}\")\n\n\ndef get_custom_logger() -> logging.Logger:\n \"\"\"\n Get a custom logger that will still print to stdout in notebooks.\n :return: the logger object\n \"\"\"\n log_level = os.getenv(\"LOG_LEVEL\", \"INFO\")\n logging.basicConfig(level=log_level)\n logger = logging.getLogger()\n logger.removeHandler(logger.handlers[0])\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(level=log_level)\n level_printout = f\"{'%(levelname)s:' if logger.level in ('WARN', 'ERROR') else ''}\"\n formatter = logging.Formatter(f\"{level_printout}%(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\ndef failure(logger: logging.Logger, e, *messages):\n logger.error(\"\\n\\033[1m\\033[38;5;9mFAILED\\033[0m\\n\") # 'FAILED' in bold red\n logger.error(e.response.reason + \"\\n\")\n logger.error(e.response.text + \"\\n\")\n if messages:\n [logger.error(m) for m in messages]\n\n\ndef success(logger: logging.Logger, *messages):\n logger.info(\"\\n\\033[1m\\033[38;5;10mSUCCESS\\033[0m\\n\") # 'SUCCESS' in bold green\n if messages:\n [logger.info(m) for m in messages]\n", "path": "notebooks/curation_api/python/src/utils/logger.py"}]} | 1,145 | 168 |
gh_patches_debug_21200 | rasdani/github-patches | git_diff | pyca__cryptography-2174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Windows linking code
We should remove the environment variable based linking and document that if you want to build it dynamic vs static then you should build the OpenSSL libs in your preferred fashion. The zip files we create in our jenkins job build them statically.
</issue>
<code>
[start of src/_cffi_src/build_openssl.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import os
8 import sys
9
10 from _cffi_src.utils import build_ffi_for_binding, extra_link_args
11
12
13 def _get_openssl_libraries(platform):
14 # OpenSSL goes by a different library name on different operating systems.
15 if platform != "win32":
16 # In some circumstances, the order in which these libs are
17 # specified on the linker command-line is significant;
18 # libssl must come before libcrypto
19 # (http://marc.info/?l=openssl-users&m=135361825921871)
20 return ["ssl", "crypto"]
21 else:
22 link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
23 return _get_openssl_windows_libraries(link_type)
24
25
26 def _get_openssl_windows_libraries(link_type):
27 if link_type == "dynamic":
28 return ["libeay32", "ssleay32", "advapi32"]
29 elif link_type == "static" or link_type == "":
30 return ["libeay32mt", "ssleay32mt", "advapi32",
31 "crypt32", "gdi32", "user32", "ws2_32"]
32 else:
33 raise ValueError(
34 "PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'"
35 )
36
37
38 _OSX_PRE_INCLUDE = """
39 #ifdef __APPLE__
40 #include <AvailabilityMacros.h>
41 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
42 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
43 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
44 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
45 #endif
46 """
47
48 _OSX_POST_INCLUDE = """
49 #ifdef __APPLE__
50 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
51 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
52 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
53 #endif
54 """
55
56
57 ffi = build_ffi_for_binding(
58 module_name="_openssl",
59 module_prefix="_cffi_src.openssl.",
60 modules=[
61 "aes",
62 "asn1",
63 "bignum",
64 "bio",
65 "cmac",
66 "cms",
67 "conf",
68 "crypto",
69 "dh",
70 "dsa",
71 "ec",
72 "ecdh",
73 "ecdsa",
74 "engine",
75 "err",
76 "evp",
77 "hmac",
78 "nid",
79 "objects",
80 "opensslv",
81 "pem",
82 "pkcs7",
83 "pkcs12",
84 "rand",
85 "rsa",
86 "ssl",
87 "x509",
88 "x509name",
89 "x509v3",
90 "x509_vfy"
91 ],
92 pre_include=_OSX_PRE_INCLUDE,
93 post_include=_OSX_POST_INCLUDE,
94 libraries=_get_openssl_libraries(sys.platform),
95 extra_link_args=extra_link_args(sys.platform),
96 )
97
[end of src/_cffi_src/build_openssl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py
--- a/src/_cffi_src/build_openssl.py
+++ b/src/_cffi_src/build_openssl.py
@@ -4,7 +4,6 @@
from __future__ import absolute_import, division, print_function
-import os
import sys
from _cffi_src.utils import build_ffi_for_binding, extra_link_args
@@ -19,20 +18,8 @@
# (http://marc.info/?l=openssl-users&m=135361825921871)
return ["ssl", "crypto"]
else:
- link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
- return _get_openssl_windows_libraries(link_type)
-
-
-def _get_openssl_windows_libraries(link_type):
- if link_type == "dynamic":
- return ["libeay32", "ssleay32", "advapi32"]
- elif link_type == "static" or link_type == "":
- return ["libeay32mt", "ssleay32mt", "advapi32",
+ return ["libeay32", "ssleay32", "advapi32",
"crypt32", "gdi32", "user32", "ws2_32"]
- else:
- raise ValueError(
- "PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'"
- )
_OSX_PRE_INCLUDE = """
| {"golden_diff": "diff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py\n--- a/src/_cffi_src/build_openssl.py\n+++ b/src/_cffi_src/build_openssl.py\n@@ -4,7 +4,6 @@\n \n from __future__ import absolute_import, division, print_function\n \n-import os\n import sys\n \n from _cffi_src.utils import build_ffi_for_binding, extra_link_args\n@@ -19,20 +18,8 @@\n # (http://marc.info/?l=openssl-users&m=135361825921871)\n return [\"ssl\", \"crypto\"]\n else:\n- link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n- return _get_openssl_windows_libraries(link_type)\n-\n-\n-def _get_openssl_windows_libraries(link_type):\n- if link_type == \"dynamic\":\n- return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n- elif link_type == \"static\" or link_type == \"\":\n- return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n+ return [\"libeay32\", \"ssleay32\", \"advapi32\",\n \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n- else:\n- raise ValueError(\n- \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n- )\n \n \n _OSX_PRE_INCLUDE = \"\"\"\n", "issue": "Update Windows linking code\nWe should remove the environment variable based linking and document that if you want to build it dynamic vs static then you should build the OpenSSL libs in your preferred fashion. The zip files we create in our jenkins job build them statically.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom _cffi_src.utils import build_ffi_for_binding, extra_link_args\n\n\ndef _get_openssl_libraries(platform):\n # OpenSSL goes by a different library name on different operating systems.\n if platform != \"win32\":\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (http://marc.info/?l=openssl-users&m=135361825921871)\n return [\"ssl\", \"crypto\"]\n else:\n link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n return _get_openssl_windows_libraries(link_type)\n\n\ndef _get_openssl_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n elif link_type == \"static\" or link_type == \"\":\n return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n else:\n raise ValueError(\n \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n )\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nffi = build_ffi_for_binding(\n module_name=\"_openssl\",\n module_prefix=\"_cffi_src.openssl.\",\n modules=[\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"cms\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n 
\"opensslv\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\"\n ],\n pre_include=_OSX_PRE_INCLUDE,\n post_include=_OSX_POST_INCLUDE,\n libraries=_get_openssl_libraries(sys.platform),\n extra_link_args=extra_link_args(sys.platform),\n)\n", "path": "src/_cffi_src/build_openssl.py"}]} | 1,531 | 356 |
gh_patches_debug_9414 | rasdani/github-patches | git_diff | pypa__setuptools-3042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] INFO log level is no longer default for some commands
### setuptools version
60.0.0 and newer
### Python version
Python 3.8
### OS
OpenSUSE Tumbleweed
### Additional environment information
_No response_
### Description
When running certain commands setuptools seems to log at a `WARN` level and not `INFO` level which was the previous default. This appears to have changed with the switch from `stdlib` to `local` distutils vendoring in 60.0.0. 59.8.0 continues to log at an `INFO` level as expected. The latest 60.5.4 release continues to log at a WARN level by default. Commands like `sdist` appear to log at an `INFO` level though making me suspect that this is limited to distutils commands and not those that setuptools has overridden.
The example I've used below is `build_scripts` because it is relatively simple but it seems to affect others as well. Calling this out as recent changing to setuptools logging seem to indicate a desire to maintain backward compat with distutils for now and that seems to have broken though I don't understand how/why. Also, pointing this out in case it points to bigger issues in the local vendoring of distutils.
### Expected behavior
I expect setuptools to continue logging at INFO level by default.
### How to Reproduce
1. Create setup.py with scripts entry.
2. Install setuptools 59.8.0
3. Run `setup.py build_scripts --dry-run`
4. Install setuptools 60.0.0
5. Run `setup.py build_scripts --dry-run`
6. Note that there is no log.info() message indicating a mode change would occur.
Note that setting -v on the commands does not change the output. Additionally setting SETUPTOOLS_USE_DISTUTILS=stdlib corrects this on newer setuptools indicating the local vendoring is likely to blame.
### Output
```console
$ .tox/py38/bin/pip install -U setuptools==59.8.0
Collecting setuptools==59.8.0
Using cached setuptools-59.8.0-py3-none-any.whl (952 kB)
Installing collected packages: setuptools
Attempting uninstall: setuptools
Found existing installation: setuptools 60.0.0
Uninstalling setuptools-60.0.0:
Successfully uninstalled setuptools-60.0.0
Successfully installed setuptools-59.8.0
$ .tox/py38/bin/python3 setup.py build_scripts --dry-run
running build_scripts
creating build/scripts-3.8
warning: build_scripts: foo/bar.py is an empty file (skipping)
changing mode of build/scripts-3.8/bar.py
$ .tox/py38/bin/pip install -U setuptools==60.0.0
Collecting setuptools==60.0.0
Using cached setuptools-60.0.0-py3-none-any.whl (952 kB)
Installing collected packages: setuptools
Attempting uninstall: setuptools
Found existing installation: setuptools 59.8.0
Uninstalling setuptools-59.8.0:
Successfully uninstalled setuptools-59.8.0
Successfully installed setuptools-60.0.0
$ .tox/py38/bin/python3 setup.py build_scripts --dry-run
running build_scripts
creating build/scripts-3.8
warning: build_scripts: foo/bar.py is an empty file (skipping)
```
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
</issue>
<code>
[start of setuptools/logging.py]
1 import sys
2 import logging
3 import distutils.log
4 from . import monkey
5
6
7 def _not_warning(record):
8 return record.levelno < logging.WARNING
9
10
11 def configure():
12 """
13 Configure logging to emit warning and above to stderr
14 and everything else to stdout. This behavior is provided
15 for compatibilty with distutils.log but may change in
16 the future.
17 """
18 err_handler = logging.StreamHandler()
19 err_handler.setLevel(logging.WARNING)
20 out_handler = logging.StreamHandler(sys.stdout)
21 out_handler.addFilter(_not_warning)
22 handlers = err_handler, out_handler
23 logging.basicConfig(
24 format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
25 monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
26
27
28 def set_threshold(level):
29 logging.root.setLevel(level*10)
30 return set_threshold.unpatched(level)
31
[end of setuptools/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/logging.py b/setuptools/logging.py
--- a/setuptools/logging.py
+++ b/setuptools/logging.py
@@ -24,6 +24,12 @@
format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
+ # For some reason `distutils.log` module is getting cached in `distutils.dist`
+ # and then loaded again when patched,
+ # implying: id(distutils.log) != id(distutils.dist.log).
+ # Make sure the same module object is used everywhere:
+ distutils.dist.log = distutils.log
+
def set_threshold(level):
logging.root.setLevel(level*10)
| {"golden_diff": "diff --git a/setuptools/logging.py b/setuptools/logging.py\n--- a/setuptools/logging.py\n+++ b/setuptools/logging.py\n@@ -24,6 +24,12 @@\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n \n+ # For some reason `distutils.log` module is getting cached in `distutils.dist`\n+ # and then loaded again when patched,\n+ # implying: id(distutils.log) != id(distutils.dist.log).\n+ # Make sure the same module object is used everywhere:\n+ distutils.dist.log = distutils.log\n+\n \n def set_threshold(level):\n logging.root.setLevel(level*10)\n", "issue": "[BUG] INFO log level is no longer default for some commands\n### setuptools version\n\n60.0.0 and newer\n\n### Python version\n\nPython 3.8\n\n### OS\n\nOpenSUSE Tumbleweed\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nWhen running certain commands setuptools seems to log at a `WARN` level and not `INFO` level which was the previous default. This appears to have changed with the switch from `stdlib` to `local` distutils vendoring in 60.0.0. 59.8.0 continues to log at an `INFO` level as expected. The latest 60.5.4 release continues to log at a WARN level by default. Commands like `sdist` appear to log at an `INFO` level though making me suspect that this is limited to distutils commands and not those that setuptools has overridden.\r\n\r\nThe example I've used below is `build_scripts` because it is relatively simple but it seems to affect others as well. Calling this out as recent changing to setuptools logging seem to indicate a desire to maintain backward compat with distutils for now and that seems to have broken though I don't understand how/why. Also, pointing this out in case it points to bigger issues in the local vendoring of distutils.\n\n### Expected behavior\n\nI expect setuptools to continue logging at INFO level by default.\n\n### How to Reproduce\n\n1. Create setup.py with scripts entry.\r\n2. Install setuptools 59.8.0\r\n3. Run `setup.py build_scripts --dry-run`\r\n4. Install setuptools 60.0.0\r\n5. Run `setup.py build_scripts --dry-run`\r\n6. Note that there is no log.info() message indicating a mode change would occur.\r\n\r\nNote that setting -v on the commands does not change the output. 
Additionally setting SETUPTOOLS_USE_DISTUTILS=stdlib corrects this on newer setuptools indicating the local vendoring is likely to blame.\n\n### Output\n\n```console\r\n$ .tox/py38/bin/pip install -U setuptools==59.8.0\r\nCollecting setuptools==59.8.0\r\n Using cached setuptools-59.8.0-py3-none-any.whl (952 kB)\r\nInstalling collected packages: setuptools\r\n Attempting uninstall: setuptools\r\n Found existing installation: setuptools 60.0.0\r\n Uninstalling setuptools-60.0.0:\r\n Successfully uninstalled setuptools-60.0.0\r\nSuccessfully installed setuptools-59.8.0\r\n$ .tox/py38/bin/python3 setup.py build_scripts --dry-run\r\nrunning build_scripts\r\ncreating build/scripts-3.8\r\nwarning: build_scripts: foo/bar.py is an empty file (skipping)\r\n\r\nchanging mode of build/scripts-3.8/bar.py\r\n$ .tox/py38/bin/pip install -U setuptools==60.0.0\r\nCollecting setuptools==60.0.0\r\n Using cached setuptools-60.0.0-py3-none-any.whl (952 kB)\r\nInstalling collected packages: setuptools\r\n Attempting uninstall: setuptools\r\n Found existing installation: setuptools 59.8.0\r\n Uninstalling setuptools-59.8.0:\r\n Successfully uninstalled setuptools-59.8.0\r\nSuccessfully installed setuptools-60.0.0\r\n$ .tox/py38/bin/python3 setup.py build_scripts --dry-run\r\nrunning build_scripts\r\ncreating build/scripts-3.8\r\nwarning: build_scripts: foo/bar.py is an empty file (skipping)\r\n\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import sys\nimport logging\nimport distutils.log\nfrom . import monkey\n\n\ndef _not_warning(record):\n return record.levelno < logging.WARNING\n\n\ndef configure():\n \"\"\"\n Configure logging to emit warning and above to stderr\n and everything else to stdout. This behavior is provided\n for compatibilty with distutils.log but may change in\n the future.\n \"\"\"\n err_handler = logging.StreamHandler()\n err_handler.setLevel(logging.WARNING)\n out_handler = logging.StreamHandler(sys.stdout)\n out_handler.addFilter(_not_warning)\n handlers = err_handler, out_handler\n logging.basicConfig(\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n\n\ndef set_threshold(level):\n logging.root.setLevel(level*10)\n return set_threshold.unpatched(level)\n", "path": "setuptools/logging.py"}]} | 1,557 | 162 |
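
The one-line fix above is easy to miss, so here is a sketch of its intent. It assumes an interpreter where `distutils` is importable (the stdlib copy before Python 3.12, or setuptools' vendored one). In a plain interpreter the two names already refer to the same module object, so the assignment is a harmless no-op; under the local vendoring described in the issue they diverged, and threshold changes never reached the cached reference.

```python
import logging
import distutils.dist
import distutils.log

# Re-point the reference cached inside distutils.dist at the module that
# setuptools patches, so both names share one set_threshold.
distutils.dist.log = distutils.log
assert distutils.dist.log is distutils.log


def set_threshold(level):
    # Mirrors setuptools.logging.set_threshold: distutils levels run 1-5,
    # so multiplying by 10 maps them onto the stdlib logging scale.
    logging.root.setLevel(level * 10)


set_threshold(2)  # distutils INFO
assert logging.root.level == logging.INFO
```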
gh_patches_debug_35446 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1902 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
repair paydays table
This isn't really used much yet but it should be (nice historical data). Two repairs needed:
- ~~recalculate nparticipants for paydays <= #3~~
- recalculate exchange_volume and exchange_fees_volume for paydays <= #8 (see #202)
</issue>
<code>
[start of gittip/models/__init__.py]
1 """
2
3 The most important object in the Gittip object model is Participant, and the
4 second most important one is Ccommunity. There are a few others, but those are
5 the most important two. Participant, in particular, is at the center of
6 everything on Gittip.
7
8 """
9 from postgres import Postgres
10
11 class GittipDB(Postgres):
12
13 def self_check(self):
14 """
15 Runs all available self checks on the database.
16 """
17 self._check_balances()
18 self._check_tips()
19 self._check_orphans()
20 self._check_orphans_no_tips()
21
22 def _check_tips(self):
23 """
24 Checks that there are no rows in tips with duplicate (tipper, tippee, mtime).
25
26 https://github.com/gittip/www.gittip.com/issues/1704
27 """
28 conflicting_tips = self.one("""
29 SELECT count(*)
30 FROM
31 (
32 SELECT * FROM tips
33 EXCEPT
34 SELECT DISTINCT ON(tipper, tippee, mtime) *
35 FROM tips
36 ORDER BY tipper, tippee, mtime
37 ) AS foo
38 """)
39 assert conflicting_tips == 0
40
41 def _check_balances(self):
42 """
43 Recalculates balances for all participants from transfers and exchanges.
44
45 https://github.com/gittip/www.gittip.com/issues/1118
46 """
47 with self.get_cursor() as cursor:
48 if cursor.one("select exists (select * from paydays where ts_end < ts_start) as running"):
49 # payday is running and the query bellow does not account for pending
50 return
51 b = cursor.one("""
52 select count(*)
53 from (
54 select username, sum(a) as balance
55 from (
56 select participant as username, sum(amount) as a
57 from exchanges
58 where amount > 0
59 group by participant
60
61 union
62
63 select participant as username, sum(amount-fee) as a
64 from exchanges
65 where amount < 0
66 group by participant
67
68 union
69
70 select tipper as username, sum(-amount) as a
71 from transfers
72 group by tipper
73
74 union
75
76 select tippee as username, sum(amount) as a
77 from transfers
78 group by tippee
79 ) as foo
80 group by username
81
82 except
83
84 select username, balance
85 from participants
86 ) as foo2
87 """)
88 assert b == 0, "conflicting balances: {}".format(b)
89
90 def _check_orphans(self):
91 """
92 Finds participants that
93 * does not have corresponding elsewhere account
94 * have not been absorbed by other participant
95
96 These are broken because new participants arise from elsewhere
97 and elsewhere is detached only by take over which makes a note
98 in absorptions if it removes the last elsewhere account.
99
100 Especially bad case is when also claimed_time is set because
101 there must have been elsewhere account attached and used to sign in.
102
103 https://github.com/gittip/www.gittip.com/issues/617
104 """
105 orphans = self.all("""
106 select username
107 from participants
108 where not exists (select * from elsewhere where elsewhere.participant=username)
109 and not exists (select * from absorptions where archived_as=username)
110 """)
111 assert len(orphans) == 0, "missing elsewheres: {}".format(list(orphans))
112
113 def _check_orphans_no_tips(self):
114 """
115 Finds participants
116 * without elsewhere account attached
117 * having non zero outstanding tip
118
119 This should not happen because when we remove the last elsewhere account
120 in take_over we also zero out all tips.
121 """
122 tips_with_orphans = self.all("""
123 WITH orphans AS (
124 SELECT username FROM participants
125 WHERE NOT EXISTS (SELECT 1 FROM elsewhere WHERE participant=username)
126 ), valid_tips AS (
127 SELECT * FROM (
128 SELECT DISTINCT ON (tipper, tippee) *
129 FROM tips
130 ORDER BY tipper, tippee, mtime DESC
131 ) AS foo
132 WHERE amount > 0
133 )
134 SELECT id FROM valid_tips
135 WHERE tipper IN (SELECT * FROM orphans)
136 OR tippee IN (SELECT * FROM orphans)
137 """)
138 known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'
139 real = set(tips_with_orphans) - known
140 assert len(real) == 0, real
141 #
142
[end of gittip/models/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gittip/models/__init__.py b/gittip/models/__init__.py
--- a/gittip/models/__init__.py
+++ b/gittip/models/__init__.py
@@ -18,6 +18,7 @@
self._check_tips()
self._check_orphans()
self._check_orphans_no_tips()
+ self._check_paydays_volumes()
def _check_tips(self):
"""
@@ -138,4 +139,72 @@
known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'
real = set(tips_with_orphans) - known
assert len(real) == 0, real
+
+ def _check_paydays_volumes(self):
+ """
+ Recalculate *_volume fields in paydays table using exchanges table.
+ """
+ charge_volume = self.all("""
+ select * from (
+ select id, ts_start, charge_volume, (
+ select sum(amount+fee)
+ from exchanges
+ where timestamp > ts_start
+ and timestamp < ts_end
+ and amount > 0
+ ) as ref
+ from paydays
+ order by id
+ ) as foo
+ where charge_volume != ref
+ """)
+ assert len(charge_volume) == 0
+
+ charge_fees_volume = self.all("""
+ select * from (
+ select id, ts_start, charge_fees_volume, (
+ select sum(fee)
+ from exchanges
+ where timestamp > ts_start
+ and timestamp < ts_end
+ and amount > 0
+ ) as ref
+ from paydays
+ order by id
+ ) as foo
+ where charge_fees_volume != ref
+ """)
+ assert len(charge_fees_volume) == 0
+
+ ach_volume = self.all("""
+ select * from (
+ select id, ts_start, ach_volume, (
+ select sum(amount)
+ from exchanges
+ where timestamp > ts_start
+ and timestamp < ts_end
+ and amount < 0
+ ) as ref
+ from paydays
+ order by id
+ ) as foo
+ where ach_volume != ref
+ """)
+ assert len(ach_volume) == 0
+
+ ach_fees_volume = self.all("""
+ select * from (
+ select id, ts_start, ach_fees_volume, (
+ select sum(fee)
+ from exchanges
+ where timestamp > ts_start
+ and timestamp < ts_end
+ and amount < 0
+ ) as ref
+ from paydays
+ order by id
+ ) as foo
+ where ach_fees_volume != ref
+ """)
+ assert len(ach_fees_volume) == 0
#
| {"golden_diff": "diff --git a/gittip/models/__init__.py b/gittip/models/__init__.py\n--- a/gittip/models/__init__.py\n+++ b/gittip/models/__init__.py\n@@ -18,6 +18,7 @@\n self._check_tips()\n self._check_orphans()\n self._check_orphans_no_tips()\n+ self._check_paydays_volumes()\n \n def _check_tips(self):\n \"\"\"\n@@ -138,4 +139,72 @@\n known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'\n real = set(tips_with_orphans) - known\n assert len(real) == 0, real\n+\n+ def _check_paydays_volumes(self):\n+ \"\"\"\n+ Recalculate *_volume fields in paydays table using exchanges table.\n+ \"\"\"\n+ charge_volume = self.all(\"\"\"\n+ select * from (\n+ select id, ts_start, charge_volume, (\n+ select sum(amount+fee)\n+ from exchanges\n+ where timestamp > ts_start\n+ and timestamp < ts_end\n+ and amount > 0\n+ ) as ref\n+ from paydays\n+ order by id\n+ ) as foo\n+ where charge_volume != ref\n+ \"\"\")\n+ assert len(charge_volume) == 0\n+\n+ charge_fees_volume = self.all(\"\"\"\n+ select * from (\n+ select id, ts_start, charge_fees_volume, (\n+ select sum(fee)\n+ from exchanges\n+ where timestamp > ts_start\n+ and timestamp < ts_end\n+ and amount > 0\n+ ) as ref\n+ from paydays\n+ order by id\n+ ) as foo\n+ where charge_fees_volume != ref\n+ \"\"\")\n+ assert len(charge_fees_volume) == 0\n+\n+ ach_volume = self.all(\"\"\"\n+ select * from (\n+ select id, ts_start, ach_volume, (\n+ select sum(amount)\n+ from exchanges\n+ where timestamp > ts_start\n+ and timestamp < ts_end\n+ and amount < 0\n+ ) as ref\n+ from paydays\n+ order by id\n+ ) as foo\n+ where ach_volume != ref\n+ \"\"\")\n+ assert len(ach_volume) == 0\n+\n+ ach_fees_volume = self.all(\"\"\"\n+ select * from (\n+ select id, ts_start, ach_fees_volume, (\n+ select sum(fee)\n+ from exchanges\n+ where timestamp > ts_start\n+ and timestamp < ts_end\n+ and amount < 0\n+ ) as ref\n+ from paydays\n+ order by id\n+ ) as foo\n+ where ach_fees_volume != ref\n+ \"\"\")\n+ assert len(ach_fees_volume) == 0\n #\n", "issue": "repair paydays table\nThis isn't really used much yet but it should be (nice historical data). Two repairs needed:\n- ~~recalculate nparticipants for paydays <= #3~~\n- recalculate exchange_volume and exchange_fees_volume for paydays <= #8 (see #202)\n\n", "before_files": [{"content": "\"\"\"\n\nThe most important object in the Gittip object model is Participant, and the\nsecond most important one is Ccommunity. There are a few others, but those are\nthe most important two. 
Participant, in particular, is at the center of\neverything on Gittip.\n\n\"\"\"\nfrom postgres import Postgres\n\nclass GittipDB(Postgres):\n\n def self_check(self):\n \"\"\"\n Runs all available self checks on the database.\n \"\"\"\n self._check_balances()\n self._check_tips()\n self._check_orphans()\n self._check_orphans_no_tips()\n\n def _check_tips(self):\n \"\"\"\n Checks that there are no rows in tips with duplicate (tipper, tippee, mtime).\n\n https://github.com/gittip/www.gittip.com/issues/1704\n \"\"\"\n conflicting_tips = self.one(\"\"\"\n SELECT count(*)\n FROM\n (\n SELECT * FROM tips\n EXCEPT\n SELECT DISTINCT ON(tipper, tippee, mtime) *\n FROM tips\n ORDER BY tipper, tippee, mtime\n ) AS foo\n \"\"\")\n assert conflicting_tips == 0\n\n def _check_balances(self):\n \"\"\"\n Recalculates balances for all participants from transfers and exchanges.\n\n https://github.com/gittip/www.gittip.com/issues/1118\n \"\"\"\n with self.get_cursor() as cursor:\n if cursor.one(\"select exists (select * from paydays where ts_end < ts_start) as running\"):\n # payday is running and the query bellow does not account for pending\n return\n b = cursor.one(\"\"\"\n select count(*)\n from (\n select username, sum(a) as balance\n from (\n select participant as username, sum(amount) as a\n from exchanges\n where amount > 0\n group by participant\n\n union\n\n select participant as username, sum(amount-fee) as a\n from exchanges\n where amount < 0\n group by participant\n\n union\n\n select tipper as username, sum(-amount) as a\n from transfers\n group by tipper\n\n union\n\n select tippee as username, sum(amount) as a\n from transfers\n group by tippee\n ) as foo\n group by username\n\n except\n\n select username, balance\n from participants\n ) as foo2\n \"\"\")\n assert b == 0, \"conflicting balances: {}\".format(b)\n\n def _check_orphans(self):\n \"\"\"\n Finds participants that\n * does not have corresponding elsewhere account\n * have not been absorbed by other participant\n\n These are broken because new participants arise from elsewhere\n and elsewhere is detached only by take over which makes a note\n in absorptions if it removes the last elsewhere account.\n\n Especially bad case is when also claimed_time is set because\n there must have been elsewhere account attached and used to sign in.\n\n https://github.com/gittip/www.gittip.com/issues/617\n \"\"\"\n orphans = self.all(\"\"\"\n select username\n from participants\n where not exists (select * from elsewhere where elsewhere.participant=username)\n and not exists (select * from absorptions where archived_as=username)\n \"\"\")\n assert len(orphans) == 0, \"missing elsewheres: {}\".format(list(orphans))\n\n def _check_orphans_no_tips(self):\n \"\"\"\n Finds participants\n * without elsewhere account attached\n * having non zero outstanding tip\n\n This should not happen because when we remove the last elsewhere account\n in take_over we also zero out all tips.\n \"\"\"\n tips_with_orphans = self.all(\"\"\"\n WITH orphans AS (\n SELECT username FROM participants\n WHERE NOT EXISTS (SELECT 1 FROM elsewhere WHERE participant=username)\n ), valid_tips AS (\n SELECT * FROM (\n SELECT DISTINCT ON (tipper, tippee) *\n FROM tips\n ORDER BY tipper, tippee, mtime DESC\n ) AS foo\n WHERE amount > 0\n )\n SELECT id FROM valid_tips\n WHERE tipper IN (SELECT * FROM orphans)\n OR tippee IN (SELECT * FROM orphans)\n \"\"\")\n known = set([25206]) # '4c074000c7bc', 'naderman', '3.00'\n real = set(tips_with_orphans) - known\n assert len(real) == 0, 
real\n#\n", "path": "gittip/models/__init__.py"}]} | 1,920 | 670 |
gh_patches_debug_14903 | rasdani/github-patches | git_diff | facebookresearch__xformers-258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fourier_mix attention does not support mixed precision training
Fourier mix attention does not support half precision.
```Traceback (most recent call last):
...
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/content/muzik/src/modules/xformer/custom_block_factory.py", line 196, in forward
x = self.mha(q, k, v, att_mask=att_mask)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/content/muzik/src/modules/xformer/custom_multi_head_dispatch.py", line 254, in forward
y = self.attention(q=q, k=k, v=v, att_mask=att_mask)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/xformers/components/attention/fourier_mix.py", line 25, in forward
att = torch.fft.fft2(q).real
RuntimeError: Unsupported dtype Half```
</issue>
<code>
[start of xformers/components/attention/fourier_mix.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import torch
7
8 from xformers.components.attention import Attention, AttentionConfig, register_attention
9
10
11 @register_attention("fourier_mix", AttentionConfig)
12 class FourierMix(Attention):
13 def __init__(self, dropout: float, *_, **__):
14 """
15 FFT-based pseudo-attention mechanism, from
16 "
17 "FNet: Mixing Tokens with Fourier Transforms"
18 Lee-Thorp et al., 2021, https://arxiv.org/pdf/2105.03824.pdf
19 """
20 super().__init__()
21 self.attn_drop = torch.nn.Dropout(dropout, inplace=False)
22 self.requires_input_projection = False
23
24 def forward(self, q: torch.Tensor, *_, **__):
25 att = torch.fft.fft2(q).real
26
27 att = self.attn_drop(att)
28
29 return att
30
[end of xformers/components/attention/fourier_mix.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xformers/components/attention/fourier_mix.py b/xformers/components/attention/fourier_mix.py
--- a/xformers/components/attention/fourier_mix.py
+++ b/xformers/components/attention/fourier_mix.py
@@ -4,6 +4,7 @@
# LICENSE file in the root directory of this source tree.
import torch
+from torch.cuda.amp import autocast
from xformers.components.attention import Attention, AttentionConfig, register_attention
@@ -22,7 +23,9 @@
self.requires_input_projection = False
def forward(self, q: torch.Tensor, *_, **__):
- att = torch.fft.fft2(q).real
+ # Guard against autocast / fp16, not supported by torch.fft.fft2
+ with autocast(enabled=False):
+ att = torch.fft.fft2(q).real
att = self.attn_drop(att)
| {"golden_diff": "diff --git a/xformers/components/attention/fourier_mix.py b/xformers/components/attention/fourier_mix.py\n--- a/xformers/components/attention/fourier_mix.py\n+++ b/xformers/components/attention/fourier_mix.py\n@@ -4,6 +4,7 @@\n # LICENSE file in the root directory of this source tree.\n \n import torch\n+from torch.cuda.amp import autocast\n \n from xformers.components.attention import Attention, AttentionConfig, register_attention\n \n@@ -22,7 +23,9 @@\n self.requires_input_projection = False\n \n def forward(self, q: torch.Tensor, *_, **__):\n- att = torch.fft.fft2(q).real\n+ # Guard against autocast / fp16, not supported by torch.fft.fft2\n+ with autocast(enabled=False):\n+ att = torch.fft.fft2(q).real\n \n att = self.attn_drop(att)\n", "issue": "fourier_mix attention does not support mixed precision training\nFourier mix attention does not support half precision.\r\n\r\n```Traceback (most recent call last):\r\n ...\r\n File \"/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/content/muzik/src/modules/xformer/custom_block_factory.py\", line 196, in forward\r\n x = self.mha(q, k, v, att_mask=att_mask)\r\n File \"/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/content/muzik/src/modules/xformer/custom_multi_head_dispatch.py\", line 254, in forward\r\n y = self.attention(q=q, k=k, v=v, att_mask=att_mask)\r\n File \"/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/usr/local/lib/python3.7/dist-packages/xformers/components/attention/fourier_mix.py\", line 25, in forward\r\n att = torch.fft.fft2(q).real\r\nRuntimeError: Unsupported dtype Half```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\nfrom xformers.components.attention import Attention, AttentionConfig, register_attention\n\n\n@register_attention(\"fourier_mix\", AttentionConfig)\nclass FourierMix(Attention):\n def __init__(self, dropout: float, *_, **__):\n \"\"\"\n FFT-based pseudo-attention mechanism, from\n \"\n \"FNet: Mixing Tokens with Fourier Transforms\"\n Lee-Thorp et al., 2021, https://arxiv.org/pdf/2105.03824.pdf\n \"\"\"\n super().__init__()\n self.attn_drop = torch.nn.Dropout(dropout, inplace=False)\n self.requires_input_projection = False\n\n def forward(self, q: torch.Tensor, *_, **__):\n att = torch.fft.fft2(q).real\n\n att = self.attn_drop(att)\n\n return att\n", "path": "xformers/components/attention/fourier_mix.py"}]} | 1,127 | 205 |
gh_patches_debug_13317 | rasdani/github-patches | git_diff | CTFd__CTFd-1830 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Potentially expose user standings in admin scoreboard
This could let people give out awards for users who earn the most points or something like that.
The issue with this is that user standings don't really mean anything. A user that earned all the points in a team might just have been the person to submit all the flags, not actually doing the work of a challenge.
I don't know if I want to expose this data for people to rely on when it fundamentally is based on a flawed data source.
</issue>
<code>
[start of CTFd/admin/scoreboard.py]
1 from flask import render_template
2
3 from CTFd.admin import admin
4 from CTFd.scoreboard import get_standings
5 from CTFd.utils.decorators import admins_only
6
7
8 @admin.route("/admin/scoreboard")
9 @admins_only
10 def scoreboard_listing():
11 standings = get_standings(admin=True)
12 return render_template("admin/scoreboard.html", standings=standings)
13
[end of CTFd/admin/scoreboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/admin/scoreboard.py b/CTFd/admin/scoreboard.py
--- a/CTFd/admin/scoreboard.py
+++ b/CTFd/admin/scoreboard.py
@@ -1,12 +1,16 @@
from flask import render_template
from CTFd.admin import admin
-from CTFd.scoreboard import get_standings
+from CTFd.utils.config import is_teams_mode
from CTFd.utils.decorators import admins_only
+from CTFd.utils.scores import get_standings, get_user_standings
@admin.route("/admin/scoreboard")
@admins_only
def scoreboard_listing():
standings = get_standings(admin=True)
- return render_template("admin/scoreboard.html", standings=standings)
+ user_standings = get_user_standings(admin=True) if is_teams_mode() else None
+ return render_template(
+ "admin/scoreboard.html", standings=standings, user_standings=user_standings
+ )
| {"golden_diff": "diff --git a/CTFd/admin/scoreboard.py b/CTFd/admin/scoreboard.py\n--- a/CTFd/admin/scoreboard.py\n+++ b/CTFd/admin/scoreboard.py\n@@ -1,12 +1,16 @@\n from flask import render_template\n \n from CTFd.admin import admin\n-from CTFd.scoreboard import get_standings\n+from CTFd.utils.config import is_teams_mode\n from CTFd.utils.decorators import admins_only\n+from CTFd.utils.scores import get_standings, get_user_standings\n \n \n @admin.route(\"/admin/scoreboard\")\n @admins_only\n def scoreboard_listing():\n standings = get_standings(admin=True)\n- return render_template(\"admin/scoreboard.html\", standings=standings)\n+ user_standings = get_user_standings(admin=True) if is_teams_mode() else None\n+ return render_template(\n+ \"admin/scoreboard.html\", standings=standings, user_standings=user_standings\n+ )\n", "issue": "Potentially expose user standings in admin scoreboard\nThis could let people give out awards for users who earn the most points or something like that. \r\n\r\nThe issue with this is that user standings don't really mean anything. A user that earned all the points in a team might just have been the person to submit all the flags, not actually doing the work of a challenge. \r\n\r\nI don't know if I want to expose this data for people to rely on when it fundamentally is based on a flawed data source. \n", "before_files": [{"content": "from flask import render_template\n\nfrom CTFd.admin import admin\nfrom CTFd.scoreboard import get_standings\nfrom CTFd.utils.decorators import admins_only\n\n\[email protected](\"/admin/scoreboard\")\n@admins_only\ndef scoreboard_listing():\n standings = get_standings(admin=True)\n return render_template(\"admin/scoreboard.html\", standings=standings)\n", "path": "CTFd/admin/scoreboard.py"}]} | 742 | 217 |
gh_patches_debug_581 | rasdani/github-patches | git_diff | pex-tool__pex-1139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.22
On the docket:
+ [x] Fix `--help-variables` docs. #1113
+ [x] pex binary hangs on startup at atomic_directory #1119
+ [x] pex vendoring does not (always) isolate itself from a colliding setuptools install in site-packages #1031
+ [x] Remove long deprecated support for _pex module. #1135
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.21"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.21"
+__version__ = "2.1.22"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.21\"\n+__version__ = \"2.1.22\"\n", "issue": "Release 2.1.22\nOn the docket:\r\n+ [x] Fix `--help-variables` docs. #1113\r\n+ [x] pex binary hangs on startup at atomic_directory #1119\r\n+ [x] pex vendoring does not (always) isolate itself from a colliding setuptools install in site-packages #1031\r\n+ [x] Remove long deprecated support for _pex module. #1135\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.21\"\n", "path": "pex/version.py"}]} | 686 | 97 |
gh_patches_debug_11486 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Koch Fractal" example is missing an import [develop branch]
The "Koch Fractal" example is missing the import for the `reduce()` function.
```
Traceback (most recent call last):
File "C:\Anaconda3\lib\site-packages\pyqtgraph\examples\fractal.py", line 106, in <module>
update()
File "C:\Anaconda3\lib\site-packages\pyqtgraph\examples\fractal.py", line 86, in update
mats = reduce(matmul, mats)
NameError: name 'reduce' is not defined
```
Adding `from functools import reduce` fixes it.
</issue>
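
Before the code listing, a quick standalone check of the fix the issue describes: on Python 3, `reduce` lives in `functools`, so the example needs an explicit import. The toy stacks of identity matrices are invented for this demonstration; `matmul` matches the batched-product helper defined inside `update()` in the listing below.

```python
from functools import reduce  # the import the example file was missing

import numpy as np


def matmul(a, b):
    # Batched 3x3 matrix product over stacks of transforms.
    return np.sum(np.transpose(a, (0, 2, 1))[..., None] * b[..., None, :],
                  axis=-3)


stacks = [np.tile(np.eye(3), (4, 1, 1)) for _ in range(3)]
combined = reduce(matmul, stacks)  # works once reduce is imported
assert combined.shape == (4, 3, 3)
assert np.allclose(combined, np.eye(3))
```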
<code>
[start of examples/fractal.py]
1 # -*- coding: utf-8 -*-
2 """
3 Displays an interactive Koch fractal
4 """
5 import initExample ## Add path to library (just for examples; you do not need this)
6
7 import pyqtgraph as pg
8 from pyqtgraph.Qt import QtCore, QtGui
9 import numpy as np
10
11 app = QtGui.QApplication([])
12
13 # Set up UI widgets
14 win = pg.QtGui.QWidget()
15 win.setWindowTitle('pyqtgraph example: fractal demo')
16 layout = pg.QtGui.QGridLayout()
17 win.setLayout(layout)
18 layout.setContentsMargins(0, 0, 0, 0)
19 depthLabel = pg.QtGui.QLabel('fractal depth:')
20 layout.addWidget(depthLabel, 0, 0)
21 depthSpin = pg.SpinBox(value=5, step=1, bounds=[1, 10], delay=0, int=True)
22 depthSpin.resize(100, 20)
23 layout.addWidget(depthSpin, 0, 1)
24 w = pg.GraphicsLayoutWidget()
25 layout.addWidget(w, 1, 0, 1, 2)
26 win.show()
27
28 # Set up graphics
29 v = w.addViewBox()
30 v.setAspectLocked()
31 baseLine = pg.PolyLineROI([[0, 0], [1, 0], [1.5, 1], [2, 0], [3, 0]], pen=(0, 255, 0, 100), movable=False)
32 v.addItem(baseLine)
33 fc = pg.PlotCurveItem(pen=(255, 255, 255, 200), antialias=True)
34 v.addItem(fc)
35 v.autoRange()
36
37
38 transformMap = [0, 0, None]
39
40
41 def update():
42 # recalculate and redraw the fractal curve
43
44 depth = depthSpin.value()
45 pts = baseLine.getState()['points']
46 nbseg = len(pts) - 1
47 nseg = nbseg**depth
48
49 # Get a transformation matrix for each base segment
50 trs = []
51 v1 = pts[-1] - pts[0]
52 l1 = v1.length()
53 for i in range(len(pts)-1):
54 p1 = pts[i]
55 p2 = pts[i+1]
56 v2 = p2 - p1
57 t = p1 - pts[0]
58 r = v2.angle(v1)
59 s = v2.length() / l1
60 trs.append(pg.SRTTransform({'pos': t, 'scale': (s, s), 'angle': r}))
61
62 basePts = [np.array(list(pt) + [1]) for pt in baseLine.getState()['points']]
63 baseMats = np.dstack([tr.matrix().T for tr in trs]).transpose(2, 0, 1)
64
65 # Generate an array of matrices to transform base points
66 global transformMap
67 if transformMap[:2] != [depth, nbseg]:
68 # we can cache the transform index to save a little time..
69 nseg = nbseg**depth
70 matInds = np.empty((depth, nseg), dtype=int)
71 for i in range(depth):
72 matInds[i] = np.tile(np.repeat(np.arange(nbseg), nbseg**(depth-1-i)), nbseg**i)
73 transformMap = [depth, nbseg, matInds]
74
75 # Each column in matInds contains the indices referring to the base transform
76 # matrices that must be multiplied together to generate the final transform
77 # for each segment of the fractal
78 matInds = transformMap[2]
79
80 # Collect all matrices needed for generating fractal curve
81 mats = baseMats[matInds]
82
83 # Magic-multiply stacks of matrices together
84 def matmul(a, b):
85 return np.sum(np.transpose(a,(0,2,1))[..., None] * b[..., None, :], axis=-3)
86 mats = reduce(matmul, mats)
87
88 # Transform base points through matrix array
89 pts = np.empty((nseg * nbseg + 1, 2))
90 for l in range(len(trs)):
91 bp = basePts[l]
92 pts[l:-1:len(trs)] = np.dot(mats, bp)[:, :2]
93
94 # Finish the curve with the last base point
95 pts[-1] = basePts[-1][:2]
96
97 # update fractal curve with new points
98 fc.setData(pts[:,0], pts[:,1])
99
100
101 # Update the fractal whenever the base shape or depth has changed
102 baseLine.sigRegionChanged.connect(update)
103 depthSpin.valueChanged.connect(update)
104
105 # Initialize
106 update()
107
108
109 ## Start Qt event loop unless running in interactive mode or using pyside.
110 if __name__ == '__main__':
111 import sys
112 if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
113 QtGui.QApplication.instance().exec_()
114
115
116
117
118
119
120
121
122
[end of examples/fractal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/fractal.py b/examples/fractal.py
--- a/examples/fractal.py
+++ b/examples/fractal.py
@@ -4,6 +4,7 @@
"""
import initExample ## Add path to library (just for examples; you do not need this)
+from functools import reduce
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
@@ -111,12 +112,4 @@
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
-
-
-
-
-
-
-
-
\ No newline at end of file
| {"golden_diff": "diff --git a/examples/fractal.py b/examples/fractal.py\n--- a/examples/fractal.py\n+++ b/examples/fractal.py\n@@ -4,6 +4,7 @@\n \"\"\"\n import initExample ## Add path to library (just for examples; you do not need this)\n \n+from functools import reduce\n import pyqtgraph as pg\n from pyqtgraph.Qt import QtCore, QtGui\n import numpy as np\n@@ -111,12 +112,4 @@\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n-\n-\n-\n-\n-\n-\n-\n-\n \n\\ No newline at end of file\n", "issue": "\"Koch Fractal\" example is missing an import [develop branch]\nThe \"Koch Fractal\" example is missing the import for the `reduce()` function.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Anaconda3\\lib\\site-packages\\pyqtgraph\\examples\\fractal.py\", line 106, in <module>\r\n update()\r\n File \"C:\\Anaconda3\\lib\\site-packages\\pyqtgraph\\examples\\fractal.py\", line 86, in update\r\n mats = reduce(matmul, mats)\r\nNameError: name 'reduce' is not defined\r\n```\r\n\r\nAdding `from functools import reduce` fixes it.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplays an interactive Koch fractal\n\"\"\"\nimport initExample ## Add path to library (just for examples; you do not need this)\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport numpy as np\n\napp = QtGui.QApplication([])\n\n# Set up UI widgets\nwin = pg.QtGui.QWidget()\nwin.setWindowTitle('pyqtgraph example: fractal demo')\nlayout = pg.QtGui.QGridLayout()\nwin.setLayout(layout)\nlayout.setContentsMargins(0, 0, 0, 0)\ndepthLabel = pg.QtGui.QLabel('fractal depth:')\nlayout.addWidget(depthLabel, 0, 0)\ndepthSpin = pg.SpinBox(value=5, step=1, bounds=[1, 10], delay=0, int=True)\ndepthSpin.resize(100, 20)\nlayout.addWidget(depthSpin, 0, 1)\nw = pg.GraphicsLayoutWidget()\nlayout.addWidget(w, 1, 0, 1, 2)\nwin.show()\n\n# Set up graphics\nv = w.addViewBox()\nv.setAspectLocked()\nbaseLine = pg.PolyLineROI([[0, 0], [1, 0], [1.5, 1], [2, 0], [3, 0]], pen=(0, 255, 0, 100), movable=False)\nv.addItem(baseLine)\nfc = pg.PlotCurveItem(pen=(255, 255, 255, 200), antialias=True)\nv.addItem(fc)\nv.autoRange()\n\n\ntransformMap = [0, 0, None]\n\n\ndef update():\n # recalculate and redraw the fractal curve\n \n depth = depthSpin.value()\n pts = baseLine.getState()['points']\n nbseg = len(pts) - 1\n nseg = nbseg**depth\n \n # Get a transformation matrix for each base segment\n trs = []\n v1 = pts[-1] - pts[0]\n l1 = v1.length()\n for i in range(len(pts)-1):\n p1 = pts[i]\n p2 = pts[i+1]\n v2 = p2 - p1\n t = p1 - pts[0]\n r = v2.angle(v1)\n s = v2.length() / l1\n trs.append(pg.SRTTransform({'pos': t, 'scale': (s, s), 'angle': r}))\n\n basePts = [np.array(list(pt) + [1]) for pt in baseLine.getState()['points']]\n baseMats = np.dstack([tr.matrix().T for tr in trs]).transpose(2, 0, 1)\n\n # Generate an array of matrices to transform base points\n global transformMap\n if transformMap[:2] != [depth, nbseg]:\n # we can cache the transform index to save a little time..\n nseg = nbseg**depth\n matInds = np.empty((depth, nseg), dtype=int)\n for i in range(depth):\n matInds[i] = np.tile(np.repeat(np.arange(nbseg), nbseg**(depth-1-i)), nbseg**i)\n transformMap = [depth, nbseg, matInds]\n \n # Each column in matInds contains the indices referring to the base transform\n # matrices that must be multiplied together to generate the final transform\n # for each segment of the fractal \n matInds = transformMap[2]\n \n # Collect all 
matrices needed for generating fractal curve\n mats = baseMats[matInds]\n \n # Magic-multiply stacks of matrices together\n def matmul(a, b):\n return np.sum(np.transpose(a,(0,2,1))[..., None] * b[..., None, :], axis=-3)\n mats = reduce(matmul, mats)\n \n # Transform base points through matrix array\n pts = np.empty((nseg * nbseg + 1, 2))\n for l in range(len(trs)):\n bp = basePts[l]\n pts[l:-1:len(trs)] = np.dot(mats, bp)[:, :2]\n \n # Finish the curve with the last base point\n pts[-1] = basePts[-1][:2]\n\n # update fractal curve with new points\n fc.setData(pts[:,0], pts[:,1])\n\n\n# Update the fractal whenever the base shape or depth has changed\nbaseLine.sigRegionChanged.connect(update)\ndepthSpin.valueChanged.connect(update)\n\n# Initialize\nupdate()\n\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n\n\n\n\n\n\n\n ", "path": "examples/fractal.py"}]} | 1,999 | 153 |
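Side note on the fix above: in Python 3, `reduce` is no longer a builtin and lives in `functools`, which is why the example raises `NameError` until the import is added. A minimal, self-contained sketch of the same chaining pattern; the 2x2 matrices here are hypothetical stand-ins for the example's transform stacks:

```python
from functools import reduce

import numpy as np

def matmul(a, b):
    # Chain two transforms; plays the role of matmul in the example.
    return a @ b

# Three placeholder transforms; reduce folds them left to right.
mats = [np.eye(2), 2 * np.eye(2), 3 * np.eye(2)]
combined = reduce(matmul, mats)
print(combined)  # 6 * identity matrix
```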
gh_patches_debug_28696 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Minimum Product: Irrelevant MapFeature enum elements and their uses
Remove the irrelevant `MapFeature` elements in `world_state.py` and their uses throughout the codebase.
</issue>
<code>
[start of aimmo-game/simulation/state/world_state.py]
1 from enum import Enum
2 from collections import defaultdict
3
4 class MapFeature(Enum):
5 HEALTH_POINT = 'health_point'
6 SCORE_POINT = 'score_point'
7 PICKUP = 'pickup'
8 OBSTACLE = 'obstacle'
9
10 class WorldState():
11 """
12 The world updates class serves as a buffer between the updates generated
13 in different parts of the back-end and the data emitted by the socket.
14
15 It contains all the parameters that need to be sent to the client every
16 time the world is updated. These are:
17 - Player (aka avatars) creation, deletion and update.
18 - Changes in map features, i.e. creation and deletion of:
19 * Health points.
20 * Score points.
21 * Pickups.
22 * Obstacles.
23 """
24
25 def __init__(self, game_state):
26 self.game_state = game_state
27 self.players = defaultdict(dict)
28 self.map_features = defaultdict(dict)
29 self.clear_updates()
30
31 def get_updates(self):
32 self.refresh()
33
34 updates = {
35 'players' : dict(self.players),
36 'map_features' : dict(self.map_features)
37 }
38 self.clear_updates()
39
40 return updates
41
42 def clear_updates(self):
43 self.players = {
44 'update': [],
45 'create': [],
46 'delete': []
47 }
48 for map_feature in MapFeature:
49 self.map_features[map_feature.value] = {
50 'create': [],
51 'delete': []
52 }
53
54 # Player updates.
55 def create_player(self, player_data):
56 # Player data: {id, x, y, rotation, health, score, appearance?}
57 self.players["create"].append(player_data)
58
59 def delete_player(self, player_id):
60 # Player id: {id}
61 self.players["delete"].append(player_id)
62
63 def update_player(self, player_update):
64 # Player_update: {id, x, y, rotation, health, score}
65 self.players["update"].append(player_update)
66
67 # Map features updates.
68 def create_map_feature(self, map_feature, map_feature_data):
69 self.map_features[map_feature]["create"].append(map_feature_data)
70
71 def delete_map_feature(self, map_feature, map_feature_id):
72 self.map_features[map_feature]["delete"].append(map_feature_id)
73
74 # Refresh the world state. Basically gather information from the avatar manager
75 # and the world map and organise it.
76 def refresh(self):
77 def player_dict(avatar):
78 return {
79 'id' : avatar.player_id,
80 'x' : avatar.location.x,
81 'y' : avatar.location.y,
82 'score' : avatar.score,
83 'health': avatar.health,
84 # This is temporary, appearance will be more complex later on.
85 'colour': "#%06x" % (avatar.player_id * 4999)
86 }
87
88 def map_feature_dict(map_feature):
89 return {
90 'id' : str(hash(map_feature)),
91 'x' : map_feature.location.x,
92 'y' : map_feature.location.y
93 }
94
95 with self.game_state as game_state:
96 # Update active avatars.
97 for player in game_state.avatar_manager.avatars:
98 self.update_player(player_dict(player))
99
100 main_avatar_id = 1
101 avatar_view = game_state.avatar_manager.get_avatar(main_avatar_id).view
102 if avatar_view is None:
103 return
104
105 if avatar_view.is_empty:
106 avatar_view.reveal_all_cells(game_state.world_map)
107 avatar_view.is_empty = False
108
109 # Creation.
110 for cell in avatar_view.cells_to_reveal:
111 # There is an avatar.
112 if cell.avatar is not None:
113 self.create_player(player_dict(cell.avatar))
114 # Cell is an obstacle.
115 if not cell.habitable:
116 self.create_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))
117 # Cell is a score point.
118 if cell.generates_score:
119 self.create_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))
120
121 # Updates.
122 for cell in avatar_view.cells_in_view:
123 if cell.add_to_scene is not None:
124 self.create_map_feature(cell.add_to_scene.value, map_feature_dict(cell))
125 if cell.remove_from_scene is not None:
126 self.delete_map_feature(cell.remove_from_scene.value, map_feature_dict(cell))
127
128 # Deletion.
129 for cell in avatar_view.cells_to_clear:
130 # There is an avatar.
131 if cell.avatar is not None:
132 self.delete_player(player_dict(cell.avatar))
133 # Cell is an obstacle.
134 if not cell.habitable:
135 self.delete_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))
136 # Cell is a score point.
137 if cell.generates_score:
138 self.delete_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))
139
[end of aimmo-game/simulation/state/world_state.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aimmo-game/simulation/state/world_state.py b/aimmo-game/simulation/state/world_state.py
--- a/aimmo-game/simulation/state/world_state.py
+++ b/aimmo-game/simulation/state/world_state.py
@@ -1,12 +1,12 @@
from enum import Enum
from collections import defaultdict
+
class MapFeature(Enum):
- HEALTH_POINT = 'health_point'
- SCORE_POINT = 'score_point'
PICKUP = 'pickup'
OBSTACLE = 'obstacle'
+
class WorldState():
"""
The world updates class serves as a buffer between the updates generated
@@ -114,9 +114,6 @@
# Cell is an obstacle.
if not cell.habitable:
self.create_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))
- # Cell is a score point.
- if cell.generates_score:
- self.create_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))
# Updates.
for cell in avatar_view.cells_in_view:
@@ -133,6 +130,3 @@
# Cell is an obstacle.
if not cell.habitable:
self.delete_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))
- # Cell is a score point.
- if cell.generates_score:
- self.delete_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))
| {"golden_diff": "diff --git a/aimmo-game/simulation/state/world_state.py b/aimmo-game/simulation/state/world_state.py\n--- a/aimmo-game/simulation/state/world_state.py\n+++ b/aimmo-game/simulation/state/world_state.py\n@@ -1,12 +1,12 @@\n from enum import Enum\n from collections import defaultdict\n \n+\n class MapFeature(Enum):\n- HEALTH_POINT = 'health_point'\n- SCORE_POINT = 'score_point'\n PICKUP = 'pickup'\n OBSTACLE = 'obstacle'\n \n+\n class WorldState():\n \"\"\"\n The world updates class serves as a buffer between the updates generated\n@@ -114,9 +114,6 @@\n # Cell is an obstacle.\n if not cell.habitable:\n self.create_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))\n- # Cell is a score point.\n- if cell.generates_score:\n- self.create_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))\n \n # Updates.\n for cell in avatar_view.cells_in_view:\n@@ -133,6 +130,3 @@\n # Cell is an obstacle.\n if not cell.habitable:\n self.delete_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))\n- # Cell is a score point.\n- if cell.generates_score:\n- self.delete_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))\n", "issue": "Minimum Product: Irrelevant MapFeature enum elements and their uses\nRemove the irrelevant `MapFeature` elements in `world_state.py` and their uses throughout the codebase.\n", "before_files": [{"content": "from enum import Enum\nfrom collections import defaultdict\n\nclass MapFeature(Enum):\n HEALTH_POINT = 'health_point'\n SCORE_POINT = 'score_point'\n PICKUP = 'pickup'\n OBSTACLE = 'obstacle'\n\nclass WorldState():\n \"\"\"\n The world updates class serves as a buffer between the updates generated\n in different parts of the back-end and the data emitted by the socket.\n\n It contains all the parameters that need to be sent to the client every\n time the world is updated. These are:\n - Player (aka avatars) creation, deletion and update.\n - Changes in map features, i.e. creation and deletion of:\n * Health points.\n * Score points.\n * Pickups.\n * Obstacles.\n \"\"\"\n\n def __init__(self, game_state):\n self.game_state = game_state\n self.players = defaultdict(dict)\n self.map_features = defaultdict(dict)\n self.clear_updates()\n\n def get_updates(self):\n self.refresh()\n\n updates = {\n 'players' : dict(self.players),\n 'map_features' : dict(self.map_features)\n }\n self.clear_updates()\n\n return updates\n\n def clear_updates(self):\n self.players = {\n 'update': [],\n 'create': [],\n 'delete': []\n }\n for map_feature in MapFeature:\n self.map_features[map_feature.value] = {\n 'create': [],\n 'delete': []\n }\n\n # Player updates.\n def create_player(self, player_data):\n # Player data: {id, x, y, rotation, health, score, appearance?}\n self.players[\"create\"].append(player_data)\n\n def delete_player(self, player_id):\n # Player id: {id}\n self.players[\"delete\"].append(player_id)\n\n def update_player(self, player_update):\n # Player_update: {id, x, y, rotation, health, score}\n self.players[\"update\"].append(player_update)\n\n # Map features updates.\n def create_map_feature(self, map_feature, map_feature_data):\n self.map_features[map_feature][\"create\"].append(map_feature_data)\n\n def delete_map_feature(self, map_feature, map_feature_id):\n self.map_features[map_feature][\"delete\"].append(map_feature_id)\n\n # Refresh the world state. 
Basically gather information from the avatar manager\n # and the world map and organise it.\n def refresh(self):\n def player_dict(avatar):\n return {\n 'id' : avatar.player_id,\n 'x' : avatar.location.x,\n 'y' : avatar.location.y,\n 'score' : avatar.score,\n 'health': avatar.health,\n # This is temporary, appearance will be more complex later on.\n 'colour': \"#%06x\" % (avatar.player_id * 4999)\n }\n\n def map_feature_dict(map_feature):\n return {\n 'id' : str(hash(map_feature)),\n 'x' : map_feature.location.x,\n 'y' : map_feature.location.y\n }\n\n with self.game_state as game_state:\n # Update active avatars.\n for player in game_state.avatar_manager.avatars:\n self.update_player(player_dict(player))\n\n main_avatar_id = 1\n avatar_view = game_state.avatar_manager.get_avatar(main_avatar_id).view\n if avatar_view is None:\n return\n\n if avatar_view.is_empty:\n avatar_view.reveal_all_cells(game_state.world_map)\n avatar_view.is_empty = False\n\n # Creation.\n for cell in avatar_view.cells_to_reveal:\n # There is an avatar.\n if cell.avatar is not None:\n self.create_player(player_dict(cell.avatar))\n # Cell is an obstacle.\n if not cell.habitable:\n self.create_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))\n # Cell is a score point.\n if cell.generates_score:\n self.create_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))\n\n # Updates.\n for cell in avatar_view.cells_in_view:\n if cell.add_to_scene is not None:\n self.create_map_feature(cell.add_to_scene.value, map_feature_dict(cell))\n if cell.remove_from_scene is not None:\n self.delete_map_feature(cell.remove_from_scene.value, map_feature_dict(cell))\n\n # Deletion.\n for cell in avatar_view.cells_to_clear:\n # There is an avatar.\n if cell.avatar is not None:\n self.delete_player(player_dict(cell.avatar))\n # Cell is an obstacle.\n if not cell.habitable:\n self.delete_map_feature(MapFeature.OBSTACLE.value, map_feature_dict(cell))\n # Cell is a score point.\n if cell.generates_score:\n self.delete_map_feature(MapFeature.SCORE_POINT.value, map_feature_dict(cell))\n", "path": "aimmo-game/simulation/state/world_state.py"}]} | 1,921 | 314 |
gh_patches_debug_38900 | rasdani/github-patches | git_diff | qtile__qtile-2896 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setting foreground in extension_defaults result with non working CommandSet
# Issue description
setting `foreground` in `extension_defaults` result that `CommandSet` don't appear at all
probably other extension don't work too
# Qtile version
0.18.1 up to latest git commit
# Stack traces
```
error, cannot allocate color 'ffffff'
```
# Configuration
```
key_exensions = Key([mod], 's',
lazy.run_extension(
extension.CommandSet(
commands={
'reboot': 'reboot',
'shutdown': 'shutdown',
'suspend': 'systemctl suspend',
'lock session': '/home/tokariew/.local/bin/lockme',
'restart qtile': 'qtile cmd-obj -o cmd -f restart',
'lazy restart': 'lazy.restart',
'logout': 'qtile cmd-obj -o cmd -f shutdown',
},
dmenu_lines=6
)
)),
keys.extend(key_exensions)
extension_defaults = dict(
font="FiraCode Nerd Font",
fontsize=14,
padding=4,
padding_y=0,
foreground='ffffff',
)
```
</issue>
<code>
[start of libqtile/extension/base.py]
1 # Copyright (c) 2017 Dario Giovannetti
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import shlex
22 from subprocess import PIPE, Popen
23 from typing import Any, List, Tuple # noqa: F401
24
25 from libqtile import configurable
26
27
28 class _Extension(configurable.Configurable):
29 """Base Extension class"""
30
31 installed_extensions = [] # type: List
32
33 defaults = [
34 ("font", "sans", "defines the font name to be used"),
35 ("fontsize", None, "defines the font size to be used"),
36 ("background", None, "defines the normal background color"),
37 ("foreground", None, "defines the normal foreground color"),
38 ("selected_background", None, "defines the selected background color"),
39 ("selected_foreground", None, "defines the selected foreground color"),
40 ]
41
42 def __init__(self, **config):
43 configurable.Configurable.__init__(self, **config)
44 self.add_defaults(_Extension.defaults)
45 _Extension.installed_extensions.append(self)
46
47 def _configure(self, qtile):
48 self.qtile = qtile
49
50 def run(self):
51 """
52 This method must be implemented by the subclasses.
53 """
54 raise NotImplementedError()
55
56
57 class RunCommand(_Extension):
58 """
59 Run an arbitrary command.
60
61 Mostly useful as a superclass for more specific extensions that need to
62 interact with the qtile object.
63
64 Also consider simply using lazy.spawn() or writing a
65 `client <http://docs.qtile.org/en/latest/manual/commands/scripting.html>`_.
66 """
67 defaults = [
68 # NOTE: Do not use a list as a default value, since it would be shared
69 # among all the objects inheriting this class, and if one of them
70 # modified it, all the other objects would see the modified list;
71 # use a string or a tuple instead, which are immutable
72 ("command", None, "the command to be launched (string or list with arguments)"),
73 ] # type: List[Tuple[str, Any, str]]
74
75 def __init__(self, **config):
76 _Extension.__init__(self, **config)
77 self.add_defaults(RunCommand.defaults)
78
79 def run(self):
80 """
81 An extension can inherit this class, define configured_command and use
82 the process object by overriding this method and using super():
83
84 .. code-block:: python
85
86 def _configure(self, qtile):
87 Superclass._configure(self, qtile)
88 self.configured_command = "foo --bar"
89
90 def run(self):
91 process = super(Subclass, self).run()
92 """
93 if self.configured_command:
94 if isinstance(self.configured_command, str):
95 self.configured_command = shlex.split(self.configured_command)
96 # Else assume that self.configured_command is already a sequence
97 else:
98 self.configured_command = self.command
99 return Popen(self.configured_command, stdout=PIPE, stdin=PIPE)
100
[end of libqtile/extension/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/extension/base.py b/libqtile/extension/base.py
--- a/libqtile/extension/base.py
+++ b/libqtile/extension/base.py
@@ -1,4 +1,5 @@
# Copyright (c) 2017 Dario Giovannetti
+# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -17,12 +18,15 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-
+import re
import shlex
from subprocess import PIPE, Popen
from typing import Any, List, Tuple # noqa: F401
from libqtile import configurable
+from libqtile.log_utils import logger
+
+RGB = re.compile(r"^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$")
class _Extension(configurable.Configurable):
@@ -33,16 +37,40 @@
defaults = [
("font", "sans", "defines the font name to be used"),
("fontsize", None, "defines the font size to be used"),
- ("background", None, "defines the normal background color"),
- ("foreground", None, "defines the normal foreground color"),
- ("selected_background", None, "defines the selected background color"),
- ("selected_foreground", None, "defines the selected foreground color"),
+ ("background", None, "defines the normal background color (#RGB or #RRGGBB)"),
+ ("foreground", None, "defines the normal foreground color (#RGB or #RRGGBB)"),
+ ("selected_background", None, "defines the selected background color (#RGB or #RRGGBB)"),
+ ("selected_foreground", None, "defines the selected foreground color (#RGB or #RRGGBB)"),
]
def __init__(self, **config):
configurable.Configurable.__init__(self, **config)
self.add_defaults(_Extension.defaults)
_Extension.installed_extensions.append(self)
+ self._check_colors()
+
+ def _check_colors(self):
+ """
+ dmenu needs colours to be in #rgb or #rrggbb format.
+
+ Checks colour value, removes invalid values and adds # if missing.
+ """
+ for c in ["background", "foreground", "selected_background", "selected_foreground"]:
+ col = getattr(self, c, None)
+ if col is None:
+ continue
+
+ if not isinstance(col, str) or not RGB.match(col):
+ logger.warning(
+ f"Invalid extension '{c}' color: {col}. "
+ f"Must be #RGB or #RRGGBB string."
+ )
+ setattr(self, c, None)
+ continue
+
+ if not col.startswith("#"):
+ col = f"#{col}"
+ setattr(self, c, col)
def _configure(self, qtile):
self.qtile = qtile
| {"golden_diff": "diff --git a/libqtile/extension/base.py b/libqtile/extension/base.py\n--- a/libqtile/extension/base.py\n+++ b/libqtile/extension/base.py\n@@ -1,4 +1,5 @@\n # Copyright (c) 2017 Dario Giovannetti\n+# Copyright (c) 2021 elParaguayo\n #\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n@@ -17,12 +18,15 @@\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n-\n+import re\n import shlex\n from subprocess import PIPE, Popen\n from typing import Any, List, Tuple # noqa: F401\n \n from libqtile import configurable\n+from libqtile.log_utils import logger\n+\n+RGB = re.compile(r\"^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$\")\n \n \n class _Extension(configurable.Configurable):\n@@ -33,16 +37,40 @@\n defaults = [\n (\"font\", \"sans\", \"defines the font name to be used\"),\n (\"fontsize\", None, \"defines the font size to be used\"),\n- (\"background\", None, \"defines the normal background color\"),\n- (\"foreground\", None, \"defines the normal foreground color\"),\n- (\"selected_background\", None, \"defines the selected background color\"),\n- (\"selected_foreground\", None, \"defines the selected foreground color\"),\n+ (\"background\", None, \"defines the normal background color (#RGB or #RRGGBB)\"),\n+ (\"foreground\", None, \"defines the normal foreground color (#RGB or #RRGGBB)\"),\n+ (\"selected_background\", None, \"defines the selected background color (#RGB or #RRGGBB)\"),\n+ (\"selected_foreground\", None, \"defines the selected foreground color (#RGB or #RRGGBB)\"),\n ]\n \n def __init__(self, **config):\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Extension.defaults)\n _Extension.installed_extensions.append(self)\n+ self._check_colors()\n+\n+ def _check_colors(self):\n+ \"\"\"\n+ dmenu needs colours to be in #rgb or #rrggbb format.\n+\n+ Checks colour value, removes invalid values and adds # if missing.\n+ \"\"\"\n+ for c in [\"background\", \"foreground\", \"selected_background\", \"selected_foreground\"]:\n+ col = getattr(self, c, None)\n+ if col is None:\n+ continue\n+\n+ if not isinstance(col, str) or not RGB.match(col):\n+ logger.warning(\n+ f\"Invalid extension '{c}' color: {col}. 
\"\n+ f\"Must be #RGB or #RRGGBB string.\"\n+ )\n+ setattr(self, c, None)\n+ continue\n+\n+ if not col.startswith(\"#\"):\n+ col = f\"#{col}\"\n+ setattr(self, c, col)\n \n def _configure(self, qtile):\n self.qtile = qtile\n", "issue": "setting foreground in extension_defaults result with non working CommandSet\n# Issue description\r\n\r\nsetting `foreground` in `extension_defaults` result that `CommandSet` don't appear at all\r\nprobably other extension don't work too\r\n# Qtile version\r\n\r\n0.18.1 up to latest git commit\r\n\r\n# Stack traces\r\n\r\n```\r\nerror, cannot allocate color 'ffffff'\r\n```\r\n# Configuration\r\n\r\n```\r\nkey_exensions = Key([mod], 's',\r\n lazy.run_extension(\r\n extension.CommandSet(\r\n commands={\r\n 'reboot': 'reboot',\r\n 'shutdown': 'shutdown',\r\n 'suspend': 'systemctl suspend',\r\n 'lock session': '/home/tokariew/.local/bin/lockme',\r\n 'restart qtile': 'qtile cmd-obj -o cmd -f restart',\r\n 'lazy restart': 'lazy.restart',\r\n 'logout': 'qtile cmd-obj -o cmd -f shutdown',\r\n },\r\n dmenu_lines=6\r\n )\r\n )),\r\n\r\nkeys.extend(key_exensions)\r\n\r\nextension_defaults = dict(\r\n font=\"FiraCode Nerd Font\",\r\n fontsize=14,\r\n padding=4,\r\n padding_y=0,\r\n foreground='ffffff',\r\n)\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) 2017 Dario Giovannetti\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport shlex\nfrom subprocess import PIPE, Popen\nfrom typing import Any, List, Tuple # noqa: F401\n\nfrom libqtile import configurable\n\n\nclass _Extension(configurable.Configurable):\n \"\"\"Base Extension class\"\"\"\n\n installed_extensions = [] # type: List\n\n defaults = [\n (\"font\", \"sans\", \"defines the font name to be used\"),\n (\"fontsize\", None, \"defines the font size to be used\"),\n (\"background\", None, \"defines the normal background color\"),\n (\"foreground\", None, \"defines the normal foreground color\"),\n (\"selected_background\", None, \"defines the selected background color\"),\n (\"selected_foreground\", None, \"defines the selected foreground color\"),\n ]\n\n def __init__(self, **config):\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Extension.defaults)\n _Extension.installed_extensions.append(self)\n\n def _configure(self, qtile):\n self.qtile = qtile\n\n def run(self):\n \"\"\"\n This method must be implemented by the subclasses.\n \"\"\"\n raise NotImplementedError()\n\n\nclass RunCommand(_Extension):\n \"\"\"\n Run an arbitrary command.\n\n Mostly useful as a superclass for more specific extensions that need to\n interact with the qtile object.\n\n Also consider simply using lazy.spawn() or writing a\n `client <http://docs.qtile.org/en/latest/manual/commands/scripting.html>`_.\n \"\"\"\n defaults = [\n # NOTE: Do not use a list as a default value, since it would be shared\n # among all the objects inheriting this class, and if one of them\n # modified it, all the other objects would see the modified list;\n # use a string or a tuple instead, which are immutable\n (\"command\", None, \"the command to be launched (string or list with arguments)\"),\n ] # type: List[Tuple[str, Any, str]]\n\n def __init__(self, **config):\n _Extension.__init__(self, **config)\n self.add_defaults(RunCommand.defaults)\n\n def run(self):\n \"\"\"\n An extension can inherit this class, define configured_command and use\n the process object by overriding this method and using super():\n\n .. code-block:: python\n\n def _configure(self, qtile):\n Superclass._configure(self, qtile)\n self.configured_command = \"foo --bar\"\n\n def run(self):\n process = super(Subclass, self).run()\n \"\"\"\n if self.configured_command:\n if isinstance(self.configured_command, str):\n self.configured_command = shlex.split(self.configured_command)\n # Else assume that self.configured_command is already a sequence\n else:\n self.configured_command = self.command\n return Popen(self.configured_command, stdout=PIPE, stdin=PIPE)\n", "path": "libqtile/extension/base.py"}]} | 1,847 | 717 |
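Side note on the fix above: the patch validates colours with a regex before handing them to dmenu, which matches the reported `cannot allocate color 'ffffff'` error (a missing leading `#`). A standalone sketch of that check; the helper name is illustrative, while the regex is the one from the diff:

```python
import re

# Accepts 3- or 6-digit hex colours, with or without a leading '#'.
RGB = re.compile(r"^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$")

def normalize_color(col):
    # Return a '#RGB'/'#RRGGBB' string, or None for invalid values.
    if not isinstance(col, str) or not RGB.match(col):
        return None
    return col if col.startswith("#") else f"#{col}"

assert normalize_color("ffffff") == "#ffffff"
assert normalize_color("#abc") == "#abc"
assert normalize_color(123) is None
```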
gh_patches_debug_26177 | rasdani/github-patches | git_diff | Mailu__Mailu-1393 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Postconf overrides should ignore comments
Comment lines in overrides/postfix.cf are sent to postconf in core/postfix/start.py:37. This results in error messages.
Comment lines should be ignored by core/postfix/start.py.
</issue>
<code>
[start of core/postfix/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9
10 from podop import run_server
11 from socrate import system, conf
12
13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
14
15 def start_podop():
16 os.setuid(100)
17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
18 # TODO: Remove verbosity setting from Podop?
19 run_server(0, "postfix", "/tmp/podop.socket", [
20 ("transport", "url", url + "transport/§"),
21 ("alias", "url", url + "alias/§"),
22 ("domain", "url", url + "domain/§"),
23 ("mailbox", "url", url + "mailbox/§"),
24 ("recipientmap", "url", url + "recipient/map/§"),
25 ("sendermap", "url", url + "sender/map/§"),
26 ("senderaccess", "url", url + "sender/access/§"),
27 ("senderlogin", "url", url + "sender/login/§")
28 ])
29
30 # Actual startup script
31 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
32 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
33 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
34 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
35
36 for postfix_file in glob.glob("/conf/*.cf"):
37 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
38
39 if os.path.exists("/overrides/postfix.cf"):
40 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
41 os.system('postconf -e "{}"'.format(line))
42
43 if os.path.exists("/overrides/postfix.master"):
44 for line in open("/overrides/postfix.master").read().strip().split("\n"):
45 os.system('postconf -Me "{}"'.format(line))
46
47 for map_file in glob.glob("/overrides/*.map"):
48 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
49 shutil.copyfile(map_file, destination)
50 os.system("postmap {}".format(destination))
51 os.remove(destination)
52
53 if "RELAYUSER" in os.environ:
54 path = "/etc/postfix/sasl_passwd"
55 conf.jinja("/conf/sasl_passwd", os.environ, path)
56 os.system("postmap {}".format(path))
57
58 # Run Podop and Postfix
59 multiprocessing.Process(target=start_podop).start()
60 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
61 os.system("postfix start-fg")
62
[end of core/postfix/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -27,6 +27,10 @@
("senderlogin", "url", url + "sender/login/§")
])
+def is_valid_postconf_line(line):
+ return not line.startswith("#") \
+ and not line == ''
+
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
@@ -38,11 +42,13 @@
if os.path.exists("/overrides/postfix.cf"):
for line in open("/overrides/postfix.cf").read().strip().split("\n"):
- os.system('postconf -e "{}"'.format(line))
+ if is_valid_postconf_line(line):
+ os.system('postconf -e "{}"'.format(line))
if os.path.exists("/overrides/postfix.master"):
for line in open("/overrides/postfix.master").read().strip().split("\n"):
- os.system('postconf -Me "{}"'.format(line))
+ if is_valid_postconf_line(line):
+ os.system('postconf -Me "{}"'.format(line))
for map_file in glob.glob("/overrides/*.map"):
destination = os.path.join("/etc/postfix", os.path.basename(map_file))
| {"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -27,6 +27,10 @@\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n \n+def is_valid_postconf_line(line):\n+ return not line.startswith(\"#\") \\\n+ and not line == ''\n+\n # Actual startup script\n os.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\n os.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\n@@ -38,11 +42,13 @@\n \n if os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n- os.system('postconf -e \"{}\"'.format(line))\n+ if is_valid_postconf_line(line):\n+ os.system('postconf -e \"{}\"'.format(line))\n \n if os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n- os.system('postconf -Me \"{}\"'.format(line))\n+ if is_valid_postconf_line(line):\n+ os.system('postconf -Me \"{}\"'.format(line))\n \n for map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n", "issue": "Postconf overrides should ignore comments\nComment lines in overrides/postfix.cf are sent to postconf in core/postfix/start.py:37. This results in error messages.\r\n\r\nComment lines should be ignored by core/postfix/start.py.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n 
shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]} | 1,330 | 319 |
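Side note on the fix above: the guard simply skips comment and blank lines before shelling out to `postconf`. A standalone sketch of the filter; the override text here is a made-up example:

```python
def is_valid_postconf_line(line):
    # Skip comments and empty lines; pass everything else to postconf.
    return not line.startswith("#") and not line == ""

overrides = "# a comment\nmyhostname = mail.example.com\n"
for line in overrides.strip().split("\n"):
    if is_valid_postconf_line(line):
        print('postconf -e "{}"'.format(line))  # only the real setting is emitted
```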
gh_patches_debug_587 | rasdani/github-patches | git_diff | pex-tool__pex-1450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.50
On the docket:
+ [x] Fix zipapp layout identification. #1448
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.49"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.49"
+__version__ = "2.1.50"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.49\"\n+__version__ = \"2.1.50\"\n", "issue": "Release 2.1.50\nOn the docket:\r\n+ [x] Fix zipapp layout identification. #1448 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.49\"\n", "path": "pex/version.py"}]} | 614 | 97 |
gh_patches_debug_36479 | rasdani/github-patches | git_diff | mozilla__bugbug-739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In the trainer script, use db.download instead of implementing download manually
script/trainer.py is currently implementing DB download manually, it should instead use db.download (similarly to scripts/commit_classifier.py).
</issue>
<code>
[start of scripts/trainer.py]
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import json
5 import os
6 from logging import INFO, basicConfig, getLogger
7 from urllib.request import urlretrieve
8
9 import zstandard
10
11 from bugbug import get_bugbug_version, model
12 from bugbug.models import get_model_class
13 from bugbug.utils import CustomJsonEncoder
14
15 basicConfig(level=INFO)
16 logger = getLogger(__name__)
17
18 BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.{}/artifacts/public"
19
20
21 class Trainer(object):
22 def decompress_file(self, path):
23 dctx = zstandard.ZstdDecompressor()
24 with open(f"{path}.zst", "rb") as input_f:
25 with open(path, "wb") as output_f:
26 dctx.copy_stream(input_f, output_f)
27 assert os.path.exists(path), "Decompressed file exists"
28
29 def compress_file(self, path):
30 cctx = zstandard.ZstdCompressor()
31 with open(path, "rb") as input_f:
32 with open(f"{path}.zst", "wb") as output_f:
33 cctx.copy_stream(input_f, output_f)
34
35 def download_db(self, db_type):
36 path = f"data/{db_type}.json"
37 formatted_base_url = BASE_URL.format(db_type, f"v{get_bugbug_version()}")
38 url = f"{formatted_base_url}/{db_type}.json.zst"
39 logger.info(f"Downloading {db_type} database from {url} to {path}.zst")
40 urlretrieve(url, f"{path}.zst")
41 assert os.path.exists(f"{path}.zst"), "Downloaded file exists"
42 logger.info(f"Decompressing {db_type} database")
43 self.decompress_file(path)
44
45 def go(self, model_name):
46 # Download datasets that were built by bugbug_data.
47 os.makedirs("data", exist_ok=True)
48
49 model_class = get_model_class(model_name)
50 model_obj = model_class()
51
52 if (
53 isinstance(model_obj, model.BugModel)
54 or isinstance(model_obj, model.BugCoupleModel)
55 or (hasattr(model_obj, "bug_data") and model_obj.bug_data)
56 ):
57 self.download_db("bugs")
58
59 if isinstance(model_obj, model.CommitModel):
60 self.download_db("commits")
61
62 logger.info(f"Training *{model_name}* model")
63 metrics = model_obj.train()
64
65 # Save the metrics as a file that can be uploaded as an artifact.
66 metric_file_path = "metrics.json"
67 with open(metric_file_path, "w") as metric_file:
68 json.dump(metrics, metric_file, cls=CustomJsonEncoder)
69
70 logger.info(f"Training done")
71
72 model_file_name = f"{model_name}model"
73 assert os.path.exists(model_file_name)
74 self.compress_file(model_file_name)
75
76 logger.info(f"Model compressed")
77
78
79 def main():
80 description = "Train the models"
81 parser = argparse.ArgumentParser(description=description)
82
83 parser.add_argument("model", help="Which model to train.")
84
85 args = parser.parse_args()
86
87 retriever = Trainer()
88 retriever.go(args.model)
89
[end of scripts/trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/trainer.py b/scripts/trainer.py
--- a/scripts/trainer.py
+++ b/scripts/trainer.py
@@ -4,44 +4,24 @@
import json
import os
from logging import INFO, basicConfig, getLogger
-from urllib.request import urlretrieve
import zstandard
-from bugbug import get_bugbug_version, model
+from bugbug import bugzilla, db, model, repository
from bugbug.models import get_model_class
from bugbug.utils import CustomJsonEncoder
basicConfig(level=INFO)
logger = getLogger(__name__)
-BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.{}/artifacts/public"
-
class Trainer(object):
- def decompress_file(self, path):
- dctx = zstandard.ZstdDecompressor()
- with open(f"{path}.zst", "rb") as input_f:
- with open(path, "wb") as output_f:
- dctx.copy_stream(input_f, output_f)
- assert os.path.exists(path), "Decompressed file exists"
-
def compress_file(self, path):
cctx = zstandard.ZstdCompressor()
with open(path, "rb") as input_f:
with open(f"{path}.zst", "wb") as output_f:
cctx.copy_stream(input_f, output_f)
- def download_db(self, db_type):
- path = f"data/{db_type}.json"
- formatted_base_url = BASE_URL.format(db_type, f"v{get_bugbug_version()}")
- url = f"{formatted_base_url}/{db_type}.json.zst"
- logger.info(f"Downloading {db_type} database from {url} to {path}.zst")
- urlretrieve(url, f"{path}.zst")
- assert os.path.exists(f"{path}.zst"), "Downloaded file exists"
- logger.info(f"Decompressing {db_type} database")
- self.decompress_file(path)
-
def go(self, model_name):
# Download datasets that were built by bugbug_data.
os.makedirs("data", exist_ok=True)
@@ -54,10 +34,10 @@
or isinstance(model_obj, model.BugCoupleModel)
or (hasattr(model_obj, "bug_data") and model_obj.bug_data)
):
- self.download_db("bugs")
+ db.download(bugzilla.BUGS_DB, force=True)
if isinstance(model_obj, model.CommitModel):
- self.download_db("commits")
+ db.download(repository.COMMITS_DB, force=True)
logger.info(f"Training *{model_name}* model")
metrics = model_obj.train()
| {"golden_diff": "diff --git a/scripts/trainer.py b/scripts/trainer.py\n--- a/scripts/trainer.py\n+++ b/scripts/trainer.py\n@@ -4,44 +4,24 @@\n import json\n import os\n from logging import INFO, basicConfig, getLogger\n-from urllib.request import urlretrieve\n \n import zstandard\n \n-from bugbug import get_bugbug_version, model\n+from bugbug import bugzilla, db, model, repository\n from bugbug.models import get_model_class\n from bugbug.utils import CustomJsonEncoder\n \n basicConfig(level=INFO)\n logger = getLogger(__name__)\n \n-BASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.{}/artifacts/public\"\n-\n \n class Trainer(object):\n- def decompress_file(self, path):\n- dctx = zstandard.ZstdDecompressor()\n- with open(f\"{path}.zst\", \"rb\") as input_f:\n- with open(path, \"wb\") as output_f:\n- dctx.copy_stream(input_f, output_f)\n- assert os.path.exists(path), \"Decompressed file exists\"\n-\n def compress_file(self, path):\n cctx = zstandard.ZstdCompressor()\n with open(path, \"rb\") as input_f:\n with open(f\"{path}.zst\", \"wb\") as output_f:\n cctx.copy_stream(input_f, output_f)\n \n- def download_db(self, db_type):\n- path = f\"data/{db_type}.json\"\n- formatted_base_url = BASE_URL.format(db_type, f\"v{get_bugbug_version()}\")\n- url = f\"{formatted_base_url}/{db_type}.json.zst\"\n- logger.info(f\"Downloading {db_type} database from {url} to {path}.zst\")\n- urlretrieve(url, f\"{path}.zst\")\n- assert os.path.exists(f\"{path}.zst\"), \"Downloaded file exists\"\n- logger.info(f\"Decompressing {db_type} database\")\n- self.decompress_file(path)\n-\n def go(self, model_name):\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n@@ -54,10 +34,10 @@\n or isinstance(model_obj, model.BugCoupleModel)\n or (hasattr(model_obj, \"bug_data\") and model_obj.bug_data)\n ):\n- self.download_db(\"bugs\")\n+ db.download(bugzilla.BUGS_DB, force=True)\n \n if isinstance(model_obj, model.CommitModel):\n- self.download_db(\"commits\")\n+ db.download(repository.COMMITS_DB, force=True)\n \n logger.info(f\"Training *{model_name}* model\")\n metrics = model_obj.train()\n", "issue": "In the trainer script, use db.download instead of implementing download manually\nscript/trainer.py is currently implementing DB download manually, it should instead use db.download (similarly to scripts/commit_classifier.py).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport json\nimport os\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nimport zstandard\n\nfrom bugbug import get_bugbug_version, model\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import CustomJsonEncoder\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.{}/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n dctx = zstandard.ZstdDecompressor()\n with open(f\"{path}.zst\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n dctx.copy_stream(input_f, output_f)\n assert os.path.exists(path), \"Decompressed file exists\"\n\n def compress_file(self, path):\n cctx = zstandard.ZstdCompressor()\n with open(path, \"rb\") as input_f:\n with open(f\"{path}.zst\", \"wb\") as output_f:\n cctx.copy_stream(input_f, output_f)\n\n def download_db(self, db_type):\n path = f\"data/{db_type}.json\"\n formatted_base_url = BASE_URL.format(db_type, f\"v{get_bugbug_version()}\")\n url = 
f\"{formatted_base_url}/{db_type}.json.zst\"\n logger.info(f\"Downloading {db_type} database from {url} to {path}.zst\")\n urlretrieve(url, f\"{path}.zst\")\n assert os.path.exists(f\"{path}.zst\"), \"Downloaded file exists\"\n logger.info(f\"Decompressing {db_type} database\")\n self.decompress_file(path)\n\n def go(self, model_name):\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n model_class = get_model_class(model_name)\n model_obj = model_class()\n\n if (\n isinstance(model_obj, model.BugModel)\n or isinstance(model_obj, model.BugCoupleModel)\n or (hasattr(model_obj, \"bug_data\") and model_obj.bug_data)\n ):\n self.download_db(\"bugs\")\n\n if isinstance(model_obj, model.CommitModel):\n self.download_db(\"commits\")\n\n logger.info(f\"Training *{model_name}* model\")\n metrics = model_obj.train()\n\n # Save the metrics as a file that can be uploaded as an artifact.\n metric_file_path = \"metrics.json\"\n with open(metric_file_path, \"w\") as metric_file:\n json.dump(metrics, metric_file, cls=CustomJsonEncoder)\n\n logger.info(f\"Training done\")\n\n model_file_name = f\"{model_name}model\"\n assert os.path.exists(model_file_name)\n self.compress_file(model_file_name)\n\n logger.info(f\"Model compressed\")\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}]} | 1,434 | 597 |
gh_patches_debug_2134 | rasdani/github-patches | git_diff | qtile__qtile-1645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lazy.when
# Issue description
I found this quite old [`config.py`](https://gist.github.com/cjbarnes18/4151805) that uses a `when` operation to limit a keybinding to a layout, but it throws an exception. I also couldn't find it in the docs, so is it supposed to exist? Is there another intended way to limit key bindings to a layout?
```python
from libqtile.lazy import lazy
```
...
```python
Key([mod], 'Left',
lazy.layout.left().when('xmonadtall'))
```
# Qtile version
v0.15.0
# Stack traces
```
libqtile xcore.py:_xpoll():L277 Got an exception in poll loop
Traceback (most recent call last):
File "/share/git/qtile/libqtile/backend/x11/xcore.py", line 246, in _xpoll
ret = target(event)
File "/share/git/qtile/libqtile/backend/x11/xcore.py", line 494, in handle_KeyPress
self.qtile.process_key_event(keysym, event.state & self._valid_mask)
File "/share/git/qtile/libqtile/core/manager.py", line 342, in process_key_event
if cmd.check(self):
AttributeError: 'NoneType' object has no attribute 'check'
```
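Reading the trace, `cmd` is `None` by the time `process_key_event` calls `cmd.check(self)`, which suggests `when()` never returns the `LazyCall`, so the chained expression `lazy.layout.left().when('xmonadtall')` evaluates to `None`. A minimal sketch of the fix I would expect (hedged; I have not tested this against master):

```python
class LazyCall:
    # ... unchanged attributes ...
    def when(self, layout=None, when_floating=True):
        self._layout = layout
        self._when_floating = when_floating
        return self  # without this, Key stores None and cmd.check(self) crashes
```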
Thanks for any help
</issue>
<code>
[start of libqtile/lazy.py]
1 # Copyright (c) 2019, Sean Vig. All rights reserved.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 from typing import Dict, List, Optional, Tuple, Union # noqa: F401
22
23 from libqtile.command_client import InteractiveCommandClient
24 from libqtile.command_graph import (
25 CommandGraphCall,
26 CommandGraphNode,
27 SelectorType,
28 )
29 from libqtile.command_interface import CommandInterface
30
31
32 class LazyCall:
33 def __init__(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> None:
34 """The lazily evaluated command graph call
35
36 Parameters
37 ----------
38 call : CommandGraphCall
39 The call that is made
40 args : Tuple
41 The args passed to the call when it is evaluated.
42 kwargs : Dict
43 The kwargs passed to the call when it is evaluated.
44 """
45 self._call = call
46 self._args = args
47 self._kwargs = kwargs
48
49 self._layout = None # type: Optional[str]
50 self._when_floating = True
51
52 @property
53 def selectors(self) -> List[SelectorType]:
54 """The selectors for the given call"""
55 return self._call.selectors
56
57 @property
58 def name(self) -> str:
59 """The name of the given call"""
60 return self._call.name
61
62 @property
63 def args(self) -> Tuple:
64 """The args to the given call"""
65 return self._args
66
67 @property
68 def kwargs(self) -> Dict:
69 """The kwargs to the given call"""
70 return self._kwargs
71
72 def when(self, layout=None, when_floating=True):
73 self._layout = layout
74 self._when_floating = when_floating
75
76 def check(self, q) -> bool:
77 if self._layout is not None:
78 if self._layout == 'floating':
79 if q.current_window.floating:
80 return True
81 return False
82 if q.current_layout.name != self._layout:
83 if q.current_window and q.current_window.floating and not self._when_floating:
84 return False
85 return True
86
87
88 class LazyCommandObject(CommandInterface):
89 """A lazy loading command object
90
91 Allows all commands and items to be resolved at run time, and returns
92 lazily evaluated commands.
93 """
94
95 def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> LazyCall:
96 """Lazily evaluate the given call"""
97 return LazyCall(call, args, kwargs)
98
99 def has_command(self, node: CommandGraphNode, command: str) -> bool:
100 """Lazily resolve the given command"""
101 return True
102
103 def has_item(self, node: CommandGraphNode, object_type: str, item: Union[str, int]) -> bool:
104 """Lazily resolve the given item"""
105 return True
106
107
108 lazy = InteractiveCommandClient(LazyCommandObject())
109
[end of libqtile/lazy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/lazy.py b/libqtile/lazy.py
--- a/libqtile/lazy.py
+++ b/libqtile/lazy.py
@@ -72,6 +72,7 @@
def when(self, layout=None, when_floating=True):
self._layout = layout
self._when_floating = when_floating
+ return self
def check(self, q) -> bool:
if self._layout is not None:
| {"golden_diff": "diff --git a/libqtile/lazy.py b/libqtile/lazy.py\n--- a/libqtile/lazy.py\n+++ b/libqtile/lazy.py\n@@ -72,6 +72,7 @@\n def when(self, layout=None, when_floating=True):\n self._layout = layout\n self._when_floating = when_floating\n+ return self\n \n def check(self, q) -> bool:\n if self._layout is not None:\n", "issue": "lazy.when\n# Issue description\r\n\r\nI found this quite oldy [`config.py`](https://gist.github.com/cjbarnes18/4151805) that uses a `when` operation to limit the keybinding to a layout but it throws an exception. I also couldn't find it in the docs, so is it supposed to exist? Is there another similar intended way to limit key bindings to a layout.\r\n\r\n\r\n```python\r\nfrom libqtile.lazy import lazy\r\n```\r\n...\r\n```python\r\n Key([mod], 'Left',\r\n lazy.layout.left().when('xmonadtall'))\r\n```\r\n\r\n# Qtile version\r\n\r\nv0.15.0\r\n\r\n# Stack traces\r\n\r\n```\r\nlibqtile xcore.py:_xpoll():L277 Got an exception in poll loop\r\nTraceback (most recent call last):\r\n File \"/share/git/qtile/libqtile/backend/x11/xcore.py\", line 246, in _xpoll\r\n ret = target(event)\r\n File \"/share/git/qtile/libqtile/backend/x11/xcore.py\", line 494, in handle_KeyPress\r\n self.qtile.process_key_event(keysym, event.state & self._valid_mask)\r\n File \"/share/git/qtile/libqtile/core/manager.py\", line 342, in process_key_event\r\n if cmd.check(self):\r\nAttributeError: 'NoneType' object has no attribute 'check'\r\n```\r\n\r\nThanks for any help\n", "before_files": [{"content": "# Copyright (c) 2019, Sean Vig. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom typing import Dict, List, Optional, Tuple, Union # noqa: F401\n\nfrom libqtile.command_client import InteractiveCommandClient\nfrom libqtile.command_graph import (\n CommandGraphCall,\n CommandGraphNode,\n SelectorType,\n)\nfrom libqtile.command_interface import CommandInterface\n\n\nclass LazyCall:\n def __init__(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> None:\n \"\"\"The lazily evaluated command graph call\n\n Parameters\n ----------\n call : CommandGraphCall\n The call that is made\n args : Tuple\n The args passed to the call when it is evaluated.\n kwargs : Dict\n The kwargs passed to the call when it is evaluated.\n \"\"\"\n self._call = call\n self._args = args\n self._kwargs = kwargs\n\n self._layout = None # type: Optional[str]\n self._when_floating = True\n\n @property\n def selectors(self) -> List[SelectorType]:\n \"\"\"The selectors for the given call\"\"\"\n return self._call.selectors\n\n @property\n def name(self) -> str:\n \"\"\"The name of the given call\"\"\"\n return self._call.name\n\n @property\n def args(self) -> Tuple:\n \"\"\"The args to the given call\"\"\"\n return self._args\n\n @property\n def kwargs(self) -> Dict:\n \"\"\"The kwargs to the given call\"\"\"\n return self._kwargs\n\n def when(self, layout=None, when_floating=True):\n self._layout = layout\n self._when_floating = when_floating\n\n def check(self, q) -> bool:\n if self._layout is not None:\n if self._layout == 'floating':\n if q.current_window.floating:\n return True\n return False\n if q.current_layout.name != self._layout:\n if q.current_window and q.current_window.floating and not self._when_floating:\n return False\n return True\n\n\nclass LazyCommandObject(CommandInterface):\n \"\"\"A lazy loading command object\n\n Allows all commands and items to be resolved at run time, and returns\n lazily evaluated commands.\n \"\"\"\n\n def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> LazyCall:\n \"\"\"Lazily evaluate the given call\"\"\"\n return LazyCall(call, args, kwargs)\n\n def has_command(self, node: CommandGraphNode, command: str) -> bool:\n \"\"\"Lazily resolve the given command\"\"\"\n return True\n\n def has_item(self, node: CommandGraphNode, object_type: str, item: Union[str, int]) -> bool:\n \"\"\"Lazily resolve the given item\"\"\"\n return True\n\n\nlazy = InteractiveCommandClient(LazyCommandObject())\n", "path": "libqtile/lazy.py"}]} | 1,918 | 104 |
gh_patches_debug_26387 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-8359 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with Spiders built on YextSpider
I've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py, matalan_gb.py, and three_gb.py.
A common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class reveals they've also stopped working. The spider stats suggest 404 and 403 errors are preventing the data from being retrieved.
If this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?
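For what it's worth, a hedged sketch of the migration pattern the fix below applies to one affected spider; `YextAnswersSpider` and the `experience_key` attribute are names from the repository's storefinders module, and the key value is lifted from the diff:

```python
from locations.storefinders.yext_answers import YextAnswersSpider


class FazolisUSSpider(YextAnswersSpider):
    name = "fazolis_us"
    api_key = "e6cda871db03e0fc9eb2211470649126"
    experience_key = "locator"  # selects the Yext "answers" experience to query
```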
</issue>
<code>
[start of locations/spiders/fazolis_us.py]
1 from locations.categories import Extras, apply_yes_no
2 from locations.storefinders.yext import YextSpider
3
4
5 class FazolisUSSpider(YextSpider):
6 name = "fazolis_us"
7 item_attributes = {"brand": "Fazoli's", "brand_wikidata": "Q1399195"}
8 api_key = "e6cda871db03e0fc9eb2211470649126"
9
10 def parse_item(self, item, location):
11 if location.get("c_pagesURL"):
12 item["website"] = location["c_pagesURL"]
13 item.pop("twitter")
14 item["extras"].pop("contact:instagram", None)
15 if location.get("products"):
16 apply_yes_no(Extras.DRIVE_THROUGH, item, "Drive-thru" in location["products"], False)
17 apply_yes_no(Extras.TAKEAWAY, item, "Take-out" in location["products"], False)
18 apply_yes_no(Extras.WIFI, item, "Free Wi-Fi" in location["products"], False)
19 apply_yes_no(Extras.WHEELCHAIR, item, "Handicap Accessible" in location["products"], False)
20 yield item
21
[end of locations/spiders/fazolis_us.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/fazolis_us.py b/locations/spiders/fazolis_us.py
--- a/locations/spiders/fazolis_us.py
+++ b/locations/spiders/fazolis_us.py
@@ -1,20 +1,17 @@
-from locations.categories import Extras, apply_yes_no
-from locations.storefinders.yext import YextSpider
+from locations.categories import Categories, Extras, apply_yes_no
+from locations.storefinders.yext_answers import YextAnswersSpider
-class FazolisUSSpider(YextSpider):
+class FazolisUSSpider(YextAnswersSpider):
name = "fazolis_us"
- item_attributes = {"brand": "Fazoli's", "brand_wikidata": "Q1399195"}
+ item_attributes = {"brand": "Fazoli's", "brand_wikidata": "Q1399195", "extras": Categories.FAST_FOOD.value}
api_key = "e6cda871db03e0fc9eb2211470649126"
+ experience_key = "locator"
- def parse_item(self, item, location):
- if location.get("c_pagesURL"):
- item["website"] = location["c_pagesURL"]
- item.pop("twitter")
- item["extras"].pop("contact:instagram", None)
- if location.get("products"):
- apply_yes_no(Extras.DRIVE_THROUGH, item, "Drive-thru" in location["products"], False)
- apply_yes_no(Extras.TAKEAWAY, item, "Take-out" in location["products"], False)
- apply_yes_no(Extras.WIFI, item, "Free Wi-Fi" in location["products"], False)
- apply_yes_no(Extras.WHEELCHAIR, item, "Handicap Accessible" in location["products"], False)
+ def parse_item(self, location, item):
+ if amenities := location["data"].get("products"):
+ apply_yes_no(Extras.DRIVE_THROUGH, item, "Drive-thru" in amenities, False)
+ apply_yes_no(Extras.TAKEAWAY, item, "Take-out" in amenities, False)
+ apply_yes_no(Extras.WIFI, item, "Free Wi-Fi" in amenities, False)
+ apply_yes_no(Extras.WHEELCHAIR, item, "Handicap Accessible" in amenities, False)
yield item
| {"golden_diff": "diff --git a/locations/spiders/fazolis_us.py b/locations/spiders/fazolis_us.py\n--- a/locations/spiders/fazolis_us.py\n+++ b/locations/spiders/fazolis_us.py\n@@ -1,20 +1,17 @@\n-from locations.categories import Extras, apply_yes_no\n-from locations.storefinders.yext import YextSpider\n+from locations.categories import Categories, Extras, apply_yes_no\n+from locations.storefinders.yext_answers import YextAnswersSpider\n \n \n-class FazolisUSSpider(YextSpider):\n+class FazolisUSSpider(YextAnswersSpider):\n name = \"fazolis_us\"\n- item_attributes = {\"brand\": \"Fazoli's\", \"brand_wikidata\": \"Q1399195\"}\n+ item_attributes = {\"brand\": \"Fazoli's\", \"brand_wikidata\": \"Q1399195\", \"extras\": Categories.FAST_FOOD.value}\n api_key = \"e6cda871db03e0fc9eb2211470649126\"\n+ experience_key = \"locator\"\n \n- def parse_item(self, item, location):\n- if location.get(\"c_pagesURL\"):\n- item[\"website\"] = location[\"c_pagesURL\"]\n- item.pop(\"twitter\")\n- item[\"extras\"].pop(\"contact:instagram\", None)\n- if location.get(\"products\"):\n- apply_yes_no(Extras.DRIVE_THROUGH, item, \"Drive-thru\" in location[\"products\"], False)\n- apply_yes_no(Extras.TAKEAWAY, item, \"Take-out\" in location[\"products\"], False)\n- apply_yes_no(Extras.WIFI, item, \"Free Wi-Fi\" in location[\"products\"], False)\n- apply_yes_no(Extras.WHEELCHAIR, item, \"Handicap Accessible\" in location[\"products\"], False)\n+ def parse_item(self, location, item):\n+ if amenities := location[\"data\"].get(\"products\"):\n+ apply_yes_no(Extras.DRIVE_THROUGH, item, \"Drive-thru\" in amenities, False)\n+ apply_yes_no(Extras.TAKEAWAY, item, \"Take-out\" in amenities, False)\n+ apply_yes_no(Extras.WIFI, item, \"Free Wi-Fi\" in amenities, False)\n+ apply_yes_no(Extras.WHEELCHAIR, item, \"Handicap Accessible\" in amenities, False)\n yield item\n", "issue": "Problems with Spiders built on YextSpider\nI've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py , matalan_gb.py , and three_gb.py .\r\n\r\nA common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class, reveals they've also stopped working. The spider stats suggest 404 and 403 errors are preventing the data being retrieved.\r\n\r\nIf this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?\n", "before_files": [{"content": "from locations.categories import Extras, apply_yes_no\nfrom locations.storefinders.yext import YextSpider\n\n\nclass FazolisUSSpider(YextSpider):\n name = \"fazolis_us\"\n item_attributes = {\"brand\": \"Fazoli's\", \"brand_wikidata\": \"Q1399195\"}\n api_key = \"e6cda871db03e0fc9eb2211470649126\"\n\n def parse_item(self, item, location):\n if location.get(\"c_pagesURL\"):\n item[\"website\"] = location[\"c_pagesURL\"]\n item.pop(\"twitter\")\n item[\"extras\"].pop(\"contact:instagram\", None)\n if location.get(\"products\"):\n apply_yes_no(Extras.DRIVE_THROUGH, item, \"Drive-thru\" in location[\"products\"], False)\n apply_yes_no(Extras.TAKEAWAY, item, \"Take-out\" in location[\"products\"], False)\n apply_yes_no(Extras.WIFI, item, \"Free Wi-Fi\" in location[\"products\"], False)\n apply_yes_no(Extras.WHEELCHAIR, item, \"Handicap Accessible\" in location[\"products\"], False)\n yield item\n", "path": "locations/spiders/fazolis_us.py"}]} | 985 | 549 |
gh_patches_debug_8122 | rasdani/github-patches | git_diff | getnikola__nikola-2632 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
new install - new_page fails
steps to reproduce:
1. install nikola.
2. init a new project and cd to its dir
3. run "nikola new_page" throws an exception.
4. however, run "nikola new_post -p" and it works just fine!??
I am puzzled: why does "new_post -p" work and "new_page" fail???
the exception trace i get is:
Traceback (most recent call last):
File "/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/doit/doit_cmd.py", line 167, in run
return command.parse_execute(args)
File "/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/doit/cmd_base.py", line 120, in parse_execute
return self.execute(params, args)
File "/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugin_categories.py", line 130, in execute
return self._execute(options, args)
File "/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugins/command/new_page.py", line 113, in _execute
return p.execute(options, args)
File "/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugin_categories.py", line 130, in execute
return self._execute(options, args)
File "/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugins/command/new_post.py", line 244, in _execute
date_path_opt = options['date-path']
KeyError: 'date-path'
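From the trace, `new_post._execute` reads `options['date-path']`, but `new_page._execute` only pre-fills `tags`, `schedule`, and `is_page` before delegating; presumably `new_post -p` works because its own option parsing supplies a `date-path` default that the `new_page` delegation path never sets. A hedged sketch of the fix:

```python
# Sketch: in CommandNewPage._execute, alongside the other page-only defaults
options = {'tags': '', 'schedule': False, 'is_page': True}
options['date-path'] = False  # new_post reads this key; new_page never set it
```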
</issue>
<code>
[start of nikola/plugins/command/new_page.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2017 Roberto Alsina, Chris Warrick and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Create a new page."""
28
29 from __future__ import unicode_literals, print_function
30
31 from nikola.plugin_categories import Command
32
33
34 class CommandNewPage(Command):
35 """Create a new page."""
36
37 name = "new_page"
38 doc_usage = "[options] [path]"
39 doc_purpose = "create a new page in the site"
40 cmd_options = [
41 {
42 'name': 'title',
43 'short': 't',
44 'long': 'title',
45 'type': str,
46 'default': '',
47 'help': 'Title for the page.'
48 },
49 {
50 'name': 'author',
51 'short': 'a',
52 'long': 'author',
53 'type': str,
54 'default': '',
55 'help': 'Author of the post.'
56 },
57 {
58 'name': 'onefile',
59 'short': '1',
60 'type': bool,
61 'default': False,
62 'help': 'Create the page with embedded metadata (single file format)'
63 },
64 {
65 'name': 'twofile',
66 'short': '2',
67 'type': bool,
68 'default': False,
69 'help': 'Create the page with separate metadata (two file format)'
70 },
71 {
72 'name': 'edit',
73 'short': 'e',
74 'type': bool,
75 'default': False,
76 'help': 'Open the page (and meta file, if any) in $EDITOR after creation.'
77 },
78 {
79 'name': 'content_format',
80 'short': 'f',
81 'long': 'format',
82 'type': str,
83 'default': '',
84 'help': 'Markup format for the page (use --available-formats for list)',
85 },
86 {
87 'name': 'available-formats',
88 'short': 'F',
89 'long': 'available-formats',
90 'type': bool,
91 'default': False,
92 'help': 'List all available input formats'
93 },
94 {
95 'name': 'import',
96 'short': 'i',
97 'long': 'import',
98 'type': str,
99 'default': '',
100 'help': 'Import an existing file instead of creating a placeholder'
101 },
102 ]
103
104 def _execute(self, options, args):
105 """Create a new page."""
106 # Defaults for some values that don’t apply to pages and the is_page option (duh!)
107 options['tags'] = ''
108 options['schedule'] = False
109 options['is_page'] = True
110 # Even though stuff was split into `new_page`, it’s easier to do it
111 # there not to duplicate the code.
112 p = self.site.plugin_manager.getPluginByName('new_post', 'Command').plugin_object
113 return p.execute(options, args)
114
[end of nikola/plugins/command/new_page.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/command/new_page.py b/nikola/plugins/command/new_page.py
--- a/nikola/plugins/command/new_page.py
+++ b/nikola/plugins/command/new_page.py
@@ -107,6 +107,7 @@
options['tags'] = ''
options['schedule'] = False
options['is_page'] = True
+ options['date-path'] = False
# Even though stuff was split into `new_page`, it’s easier to do it
# there not to duplicate the code.
p = self.site.plugin_manager.getPluginByName('new_post', 'Command').plugin_object
| {"golden_diff": "diff --git a/nikola/plugins/command/new_page.py b/nikola/plugins/command/new_page.py\n--- a/nikola/plugins/command/new_page.py\n+++ b/nikola/plugins/command/new_page.py\n@@ -107,6 +107,7 @@\n options['tags'] = ''\n options['schedule'] = False\n options['is_page'] = True\n+ options['date-path'] = False\n # Even though stuff was split into `new_page`, it\u2019s easier to do it\n # there not to duplicate the code.\n p = self.site.plugin_manager.getPluginByName('new_post', 'Command').plugin_object\n", "issue": "new install - new_page fails\nsteps to reproduce:\r\n\r\n1. install nikola.\r\n2. init a new project and cd to it's dir\r\n3. run \"nikola new_page\" throws an exception.\r\n4. however, run \"nikola new_post -p\" and it works just fine!??\r\n\r\ni am puzzled, why does \"new_post -p\" work and \"new_page\" fail???\r\n\r\nthe exception trace i get is:\r\nTraceback (most recent call last):\r\n File \"/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/doit/doit_cmd.py\", line 167, in run\r\n return command.parse_execute(args)\r\n File \"/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/doit/cmd_base.py\", line 120, in parse_execute\r\n return self.execute(params, args)\r\n File \"/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugin_categories.py\", line 130, in execute\r\n return self._execute(options, args)\r\n File \"/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugins/command/new_page.py\", line 113, in _execute\r\n return p.execute(options, args)\r\n File \"/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugin_categories.py\", line 130, in execute\r\n return self._execute(options, args)\r\n File \"/home/smg/.virtualenvs_py3/nikola/lib/python3.5/site-packages/nikola/plugins/command/new_post.py\", line 244, in _execute\r\n date_path_opt = options['date-path']\r\nKeyError: 'date-path'\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2017 Roberto Alsina, Chris Warrick and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Create a new page.\"\"\"\n\nfrom __future__ import unicode_literals, print_function\n\nfrom nikola.plugin_categories import Command\n\n\nclass CommandNewPage(Command):\n \"\"\"Create a new page.\"\"\"\n\n name = \"new_page\"\n doc_usage = \"[options] [path]\"\n doc_purpose = \"create a new page in the site\"\n cmd_options = [\n {\n 'name': 'title',\n 'short': 't',\n 'long': 'title',\n 'type': str,\n 'default': '',\n 'help': 'Title for the page.'\n },\n {\n 'name': 'author',\n 'short': 'a',\n 'long': 'author',\n 'type': str,\n 'default': '',\n 'help': 'Author of the post.'\n },\n {\n 'name': 'onefile',\n 'short': '1',\n 'type': bool,\n 'default': False,\n 'help': 'Create the page with embedded metadata (single file format)'\n },\n {\n 'name': 'twofile',\n 'short': '2',\n 'type': bool,\n 'default': False,\n 'help': 'Create the page with separate metadata (two file format)'\n },\n {\n 'name': 'edit',\n 'short': 'e',\n 'type': bool,\n 'default': False,\n 'help': 'Open the page (and meta file, if any) in $EDITOR after creation.'\n },\n {\n 'name': 'content_format',\n 'short': 'f',\n 'long': 'format',\n 'type': str,\n 'default': '',\n 'help': 'Markup format for the page (use --available-formats for list)',\n },\n {\n 'name': 'available-formats',\n 'short': 'F',\n 'long': 'available-formats',\n 'type': bool,\n 'default': False,\n 'help': 'List all available input formats'\n },\n {\n 'name': 'import',\n 'short': 'i',\n 'long': 'import',\n 'type': str,\n 'default': '',\n 'help': 'Import an existing file instead of creating a placeholder'\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Create a new page.\"\"\"\n # Defaults for some values that don\u2019t apply to pages and the is_page option (duh!)\n options['tags'] = ''\n options['schedule'] = False\n options['is_page'] = True\n # Even though stuff was split into `new_page`, it\u2019s easier to do it\n # there not to duplicate the code.\n p = self.site.plugin_manager.getPluginByName('new_post', 'Command').plugin_object\n return p.execute(options, args)\n", "path": "nikola/plugins/command/new_page.py"}]} | 2,028 | 141 |
gh_patches_debug_448 | rasdani/github-patches | git_diff | tensorflow__addons-1770 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable multiprocessing when testing with GPU and support distributed strategies in the tests.
**Describe the feature and the current behavior/state.**
Here I'm not going to discuss the bazel case as it's much more complicated to handle, and we currently advertize using pytest anyway to run the tests. We can of course make sure everything is compatible though.
This revamping of gpu testing has multiple objectives:
* The tests should behave the same whether the contributor has a gpu or not. Meaning we shouldn't run all the tests on a gpu just because a gpu is available, otherwise it hurts reproducibility.
* The test suite should be able to run with multiple workers in kokoro or when a user has multiple gpus. Pytest should use all gpus visible to the main process.
* We need to support testing with distributed strategies. Currently it doesn't work. A fix has been started in #1209 but we need to update it for pytest.
* Making the whole thing simple to use and to maintain. Notably, we would get rid of this file: https://github.com/tensorflow/addons/blob/master/tools/testing/parallel_gpu_execute.sh which is quite hard to work on.
To do all that, here is my proposal:
Stuff to know:
* Pytest-xdist uses multiprocessing to run the tests, not multithreading.
* 2 environment variables are available in each of the workers to identify them. https://github.com/pytest-dev/pytest-xdist#identifying-the-worker-process-during-a-test
### Test workers
Suppose we have a machine with 10 CPUs and 4 GPUs: 10 processes will start to run the test suite. Workers 0 to 3 will have ownership of one GPU each (we can use CUDA_VISIBLE_DEVICES to enforce that, but I'm not even sure that's needed with the proposed implementation). Workers 4 to 9 will have no gpu available.
### Virtual devices
Each of those processes, when starting, will split their physical device into 2 virtual devices. Tests that just need to run on gpu will use the first of those virtual devices. Processes that need to test distributed strategies will use the two of them. We assume here that 2 virtual devices are enough to test distributed strategies.
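A minimal sketch of that per-worker split, hedged: the memory limits are placeholders, and the calls are the TF 2.x experimental API of the time:

```python
# Run once at worker startup, before any tensor touches the GPU.
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),
         tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)],
    )  # two virtual devices: plain GPU tests use the first, strategies use both
```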
### Impact on the contributors:
For this whole machinery to work, we need to know which test needs to run on CPU, GPU, or in distributed strategies. To do that we'll use pytest markers: `@pytest.mark.....`
* By default, if no marker is found, the test will run on CPU: `with device("CPU:0")`. It's equivalent to
`@pytest.mark.run_on(["cpu"])`.
* To run with gpu only: `@pytest.mark.run_on(["gpu"])`.
* To run on the cpu and gpu: `@pytest.mark.run_on(["cpu", "gpu"])` (test runs twice)
* To run within a distributed strategy `@pytest.mark.run_on(["distributed strategy"])`. (runs once here).
* To run with everything `@pytest.mark.run_on(["cpu", "gpu", "distributed strategy"])`
* To make crazy stuff, and not run the test in any device scope: `@pytest.mark.no_device_scope`. Then the contributor can do whatever he/she wants in the test.
Of course, if no gpu is available, we just skip the tests needing a distribution strategy or the gpu. Contributors who handle the devices manually have to make sure to skip the test manually if the gpu is used.
Since gpus are often the scarcest resource (nb gpus << nb cpus), tests needing the gpu will also be marked with `@pytest.mark.tryfirst` to ensure that we don't have worker starvation at the end (to get maximum speed).
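To make the marker handling concrete, here is a minimal sketch (not the final implementation; the `run_on` marker is the one proposed above, and the skip condition is deliberately simplified) of how a collection hook could apply the skips:

```python
# conftest.py sketch, assuming the run_on marker described above
import pytest
import tensorflow as tf


def pytest_collection_modifyitems(items):
    gpu_available = bool(tf.config.list_physical_devices("GPU"))
    for item in items:
        marker = item.get_closest_marker("run_on")
        devices = marker.args[0] if marker else ["cpu"]
        if not gpu_available and "cpu" not in devices:
            # everything this test asks for needs a GPU (or a multi-GPU strategy)
            item.add_marker(pytest.mark.skip(reason="no GPU available"))
```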
To implement that, we first need to convert all tests to pytest (as opposed to unittest); it's currently 80% done, and thanks a lot @autoih for putting a LOT of work into that.
**Relevant information**
- Are you willing to contribute it (yes/no): yes
- Are you willing to maintain it going forward? (yes/no): yes
- Is there a relevant academic paper? (if so, where): no
- Is there already an implementation in another framework? (if so, where): no
- Was it part of tf.contrib? (if so, where): no
**Which API type would this fall under (layer, metric, optimizer, etc.)**
Testing
**Who will benefit with this feature?**
Contributors with gpu, CI.
**Any other info.**
I believe that the implementation will first go in tensorflow addons because we have 4 GPUs available in the CI. Later on when it's stable we can split it from tensorflow addons and make it a separate pytest plugin with a public API.
Comments welcome. Especially from @Squadrick , @hyang0129 , @seanpmorgan since I'm not a ninja of tf.device.
</issue>
<code>
[start of tensorflow_addons/conftest.py]
1 from tensorflow_addons.utils.test_utils import ( # noqa: F401
2 maybe_run_functions_eagerly,
3 pytest_make_parametrize_id,
4 data_format,
5 set_seeds,
6 pytest_addoption,
7 set_global_variables,
8 pytest_configure,
9 device,
10 pytest_generate_tests,
11 )
12
13 # fixtures present in this file will be available
14 # when running tests and can be referenced with strings
15 # https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions
16
[end of tensorflow_addons/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py
--- a/tensorflow_addons/conftest.py
+++ b/tensorflow_addons/conftest.py
@@ -8,6 +8,7 @@
pytest_configure,
device,
pytest_generate_tests,
+ pytest_collection_modifyitems,
)
# fixtures present in this file will be available
| {"golden_diff": "diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py\n--- a/tensorflow_addons/conftest.py\n+++ b/tensorflow_addons/conftest.py\n@@ -8,6 +8,7 @@\n pytest_configure,\n device,\n pytest_generate_tests,\n+ pytest_collection_modifyitems,\n )\n \n # fixtures present in this file will be available\n", "issue": "Enable multiprocessing when testing with GPU and support distributed strategies in the tests.\n**Describe the feature and the current behavior/state.**\r\n\r\nHere I'm not going to discuss the bazel case as it's much more complicated to handle, and we currently advertize using pytest anyway to run the tests. We can of course make sure everything is compatible though.\r\n\r\nThis revamping of gpu testing has multiple objectives:\r\n\r\n* The tests should behave the same weither the contributor has a gpu or not. Meaning we shouldn't run all the tests on a gpu just because a gpu is available, otherwise it hurts reproducibility.\r\n* The test suite should be able to run with multiple workers in kokoro or when a user has multiple gpus. Pytest should use all gpus visible by the main process.\r\n* We need to support testing with distributed strategies. Currently it doesn't work. A fix has been started in #1209 but we need to update it for pytest. \r\n* Making the whole thing simple to use and to maintain. Notably, we would get rid of this file: https://github.com/tensorflow/addons/blob/master/tools/testing/parallel_gpu_execute.sh which is quite hard to work on.\r\n\r\n\r\nTo do all that, here is my proposal:\r\n\r\nStuff to know: \r\n* Pytest-xdist uses multiprocessing to run the tests, not multithreading. \r\n* 2 environement variables are available in each of the workers to identify them. https://github.com/pytest-dev/pytest-xdist#identifying-the-worker-process-during-a-test\r\n\r\n\r\n### Test workers\r\nSuppose we have a machine with 10CPUs and 4 GPUs, 10 processes will start to run the test suite. Workers 0 to 3 will have ownership of one GPU each (we can use CUDA_VISIBLE_DEVICES to enforce that, but I'm not even sure that's needed with the proposed implementation). Workers 4 to 9 will have no gpu available.\r\n\r\n### Virtual devices\r\nEach of those processes, when starting, will split their physical device into 2 virtual device. Tests that just need to run on gpu will use the first of those virtual devices. Processes which need to test distributed strategies will use the two of them. We assume here that 2 virtual devices are enough to test distributed strategies.\r\n\r\n### Impact on the contributors:\r\nFor this whole machinery to work, we need to know which test needs to run on CPU, GPU, or in distributed strategies. To do that we'll use pytest markers: `@pytest.mark.....`\r\n\r\n* By default, if no marker is found, the test will run on CPU: `with device(\"CPU:0\")`. It's equivalent to \r\n`@pytest.mark.run_on([\"cpu\"])`.\r\n* To run with gpu only: `@pytest.mark.run_on([\"gpu\"])`.\r\n* To run on the cpu and gpu: `@pytest.mark.run_on([\"cpu\", \"gpu\"])` (test runs twice)\r\n* To run in within a distributed strategy `@pytest.mark.run_on([\"distributed strategy\"])`. (runs once here).\r\n* To run with everything `@pytest.mark.run_on([\"cpu\", \"gpu\", \"distributed strategy\"])`\r\n* To make crazy stuff, and not run the test in any device scope: `@pytest.mark.no_device_scope`. Then the contributor can do whatever he/she wants in the test.\r\n\r\nOf course, if no gpu is available, we just skip the tests needing a distribution strategy or the gpu. 
Contributors who handle the devices manually have to make sure to skip manually the test if the gpu is used.\r\n\r\nSince gpu are often the scarsest ressource (nb gpus << nb cpus), tests needing the gpu will also be marked with `@pytest.mark.tryfirst` to ensure that we don't have workers starvation at the end (to get maximum speed).\r\n\r\nTo implement that, we need first to convert all tests to pytest (as opposed to unittest) it's currently 80% done and thanks a lot @autoih for putting a LOT of work into that.\r\n\r\n\r\n\r\n**Relevant information**\r\n- Are you willing to contribute it (yes/no): yes\r\n- Are you willing to maintain it going forward? (yes/no): yes\r\n- Is there a relevant academic paper? (if so, where): no\r\n- Is there already an implementation in another framework? (if so, where): no\r\n- Was it part of tf.contrib? (if so, where): no\r\n\r\n**Which API type would this fall under (layer, metric, optimizer, etc.)**\r\n\r\nTesting\r\n\r\n**Who will benefit with this feature?**\r\n\r\nContributors with gpu, CI.\r\n\r\n**Any other info.**\r\n\r\nI believe that the implementation will first go in tensorflow addons because we have 4 GPUs available in the CI. Later on when it's stable we can split it from tensorflow addons and make it a separate pytest plugin with a public API.\r\n\r\nComments welcome. Especially from @Squadrick , @hyang0129 , @seanpmorgan since I'm not a ninja of tf.device.\n", "before_files": [{"content": "from tensorflow_addons.utils.test_utils import ( # noqa: F401\n maybe_run_functions_eagerly,\n pytest_make_parametrize_id,\n data_format,\n set_seeds,\n pytest_addoption,\n set_global_variables,\n pytest_configure,\n device,\n pytest_generate_tests,\n)\n\n# fixtures present in this file will be available\n# when running tests and can be referenced with strings\n# https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions\n", "path": "tensorflow_addons/conftest.py"}]} | 1,711 | 90 |
gh_patches_debug_29057 | rasdani/github-patches | git_diff | vega__altair-2355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow Paths in save()
Instead of allowing only string paths or file-likes, allow pathlib.Paths to be passed to `save()`.
Are these two the only places that would have to be changed?
https://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L8
https://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L72
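If so, a hedged sketch of what the two changes might look like, assuming `pathlib.PurePath` is the right base class to test against (it covers pure and concrete paths alike) and that `Path.suffix` replaces the string split for format detection:

```python
import pathlib


def write_file_or_filename(fp, content, mode="w"):
    """Write content to fp: a str path, a pathlib path, or a file-like object."""
    if isinstance(fp, (str, pathlib.PurePath)):
        with open(fp, mode) as f:
            f.write(content)
    else:
        fp.write(content)


def infer_format(fp):
    # in save(): a path's suffix carries the format, e.g. Path("chart.png") -> "png"
    if isinstance(fp, pathlib.PurePath):
        return fp.suffix.lstrip(".")
    return fp.split(".")[-1]
```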
</issue>
<code>
[start of altair/utils/save.py]
1 import json
2
3 from .mimebundle import spec_to_mimebundle
4
5
6 def write_file_or_filename(fp, content, mode="w"):
7 """Write content to fp, whether fp is a string or a file-like object"""
8 if isinstance(fp, str):
9 with open(fp, mode) as f:
10 f.write(content)
11 else:
12 fp.write(content)
13
14
15 def save(
16 chart,
17 fp,
18 vega_version,
19 vegaembed_version,
20 format=None,
21 mode=None,
22 vegalite_version=None,
23 embed_options=None,
24 json_kwds=None,
25 webdriver="chrome",
26 scale_factor=1,
27 **kwargs,
28 ):
29 """Save a chart to file in a variety of formats
30
31 Supported formats are [json, html, png, svg]
32
33 Parameters
34 ----------
35 chart : alt.Chart
36 the chart instance to save
37 fp : string filename or file-like object
38 file in which to write the chart.
39 format : string (optional)
40 the format to write: one of ['json', 'html', 'png', 'svg'].
41 If not specified, the format will be determined from the filename.
42 mode : string (optional)
43 Either 'vega' or 'vegalite'. If not specified, then infer the mode from
44 the '$schema' property of the spec, or the ``opt`` dictionary.
45 If it's not specified in either of those places, then use 'vegalite'.
46 vega_version : string
47 For html output, the version of vega.js to use
48 vegalite_version : string
49 For html output, the version of vegalite.js to use
50 vegaembed_version : string
51 For html output, the version of vegaembed.js to use
52 embed_options : dict
53 The vegaEmbed options dictionary. Default is {}
54 (See https://github.com/vega/vega-embed for details)
55 json_kwds : dict
56 Additional keyword arguments are passed to the output method
57 associated with the specified format.
58 webdriver : string {'chrome' | 'firefox'}
59 Webdriver to use for png or svg output
60 scale_factor : float
61 scale_factor to use to change size/resolution of png or svg output
62 **kwargs :
63 additional kwargs passed to spec_to_mimebundle.
64 """
65 if json_kwds is None:
66 json_kwds = {}
67
68 if embed_options is None:
69 embed_options = {}
70
71 if format is None:
72 if isinstance(fp, str):
73 format = fp.split(".")[-1]
74 else:
75 raise ValueError(
76 "must specify file format: " "['png', 'svg', 'pdf', 'html', 'json']"
77 )
78
79 spec = chart.to_dict()
80
81 if mode is None:
82 if "mode" in embed_options:
83 mode = embed_options["mode"]
84 elif "$schema" in spec:
85 mode = spec["$schema"].split("/")[-2]
86 else:
87 mode = "vega-lite"
88
89 if mode not in ["vega", "vega-lite"]:
90 raise ValueError("mode must be 'vega' or 'vega-lite', " "not '{}'".format(mode))
91
92 if mode == "vega-lite" and vegalite_version is None:
93 raise ValueError("must specify vega-lite version")
94
95 if format == "json":
96 json_spec = json.dumps(spec, **json_kwds)
97 write_file_or_filename(fp, json_spec, mode="w")
98 elif format == "html":
99 mimebundle = spec_to_mimebundle(
100 spec=spec,
101 format=format,
102 mode=mode,
103 vega_version=vega_version,
104 vegalite_version=vegalite_version,
105 vegaembed_version=vegaembed_version,
106 embed_options=embed_options,
107 json_kwds=json_kwds,
108 **kwargs,
109 )
110 write_file_or_filename(fp, mimebundle["text/html"], mode="w")
111 elif format in ["png", "svg", "pdf"]:
112 mimebundle = spec_to_mimebundle(
113 spec=spec,
114 format=format,
115 mode=mode,
116 vega_version=vega_version,
117 vegalite_version=vegalite_version,
118 vegaembed_version=vegaembed_version,
119 webdriver=webdriver,
120 scale_factor=scale_factor,
121 **kwargs,
122 )
123 if format == "png":
124 write_file_or_filename(fp, mimebundle["image/png"], mode="wb")
125 elif format == "pdf":
126 write_file_or_filename(fp, mimebundle["application/pdf"], mode="wb")
127 else:
128 write_file_or_filename(fp, mimebundle["image/svg+xml"], mode="w")
129 else:
130 raise ValueError("unrecognized format: '{}'".format(format))
131
[end of altair/utils/save.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/utils/save.py b/altair/utils/save.py
--- a/altair/utils/save.py
+++ b/altair/utils/save.py
@@ -1,11 +1,13 @@
import json
+import pathlib
from .mimebundle import spec_to_mimebundle
def write_file_or_filename(fp, content, mode="w"):
- """Write content to fp, whether fp is a string or a file-like object"""
- if isinstance(fp, str):
+ """Write content to fp, whether fp is a string, a pathlib Path or a
+ file-like object"""
+ if isinstance(fp, str) or isinstance(fp, pathlib.PurePath):
with open(fp, mode) as f:
f.write(content)
else:
@@ -34,8 +36,8 @@
----------
chart : alt.Chart
the chart instance to save
- fp : string filename or file-like object
- file in which to write the chart.
+ fp : string filename, pathlib.Path or file-like object
+ file to which to write the chart.
format : string (optional)
the format to write: one of ['json', 'html', 'png', 'svg'].
If not specified, the format will be determined from the filename.
@@ -71,6 +73,8 @@
if format is None:
if isinstance(fp, str):
format = fp.split(".")[-1]
+ elif isinstance(fp, pathlib.PurePath):
+ format = fp.suffix.lstrip(".")
else:
raise ValueError(
"must specify file format: " "['png', 'svg', 'pdf', 'html', 'json']"
| {"golden_diff": "diff --git a/altair/utils/save.py b/altair/utils/save.py\n--- a/altair/utils/save.py\n+++ b/altair/utils/save.py\n@@ -1,11 +1,13 @@\n import json\n+import pathlib\n \n from .mimebundle import spec_to_mimebundle\n \n \n def write_file_or_filename(fp, content, mode=\"w\"):\n- \"\"\"Write content to fp, whether fp is a string or a file-like object\"\"\"\n- if isinstance(fp, str):\n+ \"\"\"Write content to fp, whether fp is a string, a pathlib Path or a\n+ file-like object\"\"\"\n+ if isinstance(fp, str) or isinstance(fp, pathlib.PurePath):\n with open(fp, mode) as f:\n f.write(content)\n else:\n@@ -34,8 +36,8 @@\n ----------\n chart : alt.Chart\n the chart instance to save\n- fp : string filename or file-like object\n- file in which to write the chart.\n+ fp : string filename, pathlib.Path or file-like object\n+ file to which to write the chart.\n format : string (optional)\n the format to write: one of ['json', 'html', 'png', 'svg'].\n If not specified, the format will be determined from the filename.\n@@ -71,6 +73,8 @@\n if format is None:\n if isinstance(fp, str):\n format = fp.split(\".\")[-1]\n+ elif isinstance(fp, pathlib.PurePath):\n+ format = fp.suffix.lstrip(\".\")\n else:\n raise ValueError(\n \"must specify file format: \" \"['png', 'svg', 'pdf', 'html', 'json']\"\n", "issue": "Allow Paths in save()\nInstead of allowing only string paths or file-likes, allow pathlib.Paths to be passed to `save()`.\r\n\r\nAre these two the only places that would have to be changed?\r\n\r\nhttps://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L8\r\nhttps://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L72\n", "before_files": [{"content": "import json\n\nfrom .mimebundle import spec_to_mimebundle\n\n\ndef write_file_or_filename(fp, content, mode=\"w\"):\n \"\"\"Write content to fp, whether fp is a string or a file-like object\"\"\"\n if isinstance(fp, str):\n with open(fp, mode) as f:\n f.write(content)\n else:\n fp.write(content)\n\n\ndef save(\n chart,\n fp,\n vega_version,\n vegaembed_version,\n format=None,\n mode=None,\n vegalite_version=None,\n embed_options=None,\n json_kwds=None,\n webdriver=\"chrome\",\n scale_factor=1,\n **kwargs,\n):\n \"\"\"Save a chart to file in a variety of formats\n\n Supported formats are [json, html, png, svg]\n\n Parameters\n ----------\n chart : alt.Chart\n the chart instance to save\n fp : string filename or file-like object\n file in which to write the chart.\n format : string (optional)\n the format to write: one of ['json', 'html', 'png', 'svg'].\n If not specified, the format will be determined from the filename.\n mode : string (optional)\n Either 'vega' or 'vegalite'. If not specified, then infer the mode from\n the '$schema' property of the spec, or the ``opt`` dictionary.\n If it's not specified in either of those places, then use 'vegalite'.\n vega_version : string\n For html output, the version of vega.js to use\n vegalite_version : string\n For html output, the version of vegalite.js to use\n vegaembed_version : string\n For html output, the version of vegaembed.js to use\n embed_options : dict\n The vegaEmbed options dictionary. 
Default is {}\n (See https://github.com/vega/vega-embed for details)\n json_kwds : dict\n Additional keyword arguments are passed to the output method\n associated with the specified format.\n webdriver : string {'chrome' | 'firefox'}\n Webdriver to use for png or svg output\n scale_factor : float\n scale_factor to use to change size/resolution of png or svg output\n **kwargs :\n additional kwargs passed to spec_to_mimebundle.\n \"\"\"\n if json_kwds is None:\n json_kwds = {}\n\n if embed_options is None:\n embed_options = {}\n\n if format is None:\n if isinstance(fp, str):\n format = fp.split(\".\")[-1]\n else:\n raise ValueError(\n \"must specify file format: \" \"['png', 'svg', 'pdf', 'html', 'json']\"\n )\n\n spec = chart.to_dict()\n\n if mode is None:\n if \"mode\" in embed_options:\n mode = embed_options[\"mode\"]\n elif \"$schema\" in spec:\n mode = spec[\"$schema\"].split(\"/\")[-2]\n else:\n mode = \"vega-lite\"\n\n if mode not in [\"vega\", \"vega-lite\"]:\n raise ValueError(\"mode must be 'vega' or 'vega-lite', \" \"not '{}'\".format(mode))\n\n if mode == \"vega-lite\" and vegalite_version is None:\n raise ValueError(\"must specify vega-lite version\")\n\n if format == \"json\":\n json_spec = json.dumps(spec, **json_kwds)\n write_file_or_filename(fp, json_spec, mode=\"w\")\n elif format == \"html\":\n mimebundle = spec_to_mimebundle(\n spec=spec,\n format=format,\n mode=mode,\n vega_version=vega_version,\n vegalite_version=vegalite_version,\n vegaembed_version=vegaembed_version,\n embed_options=embed_options,\n json_kwds=json_kwds,\n **kwargs,\n )\n write_file_or_filename(fp, mimebundle[\"text/html\"], mode=\"w\")\n elif format in [\"png\", \"svg\", \"pdf\"]:\n mimebundle = spec_to_mimebundle(\n spec=spec,\n format=format,\n mode=mode,\n vega_version=vega_version,\n vegalite_version=vegalite_version,\n vegaembed_version=vegaembed_version,\n webdriver=webdriver,\n scale_factor=scale_factor,\n **kwargs,\n )\n if format == \"png\":\n write_file_or_filename(fp, mimebundle[\"image/png\"], mode=\"wb\")\n elif format == \"pdf\":\n write_file_or_filename(fp, mimebundle[\"application/pdf\"], mode=\"wb\")\n else:\n write_file_or_filename(fp, mimebundle[\"image/svg+xml\"], mode=\"w\")\n else:\n raise ValueError(\"unrecognized format: '{}'\".format(format))\n", "path": "altair/utils/save.py"}]} | 2,003 | 370 |
gh_patches_debug_1091 | rasdani/github-patches | git_diff | streamlink__streamlink-5926 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.mangomolo: error: No plugin can handle URL
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
[cli][info] Your Streamlink version (6.7.2) is up to date!
### Description
Unable to get a stream for Kuwaiti channels. Error message: "error: No plugin can handle URL:"
sample URLs:
https://www.media.gov.kw/LiveTV.aspx
https://www.media.gov.kw/LiveTV.aspx?PanChannel=Drama
### Debug log
```text
user@desktop:~ $ streamlink https://www.media.gov.kw/LiveTV.aspx --loglevel=debug
[cli][debug] OS: Linux-6.1.21+-armv6l-with-glibc2.31
[cli][debug] Python: 3.9.2
[cli][debug] OpenSSL: OpenSSL 1.1.1w 11 Sep 2023
[cli][debug] Streamlink: 6.7.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.7.22
[cli][debug] exceptiongroup: 1.1.3
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.3
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.18.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.10.3
[cli][debug] typing-extensions: 4.7.1
[cli][debug] urllib3: 2.0.4
[cli][debug] websocket-client: 1.6.2
[cli][debug] Arguments:
[cli][debug] url=https://www.media.gov.kw/LiveTV.aspx
[cli][debug] --loglevel=debug
error: No plugin can handle URL: https://www.media.gov.kw/LiveTV.aspx
```
</issue>
<code>
[start of src/streamlink/plugins/mangomolo.py]
1 """
2 $description OTT video platform owned by Alpha Technology Group
3 $url player.mangomolo.com
4 $url media.gov.kw
5 $type live
6 """
7
8 import logging
9 import re
10
11 from streamlink.exceptions import NoStreamsError
12 from streamlink.plugin import Plugin, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.stream import HLSStream
15 from streamlink.utils.url import update_scheme
16
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(
22 name="mangomoloplayer",
23 pattern=re.compile(r"https?://player\.mangomolo\.com/v1/"),
24 )
25 @pluginmatcher(
26 name="mediagovkw",
27 pattern=re.compile(r"https?://media\.gov\.kw/"),
28 )
29 class Mangomolo(Plugin):
30 def _get_player_url(self):
31 player_url = self.session.http.get(self.url, schema=validate.Schema(
32 validate.parse_html(),
33 validate.xml_xpath_string(".//iframe[contains(@src,'//player.mangomolo.com/v1/')][1]/@src"),
34 ))
35 if not player_url:
36 log.error("Could not find embedded player")
37 raise NoStreamsError
38
39 self.url = update_scheme("https://", player_url)
40
41 def _get_streams(self):
42 headers = {}
43 if not self.matches["mangomoloplayer"]:
44 headers["Referer"] = self.url
45 self._get_player_url()
46
47 hls_url = self.session.http.get(self.url, headers=headers, schema=validate.Schema(
48 re.compile(r"src\s*:\s*(?P<q>[\"'])(?P<url>https?://\S+?\.m3u8\S*?)(?P=q)"),
49 validate.none_or_all(validate.get("url")),
50 ))
51 if hls_url:
52 return HLSStream.parse_variant_playlist(self.session, hls_url)
53
54
55 __plugin__ = Mangomolo
56
[end of src/streamlink/plugins/mangomolo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/mangomolo.py b/src/streamlink/plugins/mangomolo.py
--- a/src/streamlink/plugins/mangomolo.py
+++ b/src/streamlink/plugins/mangomolo.py
@@ -24,7 +24,7 @@
)
@pluginmatcher(
name="mediagovkw",
- pattern=re.compile(r"https?://media\.gov\.kw/"),
+ pattern=re.compile(r"https?://(www\.)?media\.gov\.kw/"),
)
class Mangomolo(Plugin):
def _get_player_url(self):
| {"golden_diff": "diff --git a/src/streamlink/plugins/mangomolo.py b/src/streamlink/plugins/mangomolo.py\n--- a/src/streamlink/plugins/mangomolo.py\n+++ b/src/streamlink/plugins/mangomolo.py\n@@ -24,7 +24,7 @@\n )\n @pluginmatcher(\n name=\"mediagovkw\",\n- pattern=re.compile(r\"https?://media\\.gov\\.kw/\"),\n+ pattern=re.compile(r\"https?://(www\\.)?media\\.gov\\.kw/\"),\n )\n class Mangomolo(Plugin):\n def _get_player_url(self):\n", "issue": "plugins.mangomolo: error: No plugin can handle URL\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\n[cli][info] Your Streamlink version (6.7.2) is up to date!\n\n### Description\n\nUnable to get stream for Kuwaiti channels.. error message: \"error: No plugin can handle URL:\"\r\nsample URLs:\r\n https://www.media.gov.kw/LiveTV.aspx\r\n https://www.media.gov.kw/LiveTV.aspx?PanChannel=Drama\r\n\n\n### Debug log\n\n```text\nuser@desktop:~ $ streamlink https://www.media.gov.kw/LiveTV.aspx --loglevel=debug\r\n[cli][debug] OS: Linux-6.1.21+-armv6l-with-glibc2.31\r\n[cli][debug] Python: 3.9.2\r\n[cli][debug] OpenSSL: OpenSSL 1.1.1w 11 Sep 2023\r\n[cli][debug] Streamlink: 6.7.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.7.22\r\n[cli][debug] exceptiongroup: 1.1.3\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.18.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.10.3\r\n[cli][debug] typing-extensions: 4.7.1\r\n[cli][debug] urllib3: 2.0.4\r\n[cli][debug] websocket-client: 1.6.2\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.media.gov.kw/LiveTV.aspx\r\n[cli][debug] --loglevel=debug\r\nerror: No plugin can handle URL: https://www.media.gov.kw/LiveTV.aspx\n```\n\n", "before_files": [{"content": "\"\"\"\n$description OTT video platform owned by Alpha Technology Group\n$url player.mangomolo.com\n$url media.gov.kw\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.exceptions import NoStreamsError\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(\n name=\"mangomoloplayer\",\n pattern=re.compile(r\"https?://player\\.mangomolo\\.com/v1/\"),\n)\n@pluginmatcher(\n name=\"mediagovkw\",\n pattern=re.compile(r\"https?://media\\.gov\\.kw/\"),\n)\nclass Mangomolo(Plugin):\n def _get_player_url(self):\n player_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//iframe[contains(@src,'//player.mangomolo.com/v1/')][1]/@src\"),\n ))\n if not player_url:\n log.error(\"Could not find embedded player\")\n raise NoStreamsError\n\n self.url = update_scheme(\"https://\", player_url)\n\n def _get_streams(self):\n headers = {}\n if not 
self.matches[\"mangomoloplayer\"]:\n headers[\"Referer\"] = self.url\n self._get_player_url()\n\n hls_url = self.session.http.get(self.url, headers=headers, schema=validate.Schema(\n re.compile(r\"src\\s*:\\s*(?P<q>[\\\"'])(?P<url>https?://\\S+?\\.m3u8\\S*?)(?P=q)\"),\n validate.none_or_all(validate.get(\"url\")),\n ))\n if hls_url:\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = Mangomolo\n", "path": "src/streamlink/plugins/mangomolo.py"}]} | 1,718 | 127 |