| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
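Each record below is one row of this table. The long `prompt`, `golden_diff` and `verification_info` cells are rendered inline, so a single row spans many lines: the `prompt` cell holds the issue text, the numbered code listing and the patch-format instructions, the `golden_diff` cell holds the reference patch, the `verification_info` cell is a JSON blob repeating the diff and the original files, and the row ends with the two token counts.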
gh_patches_debug_5672 | rasdani/github-patches | git_diff | sosreport__sos-471 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[block] Don't use parted human readable output - rhbz #1183770
Changed the parted command to return data in sector units
instead of human readable form.
Signed-off-by: Shane Bradley [email protected]
</issue>
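For context on the change requested above, a standalone sketch (not part of the sos plugin; the device path is just an example) of the parted invocation in sector units, whose sizes such as `2048s` can be parsed back into exact integers, unlike the rounded human-readable default:

```python
import subprocess

def partition_table_in_sectors(disk_path="/dev/sda"):
    # "unit s" makes parted report Start/End/Size as exact sector counts
    # (e.g. "2048s") instead of rounded human-readable values (e.g. "1049kB"),
    # which keeps the collected output unambiguous and machine-parsable.
    result = subprocess.run(
        ["parted", "-s", disk_path, "unit", "s", "print"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout

if __name__ == "__main__":
    print(partition_table_in_sectors())
```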
<code>
[start of sos/plugins/block.py]
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14
15 import os
16 from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
17
18
19 class Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
20 """Block device information
21 """
22
23 plugin_name = 'block'
24 profiles = ('storage', 'hardware')
25
26 def setup(self):
27 self.add_cmd_output([
28 "lsblk",
29 "blkid -c /dev/null",
30 "ls -lanR /dev",
31 "ls -lanR /sys/block"
32 ])
33
34 # legacy location for non-/run distributions
35 self.add_copy_spec([
36 "/etc/blkid.tab",
37 "/run/blkid/blkid.tab",
38 "/proc/partitions",
39 "/proc/diskstats"
40 ])
41
42 if os.path.isdir("/sys/block"):
43 for disk in os.listdir("/sys/block"):
44 if disk in [".", ".."] or disk.startswith("ram"):
45 continue
46 disk_path = os.path.join('/dev/', disk)
47 self.add_cmd_output([
48 "udevadm info -ap /sys/block/%s" % (disk),
49 "parted -s %s print" % (disk_path),
50 "fdisk -l %s" % disk_path
51 ])
52
53 # vim: et ts=4 sw=4
54
[end of sos/plugins/block.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sos/plugins/block.py b/sos/plugins/block.py
--- a/sos/plugins/block.py
+++ b/sos/plugins/block.py
@@ -46,7 +46,7 @@
disk_path = os.path.join('/dev/', disk)
self.add_cmd_output([
"udevadm info -ap /sys/block/%s" % (disk),
- "parted -s %s print" % (disk_path),
+ "parted -s %s unit s print" % (disk_path),
"fdisk -l %s" % disk_path
])
| {"golden_diff": "diff --git a/sos/plugins/block.py b/sos/plugins/block.py\n--- a/sos/plugins/block.py\n+++ b/sos/plugins/block.py\n@@ -46,7 +46,7 @@\n disk_path = os.path.join('/dev/', disk)\n self.add_cmd_output([\n \"udevadm info -ap /sys/block/%s\" % (disk),\n- \"parted -s %s print\" % (disk_path),\n+ \"parted -s %s unit s print\" % (disk_path),\n \"fdisk -l %s\" % disk_path\n ])\n", "issue": "[block] Don't use parted human readable output - rhbz #1183770\nChanged the parted command to return data in sectors units\ninstead of human readable form.\n\nSigned-off-by: Shane Bradley [email protected]\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nimport os\nfrom sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin\n\n\nclass Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):\n \"\"\"Block device information\n \"\"\"\n\n plugin_name = 'block'\n profiles = ('storage', 'hardware')\n\n def setup(self):\n self.add_cmd_output([\n \"lsblk\",\n \"blkid -c /dev/null\",\n \"ls -lanR /dev\",\n \"ls -lanR /sys/block\"\n ])\n\n # legacy location for non-/run distributions\n self.add_copy_spec([\n \"/etc/blkid.tab\",\n \"/run/blkid/blkid.tab\",\n \"/proc/partitions\",\n \"/proc/diskstats\"\n ])\n\n if os.path.isdir(\"/sys/block\"):\n for disk in os.listdir(\"/sys/block\"):\n if disk in [\".\", \"..\"] or disk.startswith(\"ram\"):\n continue\n disk_path = os.path.join('/dev/', disk)\n self.add_cmd_output([\n \"udevadm info -ap /sys/block/%s\" % (disk),\n \"parted -s %s print\" % (disk_path),\n \"fdisk -l %s\" % disk_path\n ])\n\n# vim: et ts=4 sw=4\n", "path": "sos/plugins/block.py"}]} | 1,123 | 129 |
gh_patches_debug_50224 | rasdani/github-patches | git_diff | pex-tool__pex-1692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.74
On the docket:
+ [x] Add support for locking VCS requirements. (#1687)
+ [x] Fix `--lock` for multiplatform via sdists. (#1689)
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.73"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.73"
+__version__ = "2.1.74"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.73\"\n+__version__ = \"2.1.74\"\n", "issue": "Release 2.1.74\nOn the docket:\r\n+ [x] Add support for locking VCS requirements. (#1687)\r\n+ [x] Fix `--lock` for multiplatform via sdists. (#1689)\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.73\"\n", "path": "pex/version.py"}]} | 639 | 97 |
gh_patches_debug_19495 | rasdani/github-patches | git_diff | Pyomo__pyomo-1273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyNumero support on Windows
We need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.
</issue>
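For context, a standalone sketch of the Windows-specific wrinkle behind this issue: `ctypes.util.find_library` looks for `<name>.dll` on Windows, while the bundled PyNumero binaries carry a `lib` prefix, so a second lookup with the prefixed name (the same fallback the reference patch adds) is needed:

```python
from ctypes.util import find_library

def find_pynumero_library(library_name: str):
    # On Linux/macOS this resolves lib<name>.so / lib<name>.dylib directly.
    path = find_library(library_name)
    if path is not None:
        return path
    # On Windows find_library searches PATH for "<name>.dll"; the shipped
    # binaries are named lib<name>.dll, so retry with the prefix spelled out.
    return find_library("lib" + library_name)

if __name__ == "__main__":
    print(find_pynumero_library("pynumero_ASL"))
```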
<code>
[start of pyomo/contrib/pynumero/extensions/utils.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10 from ctypes.util import find_library
11 import sys
12 import os
13
14
15 def find_pynumero_library(library_name):
16
17 asl_path = find_library(library_name)
18 if asl_path is not None:
19 return asl_path
20 else:
21 # try looking into extensions directory now
22 file_path = os.path.abspath(__file__)
23 dir_path = os.path.dirname(file_path)
24
25 if os.name in ['nt', 'dos']:
26 libname = 'lib/Windows/lib{}.dll'.format(library_name)
27 elif sys.platform in ['darwin']:
28 libname = 'lib/Darwin/lib{}.dylib'.format(library_name)
29 else:
30 libname = 'lib/Linux/lib{}.so'.format(library_name)
31
32 asl_lib_path = os.path.join(dir_path, libname)
33
34 if os.path.exists(asl_lib_path):
35 return asl_lib_path
36 return None
37
38
39 def found_pynumero_libraries():
40
41 p1 = find_pynumero_library('pynumero_ASL')
42 p2 = find_pynumero_library('pynumero_SPARSE')
43
44 if p1 is not None and p2 is not None:
45 return True
46 return False
47
[end of pyomo/contrib/pynumero/extensions/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/contrib/pynumero/extensions/utils.py b/pyomo/contrib/pynumero/extensions/utils.py
--- a/pyomo/contrib/pynumero/extensions/utils.py
+++ b/pyomo/contrib/pynumero/extensions/utils.py
@@ -14,9 +14,14 @@
def find_pynumero_library(library_name):
- asl_path = find_library(library_name)
- if asl_path is not None:
- return asl_path
+ lib_path = find_library(library_name)
+ if lib_path is not None:
+ return lib_path
+
+ # On windows the library is prefixed with 'lib'
+ lib_path = find_library('lib'+library_name)
+ if lib_path is not None:
+ return lib_path
else:
# try looking into extensions directory now
file_path = os.path.abspath(__file__)
@@ -29,10 +34,10 @@
else:
libname = 'lib/Linux/lib{}.so'.format(library_name)
- asl_lib_path = os.path.join(dir_path, libname)
+ lib_path = os.path.join(dir_path, libname)
- if os.path.exists(asl_lib_path):
- return asl_lib_path
+ if os.path.exists(lib_path):
+ return lib_path
return None
| {"golden_diff": "diff --git a/pyomo/contrib/pynumero/extensions/utils.py b/pyomo/contrib/pynumero/extensions/utils.py\n--- a/pyomo/contrib/pynumero/extensions/utils.py\n+++ b/pyomo/contrib/pynumero/extensions/utils.py\n@@ -14,9 +14,14 @@\n \n def find_pynumero_library(library_name):\n \n- asl_path = find_library(library_name)\n- if asl_path is not None:\n- return asl_path\n+ lib_path = find_library(library_name)\n+ if lib_path is not None:\n+ return lib_path\n+\n+ # On windows the library is prefixed with 'lib'\n+ lib_path = find_library('lib'+library_name)\n+ if lib_path is not None:\n+ return lib_path\n else:\n # try looking into extensions directory now\n file_path = os.path.abspath(__file__)\n@@ -29,10 +34,10 @@\n else:\n libname = 'lib/Linux/lib{}.so'.format(library_name)\n \n- asl_lib_path = os.path.join(dir_path, libname)\n+ lib_path = os.path.join(dir_path, libname)\n \n- if os.path.exists(asl_lib_path):\n- return asl_lib_path\n+ if os.path.exists(lib_path):\n+ return lib_path\n return None\n", "issue": "PyNumero support on Windows\nWe need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.\nPyNumero support on Windows\nWe need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\nfrom ctypes.util import find_library\nimport sys\nimport os\n\n\ndef find_pynumero_library(library_name):\n\n asl_path = find_library(library_name)\n if asl_path is not None:\n return asl_path\n else:\n # try looking into extensions directory now\n file_path = os.path.abspath(__file__)\n dir_path = os.path.dirname(file_path)\n\n if os.name in ['nt', 'dos']:\n libname = 'lib/Windows/lib{}.dll'.format(library_name)\n elif sys.platform in ['darwin']:\n libname = 'lib/Darwin/lib{}.dylib'.format(library_name)\n else:\n libname = 'lib/Linux/lib{}.so'.format(library_name)\n\n asl_lib_path = os.path.join(dir_path, libname)\n\n if os.path.exists(asl_lib_path):\n return asl_lib_path\n return None\n\n\ndef found_pynumero_libraries():\n\n p1 = find_pynumero_library('pynumero_ASL')\n p2 = find_pynumero_library('pynumero_SPARSE')\n\n if p1 is not None and p2 is not None:\n return True\n return False\n", "path": "pyomo/contrib/pynumero/extensions/utils.py"}]} | 1,125 | 301 |
gh_patches_debug_3268 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1158 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AU battery returning type error
```
fetch_production("AUS-SA") ->
Traceback (most recent call last):
File "AU.py", line 558, in <module>
print(fetch_production('AUS-SA'))
File "AU.py", line 422, in fetch_production
data['storage']['battery'] = AU_battery.fetch_SA_battery()
File "/home/chris/electricitymap/parsers/lib/AU_battery.py", line 30, in fetch_SA_battery
latest = json.loads(data[-1])
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
```
</issue>
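The traceback above is specific to Python 3.5, where `json.loads` accepts only `str`, while `requests`' `iter_lines()` yields `bytes` unless told to decode. A standalone sketch of the two usual remedies (the reference patch for this issue takes the second one):

```python
import json

def parse_last_json_line(lines):
    """lines: an iterable of bytes, e.g. requests.Response.iter_lines()."""
    data = list(lines)
    if not data:
        return None
    raw = data[-1]
    # Remedy 1: decode explicitly before json.loads (required on Python 3.5,
    # which rejects bytes; 3.6+ accepts them).
    if isinstance(raw, bytes):
        raw = raw.decode("utf-8")
    return json.loads(raw)

# Remedy 2: let requests decode while iterating:
#   for line in response.iter_lines(decode_unicode=True):
#       ...

print(parse_last_json_line([b'{"SCADAVALUE": "12.5"}']))
```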
<code>
[start of parsers/lib/AU_battery.py]
1 #!/usr/bin/env python3
2
3 """Parser for South Australia's 129MWh battery built by Tesla."""
4 import arrow
5 import json
6 import requests
7
8 # nemlog_url gets generation status in 5 min intervals.
9
10
11 def fetch_SA_battery(session=None):
12 """
13 Makes a request to the nemlog api for South Australia battery data.
14 Returns a float or None.
15 """
16
17 today = arrow.now('Australia/Adelaide')
18 current = today.format('YYYYMMDD')
19 old = today.shift(days=-2).format('YYYYMMDD')
20 nemlog_url = 'http://nemlog.com.au/api/unit/HPRL1/{}/{}/json'.format(old, current)
21
22 s = session or requests.Session()
23 req = s.get(nemlog_url)
24
25 data = []
26 for line in req.iter_lines():
27 data.append(line)
28
29 try:
30 latest = json.loads(data[-1])
31 except IndexError:
32 # No data available.
33 return None
34
35 state = float(latest["SCADAVALUE"])
36
37 # Source classifies charge/discharge opposite to EM.
38 battery_status = -1 * state
39
40 return battery_status
41
42
43 if __name__ == '__main__':
44 print('fetch_SA_battery() ->')
45 print(fetch_SA_battery())
46
[end of parsers/lib/AU_battery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/lib/AU_battery.py b/parsers/lib/AU_battery.py
--- a/parsers/lib/AU_battery.py
+++ b/parsers/lib/AU_battery.py
@@ -21,11 +21,9 @@
s = session or requests.Session()
req = s.get(nemlog_url)
-
data = []
- for line in req.iter_lines():
+ for line in req.iter_lines(decode_unicode=True):
data.append(line)
-
try:
latest = json.loads(data[-1])
except IndexError:
| {"golden_diff": "diff --git a/parsers/lib/AU_battery.py b/parsers/lib/AU_battery.py\n--- a/parsers/lib/AU_battery.py\n+++ b/parsers/lib/AU_battery.py\n@@ -21,11 +21,9 @@\n \n s = session or requests.Session()\n req = s.get(nemlog_url)\n-\n data = []\n- for line in req.iter_lines():\n+ for line in req.iter_lines(decode_unicode=True):\n data.append(line)\n-\n try:\n latest = json.loads(data[-1])\n except IndexError:\n", "issue": "AU battery returning type error\n```\r\nfetch_production(\"AUS-SA\") ->\r\nTraceback (most recent call last):\r\n File \"AU.py\", line 558, in <module>\r\n print(fetch_production('AUS-SA'))\r\n File \"AU.py\", line 422, in fetch_production\r\n data['storage']['battery'] = AU_battery.fetch_SA_battery()\r\n File \"/home/chris/electricitymap/parsers/lib/AU_battery.py\", line 30, in fetch_SA_battery\r\n latest = json.loads(data[-1])\r\n File \"/usr/lib/python3.5/json/__init__.py\", line 312, in loads\r\n s.__class__.__name__))\r\nTypeError: the JSON object must be str, not 'bytes'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for South Australia's 129MWh battery built by Tesla.\"\"\"\nimport arrow\nimport json\nimport requests\n\n# nemlog_url gets generation status in 5 min intervals.\n\n\ndef fetch_SA_battery(session=None):\n \"\"\"\n Makes a request to the nemlog api for South Australia battery data.\n Returns a float or None.\n \"\"\"\n\n today = arrow.now('Australia/Adelaide')\n current = today.format('YYYYMMDD')\n old = today.shift(days=-2).format('YYYYMMDD')\n nemlog_url = 'http://nemlog.com.au/api/unit/HPRL1/{}/{}/json'.format(old, current)\n\n s = session or requests.Session()\n req = s.get(nemlog_url)\n\n data = []\n for line in req.iter_lines():\n data.append(line)\n\n try:\n latest = json.loads(data[-1])\n except IndexError:\n # No data available.\n return None\n\n state = float(latest[\"SCADAVALUE\"])\n\n # Source classifies charge/discharge opposite to EM.\n battery_status = -1 * state\n\n return battery_status\n\n\nif __name__ == '__main__':\n print('fetch_SA_battery() ->')\n print(fetch_SA_battery())\n", "path": "parsers/lib/AU_battery.py"}]} | 1,064 | 122 |
gh_patches_debug_16023 | rasdani/github-patches | git_diff | databricks__koalas-161 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show pandas style Table of Contents on the left side in docs
Right now our docs show a weird Table of Contents for only a section, rather than the entire doc. Can we fix it so it shows the Table of Contents of the entire docs, e.g. starting from the top level?
<img width="647" alt="Screen Shot 2019-04-23 at 4 40 38 PM" src="https://user-images.githubusercontent.com/323388/56622865-9351b600-65e6-11e9-98b3-7930660b1c93.png">
</issue>
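The sidebar contents are a property of the Sphinx HTML theme configured in `conf.py` below, not of individual pages, so a global table of contents needs a theme that renders one. A minimal sketch of the settings involved (the theme name and path match what the reference patch for this issue uses):

```python
# docs/source/conf.py (sketch)
# Use a theme that renders the full, global table of contents in the sidebar.
html_theme = 'nature_with_gtoc'

# Directory (relative to conf.py) holding custom themes bundled with the docs.
html_theme_path = ['themes']
```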
<code>
[start of docs/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from databricks import koalas
16 sys.path.insert(0, os.path.abspath('.'))
17
18
19 # -- Project information -----------------------------------------------------
20
21 project = 'Koalas'
22 copyright = '2019, Databricks'
23 author = 'The Koalas Team'
24
25 # The full version, including alpha/beta/rc tags
26 release = os.environ.get('RELEASE_VERSION', koalas.__version__)
27
28
29 # -- General configuration ---------------------------------------------------
30
31 # If your documentation needs a minimal Sphinx version, state it here.
32 needs_sphinx = '1.2'
33
34 # Add any Sphinx extension module names here, as strings. They can be
35 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
36 # ones.
37 extensions = [
38 'sphinx.ext.autodoc',
39 'sphinx.ext.viewcode',
40 'numpydoc', # handle NumPy documentation formatted docstrings. Needs to install
41 'nbsphinx', # Jupyter Notebook. Needs to install
42 ]
43
44 # Add any paths that contain templates here, relative to this directory.
45 templates_path = ['_templates']
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
51
52 # The name of the Pygments (syntax highlighting) style to use.
53 pygments_style = 'sphinx'
54
55 # The master toctree document.
56 master_doc = 'index'
57
58 numpydoc_show_class_members = False
59
60 # -- Options for auto output -------------------------------------------------
61
62 autoclass_content = 'both'
63 autosummary_generate = True
64
65
66 # -- Options for HTML output -------------------------------------------------
67
68 # The theme to use for HTML and HTML Help pages. See the documentation for
69 # a list of builtin themes.
70 #
71 html_theme = 'nature'
72
73 # Add any paths that contain custom static files (such as style sheets) here,
74 # relative to this directory. They are copied after the builtin static files,
75 # so a file named "default.css" will overwrite the builtin "default.css".
76 html_static_path = ['_static']
77
78 # If false, no index is generated.
79 html_use_index = False
80
81 # If false, no module index is generated.
82 html_domain_indices = False
83
84
85 # -- Options for manual page output ---------------------------------------
86
87 # One entry per manual page. List of tuples
88 # (source start file, name, description, authors, manual section).
89 man_pages = [
90 ('index', 'databricks.koalas', u'databricks.koalas Documentation',
91 [u'Author'], 1)
92 ]
93
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -68,13 +68,16 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'nature'
+html_theme = 'nature_with_gtoc'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['themes']
+
# If false, no index is generated.
html_use_index = False
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -68,13 +68,16 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'nature'\n+html_theme = 'nature_with_gtoc'\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = ['_static']\n \n+# Add any paths that contain custom themes here, relative to this directory.\n+html_theme_path = ['themes']\n+\n # If false, no index is generated.\n html_use_index = False\n", "issue": "Show pandas style Table of Contents on the left side in docs\nRight now our docs show a weird Table of Contents for only a section, rather than the entire doc. Can we fix it so it shows the Table of Contents of the entire docs, e.g. start from the top level?\r\n\r\n<img width=\"647\" alt=\"Screen Shot 2019-04-23 at 4 40 38 PM\" src=\"https://user-images.githubusercontent.com/323388/56622865-9351b600-65e6-11e9-98b3-7930660b1c93.png\">\r\n\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom databricks import koalas\nsys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Koalas'\ncopyright = '2019, Databricks'\nauthor = 'The Koalas Team'\n\n# The full version, including alpha/beta/rc tags\nrelease = os.environ.get('RELEASE_VERSION', koalas.__version__)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.2'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'numpydoc', # handle NumPy documentation formatted docstrings. Needs to install\n 'nbsphinx', # Jupyter Notebook. Needs to install\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# The master toctree document.\nmaster_doc = 'index'\n\nnumpydoc_show_class_members = False\n\n# -- Options for auto output -------------------------------------------------\n\nautoclass_content = 'both'\nautosummary_generate = True\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'nature'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'databricks.koalas', u'databricks.koalas Documentation',\n [u'Author'], 1)\n]\n", "path": "docs/source/conf.py"}]} | 1,549 | 183 |
gh_patches_debug_23082 | rasdani/github-patches | git_diff | microsoft__playwright-python-401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Interactive mode (REPL) Error !!!
**pip install playwright==0.162.2**
from playwright import sync_playwright
**playwright = sync_playwright().start()**
Traceback (most recent call last):
File "<pyshell#1>", line 1, in
playwright = sync_playwright().start()
File "C:\Python37\lib\site-packages\playwright_init_.py", line 34, in sync_playwright
return SyncPlaywrightContextManager()
File "C:\Python37\lib\site-packages\playwright\main.py", line 81, in init
self._connection = run_driver()
File "C:\Python37\lib\site-packages\playwright\main.py", line 76, in run_driver
return loop.run_until_complete(run_driver_async())
File "C:\Python37\lib\asyncio\base_events.py", line 587, in run_until_complete
return future.result()
File "C:\Python37\lib\site-packages\playwright\main.py", line 61, in run_driver_async
stderr=_get_stderr_fileno(),
File "C:\Python37\lib\site-packages\playwright\main.py", line 54, in _get_stderr_fileno
return sys.stderr.fileno()
**AttributeError: 'NoneType' object has no attribute 'fileno'**
</issue>
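The root cause is that REPL front-ends such as IDLE replace or null out `sys.stderr`, so `sys.stderr.fileno()` is not available when the driver subprocess is spawned. A standalone sketch of a defensive lookup (the same idea as the pytest-derived helper in the reference patch):

```python
import io
import sys
from typing import Optional

def safe_stderr_fileno() -> Optional[int]:
    try:
        return sys.stderr.fileno()
    except (AttributeError, io.UnsupportedOperation):
        # IDLE, pythonw and some test runners swap sys.stderr for an object
        # without a real file descriptor (or set it to None); fall back to
        # the interpreter's original stream if it still exists.
        if not hasattr(sys, "__stderr__") or sys.__stderr__ is None:
            return None
        return sys.__stderr__.fileno()

print(safe_stderr_fileno())
```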
<code>
[start of playwright/_impl/_transport.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import json
17 import os
18 import sys
19 from pathlib import Path
20 from typing import Dict
21
22
23 class Transport:
24 def __init__(self, driver_executable: Path) -> None:
25 super().__init__()
26 self.on_message = lambda _: None
27 self._stopped = False
28 self._driver_executable = driver_executable
29 self._loop: asyncio.AbstractEventLoop
30
31 def stop(self) -> None:
32 self._stopped = True
33 self._output.close()
34
35 async def run(self) -> None:
36 self._loop = asyncio.get_running_loop()
37 driver_executable = self._driver_executable
38
39 proc = await asyncio.create_subprocess_exec(
40 str(driver_executable),
41 "run-driver",
42 stdin=asyncio.subprocess.PIPE,
43 stdout=asyncio.subprocess.PIPE,
44 stderr=sys.stderr,
45 limit=32768,
46 )
47 assert proc.stdout
48 assert proc.stdin
49 self._output = proc.stdin
50
51 while not self._stopped:
52 try:
53 buffer = await proc.stdout.readexactly(4)
54 length = int.from_bytes(buffer, byteorder="little", signed=False)
55 buffer = bytes(0)
56 while length:
57 to_read = min(length, 32768)
58 data = await proc.stdout.readexactly(to_read)
59 length -= to_read
60 if len(buffer):
61 buffer = buffer + data
62 else:
63 buffer = data
64 obj = json.loads(buffer)
65
66 if "DEBUGP" in os.environ: # pragma: no cover
67 print("\x1b[33mRECV>\x1b[0m", json.dumps(obj, indent=2))
68 self.on_message(obj)
69 except asyncio.IncompleteReadError:
70 break
71 await asyncio.sleep(0)
72
73 def send(self, message: Dict) -> None:
74 msg = json.dumps(message)
75 if "DEBUGP" in os.environ: # pragma: no cover
76 print("\x1b[32mSEND>\x1b[0m", json.dumps(message, indent=2))
77 data = msg.encode()
78 self._output.write(
79 len(data).to_bytes(4, byteorder="little", signed=False) + data
80 )
81
[end of playwright/_impl/_transport.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py
--- a/playwright/_impl/_transport.py
+++ b/playwright/_impl/_transport.py
@@ -13,11 +13,25 @@
# limitations under the License.
import asyncio
+import io
import json
import os
import sys
from pathlib import Path
-from typing import Dict
+from typing import Dict, Optional
+
+
+# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77
+def _get_stderr_fileno() -> Optional[int]:
+ try:
+ return sys.stderr.fileno()
+ except (AttributeError, io.UnsupportedOperation):
+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
+ # This is potentially dangerous, but the best we can do.
+ if not hasattr(sys, "__stderr__"):
+ return None
+ return sys.__stderr__.fileno()
class Transport:
@@ -41,7 +55,7 @@
"run-driver",
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=sys.stderr,
+ stderr=_get_stderr_fileno(),
limit=32768,
)
assert proc.stdout
| {"golden_diff": "diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py\n--- a/playwright/_impl/_transport.py\n+++ b/playwright/_impl/_transport.py\n@@ -13,11 +13,25 @@\n # limitations under the License.\n \n import asyncio\n+import io\n import json\n import os\n import sys\n from pathlib import Path\n-from typing import Dict\n+from typing import Dict, Optional\n+\n+\n+# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\n+def _get_stderr_fileno() -> Optional[int]:\n+ try:\n+ return sys.stderr.fileno()\n+ except (AttributeError, io.UnsupportedOperation):\n+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n+ # This is potentially dangerous, but the best we can do.\n+ if not hasattr(sys, \"__stderr__\"):\n+ return None\n+ return sys.__stderr__.fileno()\n \n \n class Transport:\n@@ -41,7 +55,7 @@\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n- stderr=sys.stderr,\n+ stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n", "issue": "Interactive mode (REPL) Error !!!\n**pip install playwright==0.162.2**\r\n\r\nfrom playwright import sync_playwright\r\n**playwright = sync_playwright().start()**\r\n\r\nTraceback (most recent call last):\r\nFile \"<pyshell#1>\", line 1, in\r\nplaywright = sync_playwright().start()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright_init_.py\", line 34, in sync_playwright\r\nreturn SyncPlaywrightContextManager()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 81, in init\r\nself._connection = run_driver()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 76, in run_driver\r\nreturn loop.run_until_complete(run_driver_async())\r\nFile \"C:\\Python37\\lib\\asyncio\\base_events.py\", line 587, in run_until_complete\r\nreturn future.result()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 61, in run_driver_async\r\nstderr=_get_stderr_fileno(),\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 54, in _get_stderr_fileno\r\nreturn sys.stderr.fileno()\r\n**AttributeError: 'NoneType' object has no attribute 'fileno'**\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n driver_executable = self._driver_executable\n\n proc = await 
asyncio.create_subprocess_exec(\n str(driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}]} | 1,608 | 351 |
gh_patches_debug_26087 | rasdani/github-patches | git_diff | nvaccess__nvda-9208 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NVDA hangs up in terminal, when a large piece of text is loaded
I know that a normal user doesn't have this problem.
But developers, mainly developers working in a terminal, could have it.
When a large piece of text is loaded into the terminal at the same time, for example 10000 characters or more, NVDA hangs up. Also, after a moment, the system hangs up.
The only way is to wait.
NVDA does not read the text continuously; it reads a piece of text, then a moment of silence, then another piece, then silence...
For example, I can trigger this in Ruby by writing
for i in 1..100000
print("A fragment number #{i.to_s} ")
end
Also, we can reproduce this error when using a terminal app which writes big pieces of text at once.
With console commands like tree, we won't observe this error, because the text isn't loaded all at the same time; there is a pause between printing new files.
What is interesting...
The problem hangs up the whole system; you cannot open Task Manager or other apps.
Thank you for your help.
Greetings,
Dawid Pieper
</issue>
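For reference, an equivalent one-shot flood of the console in Python (a direct translation of the Ruby loop in the issue above, illustrative only):

```python
import sys

# Write ~100,000 fragments in a single burst, mirroring the Ruby repro above.
sys.stdout.write(" ".join("A fragment number {}".format(i) for i in range(1, 100001)))
sys.stdout.flush()
```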
<code>
[start of source/winInputHook.py]
1 #winInputHook.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 import threading
8 import comtypes.client
9 import time
10 from ctypes import *
11 from ctypes.wintypes import *
12 from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
13
14 class KBDLLHOOKSTRUCT(Structure):
15 _fields_=[
16 ('vkCode',DWORD),
17 ('scanCode',DWORD),
18 ('flags',DWORD),
19 ('time',DWORD),
20 ('dwExtraInfo',DWORD),
21 ]
22
23 class MSLLHOOKSTRUCT(Structure):
24 _fields_=[
25 ('pt',POINT),
26 ('mouseData',DWORD),
27 ('flags',DWORD),
28 ('time',DWORD),
29 ('dwExtraInfo',DWORD),
30 ]
31
32 keyDownCallback=None
33 keyUpCallback=None
34 mouseCallback=None
35
36 @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
37 def keyboardHook(code,wParam,lParam):
38 if code!=HC_ACTION:
39 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
40 kbd=KBDLLHOOKSTRUCT.from_address(lParam)
41 if keyUpCallback and kbd.flags&LLKHF_UP:
42 if not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
43 return 1
44 elif keyDownCallback:
45 if not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
46 return 1
47 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
48
49 @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
50 def mouseHook(code,wParam,lParam):
51 if code!=HC_ACTION:
52 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
53 msll=MSLLHOOKSTRUCT.from_address(lParam)
54 if mouseCallback:
55 if not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):
56 return 1
57 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
58
59 hookThread=None
60 hookThreadRefCount=0
61
62 def hookThreadFunc():
63 keyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)
64 if keyHookID==0:
65 raise OSError("Could not register keyboard hook")
66 mouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)
67 if mouseHookID==0:
68 raise OSError("Could not register mouse hook")
69 msg=MSG()
70 while windll.user32.GetMessageW(byref(msg),None,0,0):
71 pass
72 if windll.user32.UnhookWindowsHookEx(keyHookID)==0:
73 raise OSError("could not unregister key hook %s"%keyHookID)
74 if windll.user32.UnhookWindowsHookEx(mouseHookID)==0:
75 raise OSError("could not unregister mouse hook %s"%mouseHookID)
76
77 def initialize():
78 global hookThread, hookThreadRefCount
79 hookThreadRefCount+=1
80 if hookThreadRefCount==1:
81 hookThread=threading.Thread(target=hookThreadFunc)
82 hookThread.start()
83
84 def setCallbacks(keyUp=None,keyDown=None,mouse=None):
85 global keyUpCallback, keyDownCallback, mouseCallback
86 if keyUp:
87 keyUpCallback=keyUp
88 if keyDown:
89 keyDownCallback=keyDown
90 if mouse:
91 mouseCallback=mouse
92
93 def terminate():
94 global hookThread, hookThreadRefCount
95 if not hookThread:
96 raise RuntimeError("winInputHook not running")
97 hookThreadRefCount-=1
98 if hookThreadRefCount==0:
99 windll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)
100 hookThread.join()
101 hookThread=None
102
[end of source/winInputHook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/winInputHook.py b/source/winInputHook.py
--- a/source/winInputHook.py
+++ b/source/winInputHook.py
@@ -10,6 +10,7 @@
from ctypes import *
from ctypes.wintypes import *
from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
+import watchdog
class KBDLLHOOKSTRUCT(Structure):
_fields_=[
@@ -35,7 +36,7 @@
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def keyboardHook(code,wParam,lParam):
- if code!=HC_ACTION:
+ if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
kbd=KBDLLHOOKSTRUCT.from_address(lParam)
if keyUpCallback and kbd.flags&LLKHF_UP:
@@ -48,7 +49,7 @@
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def mouseHook(code,wParam,lParam):
- if code!=HC_ACTION:
+ if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
msll=MSLLHOOKSTRUCT.from_address(lParam)
if mouseCallback:
| {"golden_diff": "diff --git a/source/winInputHook.py b/source/winInputHook.py\n--- a/source/winInputHook.py\n+++ b/source/winInputHook.py\n@@ -10,6 +10,7 @@\n from ctypes import *\r\n from ctypes.wintypes import *\r\n from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\n+import watchdog\r\n \r\n class KBDLLHOOKSTRUCT(Structure):\r\n \t_fields_=[\r\n@@ -35,7 +36,7 @@\n \r\n @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\n def keyboardHook(code,wParam,lParam):\r\n-\tif code!=HC_ACTION:\r\n+\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n \t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n \tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n \tif keyUpCallback and kbd.flags&LLKHF_UP:\r\n@@ -48,7 +49,7 @@\n \r\n @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\n def mouseHook(code,wParam,lParam):\r\n-\tif code!=HC_ACTION:\r\n+\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n \t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n \tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n \tif mouseCallback:\n", "issue": "NVDA hangs up in terminal, when a large piece of text is loaded\nI know that normal user doesn't have this problem.\nBut developers, mainly developers working in terminal, could have.\n\nWhen large piece of text is loaded to the terminal at the same time, for example, 10000 characters of more, NVDA is hanging up. Also, after a moment, the system hangs up.\nThe only way is to wait.\nNVDA is not reading the text, it's reading pieces of text, then a moment of silence, different piece, silence...\n\nFor example, I can call this in ruby by writing\n\nfor i in 1..100000\nprint(\"A fragment number #{i.to_s} \")\nend\n\nAlso, we can find this error, when we'll using in terminal app, which writes big pieces of text.\nIn console commands, like tree, we won't observe this eror, because it isn't loading of text at the same time, there's a while between printing new files.\n\nWhat is interesting...\nThe problem is hanging up the all system, you can not open task manager or other apps.\n\nThank you for help\nGreetings,\nDawid Pieper\n\n", "before_files": [{"content": "#winInputHook.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport threading\r\nimport comtypes.client\r\nimport time\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nfrom win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\n\r\nclass KBDLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('vkCode',DWORD),\r\n\t\t('scanCode',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nclass MSLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('pt',POINT),\r\n\t\t('mouseData',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nkeyDownCallback=None\r\nkeyUpCallback=None\r\nmouseCallback=None\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef keyboardHook(code,wParam,lParam):\r\n\tif code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n\tif keyUpCallback and kbd.flags&LLKHF_UP:\r\n\t\tif not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 
1\r\n\telif keyDownCallback:\r\n\t\tif not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef mouseHook(code,wParam,lParam):\r\n\tif code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n\tif mouseCallback:\r\n\t\tif not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\nhookThread=None\r\nhookThreadRefCount=0\r\n\r\ndef hookThreadFunc():\r\n\tkeyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif keyHookID==0:\r\n\t\traise OSError(\"Could not register keyboard hook\")\r\n\tmouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif mouseHookID==0:\r\n\t\traise OSError(\"Could not register mouse hook\")\r\n\tmsg=MSG()\r\n\twhile windll.user32.GetMessageW(byref(msg),None,0,0):\r\n\t\tpass\r\n\tif windll.user32.UnhookWindowsHookEx(keyHookID)==0:\r\n\t\traise OSError(\"could not unregister key hook %s\"%keyHookID)\r\n\tif windll.user32.UnhookWindowsHookEx(mouseHookID)==0:\r\n\t\traise OSError(\"could not unregister mouse hook %s\"%mouseHookID)\r\n\r\ndef initialize():\r\n\tglobal hookThread, hookThreadRefCount\r\n\thookThreadRefCount+=1\r\n\tif hookThreadRefCount==1:\r\n\t\thookThread=threading.Thread(target=hookThreadFunc)\r\n\t\thookThread.start()\r\n\r\ndef setCallbacks(keyUp=None,keyDown=None,mouse=None):\r\n\tglobal keyUpCallback, keyDownCallback, mouseCallback\r\n\tif keyUp:\r\n\t\tkeyUpCallback=keyUp\r\n\tif keyDown:\r\n\t\tkeyDownCallback=keyDown\r\n\tif mouse:\r\n\t\tmouseCallback=mouse\r\n\r\ndef terminate():\r\n\tglobal hookThread, hookThreadRefCount\r\n\tif not hookThread:\r\n\t\traise RuntimeError(\"winInputHook not running\")\r\n\thookThreadRefCount-=1\r\n\tif hookThreadRefCount==0:\r\n\t\twindll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)\r\n\t\thookThread.join()\r\n\t\thookThread=None\r\n", "path": "source/winInputHook.py"}]} | 1,966 | 316 |
gh_patches_debug_33666 | rasdani/github-patches | git_diff | google__fuzzbench-776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move/publish reports of experimental experiments under fuzzbench.com/reports/experimental
Experimental experiments are experiments with fuzzers not in this list:
https://github.com/google/fuzzbench/blob/master/service/core-fuzzers.yaml
</issue>
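A standalone sketch (an assumption about one possible implementation, not necessarily the repository's actual change) of how an experiment could be classified against `core-fuzzers.yaml` so that its report lands under a separate `experimental/` prefix:

```python
import posixpath

def report_destination(report_filestore, experiment_name,
                       experiment_fuzzers, core_fuzzers):
    # An experiment is "experimental" when it runs any fuzzer that is not in
    # service/core-fuzzers.yaml; publish its report under reports/experimental.
    if not set(experiment_fuzzers).issubset(core_fuzzers):
        return posixpath.join(report_filestore, 'experimental', experiment_name)
    return posixpath.join(report_filestore, experiment_name)

# Bucket, experiment and fuzzer names below are illustrative only.
print(report_destination('gs://example-bucket/reports', '2020-09-01-test',
                         ['afl', 'my_new_fuzzer'], ['afl', 'libfuzzer']))
```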
<code>
[start of experiment/reporter.py]
1 #!/usr/bin/env python3
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """A module containing the interface used by an experiment for generating
16 reports."""
17 import os
18 import posixpath
19
20 from common import experiment_utils
21 from common import experiment_path as exp_path
22 from common import filesystem
23 from common import filestore_utils
24 from common import logs
25 from common import yaml_utils
26 from analysis import generate_report
27 from analysis import data_utils
28
29 CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',
30 'core-fuzzers.yaml')
31
32 logger = logs.Logger('reporter') # pylint: disable=invalid-name
33
34
35 def get_reports_dir():
36 """Return reports directory."""
37 return exp_path.path('reports')
38
39
40 def output_report(experiment_config: dict,
41 in_progress=False,
42 coverage_report=False):
43 """Generate the HTML report and write it to |web_bucket|."""
44 experiment_name = experiment_utils.get_experiment_name()
45 web_filestore_path = posixpath.join(experiment_config['report_filestore'],
46 experiment_name)
47
48 reports_dir = get_reports_dir()
49
50 core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
51 fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))
52
53 # Don't merge with nonprivate experiments until the very end as doing it
54 # while the experiment is in progress will produce unusable realtime
55 # results.
56 merge_with_nonprivate = (not in_progress and experiment_config.get(
57 'merge_with_nonprivate', False))
58
59 try:
60 logger.debug('Generating report.')
61 filesystem.recreate_directory(reports_dir)
62 generate_report.generate_report(
63 [experiment_name],
64 str(reports_dir),
65 report_name=experiment_name,
66 fuzzers=fuzzers,
67 in_progress=in_progress,
68 merge_with_clobber_nonprivate=merge_with_nonprivate,
69 coverage_report=coverage_report)
70 filestore_utils.rsync(
71 str(reports_dir),
72 web_filestore_path,
73 delete=False, # Don't remove existing coverage jsons.
74 gsutil_options=[
75 '-h', 'Cache-Control:public,max-age=0,no-transform'
76 ])
77 logger.debug('Done generating report.')
78 except data_utils.EmptyDataError:
79 logs.warning('No snapshot data.')
80 except Exception: # pylint: disable=broad-except
81 logger.error('Error generating HTML report.')
82
[end of experiment/reporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/experiment/reporter.py b/experiment/reporter.py
--- a/experiment/reporter.py
+++ b/experiment/reporter.py
@@ -26,8 +26,9 @@
from analysis import generate_report
from analysis import data_utils
-CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',
- 'core-fuzzers.yaml')
+CORE_FUZZERS_YAML = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', 'service',
+ 'core-fuzzers.yaml'))
logger = logs.Logger('reporter') # pylint: disable=invalid-name
@@ -37,18 +38,29 @@
return exp_path.path('reports')
+def get_core_fuzzers():
+ """Return list of core fuzzers to be used for merging experiment data."""
+ return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
+
+
def output_report(experiment_config: dict,
in_progress=False,
coverage_report=False):
"""Generate the HTML report and write it to |web_bucket|."""
experiment_name = experiment_utils.get_experiment_name()
- web_filestore_path = posixpath.join(experiment_config['report_filestore'],
- experiment_name)
-
reports_dir = get_reports_dir()
- core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
- fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))
+ core_fuzzers = set(get_core_fuzzers())
+ experiment_fuzzers = set(experiment_config['fuzzers'])
+ fuzzers = experiment_fuzzers.union(core_fuzzers)
+
+ # Calculate path to store report files in filestore.
+ web_filestore_path = experiment_config['report_filestore']
+ if not fuzzers.issubset(core_fuzzers):
+ # This means that we are running an experimental report with fuzzers
+ # not in the core list. So, store these in |experimental| sub-directory.
+ web_filestore_path = os.path.join(web_filestore_path, 'experimental')
+ web_filestore_path = posixpath.join(web_filestore_path, experiment_name)
# Don't merge with nonprivate experiments until the very end as doing it
# while the experiment is in progress will produce unusable realtime
| {"golden_diff": "diff --git a/experiment/reporter.py b/experiment/reporter.py\n--- a/experiment/reporter.py\n+++ b/experiment/reporter.py\n@@ -26,8 +26,9 @@\n from analysis import generate_report\n from analysis import data_utils\n \n-CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',\n- 'core-fuzzers.yaml')\n+CORE_FUZZERS_YAML = os.path.abspath(\n+ os.path.join(os.path.dirname(__file__), '..', 'service',\n+ 'core-fuzzers.yaml'))\n \n logger = logs.Logger('reporter') # pylint: disable=invalid-name\n \n@@ -37,18 +38,29 @@\n return exp_path.path('reports')\n \n \n+def get_core_fuzzers():\n+ \"\"\"Return list of core fuzzers to be used for merging experiment data.\"\"\"\n+ return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n+\n+\n def output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n- web_filestore_path = posixpath.join(experiment_config['report_filestore'],\n- experiment_name)\n-\n reports_dir = get_reports_dir()\n \n- core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n- fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))\n+ core_fuzzers = set(get_core_fuzzers())\n+ experiment_fuzzers = set(experiment_config['fuzzers'])\n+ fuzzers = experiment_fuzzers.union(core_fuzzers)\n+\n+ # Calculate path to store report files in filestore.\n+ web_filestore_path = experiment_config['report_filestore']\n+ if not fuzzers.issubset(core_fuzzers):\n+ # This means that we are running an experimental report with fuzzers\n+ # not in the core list. So, store these in |experimental| sub-directory.\n+ web_filestore_path = os.path.join(web_filestore_path, 'experimental')\n+ web_filestore_path = posixpath.join(web_filestore_path, experiment_name)\n \n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n", "issue": "Move/publish reports of experimental experiments under fuzzbench.com/reports/experimental\nExperimental experiments are experiments with fuzzers not in this list:\r\nhttps://github.com/google/fuzzbench/blob/master/service/core-fuzzers.yaml\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A module containing the interface used by an experiment for generating\nreports.\"\"\"\nimport os\nimport posixpath\n\nfrom common import experiment_utils\nfrom common import experiment_path as exp_path\nfrom common import filesystem\nfrom common import filestore_utils\nfrom common import logs\nfrom common import yaml_utils\nfrom analysis import generate_report\nfrom analysis import data_utils\n\nCORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',\n 'core-fuzzers.yaml')\n\nlogger = logs.Logger('reporter') # pylint: disable=invalid-name\n\n\ndef get_reports_dir():\n \"\"\"Return reports 
directory.\"\"\"\n return exp_path.path('reports')\n\n\ndef output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n web_filestore_path = posixpath.join(experiment_config['report_filestore'],\n experiment_name)\n\n reports_dir = get_reports_dir()\n\n core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))\n\n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n # results.\n merge_with_nonprivate = (not in_progress and experiment_config.get(\n 'merge_with_nonprivate', False))\n\n try:\n logger.debug('Generating report.')\n filesystem.recreate_directory(reports_dir)\n generate_report.generate_report(\n [experiment_name],\n str(reports_dir),\n report_name=experiment_name,\n fuzzers=fuzzers,\n in_progress=in_progress,\n merge_with_clobber_nonprivate=merge_with_nonprivate,\n coverage_report=coverage_report)\n filestore_utils.rsync(\n str(reports_dir),\n web_filestore_path,\n delete=False, # Don't remove existing coverage jsons.\n gsutil_options=[\n '-h', 'Cache-Control:public,max-age=0,no-transform'\n ])\n logger.debug('Done generating report.')\n except data_utils.EmptyDataError:\n logs.warning('No snapshot data.')\n except Exception: # pylint: disable=broad-except\n logger.error('Error generating HTML report.')\n", "path": "experiment/reporter.py"}]} | 1,374 | 526 |
gh_patches_debug_31036 | rasdani/github-patches | git_diff | goauthentik__authentik-6325 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docker compose run --rm server ldap_sync - doesn't work anymore to debug
**Describe the bug**
this command doesn't work anymore as described here (https://goauthentik.io/docs/troubleshooting/ldap_source)
```
docker compose run --rm server ldap_sync nxnet
```
it will just create a background task instead of running it in the foreground!
**To Reproduce**
```
docker compose run --rm server ldap_sync SLUGofLDAPsource
```
**Expected behavior**
it will run an LDAP synchronization in the foreground so you can see any errors or warnings that might happen directly
**Logs**
{"event": "Task published", "level": "info", "logger": "authentik.root.celery", "pid": 7, "task_id": "17af668f-1d9f-4732-a0eb-044c4a16beed", "task_name": "authentik.sources.ldap.tasks.ldap_sync", "timestamp": "2023-07-20T15:10:12.575247"}
**Version and Deployment (please complete the following information):**
- authentik version: 2023.6.1
- Deployment: docker compose
</issue>
<code>
[start of authentik/sources/ldap/management/commands/ldap_sync.py]
1 """LDAP Sync"""
2 from django.core.management.base import BaseCommand
3 from structlog.stdlib import get_logger
4
5 from authentik.sources.ldap.models import LDAPSource
6 from authentik.sources.ldap.tasks import ldap_sync_single
7
8 LOGGER = get_logger()
9
10
11 class Command(BaseCommand):
12 """Run sync for an LDAP Source"""
13
14 def add_arguments(self, parser):
15 parser.add_argument("source_slugs", nargs="+", type=str)
16
17 def handle(self, **options):
18 for source_slug in options["source_slugs"]:
19 source = LDAPSource.objects.filter(slug=source_slug).first()
20 if not source:
21 LOGGER.warning("Source does not exist", slug=source_slug)
22 continue
23 ldap_sync_single(source)
24
[end of authentik/sources/ldap/management/commands/ldap_sync.py]
[start of authentik/sources/ldap/sync/users.py]
1 """Sync LDAP Users into authentik"""
2 from typing import Generator
3
4 from django.core.exceptions import FieldError
5 from django.db.utils import IntegrityError
6 from ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE
7
8 from authentik.core.models import User
9 from authentik.events.models import Event, EventAction
10 from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer
11 from authentik.sources.ldap.sync.vendor.freeipa import FreeIPA
12 from authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory
13
14
15 class UserLDAPSynchronizer(BaseLDAPSynchronizer):
16 """Sync LDAP Users into authentik"""
17
18 @staticmethod
19 def name() -> str:
20 return "users"
21
22 def get_objects(self, **kwargs) -> Generator:
23 return self.search_paginator(
24 search_base=self.base_dn_users,
25 search_filter=self._source.user_object_filter,
26 search_scope=SUBTREE,
27 attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],
28 **kwargs,
29 )
30
31 def sync(self, page_data: list) -> int:
32 """Iterate over all LDAP Users and create authentik_core.User instances"""
33 if not self._source.sync_users:
34 self.message("User syncing is disabled for this Source")
35 return -1
36 user_count = 0
37 for user in page_data:
38 if "attributes" not in user:
39 continue
40 attributes = user.get("attributes", {})
41 user_dn = self._flatten(user.get("entryDN", user.get("dn")))
42 if self._source.object_uniqueness_field not in attributes:
43 self.message(
44 f"Cannot find uniqueness field in attributes: '{user_dn}'",
45 attributes=attributes.keys(),
46 dn=user_dn,
47 )
48 continue
49 uniq = self._flatten(attributes[self._source.object_uniqueness_field])
50 try:
51 defaults = self.build_user_properties(user_dn, **attributes)
52 self._logger.debug("Creating user with attributes", **defaults)
53 if "username" not in defaults:
54 raise IntegrityError("Username was not set by propertymappings")
55 ak_user, created = self.update_or_create_attributes(
56 User, {f"attributes__{LDAP_UNIQUENESS}": uniq}, defaults
57 )
58 except (IntegrityError, FieldError, TypeError, AttributeError) as exc:
59 Event.new(
60 EventAction.CONFIGURATION_ERROR,
61 message=(
62 f"Failed to create user: {str(exc)} "
63 "To merge new user with existing user, set the user's "
64 f"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'"
65 ),
66 source=self._source,
67 dn=user_dn,
68 ).save()
69 else:
70 self._logger.debug("Synced User", user=ak_user.username, created=created)
71 user_count += 1
72 MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)
73 FreeIPA(self._source).sync(attributes, ak_user, created)
74 return user_count
75
[end of authentik/sources/ldap/sync/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/sources/ldap/management/commands/ldap_sync.py b/authentik/sources/ldap/management/commands/ldap_sync.py
--- a/authentik/sources/ldap/management/commands/ldap_sync.py
+++ b/authentik/sources/ldap/management/commands/ldap_sync.py
@@ -3,7 +3,10 @@
from structlog.stdlib import get_logger
from authentik.sources.ldap.models import LDAPSource
-from authentik.sources.ldap.tasks import ldap_sync_single
+from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer
+from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer
+from authentik.sources.ldap.sync.users import UserLDAPSynchronizer
+from authentik.sources.ldap.tasks import ldap_sync_paginator
LOGGER = get_logger()
@@ -20,4 +23,10 @@
if not source:
LOGGER.warning("Source does not exist", slug=source_slug)
continue
- ldap_sync_single(source)
+ tasks = (
+ ldap_sync_paginator(source, UserLDAPSynchronizer)
+ + ldap_sync_paginator(source, GroupLDAPSynchronizer)
+ + ldap_sync_paginator(source, MembershipLDAPSynchronizer)
+ )
+ for task in tasks:
+ task()
diff --git a/authentik/sources/ldap/sync/users.py b/authentik/sources/ldap/sync/users.py
--- a/authentik/sources/ldap/sync/users.py
+++ b/authentik/sources/ldap/sync/users.py
@@ -49,7 +49,7 @@
uniq = self._flatten(attributes[self._source.object_uniqueness_field])
try:
defaults = self.build_user_properties(user_dn, **attributes)
- self._logger.debug("Creating user with attributes", **defaults)
+ self._logger.debug("Writing user with attributes", **defaults)
if "username" not in defaults:
raise IntegrityError("Username was not set by propertymappings")
ak_user, created = self.update_or_create_attributes(
| {"golden_diff": "diff --git a/authentik/sources/ldap/management/commands/ldap_sync.py b/authentik/sources/ldap/management/commands/ldap_sync.py\n--- a/authentik/sources/ldap/management/commands/ldap_sync.py\n+++ b/authentik/sources/ldap/management/commands/ldap_sync.py\n@@ -3,7 +3,10 @@\n from structlog.stdlib import get_logger\n \n from authentik.sources.ldap.models import LDAPSource\n-from authentik.sources.ldap.tasks import ldap_sync_single\n+from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer\n+from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer\n+from authentik.sources.ldap.sync.users import UserLDAPSynchronizer\n+from authentik.sources.ldap.tasks import ldap_sync_paginator\n \n LOGGER = get_logger()\n \n@@ -20,4 +23,10 @@\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n- ldap_sync_single(source)\n+ tasks = (\n+ ldap_sync_paginator(source, UserLDAPSynchronizer)\n+ + ldap_sync_paginator(source, GroupLDAPSynchronizer)\n+ + ldap_sync_paginator(source, MembershipLDAPSynchronizer)\n+ )\n+ for task in tasks:\n+ task()\ndiff --git a/authentik/sources/ldap/sync/users.py b/authentik/sources/ldap/sync/users.py\n--- a/authentik/sources/ldap/sync/users.py\n+++ b/authentik/sources/ldap/sync/users.py\n@@ -49,7 +49,7 @@\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n- self._logger.debug(\"Creating user with attributes\", **defaults)\n+ self._logger.debug(\"Writing user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n", "issue": "docker compose run --rm server ldap_sync - doesn't work anymore to debug\n**Describe the bug**\r\nthis command doesnt work anymore as described here (https://goauthentik.io/docs/troubleshooting/ldap_source)\r\n```\r\ndocker compose run --rm server ldap_sync nxnet\r\n```\r\nit will just create a backgorund taks instead of running it in foreground!\r\n\r\n**To Reproduce**\r\n```\r\ndocker compose run --rm server ldap_sync SLUGofLDAPsource\r\n```\r\n\r\n**Expected behavior**\r\nit will run an LDAP- synchronization in the foreground and see any errors or warnings that might happen directly\r\n\r\n**Logs**\r\n{\"event\": \"Task published\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 7, \"task_id\": \"17af668f-1d9f-4732-a0eb-044c4a16beed\", \"task_name\": \"authentik.sources.ldap.tasks.ldap_sync\", \"timestamp\": \"2023-07-20T15:10:12.575247\"}\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.6.1\r\n- Deployment: docker compose\r\n\r\n\n", "before_files": [{"content": "\"\"\"LDAP Sync\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.tasks import ldap_sync_single\n\nLOGGER = get_logger()\n\n\nclass Command(BaseCommand):\n \"\"\"Run sync for an LDAP Source\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"source_slugs\", nargs=\"+\", type=str)\n\n def handle(self, **options):\n for source_slug in options[\"source_slugs\"]:\n source = LDAPSource.objects.filter(slug=source_slug).first()\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n ldap_sync_single(source)\n", "path": 
"authentik/sources/ldap/management/commands/ldap_sync.py"}, {"content": "\"\"\"Sync LDAP Users into authentik\"\"\"\nfrom typing import Generator\n\nfrom django.core.exceptions import FieldError\nfrom django.db.utils import IntegrityError\nfrom ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE\n\nfrom authentik.core.models import User\nfrom authentik.events.models import Event, EventAction\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\nfrom authentik.sources.ldap.sync.vendor.freeipa import FreeIPA\nfrom authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory\n\n\nclass UserLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users into authentik\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"users\"\n\n def get_objects(self, **kwargs) -> Generator:\n return self.search_paginator(\n search_base=self.base_dn_users,\n search_filter=self._source.user_object_filter,\n search_scope=SUBTREE,\n attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],\n **kwargs,\n )\n\n def sync(self, page_data: list) -> int:\n \"\"\"Iterate over all LDAP Users and create authentik_core.User instances\"\"\"\n if not self._source.sync_users:\n self.message(\"User syncing is disabled for this Source\")\n return -1\n user_count = 0\n for user in page_data:\n if \"attributes\" not in user:\n continue\n attributes = user.get(\"attributes\", {})\n user_dn = self._flatten(user.get(\"entryDN\", user.get(\"dn\")))\n if self._source.object_uniqueness_field not in attributes:\n self.message(\n f\"Cannot find uniqueness field in attributes: '{user_dn}'\",\n attributes=attributes.keys(),\n dn=user_dn,\n )\n continue\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n self._logger.debug(\"Creating user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n User, {f\"attributes__{LDAP_UNIQUENESS}\": uniq}, defaults\n )\n except (IntegrityError, FieldError, TypeError, AttributeError) as exc:\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=(\n f\"Failed to create user: {str(exc)} \"\n \"To merge new user with existing user, set the user's \"\n f\"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'\"\n ),\n source=self._source,\n dn=user_dn,\n ).save()\n else:\n self._logger.debug(\"Synced User\", user=ak_user.username, created=created)\n user_count += 1\n MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)\n FreeIPA(self._source).sync(attributes, ak_user, created)\n return user_count\n", "path": "authentik/sources/ldap/sync/users.py"}]} | 1,848 | 453 |
gh_patches_debug_51991 | rasdani/github-patches | git_diff | pydantic__pydantic-391 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include a PEP 561 marker file
# Feature Request
Hi,
The new version 0.19 has improved typing support, which is great, but it looks like it doesn't work out of the box. I had similar problems to those described in #245, but after adding the installation to MYPYPATH it works fine.
I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading the mypy docs, it looks like there is a downside: `zip_safe` must be disabled for this.
https://mypy.readthedocs.io/en/latest/installed_packages.html
https://www.python.org/dev/peps/pep-0561/
</issue>
<code>
[start of setup.py]
1 import re
2 from importlib.machinery import SourceFileLoader
3 from pathlib import Path
4 from setuptools import setup
5
6
7 class ReplaceLinks:
8 def __init__(self):
9 self.links = set()
10
11 def replace_issues(self, m):
12 id = m.group(1)
13 self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')
14 return f'`#{id}`_'
15
16 def replace_users(self, m):
17 name = m.group(2)
18 self.links.add(f'.. _@{name}: https://github.com/{name}')
19 return f'{m.group(1)}`@{name}`_'
20
21 def extra(self):
22 return '\n\n' + '\n'.join(self.links) + '\n'
23
24
25 description = 'Data validation and settings management using python 3.6 type hinting'
26 THIS_DIR = Path(__file__).resolve().parent
27 try:
28 history = THIS_DIR.joinpath('HISTORY.rst').read_text()
29
30 replacer = ReplaceLinks()
31 history = re.sub(r'#(\d+)', replacer.replace_issues, history)
32 history = re.sub(r'( +)@(\w+)', replacer.replace_users, history, flags=re.I)
33 history = re.sub(r'@@', '@', history)
34 history += replacer.extra()
35
36 long_description = '\n\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])
37 except FileNotFoundError:
38 long_description = description + '.\n\nSee https://pydantic-docs.helpmanual.io/ for documentation.'
39
40 # avoid loading the package before requirements are installed:
41 version = SourceFileLoader('version', 'pydantic/version.py').load_module()
42
43 setup(
44 name='pydantic',
45 version=str(version.VERSION),
46 description=description,
47 long_description=long_description,
48 classifiers=[
49 'Development Status :: 5 - Production/Stable',
50 'Programming Language :: Python',
51 'Programming Language :: Python :: 3',
52 'Programming Language :: Python :: 3 :: Only',
53 'Programming Language :: Python :: 3.6',
54 'Programming Language :: Python :: 3.7',
55 'Intended Audience :: Developers',
56 'Intended Audience :: Information Technology',
57 'Intended Audience :: System Administrators',
58 'License :: OSI Approved :: MIT License',
59 'Operating System :: Unix',
60 'Operating System :: POSIX :: Linux',
61 'Environment :: Console',
62 'Environment :: MacOS X',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 'Topic :: Internet',
65 ],
66 author='Samuel Colvin',
67 author_email='[email protected]',
68 url='https://github.com/samuelcolvin/pydantic',
69 license='MIT',
70 packages=['pydantic'],
71 python_requires='>=3.6',
72 zip_safe=True,
73 install_requires=[
74 'dataclasses>=0.6;python_version<"3.7"'
75 ],
76 extras_require={
77 'ujson': ['ujson>=1.35'],
78 'email': ['email-validator>=1.0.3'],
79 }
80 )
81
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,8 +68,9 @@
url='https://github.com/samuelcolvin/pydantic',
license='MIT',
packages=['pydantic'],
+ package_data={'pydantic': ['py.typed']},
python_requires='>=3.6',
- zip_safe=True,
+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html
install_requires=[
'dataclasses>=0.6;python_version<"3.7"'
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,8 +68,9 @@\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n+ package_data={'pydantic': ['py.typed']},\n python_requires='>=3.6',\n- zip_safe=True,\n+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n", "issue": "Include a PEP 561 marker file\n# Feature Request\r\n\r\nHi,\r\n\r\nThe new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.\r\n\r\nI think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.\r\n\r\nhttps://mypy.readthedocs.io/en/latest/installed_packages.html\r\nhttps://www.python.org/dev/peps/pep-0561/\nInclude a PEP 561 marker file\n# Feature Request\r\n\r\nHi,\r\n\r\nThe new version 0.19 has improved typing support which is great, but looks like it doesn't work out of the box. I had similar problems as described in #245 , but after adding the installation to MYPYPATH it works fine.\r\n\r\nI think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading mypy docs looks like there is a downside that `zip_safe` must be disabled for this.\r\n\r\nhttps://mypy.readthedocs.io/en/latest/installed_packages.html\r\nhttps://www.python.org/dev/peps/pep-0561/\n", "before_files": [{"content": "import re\nfrom importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\n\nclass ReplaceLinks:\n def __init__(self):\n self.links = set()\n\n def replace_issues(self, m):\n id = m.group(1)\n self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')\n return f'`#{id}`_'\n\n def replace_users(self, m):\n name = m.group(2)\n self.links.add(f'.. 
_@{name}: https://github.com/{name}')\n return f'{m.group(1)}`@{name}`_'\n\n def extra(self):\n return '\\n\\n' + '\\n'.join(self.links) + '\\n'\n\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n history = THIS_DIR.joinpath('HISTORY.rst').read_text()\n\n replacer = ReplaceLinks()\n history = re.sub(r'#(\\d+)', replacer.replace_issues, history)\n history = re.sub(r'( +)@(\\w+)', replacer.replace_users, history, flags=re.I)\n history = re.sub(r'@@', '@', history)\n history += replacer.extra()\n\n long_description = '\\n\\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n python_requires='>=3.6',\n zip_safe=True,\n install_requires=[\n 'dataclasses>=0.6;python_version<\"3.7\"'\n ],\n extras_require={\n 'ujson': ['ujson>=1.35'],\n 'email': ['email-validator>=1.0.3'],\n }\n)\n", "path": "setup.py"}]} | 1,677 | 135 |
gh_patches_debug_38208 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-772 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Restructure Numbers provider
# Feature request
## Thesis
While I was implementing the ``matrix()`` function in the ``Numbers`` provider, I was thinking about some changes that we could make in this provider:
- Add a function ``complex(start, end, length)`` that returns a random array of complex numbers
- Make the API uniform, so that every function in the ``Numbers`` provider has the arguments ``start, end, length`` (where possible). Maybe in the ``complex()`` function we can add ``start_real, end_real, start_imaginary, end_imaginary``?
- Remove the function ``rating()`` and add an argument ``decimal_digits`` in the function ``floats()`` to specify the number of decimal digits to keep.
## Reasoning
I think these changes would make the provider more uniform and easy to use.
</issue>
<code>
[start of mimesis/providers/numbers.py]
1 # -*- coding: utf-8 -*-
2
3 """Provides data related to numbers."""
4
5 from typing import List, Union
6
7 from mimesis.providers.base import BaseProvider
8
9 __all__ = ['Numbers']
10
11
12 class Numbers(BaseProvider):
13 """Class for generating numbers."""
14
15 class Meta:
16 """Class for metadata."""
17
18 name = 'numbers'
19
20 def floats(self, n: int = 2) -> List[float]:
21 """Generate a list of random float numbers.
22
23 :param n: Raise 10 to the 'n' power.
24 :return: The list of floating-point numbers.
25 """
26 nums = [self.random.random()
27 for _ in range(10 ** int(n))]
28 return nums
29
30 def integers(self, start: int = 0, end: int = 10,
31 length: int = 10) -> List[int]:
32 """Generate a list of random integers.
33
34 Integers can be negative or positive numbers.
35 .. note: You can use both positive and negative numbers.
36
37 :param start: Start.
38 :param end: End.
39 :param length: Length of list.
40 :return: List of integers.
41
42 :Example:
43 [-20, -19, -18, -17]
44 """
45 return self.random.randints(
46 length, start, end)
47
48 @staticmethod
49 def primes(start: int = 1, end: int = 999) -> List[int]:
50 """Generate a list of prime numbers.
51
52 :param start: First value of range.
53 :param end: Last value of range.
54 :return: A list of prime numbers from start to end.
55 """
56 # TODO: It should generate random primes with passed length.
57 sieve_size = (end // 2 - 1) if end % 2 == 0 else (end // 2)
58 sieve = [True] * sieve_size
59
60 primes = [] # list of primes
61 # add 2 to the list if it's in the given range
62 if end >= 2:
63 primes.append(2)
64 for i in range(sieve_size):
65 if sieve[i]:
66 value_at_i = i * 2 + 3
67 primes.append(value_at_i)
68 for j in range(i, sieve_size, value_at_i):
69 sieve[j] = False
70
71 chop_index = 0
72 for i in range(len(primes)):
73 if primes[i] >= start:
74 chop_index = i
75 break
76 return primes[chop_index:]
77
78 def digit(self, to_bin: bool = False) -> Union[str, int]:
79 """Get a random digit.
80
81 :param to_bin: If True then convert to binary.
82 :return: Digit.
83
84 :Example:
85 4.
86 """
87 digit = self.random.randint(0, 9)
88
89 if to_bin:
90 return bin(digit)
91
92 return digit
93
94 def between(self, minimum: int = 1, maximum: int = 1000) -> int:
95 """Generate a random number between minimum and maximum.
96
97 :param minimum: Minimum of range.
98 :param maximum: Maximum of range.
99 :return: Number.
100 """
101 return self.random.randint(minimum, maximum)
102
103 def rating(self, maximum: float = 5.0) -> float:
104 """Generate a random rating for something.
105
106 :param maximum: Maximum value (default is 5.0).
107 :return: Rating.
108
109 :Example:
110 4.7
111 """
112 return self.random.uniform(0, maximum, 1)
113
[end of mimesis/providers/numbers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mimesis/providers/numbers.py b/mimesis/providers/numbers.py
--- a/mimesis/providers/numbers.py
+++ b/mimesis/providers/numbers.py
@@ -17,18 +17,20 @@
name = 'numbers'
- def floats(self, n: int = 2) -> List[float]:
+ def floats(self, start: float = 0, end: float = 1, n: int = 10,
+ rounding: int = 15) -> List[float]:
"""Generate a list of random float numbers.
- :param n: Raise 10 to the 'n' power.
+ :param start: Start range.
+ :param end: End range.
+ :param n: Length of the list.
+ :param rounding: Max number of decimal digits.
:return: The list of floating-point numbers.
"""
- nums = [self.random.random()
- for _ in range(10 ** int(n))]
- return nums
+ return [self.random.uniform(start, end, rounding) for _ in range(n)]
def integers(self, start: int = 0, end: int = 10,
- length: int = 10) -> List[int]:
+ n: int = 10) -> List[int]:
"""Generate a list of random integers.
Integers can be negative or positive numbers.
@@ -36,14 +38,33 @@
:param start: Start.
:param end: End.
- :param length: Length of list.
+ :param n: Length of list.
:return: List of integers.
:Example:
[-20, -19, -18, -17]
"""
- return self.random.randints(
- length, start, end)
+ return self.random.randints(n, start, end)
+
+ def complexes(self, start_real: float = 0, end_real: float = 1,
+ start_imag: float = 0, end_imag: float = 1,
+ rounding_real: int = 15, rounding_imag: int = 15,
+ n: int = 10) -> List[complex]:
+ """Generate a list of random complex numbers.
+
+ :param start_real: Start real range.
+ :param end_real: End real range.
+ :param start_imag: Start imaginary range.
+ :param end_imag: End imaginary range.
+ :param rounding_real: Rounding real part.
+ :param rounding_imag: Roungind imaginary part.
+ :param n: Length of the list.
+ :return: A list of random complex numbers.
+ """
+ return [
+ complex(self.random.uniform(start_real, end_real, rounding_real),
+ self.random.uniform(start_imag, end_imag, rounding_imag))
+ for _ in range(n)]
@staticmethod
def primes(start: int = 1, end: int = 999) -> List[int]:
@@ -99,14 +120,3 @@
:return: Number.
"""
return self.random.randint(minimum, maximum)
-
- def rating(self, maximum: float = 5.0) -> float:
- """Generate a random rating for something.
-
- :param maximum: Maximum value (default is 5.0).
- :return: Rating.
-
- :Example:
- 4.7
- """
- return self.random.uniform(0, maximum, 1)
| {"golden_diff": "diff --git a/mimesis/providers/numbers.py b/mimesis/providers/numbers.py\n--- a/mimesis/providers/numbers.py\n+++ b/mimesis/providers/numbers.py\n@@ -17,18 +17,20 @@\n \n name = 'numbers'\n \n- def floats(self, n: int = 2) -> List[float]:\n+ def floats(self, start: float = 0, end: float = 1, n: int = 10,\n+ rounding: int = 15) -> List[float]:\n \"\"\"Generate a list of random float numbers.\n \n- :param n: Raise 10 to the 'n' power.\n+ :param start: Start range.\n+ :param end: End range.\n+ :param n: Length of the list.\n+ :param rounding: Max number of decimal digits.\n :return: The list of floating-point numbers.\n \"\"\"\n- nums = [self.random.random()\n- for _ in range(10 ** int(n))]\n- return nums\n+ return [self.random.uniform(start, end, rounding) for _ in range(n)]\n \n def integers(self, start: int = 0, end: int = 10,\n- length: int = 10) -> List[int]:\n+ n: int = 10) -> List[int]:\n \"\"\"Generate a list of random integers.\n \n Integers can be negative or positive numbers.\n@@ -36,14 +38,33 @@\n \n :param start: Start.\n :param end: End.\n- :param length: Length of list.\n+ :param n: Length of list.\n :return: List of integers.\n \n :Example:\n [-20, -19, -18, -17]\n \"\"\"\n- return self.random.randints(\n- length, start, end)\n+ return self.random.randints(n, start, end)\n+\n+ def complexes(self, start_real: float = 0, end_real: float = 1,\n+ start_imag: float = 0, end_imag: float = 1,\n+ rounding_real: int = 15, rounding_imag: int = 15,\n+ n: int = 10) -> List[complex]:\n+ \"\"\"Generate a list of random complex numbers.\n+\n+ :param start_real: Start real range.\n+ :param end_real: End real range.\n+ :param start_imag: Start imaginary range.\n+ :param end_imag: End imaginary range.\n+ :param rounding_real: Rounding real part.\n+ :param rounding_imag: Roungind imaginary part.\n+ :param n: Length of the list.\n+ :return: A list of random complex numbers.\n+ \"\"\"\n+ return [\n+ complex(self.random.uniform(start_real, end_real, rounding_real),\n+ self.random.uniform(start_imag, end_imag, rounding_imag))\n+ for _ in range(n)]\n \n @staticmethod\n def primes(start: int = 1, end: int = 999) -> List[int]:\n@@ -99,14 +120,3 @@\n :return: Number.\n \"\"\"\n return self.random.randint(minimum, maximum)\n-\n- def rating(self, maximum: float = 5.0) -> float:\n- \"\"\"Generate a random rating for something.\n-\n- :param maximum: Maximum value (default is 5.0).\n- :return: Rating.\n-\n- :Example:\n- 4.7\n- \"\"\"\n- return self.random.uniform(0, maximum, 1)\n", "issue": "Restructure Numbers provider\n# Feature request\r\n\r\n## Thesis\r\n\r\nWhile I was implementing the ``matrix()`` function in the ``Numbers`` provider and I was thinking about some changes that we could make in this provider:\r\n\r\n- Add a function ``complex(start, end, length)`` that return a random array of complex numbers\r\n- Make the API uniform, so that every function in the ``Numbers`` provider has the arguments ``start, end, length`` (where possible). 
Maybe in the ``complex()`` function we can add ``start_real, end_real, start_imaginary, end_imaginary`` ?\r\n- Remove the function ``ranting()`` and add an argument ``decimal_digits`` in the function ``floats()`` to specify the number of decimal digits to keep.\r\n\r\n## Reasoning\r\n\r\nI think these changes would make the provider more uniform and easy to use.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides data related to numbers.\"\"\"\n\nfrom typing import List, Union\n\nfrom mimesis.providers.base import BaseProvider\n\n__all__ = ['Numbers']\n\n\nclass Numbers(BaseProvider):\n \"\"\"Class for generating numbers.\"\"\"\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'numbers'\n\n def floats(self, n: int = 2) -> List[float]:\n \"\"\"Generate a list of random float numbers.\n\n :param n: Raise 10 to the 'n' power.\n :return: The list of floating-point numbers.\n \"\"\"\n nums = [self.random.random()\n for _ in range(10 ** int(n))]\n return nums\n\n def integers(self, start: int = 0, end: int = 10,\n length: int = 10) -> List[int]:\n \"\"\"Generate a list of random integers.\n\n Integers can be negative or positive numbers.\n .. note: You can use both positive and negative numbers.\n\n :param start: Start.\n :param end: End.\n :param length: Length of list.\n :return: List of integers.\n\n :Example:\n [-20, -19, -18, -17]\n \"\"\"\n return self.random.randints(\n length, start, end)\n\n @staticmethod\n def primes(start: int = 1, end: int = 999) -> List[int]:\n \"\"\"Generate a list of prime numbers.\n\n :param start: First value of range.\n :param end: Last value of range.\n :return: A list of prime numbers from start to end.\n \"\"\"\n # TODO: It should generate random primes with passed length.\n sieve_size = (end // 2 - 1) if end % 2 == 0 else (end // 2)\n sieve = [True] * sieve_size\n\n primes = [] # list of primes\n # add 2 to the list if it's in the given range\n if end >= 2:\n primes.append(2)\n for i in range(sieve_size):\n if sieve[i]:\n value_at_i = i * 2 + 3\n primes.append(value_at_i)\n for j in range(i, sieve_size, value_at_i):\n sieve[j] = False\n\n chop_index = 0\n for i in range(len(primes)):\n if primes[i] >= start:\n chop_index = i\n break\n return primes[chop_index:]\n\n def digit(self, to_bin: bool = False) -> Union[str, int]:\n \"\"\"Get a random digit.\n\n :param to_bin: If True then convert to binary.\n :return: Digit.\n\n :Example:\n 4.\n \"\"\"\n digit = self.random.randint(0, 9)\n\n if to_bin:\n return bin(digit)\n\n return digit\n\n def between(self, minimum: int = 1, maximum: int = 1000) -> int:\n \"\"\"Generate a random number between minimum and maximum.\n\n :param minimum: Minimum of range.\n :param maximum: Maximum of range.\n :return: Number.\n \"\"\"\n return self.random.randint(minimum, maximum)\n\n def rating(self, maximum: float = 5.0) -> float:\n \"\"\"Generate a random rating for something.\n\n :param maximum: Maximum value (default is 5.0).\n :return: Rating.\n\n :Example:\n 4.7\n \"\"\"\n return self.random.uniform(0, maximum, 1)\n", "path": "mimesis/providers/numbers.py"}]} | 1,740 | 796 |
gh_patches_debug_3599 | rasdani/github-patches | git_diff | certbot__certbot-606 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nginx plugin destroys config
I have a config file called webp.conf in /etc/nginx/conf.d/ which works great.
After running letsencrypt -d example.org run, the webp.conf is broken because it's missing a closing }
https://pastebin.mozilla.org/8837365
Line 18 gets removed.
</issue>
<code>
[start of letsencrypt-nginx/letsencrypt_nginx/nginxparser.py]
1 """Very low-level nginx config parser based on pyparsing."""
2 import string
3
4 from pyparsing import (
5 Literal, White, Word, alphanums, CharsNotIn, Forward, Group,
6 Optional, OneOrMore, Regex, ZeroOrMore)
7 from pyparsing import stringEnd
8 from pyparsing import restOfLine
9
10 class RawNginxParser(object):
11 # pylint: disable=expression-not-assigned
12 """A class that parses nginx configuration with pyparsing."""
13
14 # constants
15 left_bracket = Literal("{").suppress()
16 right_bracket = Literal("}").suppress()
17 semicolon = Literal(";").suppress()
18 space = White().suppress()
19 key = Word(alphanums + "_/")
20 # Matches anything that is not a special character AND any chars in single
21 # or double quotes
22 value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+")
23 location = CharsNotIn("{};," + string.whitespace)
24 # modifier for location uri [ = | ~ | ~* | ^~ ]
25 modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
26
27 # rules
28 comment = Literal('#') + restOfLine()
29 assignment = (key + Optional(space + value, default=None) + semicolon)
30 location_statement = Optional(space + modifier) + Optional(space + location)
31 if_statement = Literal("if") + space + Regex(r"\(.+\)") + space
32 block = Forward()
33
34 block << Group(
35 (Group(key + location_statement) ^ Group(if_statement))
36 + left_bracket
37 + Group(ZeroOrMore(Group(comment | assignment) | block))
38 + right_bracket)
39
40 script = OneOrMore(Group(comment | assignment) | block) + stringEnd
41
42 def __init__(self, source):
43 self.source = source
44
45 def parse(self):
46 """Returns the parsed tree."""
47 return self.script.parseString(self.source)
48
49 def as_list(self):
50 """Returns the parsed tree as a list."""
51 return self.parse().asList()
52
53
54 class RawNginxDumper(object):
55 # pylint: disable=too-few-public-methods
56 """A class that dumps nginx configuration from the provided tree."""
57 def __init__(self, blocks, indentation=4):
58 self.blocks = blocks
59 self.indentation = indentation
60
61 def __iter__(self, blocks=None, current_indent=0, spacer=' '):
62 """Iterates the dumped nginx content."""
63 blocks = blocks or self.blocks
64 for key, values in blocks:
65 indentation = spacer * current_indent
66 if isinstance(key, list):
67 if current_indent:
68 yield ''
69 yield indentation + spacer.join(key) + ' {'
70
71 for parameter in values:
72 dumped = self.__iter__([parameter], current_indent + self.indentation)
73 for line in dumped:
74 yield line
75
76 yield indentation + '}'
77 else:
78 if key == '#':
79 yield spacer * current_indent + key + values
80 else:
81 if values is None:
82 yield spacer * current_indent + key + ';'
83 else:
84 yield spacer * current_indent + key + spacer + values + ';'
85
86 def as_string(self):
87 """Return the parsed block as a string."""
88 return '\n'.join(self) + '\n'
89
90
91 # Shortcut functions to respect Python's serialization interface
92 # (like pyyaml, picker or json)
93
94 def loads(source):
95 """Parses from a string.
96
97 :param str souce: The string to parse
98 :returns: The parsed tree
99 :rtype: list
100
101 """
102 return RawNginxParser(source).as_list()
103
104
105 def load(_file):
106 """Parses from a file.
107
108 :param file _file: The file to parse
109 :returns: The parsed tree
110 :rtype: list
111
112 """
113 return loads(_file.read())
114
115
116 def dumps(blocks, indentation=4):
117 """Dump to a string.
118
119 :param list block: The parsed tree
120 :param int indentation: The number of spaces to indent
121 :rtype: str
122
123 """
124 return RawNginxDumper(blocks, indentation).as_string()
125
126
127 def dump(blocks, _file, indentation=4):
128 """Dump to a file.
129
130 :param list block: The parsed tree
131 :param file _file: The file to dump to
132 :param int indentation: The number of spaces to indent
133 :rtype: NoneType
134
135 """
136 return _file.write(dumps(blocks, indentation))
137
[end of letsencrypt-nginx/letsencrypt_nginx/nginxparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
--- a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
+++ b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
@@ -37,7 +37,7 @@
+ Group(ZeroOrMore(Group(comment | assignment) | block))
+ right_bracket)
- script = OneOrMore(Group(comment | assignment) | block) + stringEnd
+ script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd
def __init__(self, source):
self.source = source
| {"golden_diff": "diff --git a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n--- a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n+++ b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n@@ -37,7 +37,7 @@\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n \n- script = OneOrMore(Group(comment | assignment) | block) + stringEnd\n+ script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd\n \n def __init__(self, source):\n self.source = source\n", "issue": "nginx plugin destroys config\nI have a config file called webp.conf in /etc/nginx/conf.d/ which works great.\nAfter running letsencrypt -d example.org run the webp.conf is broken because it's missing a closing }\nhttps://pastebin.mozilla.org/8837365\nLine 18 gets removed.\n\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n left_bracket = Literal(\"{\").suppress()\n right_bracket = Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n space = White().suppress()\n key = Word(alphanums + \"_/\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = Literal('#') + restOfLine()\n assignment = (key + Optional(space + value, default=None) + semicolon)\n location_statement = Optional(space + modifier) + Optional(space + location)\n if_statement = Literal(\"if\") + space + Regex(r\"\\(.+\\)\") + space\n block = Forward()\n\n block << Group(\n (Group(key + location_statement) ^ Group(if_statement))\n + left_bracket\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n\n script = OneOrMore(Group(comment | assignment) | block) + stringEnd\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks, indentation=4):\n self.blocks = blocks\n self.indentation = indentation\n\n def __iter__(self, blocks=None, current_indent=0, spacer=' '):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for key, values in blocks:\n indentation = spacer * current_indent\n if isinstance(key, list):\n if current_indent:\n yield ''\n yield indentation + spacer.join(key) + ' {'\n\n for parameter in values:\n dumped = self.__iter__([parameter], current_indent + self.indentation)\n for line in dumped:\n yield line\n\n yield indentation + '}'\n else:\n if key == '#':\n yield spacer * current_indent + key + values\n else:\n if values is None:\n yield spacer * current_indent + key + ';'\n 
else:\n yield spacer * current_indent + key + spacer + values + ';'\n\n def as_string(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return '\\n'.join(self) + '\\n'\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return RawNginxParser(source).as_list()\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks, indentation=4):\n \"\"\"Dump to a string.\n\n :param list block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return RawNginxDumper(blocks, indentation).as_string()\n\n\ndef dump(blocks, _file, indentation=4):\n \"\"\"Dump to a file.\n\n :param list block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks, indentation))\n", "path": "letsencrypt-nginx/letsencrypt_nginx/nginxparser.py"}]} | 1,922 | 152 |
gh_patches_debug_16124 | rasdani/github-patches | git_diff | getnikola__nikola-3159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Responsive youtube embed
I'm trying to set an embedded youtube link to 100% width in a .rst file. Is this possible?
I've tried:
```
.. youtube:: 3XsQCkF1SrE
:align: center
:width: 100%
```
</issue>
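One way to read the report above: the directive validates `width` with `directives.positive_int`, so a value such as `100%` is rejected before the embed is ever rendered. Below is a minimal sketch of a validator that would accept either a bare positive integer or a percentage string; the helper name `length_or_percentage` and its use in `option_spec` are illustrative assumptions, not code from Nikola.

```python
from docutils.parsers.rst import directives

def length_or_percentage(argument):
    """Accept a bare positive integer ("600") or a percentage ("100%")."""
    value = argument.strip()
    if value.endswith("%"):
        int(value[:-1])  # validate the numeric part; raises ValueError otherwise
        return value
    return directives.positive_int(value)

# Hypothetical option_spec using the validator above:
option_spec = {
    "width": length_or_percentage,
    "height": length_or_percentage,
}
```

With something like this, `:width: 100%` would be passed through to the generated iframe instead of triggering a directive error.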
<code>
[start of nikola/plugins/compile/rest/youtube.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2018 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """YouTube directive for reStructuredText."""
28
29 from docutils import nodes
30 from docutils.parsers.rst import Directive, directives
31 from nikola.plugins.compile.rest import _align_choice, _align_options_base
32
33 from nikola.plugin_categories import RestExtension
34
35
36 class Plugin(RestExtension):
37 """Plugin for the youtube directive."""
38
39 name = "rest_youtube"
40
41 def set_site(self, site):
42 """Set Nikola site."""
43 self.site = site
44 directives.register_directive('youtube', Youtube)
45 return super(Plugin, self).set_site(site)
46
47
48 CODE = """\
49 <div class="youtube-video{align}">
50 <iframe width="{width}" height="{height}"
51 src="https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent"
52 frameborder="0" allow="encrypted-media" allowfullscreen
53 ></iframe>
54 </div>"""
55
56
57 class Youtube(Directive):
58 """reST extension for inserting youtube embedded videos.
59
60 Usage:
61 .. youtube:: lyViVmaBQDg
62 :height: 400
63 :width: 600
64
65 """
66
67 has_content = True
68 required_arguments = 1
69 option_spec = {
70 "width": directives.positive_int,
71 "height": directives.positive_int,
72 "align": _align_choice
73 }
74
75 def run(self):
76 """Run the youtube directive."""
77 self.check_content()
78 options = {
79 'yid': self.arguments[0],
80 'width': 560,
81 'height': 315,
82 }
83 options.update(self.options)
84 if self.options.get('align') in _align_options_base:
85 options['align'] = ' align-' + self.options['align']
86 else:
87 options['align'] = ''
88 return [nodes.raw('', CODE.format(**options), format='html')]
89
90 def check_content(self):
91 """Check if content exists."""
92 if self.content: # pragma: no cover
93 raise self.warning("This directive does not accept content. The "
94 "'key=value' format for options is deprecated, "
95 "use ':key: value' instead")
96
[end of nikola/plugins/compile/rest/youtube.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/compile/rest/youtube.py b/nikola/plugins/compile/rest/youtube.py
--- a/nikola/plugins/compile/rest/youtube.py
+++ b/nikola/plugins/compile/rest/youtube.py
@@ -67,8 +67,8 @@
has_content = True
required_arguments = 1
option_spec = {
- "width": directives.positive_int,
- "height": directives.positive_int,
+ "width": directives.unchanged,
+ "height": directives.unchanged,
"align": _align_choice
}
@@ -80,7 +80,7 @@
'width': 560,
'height': 315,
}
- options.update(self.options)
+ options.update({k: v for k, v in self.options.items() if v})
if self.options.get('align') in _align_options_base:
options['align'] = ' align-' + self.options['align']
else:
| {"golden_diff": "diff --git a/nikola/plugins/compile/rest/youtube.py b/nikola/plugins/compile/rest/youtube.py\n--- a/nikola/plugins/compile/rest/youtube.py\n+++ b/nikola/plugins/compile/rest/youtube.py\n@@ -67,8 +67,8 @@\n has_content = True\n required_arguments = 1\n option_spec = {\n- \"width\": directives.positive_int,\n- \"height\": directives.positive_int,\n+ \"width\": directives.unchanged,\n+ \"height\": directives.unchanged,\n \"align\": _align_choice\n }\n \n@@ -80,7 +80,7 @@\n 'width': 560,\n 'height': 315,\n }\n- options.update(self.options)\n+ options.update({k: v for k, v in self.options.items() if v})\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n", "issue": "Responsive youtube embed\nI'm trying to set an embedded youtube link to 100% width in a .rst file. Is this possible?\r\n\r\nI've tried:\r\n\r\n```\r\n.. youtube:: 3XsQCkF1SrE\r\n :align: center\r\n :width: 100%\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"YouTube directive for reStructuredText.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom nikola.plugins.compile.rest import _align_choice, _align_options_base\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for the youtube directive.\"\"\"\n\n name = \"rest_youtube\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n directives.register_directive('youtube', Youtube)\n return super(Plugin, self).set_site(site)\n\n\nCODE = \"\"\"\\\n<div class=\"youtube-video{align}\">\n<iframe width=\"{width}\" height=\"{height}\"\nsrc=\"https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent\"\nframeborder=\"0\" allow=\"encrypted-media\" allowfullscreen\n></iframe>\n</div>\"\"\"\n\n\nclass Youtube(Directive):\n \"\"\"reST extension for inserting youtube embedded videos.\n\n Usage:\n .. 
youtube:: lyViVmaBQDg\n :height: 400\n :width: 600\n\n \"\"\"\n\n has_content = True\n required_arguments = 1\n option_spec = {\n \"width\": directives.positive_int,\n \"height\": directives.positive_int,\n \"align\": _align_choice\n }\n\n def run(self):\n \"\"\"Run the youtube directive.\"\"\"\n self.check_content()\n options = {\n 'yid': self.arguments[0],\n 'width': 560,\n 'height': 315,\n }\n options.update(self.options)\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n options['align'] = ''\n return [nodes.raw('', CODE.format(**options), format='html')]\n\n def check_content(self):\n \"\"\"Check if content exists.\"\"\"\n if self.content: # pragma: no cover\n raise self.warning(\"This directive does not accept content. The \"\n \"'key=value' format for options is deprecated, \"\n \"use ':key: value' instead\")\n", "path": "nikola/plugins/compile/rest/youtube.py"}]} | 1,519 | 225 |
gh_patches_debug_334 | rasdani/github-patches | git_diff | searx__searx-2391 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SUGGESTION: Contacting the instance's maintainer(s)
Hello, so I use searx, but I personally think that there should be some way to contact the maintainer(s) of a public instance (email, for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s).
</issue>
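A request like this usually comes down to exposing one more branding constant that the web templates can render, for instance in the footer next to the existing documentation and source links. A minimal sketch against the `brand.py` listed below follows; the constant name and the placeholder address are assumptions, not anything taken from the searx repository.

```python
# searx/brand.py (sketch only; the address is a placeholder, not a real contact)
GIT_URL = 'https://github.com/searx/searx'
DOCS_URL = 'https://searx.github.io/searx'
PUBLIC_INSTANCES = 'https://searx.space'
CONTACT_URL = 'mailto:[email protected]'
```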
<code>
[start of searx/brand.py]
1 GIT_URL = 'https://github.com/searx/searx'
2 GIT_BRANCH = 'master'
3 ISSUE_URL = 'https://github.com/searx/searx/issues'
4 SEARX_URL = 'https://searx.me'
5 DOCS_URL = 'https://searx.github.io/searx'
6 PUBLIC_INSTANCES = 'https://searx.space'
7
[end of searx/brand.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/brand.py b/searx/brand.py
--- a/searx/brand.py
+++ b/searx/brand.py
@@ -4,3 +4,4 @@
SEARX_URL = 'https://searx.me'
DOCS_URL = 'https://searx.github.io/searx'
PUBLIC_INSTANCES = 'https://searx.space'
+CONTACT_URL = 'mailto:[email protected]'
| {"golden_diff": "diff --git a/searx/brand.py b/searx/brand.py\n--- a/searx/brand.py\n+++ b/searx/brand.py\n@@ -4,3 +4,4 @@\n SEARX_URL = 'https://searx.me'\n DOCS_URL = 'https://searx.github.io/searx'\n PUBLIC_INSTANCES = 'https://searx.space'\n+CONTACT_URL = 'mailto:[email protected]'\n", "issue": "SUGGESTION: Contacting the instance's maintainer(s)\nHello, so I use searx, but I personally think that there should be any way to contact the maintainer(s) of a public instance (email for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s). \r\n\n", "before_files": [{"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\n", "path": "searx/brand.py"}]} | 695 | 99 |
gh_patches_debug_5157 | rasdani/github-patches | git_diff | python__peps-2090 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"PEP numbers never change" verbiage is not in PEP 1
## Problem
The introduction to PEP 0 references PEP 1 as a source for
> PEP numbers are assigned by the PEP editors, and once assigned are never changed [1].
But PEP 1 doesn't say anything about PEP numbers never changing.
## Research
While skimming PEP 1, I found information about the PEP editor assigning a number:
> Once the PEP is ready for the repository, a PEP editor will:
Assign a PEP number (almost always just the next available number, but sometimes it's a special/joke number, like 666 or 3141). (Clarification: For Python 3, numbers in the 3000s were used for Py3k-specific proposals. But now that all new features go into Python 3 only, the process is back to using numbers in the 100s again. Remember that numbers below 100 are meta-PEPs.)
and
> The PEP editors are individuals responsible for managing the administrative and editorial aspects of the PEP workflow (e.g. assigning PEP numbers and changing their status). See PEP Editor Responsibilities & Workflow for details.
But I didn't find any reference to that number never changing.
## Proposal:
Can we change PEP 0's introduction so that the reference is specific to assigning numbers?
```
PEP numbers are assigned by the PEP editors[1], and once assigned are never changed.
```
## Link
https://github.com/python/peps/blob/40ef5625b7d42655f49090ffd2c0860ecf8d1d9f/pep0/constants.py#L22-L27
</issue>
<code>
[start of pep0/constants.py]
1 # -*- coding: utf-8 -*-
2 text_type = str
3 title_length = 55
4 author_length = 40
5 table_separator = "== ==== " + "="*title_length + " " + "="*author_length
6 column_format = (
7 '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'
8 ).format(title_length=title_length)
9
10 header = """\
11 PEP: 0
12 Title: Index of Python Enhancement Proposals (PEPs)
13 Version: N/A
14 Last-Modified: %s
15 Author: python-dev <[email protected]>
16 Status: Active
17 Type: Informational
18 Content-Type: text/x-rst
19 Created: 13-Jul-2000
20 """
21
22 intro = """\
23 This PEP contains the index of all Python Enhancement Proposals,
24 known as PEPs. PEP numbers are assigned by the PEP editors, and
25 once assigned are never changed [1_]. The version control history [2_] of
26 the PEP texts represent their historical record.
27 """
28
29 references = """\
30 .. [1] PEP 1: PEP Purpose and Guidelines
31 .. [2] View PEP history online: https://github.com/python/peps
32 """
33
34 footer = """\
35 ..
36 Local Variables:
37 mode: indented-text
38 indent-tabs-mode: nil
39 sentence-end-double-space: t
40 fill-column: 70
41 coding: utf-8
42 End:\
43 """
44
[end of pep0/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pep0/constants.py b/pep0/constants.py
--- a/pep0/constants.py
+++ b/pep0/constants.py
@@ -21,8 +21,8 @@
intro = """\
This PEP contains the index of all Python Enhancement Proposals,
-known as PEPs. PEP numbers are assigned by the PEP editors, and
-once assigned are never changed [1_]. The version control history [2_] of
+known as PEPs. PEP numbers are assigned by the PEP editors[1_], and
+once assigned are never changed. The version control history [2_] of
the PEP texts represent their historical record.
"""
| {"golden_diff": "diff --git a/pep0/constants.py b/pep0/constants.py\n--- a/pep0/constants.py\n+++ b/pep0/constants.py\n@@ -21,8 +21,8 @@\n \n intro = \"\"\"\\\n This PEP contains the index of all Python Enhancement Proposals,\n-known as PEPs. PEP numbers are assigned by the PEP editors, and\n-once assigned are never changed [1_]. The version control history [2_] of\n+known as PEPs. PEP numbers are assigned by the PEP editors[1_], and\n+once assigned are never changed. The version control history [2_] of\n the PEP texts represent their historical record.\n \"\"\"\n", "issue": "\"PEP numbers never change\" verbiage is not in PEP 1\n## Problem\r\n\r\nThe introduction to PEP 0 references PEP 1 as a source for\r\n\r\n> PEP numbers are assigned by the PEP editors, and once assigned are never changed [1].\r\n\r\nBut PEP 1 doesn't say anything about PEP numbers never changing.\r\n\r\n## Research\r\n\r\nWhile skimming PEP 1, I found information about the PEP editor assigning a number:\r\n\r\n> Once the PEP is ready for the repository, a PEP editor will:\r\nAssign a PEP number (almost always just the next available number, but sometimes it's a special/joke number, like 666 or 3141). (Clarification: For Python 3, numbers in the 3000s were used for Py3k-specific proposals. But now that all new features go into Python 3 only, the process is back to using numbers in the 100s again. Remember that numbers below 100 are meta-PEPs.)\r\n\r\nand\r\n\r\n> The PEP editors are individuals responsible for managing the administrative and editorial aspects of the PEP workflow (e.g. assigning PEP numbers and changing their status). See PEP Editor Responsibilities & Workflow for details.\r\n\r\nBut I didn't find any reference to that number never changing. \r\n\r\n## Proposal:\r\n\r\nCan we change PEP 0's introduction so that the reference is specific to assigning numbers?\r\n\r\n```\r\nPEP numbers are assigned by the PEP editors[1], and once assigned are never changed.\r\n```\r\n\r\n## Link\r\n\r\nhttps://github.com/python/peps/blob/40ef5625b7d42655f49090ffd2c0860ecf8d1d9f/pep0/constants.py#L22-L27\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\ntext_type = str\ntitle_length = 55\nauthor_length = 40\ntable_separator = \"== ==== \" + \"=\"*title_length + \" \" + \"=\"*author_length\ncolumn_format = (\n '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'\n).format(title_length=title_length)\n\nheader = \"\"\"\\\nPEP: 0\nTitle: Index of Python Enhancement Proposals (PEPs)\nVersion: N/A\nLast-Modified: %s\nAuthor: python-dev <[email protected]>\nStatus: Active\nType: Informational\nContent-Type: text/x-rst\nCreated: 13-Jul-2000\n\"\"\"\n\nintro = \"\"\"\\\nThis PEP contains the index of all Python Enhancement Proposals,\nknown as PEPs. PEP numbers are assigned by the PEP editors, and\nonce assigned are never changed [1_]. The version control history [2_] of\nthe PEP texts represent their historical record.\n\"\"\"\n\nreferences = \"\"\"\\\n.. [1] PEP 1: PEP Purpose and Guidelines\n.. [2] View PEP history online: https://github.com/python/peps\n\"\"\"\n\nfooter = \"\"\"\f\\\n..\n Local Variables:\n mode: indented-text\n indent-tabs-mode: nil\n sentence-end-double-space: t\n fill-column: 70\n coding: utf-8\n End:\\\n\"\"\"\n", "path": "pep0/constants.py"}]} | 1,330 | 158 |
gh_patches_debug_13686 | rasdani/github-patches | git_diff | cobbler__cobbler-3649 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SELinux issue when service is restarted
This issue was initially reported at
https://bugzilla.redhat.com/show_bug.cgi?id=1754430
There is a SELinux denial when the cobblerd service is restarted because of the permissions of the web.ss and other webui_sessions files.
I'm not sure whether this is 3.0.x only or also exists in 2.8.x, but we still need to understand why cobblerd (uid root) tries to read these files...
Of course it can probably be fixed by using 640 permissions on these files (to be tested), but that only works around the problem.
</issue>
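To make the permission idea above concrete, here is a rough sketch of how the session-secret file could be written with tighter permissions while keeping root as the owner; the 0o640 mode and the web server group handling are illustrative assumptions, not the project's agreed fix.

```python
import binascii
import os
import pwd

def regen_ss_file(path="/var/lib/cobbler/web.ss", http_user="apache"):
    with open("/dev/urandom", "rb") as fd:
        data = fd.read(512)

    # Create the file as 0o640: owner (root) read/write, group read, nothing for others.
    fd_num = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o640)
    with os.fdopen(fd_num, "wb") as out:
        out.write(binascii.hexlify(data))

    # Keep root as the owner; give the web server's primary group read access.
    # (Requires running as root, which matches how cobblerd is started.)
    os.chown(path, 0, pwd.getpwnam(http_user).pw_gid)
```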
<code>
[start of cobbler/cobblerd.py]
1 """
2 Cobbler daemon for logging remote syslog traffic during automatic installation
3
4 Copyright 2007-2009, Red Hat, Inc and Others
5 Michael DeHaan <michael.dehaan AT gmail>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301 USA
21 """
22
23 import binascii
24 import os
25 import pwd
26 import time
27
28 from cobbler import remote
29 from cobbler import utils
30 from cobbler.api import CobblerAPI
31
32
33 def core(cobbler_api: CobblerAPI):
34 """
35 Starts Cobbler.
36
37 :param cobbler_api: The cobbler_api instance which is used for this method.
38 """
39 settings = cobbler_api.settings()
40 xmlrpc_port = settings.xmlrpc_port
41
42 regen_ss_file()
43 do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)
44
45
46 def regen_ss_file():
47 """
48 This is only used for Kerberos auth at the moment. It identifies XMLRPC requests from Apache that have already been
49 cleared by Kerberos.
50 """
51 ssfile = "/var/lib/cobbler/web.ss"
52 with open("/dev/urandom", 'rb') as fd:
53 data = fd.read(512)
54
55 with open(ssfile, 'wb', 0o660) as fd:
56 fd.write(binascii.hexlify(data))
57
58 http_user = "apache"
59 family = utils.get_family()
60 if family == "debian":
61 http_user = "www-data"
62 elif family == "suse":
63 http_user = "wwwrun"
64 os.lchown("/var/lib/cobbler/web.ss", pwd.getpwnam(http_user)[2], -1)
65
66
67 def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):
68 """
69 This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.
70
71 :param cobbler_api: The cobbler_api instance which is used for this method.
72 :param settings: The Cobbler settings instance which is used for this method.
73 :param port: The port where the xmlrpc api should run on.
74 """
75 xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)
76 server = remote.CobblerXMLRPCServer(('127.0.0.1', port))
77 server.logRequests = 0 # don't print stuff
78 xinterface.logger.debug("XMLRPC running on %s" % port)
79 server.register_instance(xinterface)
80
81 while True:
82 try:
83 print("SERVING!")
84 server.serve_forever()
85 except IOError:
86 # interrupted? try to serve again
87 time.sleep(0.5)
88
89
90 if __name__ == "__main__":
91 core(CobblerAPI())
92
[end of cobbler/cobblerd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/cobblerd.py b/cobbler/cobblerd.py
--- a/cobbler/cobblerd.py
+++ b/cobbler/cobblerd.py
@@ -52,7 +52,7 @@
with open("/dev/urandom", 'rb') as fd:
data = fd.read(512)
- with open(ssfile, 'wb', 0o660) as fd:
+ with open(ssfile, 'wb', 0o640) as fd:
fd.write(binascii.hexlify(data))
http_user = "apache"
@@ -61,7 +61,7 @@
http_user = "www-data"
elif family == "suse":
http_user = "wwwrun"
- os.lchown("/var/lib/cobbler/web.ss", pwd.getpwnam(http_user)[2], -1)
+ os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])
def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):
| {"golden_diff": "diff --git a/cobbler/cobblerd.py b/cobbler/cobblerd.py\n--- a/cobbler/cobblerd.py\n+++ b/cobbler/cobblerd.py\n@@ -52,7 +52,7 @@\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n \n- with open(ssfile, 'wb', 0o660) as fd:\n+ with open(ssfile, 'wb', 0o640) as fd:\n fd.write(binascii.hexlify(data))\n \n http_user = \"apache\"\n@@ -61,7 +61,7 @@\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n- os.lchown(\"/var/lib/cobbler/web.ss\", pwd.getpwnam(http_user)[2], -1)\n+ os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])\n \n \n def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n", "issue": "SELinux issue when service is restarted\nThis issue was initially reported at\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=1754430\r\n\r\nThere is a SELinux denial when the cobblerd service is restarted because of the permissions of the web.ss and others webui_sessions files.\r\n\r\nI'm not sure to understand if this is 3.0.x only or also only exists in 2.8.x, but for me there is a need to understand why cobblerd (uid root) tries to read theses files...\r\nOf course it can probably be fixed by using 640 perm on theses files. (to be tested) but it just workaround the problem.\n", "before_files": [{"content": "\"\"\"\nCobbler daemon for logging remote syslog traffic during automatic installation\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport binascii\nimport os\nimport pwd\nimport time\n\nfrom cobbler import remote\nfrom cobbler import utils\nfrom cobbler.api import CobblerAPI\n\n\ndef core(cobbler_api: CobblerAPI):\n \"\"\"\n Starts Cobbler.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n \"\"\"\n settings = cobbler_api.settings()\n xmlrpc_port = settings.xmlrpc_port\n\n regen_ss_file()\n do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)\n\n\ndef regen_ss_file():\n \"\"\"\n This is only used for Kerberos auth at the moment. 
It identifies XMLRPC requests from Apache that have already been\n cleared by Kerberos.\n \"\"\"\n ssfile = \"/var/lib/cobbler/web.ss\"\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n\n with open(ssfile, 'wb', 0o660) as fd:\n fd.write(binascii.hexlify(data))\n\n http_user = \"apache\"\n family = utils.get_family()\n if family == \"debian\":\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n os.lchown(\"/var/lib/cobbler/web.ss\", pwd.getpwnam(http_user)[2], -1)\n\n\ndef do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n \"\"\"\n This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n :param settings: The Cobbler settings instance which is used for this method.\n :param port: The port where the xmlrpc api should run on.\n \"\"\"\n xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)\n server = remote.CobblerXMLRPCServer(('127.0.0.1', port))\n server.logRequests = 0 # don't print stuff\n xinterface.logger.debug(\"XMLRPC running on %s\" % port)\n server.register_instance(xinterface)\n\n while True:\n try:\n print(\"SERVING!\")\n server.serve_forever()\n except IOError:\n # interrupted? try to serve again\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n core(CobblerAPI())\n", "path": "cobbler/cobblerd.py"}]} | 1,620 | 245 |
gh_patches_debug_25206 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-1049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
About option of win_package
I found two issue.
I think Product-ID parameter is not "product_id" , is it "productid"?
Also , it seems the required columns is "yes".
```
fatal: [10.1.1.6]: FAILED! => {"changed": false, "failed": true, "msg": "Missing required argument: productid"
```
Therefore , it take a mistake about "ProductId" below an example on document of win_package:
```
# Playbook example
- name: Install the vc thingy
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
```
</issue>
<code>
[start of windows/win_package.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2014, Trond Hindenes <[email protected]>, and others
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 # this is a windows documentation stub. actual code lives in the .ps1
22 # file of the same name
23
24 DOCUMENTATION = '''
25 ---
26 module: win_package
27 version_added: "1.7"
28 short_description: Installs/Uninstalls a installable package, either from local file system or url
29 description:
30 - Installs or uninstalls a package
31 options:
32 path:
33 description:
34 - Location of the package to be installed (either on file system, network share or url)
35 required: true
36 default: null
37 aliases: []
38 name:
39 description:
40 - name of the package. Just for logging reasons, will use the value of path if name isn't specified
41 required: false
42 default: null
43 aliases: []
44 product_id:
45 description:
46 - product id of the installed package (used for checking if already installed)
47 required: false
48 default: null
49 aliases: []
50 arguments:
51 description:
52 - Any arguments the installer needs
53 default: null
54 aliases: []
55 state:
56 description:
57 - Install or Uninstall
58 choices:
59 - present
60 - absent
61 default: present
62 aliases: [ensure]
63 user_name:
64 description:
65 - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
66 default: null
67 aliases: []
68 user_password:
69 description:
70 - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
71 default: null
72 aliases: []
73 author: Trond Hindenes
74 '''
75
76 EXAMPLES = '''
77 # Playbook example
78 - name: Install the vc thingy
79 win_package:
80 name="Microsoft Visual C thingy"
81 path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
82 ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
83 Arguments="/install /passive /norestart"
84
85
86 '''
87
88
[end of windows/win_package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/windows/win_package.py b/windows/win_package.py
--- a/windows/win_package.py
+++ b/windows/win_package.py
@@ -25,6 +25,7 @@
---
module: win_package
version_added: "1.7"
+author: Trond Hindenes
short_description: Installs/Uninstalls a installable package, either from local file system or url
description:
- Installs or uninstalls a package
@@ -44,9 +45,9 @@
product_id:
description:
- product id of the installed package (used for checking if already installed)
- required: false
+ required: true
default: null
- aliases: []
+ aliases: [productid]
arguments:
description:
- Any arguments the installer needs
@@ -79,7 +80,7 @@
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
- ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
+ Product_Id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
| {"golden_diff": "diff --git a/windows/win_package.py b/windows/win_package.py\n--- a/windows/win_package.py\n+++ b/windows/win_package.py\n@@ -25,6 +25,7 @@\n ---\n module: win_package\n version_added: \"1.7\"\n+author: Trond Hindenes\n short_description: Installs/Uninstalls a installable package, either from local file system or url\n description:\n - Installs or uninstalls a package\n@@ -44,9 +45,9 @@\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n- required: false\n+ required: true\n default: null\n- aliases: []\n+ aliases: [productid]\n arguments:\n description:\n - Any arguments the installer needs\n@@ -79,7 +80,7 @@\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n- ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n+ Product_Id=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n", "issue": "About option of win_package\nI found two issue.\n\nI think Product-ID parameter is not \"product_id\" , is it \"productid\"?\nAlso , it seems the required columns is \"yes\".\n\n```\nfatal: [10.1.1.6]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"Missing required argument: productid\"\n```\n\nTherefore , it take a mistake about \"ProductId\" below an example on document of win_package:\n\n```\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Trond Hindenes <[email protected]>, and others\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# this is a windows documentation stub. actual code lives in the .ps1\n# file of the same name\n\nDOCUMENTATION = '''\n---\nmodule: win_package\nversion_added: \"1.7\"\nshort_description: Installs/Uninstalls a installable package, either from local file system or url\ndescription:\n - Installs or uninstalls a package\noptions:\n path:\n description:\n - Location of the package to be installed (either on file system, network share or url)\n required: true\n default: null\n aliases: []\n name:\n description:\n - name of the package. 
Just for logging reasons, will use the value of path if name isn't specified\n required: false\n default: null\n aliases: []\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n required: false\n default: null\n aliases: []\n arguments:\n description:\n - Any arguments the installer needs\n default: null\n aliases: []\n state:\n description:\n - Install or Uninstall\n choices:\n - present\n - absent\n default: present\n aliases: [ensure]\n user_name:\n description:\n - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.\n default: null\n aliases: []\n user_password:\n description:\n - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.\n default: null\n aliases: []\nauthor: Trond Hindenes\n'''\n\nEXAMPLES = '''\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n\n\n'''\n\n", "path": "windows/win_package.py"}]} | 1,664 | 346 |
gh_patches_debug_2029 | rasdani/github-patches | git_diff | netbox-community__netbox-15568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in Tag model
### Deployment Type
Self-hosted
### NetBox Version
v3.7.4
### Python Version
3.8
### Steps to Reproduce
Typo in help_text where "this" is mistakenly repeated.
https://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40
### Expected Behavior
The object type(s) to which this tag can be applied.
### Observed Behavior
The object type(s) to which this this tag can be applied.
</issue>
<code>
[start of netbox/extras/models/tags.py]
1 from django.conf import settings
2 from django.db import models
3 from django.urls import reverse
4 from django.utils.text import slugify
5 from django.utils.translation import gettext_lazy as _
6 from taggit.models import TagBase, GenericTaggedItemBase
7
8 from netbox.models import ChangeLoggedModel
9 from netbox.models.features import CloningMixin, ExportTemplatesMixin
10 from utilities.choices import ColorChoices
11 from utilities.fields import ColorField
12
13 __all__ = (
14 'Tag',
15 'TaggedItem',
16 )
17
18
19 #
20 # Tags
21 #
22
23 class Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):
24 id = models.BigAutoField(
25 primary_key=True
26 )
27 color = ColorField(
28 verbose_name=_('color'),
29 default=ColorChoices.COLOR_GREY
30 )
31 description = models.CharField(
32 verbose_name=_('description'),
33 max_length=200,
34 blank=True,
35 )
36 object_types = models.ManyToManyField(
37 to='contenttypes.ContentType',
38 related_name='+',
39 blank=True,
40 help_text=_("The object type(s) to which this this tag can be applied.")
41 )
42
43 clone_fields = (
44 'color', 'description', 'object_types',
45 )
46
47 class Meta:
48 ordering = ['name']
49 verbose_name = _('tag')
50 verbose_name_plural = _('tags')
51
52 def get_absolute_url(self):
53 return reverse('extras:tag', args=[self.pk])
54
55 @property
56 def docs_url(self):
57 return f'{settings.STATIC_URL}docs/models/extras/tag/'
58
59 def slugify(self, tag, i=None):
60 # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)
61 slug = slugify(tag, allow_unicode=True)
62 if i is not None:
63 slug += "_%d" % i
64 return slug
65
66
67 class TaggedItem(GenericTaggedItemBase):
68 tag = models.ForeignKey(
69 to=Tag,
70 related_name="%(app_label)s_%(class)s_items",
71 on_delete=models.CASCADE
72 )
73
74 _netbox_private = True
75
76 class Meta:
77 indexes = [models.Index(fields=["content_type", "object_id"])]
78 verbose_name = _('tagged item')
79 verbose_name_plural = _('tagged items')
80
[end of netbox/extras/models/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py
--- a/netbox/extras/models/tags.py
+++ b/netbox/extras/models/tags.py
@@ -37,7 +37,7 @@
to='contenttypes.ContentType',
related_name='+',
blank=True,
- help_text=_("The object type(s) to which this this tag can be applied.")
+ help_text=_("The object type(s) to which this tag can be applied.")
)
clone_fields = (
| {"golden_diff": "diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py\n--- a/netbox/extras/models/tags.py\n+++ b/netbox/extras/models/tags.py\n@@ -37,7 +37,7 @@\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n- help_text=_(\"The object type(s) to which this this tag can be applied.\")\n+ help_text=_(\"The object type(s) to which this tag can be applied.\")\n )\n \n clone_fields = (\n", "issue": "Typo in Tag model\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.4\n\n### Python Version\n\n3.8\n\n### Steps to Reproduce\n\nTypo in help_text where \"this\" is mistakenly repeated.\r\n\r\nhttps://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40\n\n### Expected Behavior\n\nThe object type(s) to which this tag can be applied.\n\n### Observed Behavior\n\nThe object type(s) to which this this tag can be applied.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py"}]} | 1,328 | 114 |
gh_patches_debug_2293 | rasdani/github-patches | git_diff | inventree__InvenTree-4285 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Increase worker-timeout to account for install times
I might have another instance of the same worker-timeout-loop during startup to add to the issue. My docker production installation at InvenTree:latest is running on a Raspberry Pi 3B+.
The loop occurred after I had added the `inventree-brother-plugin` to `plugins.txt` - the initial plugin installation took too long during startup, so the worker processes timed out and were constantly restarted.
My "solution" was to increase the gunicorn timeout variable in the `.env` file to
```
# Options for gunicorn server
INVENTREE_GUNICORN_TIMEOUT=60
```
but maybe actions like pip installs should somehow generally not count against the worker timeout? (I'm not sure about the technical internals on this one at the moment...)
_Originally posted by @simonkuehling in https://github.com/inventree/InvenTree/issues/4180#issuecomment-1410348943_
</issue>
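The same idea can be expressed directly in `gunicorn.conf.py` by raising the default while still honouring the environment variable. The 90-second figure below is an arbitrary illustration, not a value taken from the InvenTree documentation.

```python
import os

# Give workers more headroom for slow first-boot tasks such as plugin installs,
# but keep INVENTREE_GUNICORN_TIMEOUT as the override knob.
timeout = int(os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90))
```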
<code>
[start of docker/gunicorn.conf.py]
1 """Gunicorn configuration for InvenTree."""
2
3 import logging
4 import multiprocessing
5 import os
6
7 # Logger configuration
8 logger = logging.getLogger('inventree')
9 accesslog = '-'
10 errorlog = '-'
11 loglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()
12 capture_output = True
13
14 # Worker configuration
15 # TODO: Implement support for gevent
16 # worker_class = 'gevent' # Allow multi-threading support
17 worker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)
18 threads = 4
19
20
21 # Worker timeout (default = 30 seconds)
22 timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)
23
24 # Number of worker processes
25 workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)
26
27 if workers is not None:
28 try:
29 workers = int(workers)
30 except ValueError:
31 workers = None
32
33 if workers is None:
34 workers = multiprocessing.cpu_count() * 2 + 1
35
36 logger.info(f"Starting gunicorn server with {workers} workers")
37
38 max_requests = 1000
39 max_requests_jitter = 50
40
[end of docker/gunicorn.conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/gunicorn.conf.py b/docker/gunicorn.conf.py
--- a/docker/gunicorn.conf.py
+++ b/docker/gunicorn.conf.py
@@ -18,8 +18,8 @@
threads = 4
-# Worker timeout (default = 30 seconds)
-timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)
+# Worker timeout (default = 90 seconds)
+timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)
# Number of worker processes
workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)
| {"golden_diff": "diff --git a/docker/gunicorn.conf.py b/docker/gunicorn.conf.py\n--- a/docker/gunicorn.conf.py\n+++ b/docker/gunicorn.conf.py\n@@ -18,8 +18,8 @@\n threads = 4\n \n \n-# Worker timeout (default = 30 seconds)\n-timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)\n+# Worker timeout (default = 90 seconds)\n+timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)\n \n # Number of worker processes\n workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n", "issue": "[BUG] Increase worker-timeout to account for install times\n I might have another instance of the same worker-timeout-loop during startup to add to the issue. My docker production installation at InvenTree:latest is running on a Raspberry Pi 3B+.\r\nThe loop occured after I had added the `inventree-brother-plugin` to `plugins.txt` - the initial plugin installation took too long during startup so that the worker processes timed out and were constantly restartet.\r\n\r\nMy \"solution\" was to increase the gunicorn timeout variable in the `.env` file to\r\n```\r\n# Options for gunicorn server\r\nINVENTREE_GUNICORN_TIMEOUT=60\r\n```\r\nbut maybe actions like pip installs should somehow generally not count against the worker timeout? (I'm not sure about the technical internals on this one at the moment...)\r\n\r\n_Originally posted by @simonkuehling in https://github.com/inventree/InvenTree/issues/4180#issuecomment-1410348943_\r\n \n", "before_files": [{"content": "\"\"\"Gunicorn configuration for InvenTree.\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\n\n# Logger configuration\nlogger = logging.getLogger('inventree')\naccesslog = '-'\nerrorlog = '-'\nloglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()\ncapture_output = True\n\n# Worker configuration\n# TODO: Implement support for gevent\n# worker_class = 'gevent' # Allow multi-threading support\nworker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)\nthreads = 4\n\n\n# Worker timeout (default = 30 seconds)\ntimeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)\n\n# Number of worker processes\nworkers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n\nif workers is not None:\n try:\n workers = int(workers)\n except ValueError:\n workers = None\n\nif workers is None:\n workers = multiprocessing.cpu_count() * 2 + 1\n\nlogger.info(f\"Starting gunicorn server with {workers} workers\")\n\nmax_requests = 1000\nmax_requests_jitter = 50\n", "path": "docker/gunicorn.conf.py"}]} | 1,086 | 139 |
gh_patches_debug_35648 | rasdani/github-patches | git_diff | searxng__searxng-2747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEATURE REQUEST] language filtering and safe search with odysee
**Is your feature request related to a problem? Please describe.**
No
**Describe the solution you'd like**
We can use, for example, language=de in the search URL. en, de-DE, and en-US also seem to work. There is no list of supported languages afaik; we just need to try things out one by one.
For safe search Moderate/Strict we should use nsfw=false in the URL.
**Additional context**
The information that you need for this is here: https://github.com/searx/searx/issues/2504
----
Related
- https://github.com/searxng/searxng/pull/2656
- https://github.com/searxng/searxng/issues/590
- [lbr command line](https://gitlab.com/gardenappl/lbt/-/blob/main/lbt?ref_type=heads)
- [LBRY SDK ](https://github.com/lbryio/lbry-sdk/)
</issue>
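Those two parameters slot naturally into the query string that the engine's `request()` already builds. Below is a rough sketch; the `language` and `nsfw` keys come from the linked searx issue, while the way the SearXNG locale and safesearch values are mapped onto them is an assumption for illustration only.

```python
from urllib.parse import urlencode

base_url = "https://lighthouse.odysee.tv/search"
results_per_page = 20

def request(query, params):
    query_params = {
        "s": query,
        "size": results_per_page,
        "from": (params["pageno"] - 1) * results_per_page,
        "include": "channel,thumbnail_url,title,description,duration,release_time",
        "mediaType": "video",
    }
    # Assumed mapping: send only the primary language subtag, and skip "all".
    locale = params.get("searxng_locale") or ""
    if locale and locale != "all":
        query_params["language"] = locale.split("-")[0]
    # Assumed mapping: any non-zero safesearch level hides NSFW results.
    if params.get("safesearch"):
        query_params["nsfw"] = "false"
    params["url"] = f"{base_url}?{urlencode(query_params)}"
    return params
```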
<code>
[start of searx/engines/odysee.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Odysee_ is a decentralised video hosting platform.
4
5 .. _Odysee: https://github.com/OdyseeTeam/odysee-frontend
6 """
7
8 import time
9 from urllib.parse import urlencode
10 from datetime import datetime
11
12 # Engine metadata
13 about = {
14 "website": "https://odysee.com/",
15 "wikidata_id": "Q102046570",
16 "official_api_documentation": None,
17 "use_official_api": False,
18 "require_api_key": False,
19 "results": "JSON",
20 }
21
22 # Engine configuration
23 paging = True
24 results_per_page = 20
25 categories = ['videos']
26
27 # Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)
28 base_url = "https://lighthouse.odysee.tv/search"
29
30
31 def request(query, params):
32 start_index = (params["pageno"] - 1) * results_per_page
33 query_params = {
34 "s": query,
35 "size": results_per_page,
36 "from": start_index,
37 "include": "channel,thumbnail_url,title,description,duration,release_time",
38 "mediaType": "video",
39 }
40
41 params["url"] = f"{base_url}?{urlencode(query_params)}"
42 return params
43
44
45 # Format the video duration
46 def format_duration(duration):
47 seconds = int(duration)
48 length = time.gmtime(seconds)
49 if length.tm_hour:
50 return time.strftime("%H:%M:%S", length)
51 return time.strftime("%M:%S", length)
52
53
54 def response(resp):
55 data = resp.json()
56 results = []
57
58 for item in data:
59 name = item["name"]
60 claim_id = item["claimId"]
61 title = item["title"]
62 thumbnail_url = item["thumbnail_url"]
63 description = item["description"] or ""
64 channel = item["channel"]
65 release_time = item["release_time"]
66 duration = item["duration"]
67
68 release_date = datetime.strptime(release_time.split("T")[0], "%Y-%m-%d")
69 formatted_date = datetime.utcfromtimestamp(release_date.timestamp())
70
71 url = f"https://odysee.com/{name}:{claim_id}"
72 iframe_url = f"https://odysee.com/$/embed/{name}:{claim_id}"
73 odysee_thumbnail = f"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}"
74 formatted_duration = format_duration(duration)
75
76 results.append(
77 {
78 "title": title,
79 "url": url,
80 "content": description,
81 "author": channel,
82 "publishedDate": formatted_date,
83 "length": formatted_duration,
84 "thumbnail": odysee_thumbnail,
85 "iframe_src": iframe_url,
86 "template": "videos.html",
87 }
88 )
89
90 return results
91
[end of searx/engines/odysee.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/odysee.py b/searx/engines/odysee.py
--- a/searx/engines/odysee.py
+++ b/searx/engines/odysee.py
@@ -9,6 +9,14 @@
from urllib.parse import urlencode
from datetime import datetime
+import babel
+
+from searx.network import get
+from searx.locales import language_tag
+from searx.enginelib.traits import EngineTraits
+
+traits: EngineTraits
+
# Engine metadata
about = {
"website": "https://odysee.com/",
@@ -21,6 +29,7 @@
# Engine configuration
paging = True
+time_range_support = True
results_per_page = 20
categories = ['videos']
@@ -29,6 +38,13 @@
def request(query, params):
+ time_range_dict = {
+ "day": "today",
+ "week": "thisweek",
+ "month": "thismonth",
+ "year": "thisyear",
+ }
+
start_index = (params["pageno"] - 1) * results_per_page
query_params = {
"s": query,
@@ -38,6 +54,13 @@
"mediaType": "video",
}
+ lang = traits.get_language(params['searxng_locale'], None)
+ if lang is not None:
+ query_params['language'] = lang
+
+ if params['time_range'] in time_range_dict:
+ query_params['time_filter'] = time_range_dict[params['time_range']]
+
params["url"] = f"{base_url}?{urlencode(query_params)}"
return params
@@ -88,3 +111,35 @@
)
return results
+
+
+def fetch_traits(engine_traits: EngineTraits):
+ """
+ Fetch languages from Odysee's source code.
+ """
+
+ resp = get(
+ 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long
+ timeout=60,
+ )
+
+ if not resp.ok:
+ print("ERROR: can't determine languages from Odysee")
+ return
+
+ for line in resp.text.split("\n")[1:-4]:
+ lang_tag = line.strip().split(": ")[0].replace("'", "")
+
+ try:
+ sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep="-"))
+ except babel.UnknownLocaleError:
+ print("ERROR: %s is unknown by babel" % lang_tag)
+ continue
+
+ conflict = engine_traits.languages.get(sxng_tag)
+ if conflict:
+ if conflict != lang_tag:
+ print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, lang_tag))
+ continue
+
+ engine_traits.languages[sxng_tag] = lang_tag
| {"golden_diff": "diff --git a/searx/engines/odysee.py b/searx/engines/odysee.py\n--- a/searx/engines/odysee.py\n+++ b/searx/engines/odysee.py\n@@ -9,6 +9,14 @@\n from urllib.parse import urlencode\n from datetime import datetime\n \n+import babel\n+\n+from searx.network import get\n+from searx.locales import language_tag\n+from searx.enginelib.traits import EngineTraits\n+\n+traits: EngineTraits\n+\n # Engine metadata\n about = {\n \"website\": \"https://odysee.com/\",\n@@ -21,6 +29,7 @@\n \n # Engine configuration\n paging = True\n+time_range_support = True\n results_per_page = 20\n categories = ['videos']\n \n@@ -29,6 +38,13 @@\n \n \n def request(query, params):\n+ time_range_dict = {\n+ \"day\": \"today\",\n+ \"week\": \"thisweek\",\n+ \"month\": \"thismonth\",\n+ \"year\": \"thisyear\",\n+ }\n+\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n@@ -38,6 +54,13 @@\n \"mediaType\": \"video\",\n }\n \n+ lang = traits.get_language(params['searxng_locale'], None)\n+ if lang is not None:\n+ query_params['language'] = lang\n+\n+ if params['time_range'] in time_range_dict:\n+ query_params['time_filter'] = time_range_dict[params['time_range']]\n+\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n \n@@ -88,3 +111,35 @@\n )\n \n return results\n+\n+\n+def fetch_traits(engine_traits: EngineTraits):\n+ \"\"\"\n+ Fetch languages from Odysee's source code.\n+ \"\"\"\n+\n+ resp = get(\n+ 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long\n+ timeout=60,\n+ )\n+\n+ if not resp.ok:\n+ print(\"ERROR: can't determine languages from Odysee\")\n+ return\n+\n+ for line in resp.text.split(\"\\n\")[1:-4]:\n+ lang_tag = line.strip().split(\": \")[0].replace(\"'\", \"\")\n+\n+ try:\n+ sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep=\"-\"))\n+ except babel.UnknownLocaleError:\n+ print(\"ERROR: %s is unknown by babel\" % lang_tag)\n+ continue\n+\n+ conflict = engine_traits.languages.get(sxng_tag)\n+ if conflict:\n+ if conflict != lang_tag:\n+ print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, lang_tag))\n+ continue\n+\n+ engine_traits.languages[sxng_tag] = lang_tag\n", "issue": "[FEATURE REQUEST] language filtering and safe search with odysee\n**Is your feature request related to a problem? Please describe.**\r\nNo\r\n\r\n**Describe the solution you'd like**\r\nWe can use, for example, language=de in the search URL. en, de-DE, and en-US also seem to work. There is no list of supported languages afaik, we just need to try things out one by one.\r\n\r\nfor safe search Moderate/Strict we should use nsfw=false in the URL\r\n\r\n**Additional context**\r\nThe information that you need for this is here: https://github.com/searx/searx/issues/2504\r\n\r\n\r\n----\r\nRelated\r\n\r\n- https://github.com/searxng/searxng/pull/2656\r\n- https://github.com/searxng/searxng/issues/590\r\n- [lbr command line](https://gitlab.com/gardenappl/lbt/-/blob/main/lbt?ref_type=heads)\r\n- [LBRY SDK ](https://github.com/lbryio/lbry-sdk/)\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Odysee_ is a decentralised video hosting platform.\n\n.. 
_Odysee: https://github.com/OdyseeTeam/odysee-frontend\n\"\"\"\n\nimport time\nfrom urllib.parse import urlencode\nfrom datetime import datetime\n\n# Engine metadata\nabout = {\n \"website\": \"https://odysee.com/\",\n \"wikidata_id\": \"Q102046570\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = ['videos']\n\n# Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)\nbase_url = \"https://lighthouse.odysee.tv/search\"\n\n\ndef request(query, params):\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n \"size\": results_per_page,\n \"from\": start_index,\n \"include\": \"channel,thumbnail_url,title,description,duration,release_time\",\n \"mediaType\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n seconds = int(duration)\n length = time.gmtime(seconds)\n if length.tm_hour:\n return time.strftime(\"%H:%M:%S\", length)\n return time.strftime(\"%M:%S\", length)\n\n\ndef response(resp):\n data = resp.json()\n results = []\n\n for item in data:\n name = item[\"name\"]\n claim_id = item[\"claimId\"]\n title = item[\"title\"]\n thumbnail_url = item[\"thumbnail_url\"]\n description = item[\"description\"] or \"\"\n channel = item[\"channel\"]\n release_time = item[\"release_time\"]\n duration = item[\"duration\"]\n\n release_date = datetime.strptime(release_time.split(\"T\")[0], \"%Y-%m-%d\")\n formatted_date = datetime.utcfromtimestamp(release_date.timestamp())\n\n url = f\"https://odysee.com/{name}:{claim_id}\"\n iframe_url = f\"https://odysee.com/$/embed/{name}:{claim_id}\"\n odysee_thumbnail = f\"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}\"\n formatted_duration = format_duration(duration)\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": channel,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": odysee_thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/odysee.py"}]} | 1,588 | 678 |
gh_patches_debug_25875 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid query parameters cause internal server error
```
File "akvo/rest/filters.py", line 61, in filter_queryset
queryset = getattr(queryset, param)(**args_or_kwargs)
```
In the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}.
We should catch this and return the error message in a dict with `detail` as the key, e.g. `{detail: 'Related Field got invalid lookup: incorrect'}`.
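
One way to do that — a minimal sketch using the local names already present in `filter_queryset` (DRF's `APIException` renders as a `{"detail": ...}` response):

```python
from django.core.exceptions import FieldError
from rest_framework.exceptions import APIException

try:
    queryset = getattr(queryset, param)(**args_or_kwargs)
except FieldError as e:
    raise APIException("Error in request: {}".format(e))
```
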
</issue>
<code>
[start of akvo/rest/filters.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo Reporting is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import ast
8
9 from django.db.models import Q
10
11 from rest_framework import filters
12
13
14 class RSRGenericFilterBackend(filters.BaseFilterBackend):
15
16 def filter_queryset(self, request, queryset, view):
17 """
18 Return a queryset possibly filtered by query param values.
19 The filter looks for the query param keys filter and exclude
20 For each of these query param the value is evaluated using ast.literal_eval() and used as
21 kwargs in queryset.filter and queryset.exclude respectively.
22
23 Example URLs:
24 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}
25 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}
26
27 It's also possible to specify models to be included in select_related() and
28 prefetch_related() calls on the queryset, by specifying these in lists of strings as the
29 values for the query string params select_related and prefetch_related.
30
31 Example:
32 http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']
33
34 Finally, limited support for filtering on multiple arguments using logical OR between
35 those expressions is available. To use this, supply two or more query string keywords of the
36 form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q
37 object. All those Q objects created are used in a queryset.filter() call concatenated using
38 the | operator.
39 """
40 def eval_query_value(request, key):
41 """
42 Use ast.literal_eval() to evaluate a query string value as a python data type object
43 :param request: the django request object
44 :param param: the query string param key
45 :return: a python data type object, or None if literal_eval() fails
46 """
47 value = request.QUERY_PARAMS.get(key, None)
48 try:
49 return ast.literal_eval(value)
50 except ValueError:
51 return None
52
53 qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']
54
55 # evaluate each query string param, and apply the queryset method with the same name
56 for param in qs_params:
57 args_or_kwargs = eval_query_value(request, param)
58 if args_or_kwargs:
59 # filter and exclude are called with a dict kwarg, the _related methods with a list
60 if param in ['filter', 'exclude',]:
61 queryset = getattr(queryset, param)(**args_or_kwargs)
62 else:
63 queryset = getattr(queryset, param)(*args_or_kwargs)
64
65 # support for Q expressions, limited to OR-concatenated filtering
66 if request.QUERY_PARAMS.get('q_filter1', None):
67 i = 1
68 q_queries = []
69 while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):
70 query_arg = eval_query_value(request, 'q_filter{}'.format(i))
71 if query_arg:
72 q_queries += [query_arg]
73 i += 1
74
75 q_expr = Q(**q_queries[0])
76 for query in q_queries[1:]:
77 q_expr = q_expr | Q(**query)
78
79 queryset = queryset.filter(q_expr)
80
81 return queryset
82
[end of akvo/rest/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py
--- a/akvo/rest/filters.py
+++ b/akvo/rest/filters.py
@@ -7,8 +7,10 @@
import ast
from django.db.models import Q
+from django.core.exceptions import FieldError
-from rest_framework import filters
+from rest_framework import filters, status
+from rest_framework.exceptions import APIException
class RSRGenericFilterBackend(filters.BaseFilterBackend):
@@ -57,10 +59,14 @@
args_or_kwargs = eval_query_value(request, param)
if args_or_kwargs:
# filter and exclude are called with a dict kwarg, the _related methods with a list
- if param in ['filter', 'exclude',]:
- queryset = getattr(queryset, param)(**args_or_kwargs)
- else:
- queryset = getattr(queryset, param)(*args_or_kwargs)
+ try:
+ if param in ['filter', 'exclude',]:
+ queryset = getattr(queryset, param)(**args_or_kwargs)
+ else:
+ queryset = getattr(queryset, param)(*args_or_kwargs)
+
+ except FieldError as e:
+ raise APIException("Error in request: {message}".format(message=e.message))
# support for Q expressions, limited to OR-concatenated filtering
if request.QUERY_PARAMS.get('q_filter1', None):
| {"golden_diff": "diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py\n--- a/akvo/rest/filters.py\n+++ b/akvo/rest/filters.py\n@@ -7,8 +7,10 @@\n import ast\n \n from django.db.models import Q\n+from django.core.exceptions import FieldError\n \n-from rest_framework import filters\n+from rest_framework import filters, status\n+from rest_framework.exceptions import APIException\n \n \n class RSRGenericFilterBackend(filters.BaseFilterBackend):\n@@ -57,10 +59,14 @@\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n- if param in ['filter', 'exclude',]:\n- queryset = getattr(queryset, param)(**args_or_kwargs)\n- else:\n- queryset = getattr(queryset, param)(*args_or_kwargs)\n+ try:\n+ if param in ['filter', 'exclude',]:\n+ queryset = getattr(queryset, param)(**args_or_kwargs)\n+ else:\n+ queryset = getattr(queryset, param)(*args_or_kwargs)\n+\n+ except FieldError as e:\n+ raise APIException(\"Error in request: {message}\".format(message=e.message))\n \n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n", "issue": "Invalid query parameters cause internal server error\n```\n File \"akvo/rest/filters.py\", line 61, in filter_queryset\n queryset = getattr(queryset, param)(**args_or_kwargs)\n```\n\nIn the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}. \n\nWe should catch this and return the error message in a dict with detail as key. E.g. `{detail: 'Related Field got invalid lookup: incorrect'}`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport ast\n\nfrom django.db.models import Q\n\nfrom rest_framework import filters\n\n\nclass RSRGenericFilterBackend(filters.BaseFilterBackend):\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a queryset possibly filtered by query param values.\n The filter looks for the query param keys filter and exclude\n For each of these query param the value is evaluated using ast.literal_eval() and used as\n kwargs in queryset.filter and queryset.exclude respectively.\n\n Example URLs:\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}\n\n It's also possible to specify models to be included in select_related() and\n prefetch_related() calls on the queryset, but specifying these in lists of strings as the\n values for the query sting params select_relates and prefetch_related.\n\n Example:\n http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']\n\n Finally limited support for filtering on multiple arguments using logical OR between\n those expressions is available. To use this supply two or more query string keywords on the\n form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q\n object. 
All those Q objects created are used in a queryset.filter() call concatenated using\n the | operator.\n \"\"\"\n def eval_query_value(request, key):\n \"\"\"\n Use ast.literal_eval() to evaluate a query string value as a python data type object\n :param request: the django request object\n :param param: the query string param key\n :return: a python data type object, or None if literal_eval() fails\n \"\"\"\n value = request.QUERY_PARAMS.get(key, None)\n try:\n return ast.literal_eval(value)\n except ValueError:\n return None\n\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']\n\n # evaluate each query string param, and apply the queryset method with the same name\n for param in qs_params:\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n if param in ['filter', 'exclude',]:\n queryset = getattr(queryset, param)(**args_or_kwargs)\n else:\n queryset = getattr(queryset, param)(*args_or_kwargs)\n\n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n i = 1\n q_queries = []\n while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):\n query_arg = eval_query_value(request, 'q_filter{}'.format(i))\n if query_arg:\n q_queries += [query_arg]\n i += 1\n\n q_expr = Q(**q_queries[0])\n for query in q_queries[1:]:\n q_expr = q_expr | Q(**query)\n\n queryset = queryset.filter(q_expr)\n\n return queryset\n", "path": "akvo/rest/filters.py"}]} | 1,581 | 312 |
gh_patches_debug_38763 | rasdani/github-patches | git_diff | Gallopsled__pwntools-227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
atexit module can't handle multiple instances
The `atexit` module uses a dictionary to keep track of the routines to call. This means that a given routine cannot be registered more than once, for example with different arguments.
Additionally, this means that `pwnlib.atexit` does not guarantee order.
Per the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:
_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._
However, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.
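
A rough sketch of that interface (illustrative only; a real implementation would also need a lock around the counter):

```python
_handlers = {}    # ident -> (func, args, kwargs)
_next_ident = 0

def register(func, *args, **kwargs):
    global _next_ident
    ident = _next_ident
    _next_ident += 1
    _handlers[ident] = (func, args, kwargs)
    return ident              # callers keep this to unregister later

def unregister(ident):
    _handlers.pop(ident, None)

def _run_handlers():
    # run in reverse registration order, like the stdlib atexit
    for ident in sorted(_handlers, reverse=True):
        func, args, kwargs = _handlers[ident]
        func(*args, **kwargs)
```
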
</issue>
<code>
[start of pwnlib/atexit.py]
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10
11 import sys, traceback
12
13 __all__ = ['register', 'unregister']
14
15 _handlers = {}
16
17 def register(func, *args, **kwargs):
18 """register(func, *args, **kwargs)
19
20 Registers a function to be called when an unhandled exception occurs. The
21 function will be called with positional arguments `args` and keyword
22 arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
23
24 If `func` is already registered then `args` and `kwargs` will be updated.
25
26 This function can be used as a decorator::
27
28 def f():
29 ...
30 atexit.register(f)
31
32 is equivalent to this::
33
34 @atexit.register
35 def f():
36 ...
37
38 """
39 _handlers[func] = (args, kwargs)
40 return func
41
42 def unregister(func):
43 """unregister(func)
44
45 Remove `func` from the collection of registered functions. If `func` isn't
46 registered this is a no-op.
47 """
48 if func in _handlers:
49 del _handlers[func]
50
51 def _run_handlers():
52 """_run_handlers()
53
54 Run registered exit handlers. The order is arbitrary.
55
56 If a handler raises an exception, it will be printed but nothing else
57 happens, i.e. other handlers will be run and `sys.excepthook` will not be
58 called for that reason.
59 """
60 for func, (args, kwargs) in _handlers.items():
61 try:
62 func(*args, **kwargs)
63 except SystemExit:
64 pass
65 except:
66 # extract the current exception and rewind the traceback to where it
67 # originated
68 typ, val, tb = sys.exc_info()
69 traceback.print_exception(typ, val, tb.tb_next)
70
71 # if there's already an exitfunc registered be sure to run that too
72 if hasattr(sys, "exitfunc"):
73 register(sys.exitfunc)
74
75 sys.exitfunc = _run_handlers
76
[end of pwnlib/atexit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -8,58 +8,76 @@
printed twice when the standard :mod:`atexit` is used.
"""
-import sys, traceback
+import sys, traceback, threading
+from .context import context
__all__ = ['register', 'unregister']
+_lock = threading.Lock()
+_ident = 0
_handlers = {}
def register(func, *args, **kwargs):
"""register(func, *args, **kwargs)
- Registers a function to be called when an unhandled exception occurs. The
- function will be called with positional arguments `args` and keyword
- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
+ Registers a function to be called on program termination. The function will
+ be called with positional arguments `args` and keyword arguments `kwargs`,
+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
+ be the one used when the handler is run.
- If `func` is already registered then `args` and `kwargs` will be updated.
+ E.g. to suppress logging output from an exit-handler one could write::
- This function can be used as a decorator::
+ with context.local(log_level = 'error'):
+ atexit.register(handler)
- def f():
- ...
- atexit.register(f)
+ An identifier is returned which can be used to unregister the exit-handler.
- is equivalent to this::
+ This function can be used as a decorator::
@atexit.register
- def f():
+ def handler():
...
- """
- _handlers[func] = (args, kwargs)
- return func
+ Notice however that this will bind ``handler`` to the identifier and not the
+ actual exit-handler. The exit-handler can then be unregistered with::
+
+ atexit.unregister(handler)
-def unregister(func):
- """unregister(func)
+ This function is thread safe.
- Remove `func` from the collection of registered functions. If `func` isn't
- registered this is a no-op.
"""
- if func in _handlers:
- del _handlers[func]
+ global _ident
+ with _lock:
+ ident = _ident
+ _ident += 1
+ _handlers[ident] = (func, args, kwargs, vars(context))
+ return ident
+
+def unregister(ident):
+ """unregister(ident)
+
+ Remove the exit-handler identified by `ident` from the list of registered
+ handlers. If `ident` isn't registered this is a no-op.
+ """
+ if ident in _handlers:
+ del _handlers[ident]
def _run_handlers():
"""_run_handlers()
- Run registered exit handlers. The order is arbitrary.
+ Run registered exit-handlers. They run in the reverse order of which they
+ were registered.
If a handler raises an exception, it will be printed but nothing else
happens, i.e. other handlers will be run and `sys.excepthook` will not be
called for that reason.
"""
- for func, (args, kwargs) in _handlers.items():
+ context.clear()
+ for _ident, (func, args, kwargs, ctx) in \
+ sorted(_handlers.items(), reverse = True):
try:
- func(*args, **kwargs)
+ with context.local(**ctx):
+ func(*args, **kwargs)
except SystemExit:
pass
except:
| {"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -8,58 +8,76 @@\n printed twice when the standard :mod:`atexit` is used.\n \"\"\"\n \n-import sys, traceback\n+import sys, traceback, threading\n+from .context import context\n \n __all__ = ['register', 'unregister']\n \n+_lock = threading.Lock()\n+_ident = 0\n _handlers = {}\n \n def register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n \n- Registers a function to be called when an unhandled exception occurs. The\n- function will be called with positional arguments `args` and keyword\n- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n+ Registers a function to be called on program termination. The function will\n+ be called with positional arguments `args` and keyword arguments `kwargs`,\n+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n+ be the one used when the handler is run.\n \n- If `func` is already registered then `args` and `kwargs` will be updated.\n+ E.g. to suppress logging output from an exit-handler one could write::\n \n- This function can be used as a decorator::\n+ with context.local(log_level = 'error'):\n+ atexit.register(handler)\n \n- def f():\n- ...\n- atexit.register(f)\n+ An identifier is returned which can be used to unregister the exit-handler.\n \n- is equivalent to this::\n+ This function can be used as a decorator::\n \n @atexit.register\n- def f():\n+ def handler():\n ...\n \n- \"\"\"\n- _handlers[func] = (args, kwargs)\n- return func\n+ Notice however that this will bind ``handler`` to the identifier and not the\n+ actual exit-handler. The exit-handler can then be unregistered with::\n+\n+ atexit.unregister(handler)\n \n-def unregister(func):\n- \"\"\"unregister(func)\n+ This function is thread safe.\n \n- Remove `func` from the collection of registered functions. If `func` isn't\n- registered this is a no-op.\n \"\"\"\n- if func in _handlers:\n- del _handlers[func]\n+ global _ident\n+ with _lock:\n+ ident = _ident\n+ _ident += 1\n+ _handlers[ident] = (func, args, kwargs, vars(context))\n+ return ident\n+\n+def unregister(ident):\n+ \"\"\"unregister(ident)\n+\n+ Remove the exit-handler identified by `ident` from the list of registered\n+ handlers. If `ident` isn't registered this is a no-op.\n+ \"\"\"\n+ if ident in _handlers:\n+ del _handlers[ident]\n \n def _run_handlers():\n \"\"\"_run_handlers()\n \n- Run registered exit handlers. The order is arbitrary.\n+ Run registered exit-handlers. They run in the reverse order of which they\n+ were registered.\n \n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n- for func, (args, kwargs) in _handlers.items():\n+ context.clear()\n+ for _ident, (func, args, kwargs, ctx) in \\\n+ sorted(_handlers.items(), reverse = True):\n try:\n- func(*args, **kwargs)\n+ with context.local(**ctx):\n+ func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n", "issue": "atexit module can't handle multiple instances\nThe `atexit` module uses a dictionary to keep track of the routines to call. 
This means that no routines can be registered more than once, for example with different arguments.\n\nAdditionally, this means that `pwnlib.atexit` does not guarantee order.\n\nPer the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:\n\n_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._\n\nHowever, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.\n\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\n\nimport sys, traceback\n\n__all__ = ['register', 'unregister']\n\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called when an unhandled exception occurs. The\n function will be called with positional arguments `args` and keyword\n arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n\n If `func` is already registered then `args` and `kwargs` will be updated.\n\n This function can be used as a decorator::\n\n def f():\n ...\n atexit.register(f)\n\n is equivalent to this::\n\n @atexit.register\n def f():\n ...\n\n \"\"\"\n _handlers[func] = (args, kwargs)\n return func\n\ndef unregister(func):\n \"\"\"unregister(func)\n\n Remove `func` from the collection of registered functions. If `func` isn't\n registered this is a no-op.\n \"\"\"\n if func in _handlers:\n del _handlers[func]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit handlers. The order is arbitrary.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n for func, (args, kwargs) in _handlers.items():\n try:\n func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}]} | 1,368 | 836 |
gh_patches_debug_8954 | rasdani/github-patches | git_diff | mne-tools__mne-bids-1028 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
complete `.zenodo.json` metadata
Our `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):
https://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4
I feel like adding the contributors that are currently not listed to that file in a PR, and making it a custom to also add new contributors to the Zenodo metadata. I think that's a nice "reward" for contributions. Any objections?
</issue>
<code>
[start of setup.py]
1 """Setup MNE-BIDS."""
2 import sys
3
4 from setuptools import setup
5
6 # Give setuptools a hint to complain if it's too old a version
7 SETUP_REQUIRES = ["setuptools >= 46.4.0"]
8 # This enables setuptools to install wheel on-the-fly
9 SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
10
11 version = None
12 with open('mne_bids/__init__.py', 'r') as fid:
13 for line in fid:
14 line = line.strip()
15 if line.startswith('__version__ = '):
16 version = line.split(' = ')[1].split('#')[0].strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 if __name__ == "__main__":
23 setup(
24 version=version,
25 setup_requires=SETUP_REQUIRES,
26 )
27
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
"""Setup MNE-BIDS."""
+import os
import sys
from setuptools import setup
@@ -9,7 +10,7 @@
SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
version = None
-with open('mne_bids/__init__.py', 'r') as fid:
+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
for line in fid:
line = line.strip()
if line.startswith('__version__ = '):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n \"\"\"Setup MNE-BIDS.\"\"\"\n+import os\n import sys\n \n from setuptools import setup\n@@ -9,7 +10,7 @@\n SETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n \n version = None\n-with open('mne_bids/__init__.py', 'r') as fid:\n+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n", "issue": "complete `.zenodo.json` metadata\nOur `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4\r\n\r\nI feel like adding the contributors that are currently not listed to that file in a PR, and making that a \"custom\" (to add new contributors also to the zenodo metadata). I think that's a nice \"reward\" for contributions. Any objections?\n", "before_files": [{"content": "\"\"\"Setup MNE-BIDS.\"\"\"\nimport sys\n\nfrom setuptools import setup\n\n# Give setuptools a hint to complain if it's too old a version\nSETUP_REQUIRES = [\"setuptools >= 46.4.0\"]\n# This enables setuptools to install wheel on-the-fly\nSETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n\nversion = None\nwith open('mne_bids/__init__.py', 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n version = line.split(' = ')[1].split('#')[0].strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\nif __name__ == \"__main__\":\n setup(\n version=version,\n setup_requires=SETUP_REQUIRES,\n )\n", "path": "setup.py"}]} | 917 | 147 |
gh_patches_debug_60760 | rasdani/github-patches | git_diff | coala__coala-bears-310 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GitCommitBear: body_line_length shall be 72 and not 73
The standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.
</issue>
<code>
[start of bears/vcs/git/GitCommitBear.py]
1 import re
2 import shutil
3 import os
4
5 from coalib.bears.GlobalBear import GlobalBear
6 from coalib.misc.ContextManagers import change_directory
7 from coalib.misc.Shell import run_shell_command
8 from coalib.results.Result import Result
9
10
11 class GitCommitBear(GlobalBear):
12
13 @classmethod
14 def check_prerequisites(cls):
15 if shutil.which("git") is None:
16 return "git is not installed."
17 else:
18 return True
19
20 def run(self,
21 shortlog_length: int=50,
22 body_line_length: int=73,
23 force_body: bool=False,
24 allow_empty_commit_message: bool=False,
25 shortlog_regex: str="",
26 shortlog_trailing_period: bool=None):
27 """
28 Checks the current git commit message at HEAD.
29
30 This bear ensures that the shortlog and body do not exceed a given
31 line-length and that a newline lies between them.
32
33 :param shortlog_length: The maximum length of the shortlog.
34 The shortlog is the first line of
35 the commit message. The newline
36 character at end does not count to
37 the length.
38 :param body_line_length: The maximum line-length of the body.
39 The newline character at each line
40 end does not count to the length.
41 :param force_body: Whether a body shall exist or not.
42 :param allow_empty_commit_message: Whether empty commit messages are
43 allowed or not.
44 :param shortlog_regex: A regex to check the shortlog with.
45 A full match of this regex is then
46 required. Passing an empty string
47 disables the regex-check.
48 :param shortlog_trailing_period: Whether a dot shall be enforced at
49 the end of the shortlog line.
50 Providing ``None`` means
51 "doesn't care".
52 """
53 with change_directory(self.get_config_dir() or os.getcwd()):
54 stdout, stderr = run_shell_command("git log -1 --pretty=%B")
55
56 if stderr:
57 self.err("git:", repr(stderr))
58 return
59
60 stdout = stdout.rstrip("\n").splitlines()
61
62 if len(stdout) == 0:
63 if not allow_empty_commit_message:
64 yield Result(self, "HEAD commit has no message.")
65 return
66
67 yield from self.check_shortlog(shortlog_length,
68 shortlog_regex,
69 shortlog_trailing_period,
70 stdout[0])
71 yield from self.check_body(body_line_length, force_body, stdout[1:])
72
73 def check_shortlog(self,
74 shortlog_length,
75 regex,
76 shortlog_trailing_period,
77 shortlog):
78 """
79 Checks the given shortlog.
80
81 :param shortlog_length: The maximum length of the shortlog.
82 The newline character at end does not
83 count to the length.
84 :param regex: A regex to check the shortlog with.
85 :param shortlog_trailing_period: Whether a dot shall be enforced at the
86 end or not (or ``None`` for "don't
87 care").
88 :param shortlog: The shortlog message string.
89 """
90 diff = len(shortlog) - shortlog_length
91 if diff > 0:
92 yield Result(self,
93 "Shortlog of HEAD commit is {} character(s) longer "
94 "than the limit ({} > {}).".format(
95 diff, len(shortlog), shortlog_length))
96
97 if (shortlog[-1] != ".") == shortlog_trailing_period:
98 yield Result(self,
99 "Shortlog of HEAD commit contains no period at end."
100 if shortlog_trailing_period else
101 "Shortlog of HEAD commit contains a period at end.")
102
103 if regex != "":
104 match = re.match(regex, shortlog)
105 # fullmatch() inside re-module exists sadly since 3.4, but we
106 # support 3.3 so we need to check that the regex matched completely
107 # ourselves.
108 if not match or match.end() != len(shortlog):
109 yield Result(
110 self,
111 "Shortlog of HEAD commit does not match given regex.")
112
113 def check_body(self, body_line_length, force_body, body):
114 """
115 Checks the given commit body.
116
117 :param body_line_length: The maximum line-length of the body. The
118 newline character at each line end does not
119 count to the length.
120 :param force_body: Whether a body shall exist or not.
121 :param body: The commit body splitted by lines.
122 """
123 if len(body) == 0:
124 if force_body:
125 yield Result(self, "No commit message body at HEAD.")
126 return
127
128 if body[0] != "":
129 yield Result(self, "No newline between shortlog and body at HEAD.")
130 return
131
132 if any(len(line) > body_line_length for line in body[1:]):
133 yield Result(self, "Body of HEAD commit contains too long lines.")
134
[end of bears/vcs/git/GitCommitBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py
--- a/bears/vcs/git/GitCommitBear.py
+++ b/bears/vcs/git/GitCommitBear.py
@@ -19,7 +19,7 @@
def run(self,
shortlog_length: int=50,
- body_line_length: int=73,
+ body_line_length: int=72,
force_body: bool=False,
allow_empty_commit_message: bool=False,
shortlog_regex: str="",
| {"golden_diff": "diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py\n--- a/bears/vcs/git/GitCommitBear.py\n+++ b/bears/vcs/git/GitCommitBear.py\n@@ -19,7 +19,7 @@\n \n def run(self,\n shortlog_length: int=50,\n- body_line_length: int=73,\n+ body_line_length: int=72,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n", "issue": "GitCommitBear: body_line_length shall be 72 and not 73\nThe standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.\n\n", "before_files": [{"content": "import re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=73,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n :param shortlog_regex: A regex to check the shortlog with.\n A full match of this regex is then\n required. 
Passing an empty string\n disable the regex-check.\n :param shortlog_trailing_period: Whether a dot shall be enforced at\n the end of the shortlog line.\n Providing ``None`` means\n \"doesn't care\".\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length,\n shortlog_regex,\n shortlog_trailing_period,\n stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self,\n shortlog_length,\n regex,\n shortlog_trailing_period,\n shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog: The shortlog message string.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of HEAD commit is {} character(s) longer \"\n \"than the limit ({} > {}).\".format(\n diff, len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if regex != \"\":\n match = re.match(regex, shortlog)\n # fullmatch() inside re-module exists sadly since 3.4, but we\n # support 3.3 so we need to check that the regex matched completely\n # ourselves.\n if not match or match.end() != len(shortlog):\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}]} | 1,943 | 127 |
gh_patches_debug_8972 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2367 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] pyro.distributions.InverseGamma.sample does not work on GPU
### Issue Description
`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:
```
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
```
I think the problem is in `__init__`:
```python
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
```
The argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.
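
i.e. roughly the following (a sketch of that suggestion, not a tested fix — it also assumes `import torch` is added to `inverse_gamma.py`):

```python
def __init__(self, concentration, rate, validate_args=None):
    base_dist = Gamma(concentration, rate)
    # a tensor exponent stays on the same device/dtype as the parameters
    super().__init__(base_dist, PowerTransform(-torch.ones_like(base_dist.rate)),
                     validate_args=validate_args)
```
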
### Environment
Ubuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0
### Code Snippet
```python
>>> import torch
>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution
>>> from pyro.distributions.transforms import PowerTransform
>>>
>>> concentration = torch.tensor(1.0).to("cuda")
>>> rate = torch.tensor(1.0).to("cuda")
>>>
>>> # InverseGamma.sample fails with an error
>>> InverseGamma(concentration, rate).sample()
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
>>>
>>> # The equivalent TransformedDistribution is fine
>>> TransformedDistribution(
... Gamma(concentration, rate),
... PowerTransform(torch.tensor(-1.0).to("cuda")),
... ).sample()
tensor(0.5707, device='cuda:0')
```
</issue>
<code>
[start of pyro/distributions/inverse_gamma.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from torch.distributions import constraints
5 from torch.distributions.transforms import PowerTransform
6 from pyro.distributions.torch import Gamma, TransformedDistribution
7
8
9 class InverseGamma(TransformedDistribution):
10 r"""
11 Creates an inverse-gamma distribution parameterized by
12 `concentration` and `rate`.
13
14 X ~ Gamma(concentration, rate)
15 Y = 1/X ~ InverseGamma(concentration, rate)
16
17 :param torch.Tensor concentration: the concentration parameter (i.e. alpha).
18 :param torch.Tensor rate: the rate parameter (i.e. beta).
19 """
20 arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
21 support = constraints.positive
22 has_rsample = True
23
24 def __init__(self, concentration, rate, validate_args=None):
25 base_dist = Gamma(concentration, rate)
26 super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
27
28 def expand(self, batch_shape, _instance=None):
29 new = self._get_checked_instance(InverseGamma, _instance)
30 return super().expand(batch_shape, _instance=new)
31
32 @property
33 def concentration(self):
34 return self.base_dist.concentration
35
36 @property
37 def rate(self):
38 return self.base_dist.rate
39
[end of pyro/distributions/inverse_gamma.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py
--- a/pyro/distributions/inverse_gamma.py
+++ b/pyro/distributions/inverse_gamma.py
@@ -23,7 +23,8 @@
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),
+ validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(InverseGamma, _instance)
| {"golden_diff": "diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py\n--- a/pyro/distributions/inverse_gamma.py\n+++ b/pyro/distributions/inverse_gamma.py\n@@ -23,7 +23,8 @@\n \n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),\n+ validate_args=validate_args)\n \n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n", "issue": "[bug] pyro.distributions.InverseGamma.sample does not work on GPU\n### Issue Description\r\n`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:\r\n```\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n```\r\n\r\nI think the problem is in `__init__`:\r\n```python\r\n def __init__(self, concentration, rate, validate_args=None):\r\n base_dist = Gamma(concentration, rate)\r\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\r\n```\r\nThe argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.\r\n\r\n### Environment\r\nUbuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0\r\n\r\n### Code Snippet\r\n```python\r\n>>> import torch\r\n>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution\r\n>>> from pyro.distributions.transforms import PowerTransform\r\n>>>\r\n>>> concentration = torch.tensor(1.0).to(\"cuda\")\r\n>>> rate = torch.tensor(1.0).to(\"cuda\")\r\n>>> \r\n>>> # InverseGamma.sample fails with an error\r\n>>> InverseGamma(concentration, rate).sample()\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n>>> \r\n>>> # The equivalent TransformedDistribution is fine\r\n>>> TransformedDistribution(\r\n... Gamma(concentration, rate),\r\n... PowerTransform(torch.tensor(-1.0).to(\"cuda\")),\r\n... ).sample()\r\ntensor(0.5707, device='cuda:0')\r\n```\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import PowerTransform\nfrom pyro.distributions.torch import Gamma, TransformedDistribution\n\n\nclass InverseGamma(TransformedDistribution):\n r\"\"\"\n Creates an inverse-gamma distribution parameterized by\n `concentration` and `rate`.\n\n X ~ Gamma(concentration, rate)\n Y = 1/X ~ InverseGamma(concentration, rate)\n\n :param torch.Tensor concentration: the concentration parameter (i.e. alpha).\n :param torch.Tensor rate: the rate parameter (i.e. 
beta).\n \"\"\"\n arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}\n support = constraints.positive\n has_rsample = True\n\n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def concentration(self):\n return self.base_dist.concentration\n\n @property\n def rate(self):\n return self.base_dist.rate\n", "path": "pyro/distributions/inverse_gamma.py"}]} | 1,333 | 161 |
gh_patches_debug_7918 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-75 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CIFAR10 run_fl.py misses license header
https://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1
</issue>
<code>
[start of examples/cifar10/run_fl.py]
1 import argparse
2 import os
3 import time
4
5 from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner
6
7
8 def main():
9 parser = argparse.ArgumentParser()
10 parser.add_argument("--run_number", type=int, default=100, help="FL run number to start at.")
11 parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
12 parser.add_argument("--username", type=str, default="[email protected]", help="Admin username")
13 parser.add_argument("--app", type=str, default="cifar10_fedavg", help="App to be deployed")
14 parser.add_argument("--port", type=int, default=8003, help="The admin server port")
15 parser.add_argument("--poc", action='store_true', help="Whether admin uses POC mode.")
16 parser.add_argument("--min_clients", type=int, default=8, help="Minimum number of clients.")
17 args = parser.parse_args()
18
19 host = ""
20 port = args.port
21
22 assert os.path.isdir(args.admin_dir), f"admin directory does not exist at {args.admin_dir}"
23
24 # Set up certificate names and admin folders
25 upload_dir = os.path.join(args.admin_dir, "transfer")
26 if not os.path.isdir(upload_dir):
27 os.makedirs(upload_dir)
28 download_dir = os.path.join(args.admin_dir, "download")
29 if not os.path.isdir(download_dir):
30 os.makedirs(download_dir)
31
32 run_number = args.run_number
33
34 # Initialize the runner
35 runner = FLAdminAPIRunner(
36 host=host,
37 port=port,
38 username=args.username,
39 admin_dir=args.admin_dir,
40 poc=args.poc,
41 debug=False,
42 )
43
44 # Run
45 start = time.time()
46 runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,
47 timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours
48 print("Total training time", time.time() - start)
49
50
51 if __name__ == "__main__":
52 main()
53
[end of examples/cifar10/run_fl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py
--- a/examples/cifar10/run_fl.py
+++ b/examples/cifar10/run_fl.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import argparse
import os
import time
| {"golden_diff": "diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py\n--- a/examples/cifar10/run_fl.py\n+++ b/examples/cifar10/run_fl.py\n@@ -1,3 +1,17 @@\n+# Copyright (c) 2021, NVIDIA CORPORATION.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import argparse\n import os\n import time\n", "issue": "CIFAR10 run_fl.py misses license header\nhttps://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1\n", "before_files": [{"content": "import argparse\nimport os\nimport time\n\nfrom nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_number\", type=int, default=100, help=\"FL run number to start at.\")\n parser.add_argument(\"--admin_dir\", type=str, default=\"./admin/\", help=\"Path to admin directory.\")\n parser.add_argument(\"--username\", type=str, default=\"[email protected]\", help=\"Admin username\")\n parser.add_argument(\"--app\", type=str, default=\"cifar10_fedavg\", help=\"App to be deployed\")\n parser.add_argument(\"--port\", type=int, default=8003, help=\"The admin server port\")\n parser.add_argument(\"--poc\", action='store_true', help=\"Whether admin uses POC mode.\")\n parser.add_argument(\"--min_clients\", type=int, default=8, help=\"Minimum number of clients.\")\n args = parser.parse_args()\n\n host = \"\"\n port = args.port\n\n assert os.path.isdir(args.admin_dir), f\"admin directory does not exist at {args.admin_dir}\"\n\n # Set up certificate names and admin folders\n upload_dir = os.path.join(args.admin_dir, \"transfer\")\n if not os.path.isdir(upload_dir):\n os.makedirs(upload_dir)\n download_dir = os.path.join(args.admin_dir, \"download\")\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n run_number = args.run_number\n\n # Initialize the runner\n runner = FLAdminAPIRunner(\n host=host,\n port=port,\n username=args.username,\n admin_dir=args.admin_dir,\n poc=args.poc,\n debug=False,\n )\n\n # Run\n start = time.time()\n runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,\n timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours\n print(\"Total training time\", time.time() - start)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/cifar10/run_fl.py"}]} | 1,168 | 203 |
gh_patches_debug_29982 | rasdani/github-patches | git_diff | astronomer__astro-sdk-453 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change `export_file` to return `File` object
**Context**
In order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.
* Astro-SDK version: 0.9.1
* Request by: @jlaneve
* Analysed by @dimberman
**Problem**
At the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:
```
output_file = File(path="/tmp/saved_df.csv")
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
export = aql.export_file(
input_data=table,
output_file=output_file,
if_exists="replace",
)
res_df = aql.load_file(input_file=output_file)
export >> res_df
```
**Desired behaviour**
```
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
exported_file = aql.export_file(
input_data=table,
output_file=File(path="/tmp/saved_df.csv"),
if_exists="replace",
)
res_df = aql.load_file(input_file=exported_file)
```
**Acceptance criteria**
* Change `export_file` so it returns the `File` instance, as opposed to `None`
Since there is no documentation about this task, we don't need to update the documentation for it. Creating documentation for this feature should be part of another issue.
</issue>
<code>
[start of src/astro/sql/operators/export_file.py]
1 from typing import Optional, Union
2
3 import pandas as pd
4 from airflow.models import BaseOperator
5 from airflow.models.xcom_arg import XComArg
6
7 from astro.constants import ExportExistsStrategy
8 from astro.databases import create_database
9 from astro.files import File
10 from astro.sql.table import Table
11 from astro.utils.task_id_helper import get_task_id
12
13
14 class ExportFile(BaseOperator):
15 """Write SQL table to csv/parquet on local/S3/GCS.
16
17 :param input_data: Table to convert to file
18 :param output_file: File object containing the path to the file and connection id.
19 :param if_exists: Overwrite file if exists. Default False.
20 """
21
22 template_fields = ("input_data", "output_file")
23
24 def __init__(
25 self,
26 input_data: Union[Table, pd.DataFrame],
27 output_file: File,
28 if_exists: ExportExistsStrategy = "exception",
29 **kwargs,
30 ) -> None:
31 super().__init__(**kwargs)
32 self.output_file = output_file
33 self.input_data = input_data
34 self.if_exists = if_exists
35 self.kwargs = kwargs
36
37 def execute(self, context: dict) -> None:
38 """Write SQL table to csv/parquet on local/S3/GCS.
39
40 Infers SQL database type based on connection.
41 """
42 # Infer db type from `input_conn_id`.
43 if isinstance(self.input_data, Table):
44 database = create_database(self.input_data.conn_id)
45 self.input_data = database.populate_table_metadata(self.input_data)
46 df = database.export_table_to_pandas_dataframe(self.input_data)
47 elif isinstance(self.input_data, pd.DataFrame):
48 df = self.input_data
49 else:
50 raise ValueError(
51 f"Expected input_table to be Table or dataframe. Got {type(self.input_data)}"
52 )
53 # Write file if overwrite == True or if file doesn't exist.
54 if self.if_exists == "replace" or not self.output_file.exists():
55 self.output_file.create_from_dataframe(df)
56 else:
57 raise FileExistsError(f"{self.output_file.path} file already exists.")
58
59
60 def export_file(
61 input_data: Union[Table, pd.DataFrame],
62 output_file: File,
63 if_exists: ExportExistsStrategy = "exception",
64 task_id: Optional[str] = None,
65 **kwargs,
66 ) -> XComArg:
67 """Convert SaveFile into a function. Returns XComArg.
68
69 Returns an XComArg object.
70
71 :param output_file: Path and conn_id
72 :param input_data: Input table / dataframe
73 :param if_exists: Overwrite file if exists. Default "exception"
74 :param task_id: task id, optional
75 """
76
77 task_id = (
78 task_id if task_id is not None else get_task_id("export_file", output_file.path)
79 )
80
81 return ExportFile(
82 task_id=task_id,
83 output_file=output_file,
84 input_data=input_data,
85 if_exists=if_exists,
86 ).output
87
[end of src/astro/sql/operators/export_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py
--- a/src/astro/sql/operators/export_file.py
+++ b/src/astro/sql/operators/export_file.py
@@ -34,7 +34,7 @@
self.if_exists = if_exists
self.kwargs = kwargs
- def execute(self, context: dict) -> None:
+ def execute(self, context: dict) -> File:
"""Write SQL table to csv/parquet on local/S3/GCS.
Infers SQL database type based on connection.
@@ -53,6 +53,7 @@
# Write file if overwrite == True or if file doesn't exist.
if self.if_exists == "replace" or not self.output_file.exists():
self.output_file.create_from_dataframe(df)
+ return self.output_file
else:
raise FileExistsError(f"{self.output_file.path} file already exists.")
@@ -66,7 +67,20 @@
) -> XComArg:
"""Convert SaveFile into a function. Returns XComArg.
- Returns an XComArg object.
+ Returns an XComArg object of type File which matches the output_file parameter.
+
+ This will allow users to perform further actions with the exported file.
+
+ e.g.
+
+ with sample_dag:
+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
+ exported_file = aql.export_file(
+ input_data=table,
+ output_file=File(path="/tmp/saved_df.csv"),
+ if_exists="replace",
+ )
+ res_df = aql.load_file(input_file=exported_file)
:param output_file: Path and conn_id
:param input_data: Input table / dataframe
| {"golden_diff": "diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py\n--- a/src/astro/sql/operators/export_file.py\n+++ b/src/astro/sql/operators/export_file.py\n@@ -34,7 +34,7 @@\n self.if_exists = if_exists\n self.kwargs = kwargs\n \n- def execute(self, context: dict) -> None:\n+ def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n \n Infers SQL database type based on connection.\n@@ -53,6 +53,7 @@\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n+ return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n \n@@ -66,7 +67,20 @@\n ) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n \n- Returns an XComArg object.\n+ Returns an XComArg object of type File which matches the output_file parameter.\n+\n+ This will allow users to perform further actions with the exported file.\n+\n+ e.g.\n+\n+ with sample_dag:\n+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n+ exported_file = aql.export_file(\n+ input_data=table,\n+ output_file=File(path=\"/tmp/saved_df.csv\"),\n+ if_exists=\"replace\",\n+ )\n+ res_df = aql.load_file(input_file=exported_file)\n \n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n", "issue": "Change `export_file` to return `File` object\n**Context**\r\n\r\nIn order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.\r\n\r\n* Astro-SDK version: 0.9..1\r\n* Request by: @jlaneve\r\n* Analysed by @dimberman \r\n\r\n**Problem**\r\n\r\nAt the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:\r\n\r\n```\r\n output_file = File(path=\"/tmp/saved_df.csv\")\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n export = aql.export_file(\r\n input_data=table,\r\n output_file=output_file,\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=output_file)\r\n export >> res_df\r\n```\r\n\r\n**Desired behaviour**\r\n\r\n```\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n exported_file = aql.export_file(\r\n input_data=table,\r\n output_file=File(path=\"/tmp/saved_df.csv\"),\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=exported_file)\r\n```\r\n\r\n**Acceptance criteria**\r\n* Change `export_file` so it returns the `File` instance, as opposed to `None`\r\n\r\nSince there is no documentation about this task, we don't need to update the documentation for it. 
To create documentation for this feature should be part of another issue.\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> None:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object.\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}]} | 1,695 | 396 |
gh_patches_debug_39269 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1019 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_file_stream support
node-telegram-bot-api has added a `getFileStream` method, which allows accessing the file stream without downloading it to a folder. It'll be useful. For example, changing a group photo from a photo uploaded by users.
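For illustration only, here is a rough sketch of the group-photo use case handled fully in memory, building on the `out=` parameter that `File.download` (listed in the code section below) already accepts. The `get_file` and `set_chat_photo` calls and the `message` handling are simplified assumptions, not a proposed API:
```python
import io


def set_group_photo_from_message(bot, message):
    """Hypothetical helper: reuse an uploaded photo as the chat photo
    without writing it to a temporary file."""
    photo_size = message.photo[-1]  # the last entry is usually the largest
    tg_file = bot.get_file(photo_size.file_id)

    buf = io.BytesIO()
    tg_file.download(out=buf)  # File.download writes the raw bytes into buf
    buf.seek(0)

    bot.set_chat_photo(chat_id=message.chat_id, photo=buf)
```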
</issue>
<code>
[start of telegram/files/file.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains an object that represents a Telegram File."""
20 from os.path import basename
21
22 from future.backports.urllib import parse as urllib_parse
23
24 from telegram import TelegramObject
25
26
27 class File(TelegramObject):
28 """
29 This object represents a file ready to be downloaded. The file can be downloaded with
30 :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the
31 link expires, a new one can be requested by calling getFile.
32
33 Note:
34 Maximum file size to download is 20 MB
35
36 Attributes:
37 file_id (:obj:`str`): Unique identifier for this file.
38 file_size (:obj:`str`): Optional. File size.
39 file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.
40
41 Args:
42 file_id (:obj:`str`): Unique identifier for this file.
43 file_size (:obj:`int`, optional): Optional. File size, if known.
44 file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.
45 bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.
46 **kwargs (:obj:`dict`): Arbitrary keyword arguments.
47
48 """
49
50 def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):
51 # Required
52 self.file_id = str(file_id)
53
54 # Optionals
55 self.file_size = file_size
56 self.file_path = file_path
57
58 self.bot = bot
59
60 self._id_attrs = (self.file_id,)
61
62 @classmethod
63 def de_json(cls, data, bot):
64 if not data:
65 return None
66
67 return cls(bot=bot, **data)
68
69 def download(self, custom_path=None, out=None, timeout=None):
70 """
71 Download this file. By default, the file is saved in the current working directory with its
72 original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
73 saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
74 that object using the ``out.write`` method.
75
76 Note:
77 `custom_path` and `out` are mutually exclusive.
78
79 Args:
80 custom_path (:obj:`str`, optional): Custom path.
81 out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
82 applicable.
83 timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
84 the read timeout from the server (instead of the one specified during creation of
85 the connection pool).
86
87 Raises:
88 ValueError: If both ``custom_path`` and ``out`` are passed.
89
90 """
91 if custom_path is not None and out is not None:
92 raise ValueError('custom_path and out are mutually exclusive')
93
94 # Convert any UTF-8 char into a url encoded ASCII string.
95 sres = urllib_parse.urlsplit(self.file_path)
96 url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
97 sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
98
99 if out:
100 buf = self.bot.request.retrieve(url)
101 out.write(buf)
102
103 else:
104 if custom_path:
105 filename = custom_path
106 else:
107 filename = basename(self.file_path)
108
109 self.bot.request.download(url, filename, timeout=timeout)
110
[end of telegram/files/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/files/file.py b/telegram/files/file.py
--- a/telegram/files/file.py
+++ b/telegram/files/file.py
@@ -74,32 +74,34 @@
that object using the ``out.write`` method.
Note:
- `custom_path` and `out` are mutually exclusive.
+ :attr:`custom_path` and :attr:`out` are mutually exclusive.
Args:
custom_path (:obj:`str`, optional): Custom path.
- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
- applicable.
+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for
+ writing in binary mode, if applicable.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
+ Returns:
+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.
+ Otherwise, returns the filename downloaded to.
+
Raises:
- ValueError: If both ``custom_path`` and ``out`` are passed.
+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.
"""
if custom_path is not None and out is not None:
raise ValueError('custom_path and out are mutually exclusive')
# Convert any UTF-8 char into a url encoded ASCII string.
- sres = urllib_parse.urlsplit(self.file_path)
- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+ url = self._get_encoded_url()
if out:
buf = self.bot.request.retrieve(url)
out.write(buf)
-
+ return out
else:
if custom_path:
filename = custom_path
@@ -107,3 +109,27 @@
filename = basename(self.file_path)
self.bot.request.download(url, filename, timeout=timeout)
+ return filename
+
+ def _get_encoded_url(self):
+ """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
+ sres = urllib_parse.urlsplit(self.file_path)
+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(
+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+
+ def download_as_bytearray(self, buf=None):
+ """Download this file and return it as a bytearray.
+
+ Args:
+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.
+
+ Returns:
+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly
+ allocated :obj:`bytearray`.
+
+ """
+ if buf is None:
+ buf = bytearray()
+
+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))
+ return buf
| {"golden_diff": "diff --git a/telegram/files/file.py b/telegram/files/file.py\n--- a/telegram/files/file.py\n+++ b/telegram/files/file.py\n@@ -74,32 +74,34 @@\n that object using the ``out.write`` method.\n \n Note:\n- `custom_path` and `out` are mutually exclusive.\n+ :attr:`custom_path` and :attr:`out` are mutually exclusive.\n \n Args:\n custom_path (:obj:`str`, optional): Custom path.\n- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if\n- applicable.\n+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for\n+ writing in binary mode, if applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n \n+ Returns:\n+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.\n+ Otherwise, returns the filename downloaded to.\n+\n Raises:\n- ValueError: If both ``custom_path`` and ``out`` are passed.\n+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.\n \n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n \n # Convert any UTF-8 char into a url encoded ASCII string.\n- sres = urllib_parse.urlsplit(self.file_path)\n- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+ url = self._get_encoded_url()\n \n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n-\n+ return out\n else:\n if custom_path:\n filename = custom_path\n@@ -107,3 +109,27 @@\n filename = basename(self.file_path)\n \n self.bot.request.download(url, filename, timeout=timeout)\n+ return filename\n+\n+ def _get_encoded_url(self):\n+ \"\"\"Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string.\"\"\"\n+ sres = urllib_parse.urlsplit(self.file_path)\n+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(\n+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+\n+ def download_as_bytearray(self, buf=None):\n+ \"\"\"Download this file and return it as a bytearray.\n+\n+ Args:\n+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.\n+\n+ Returns:\n+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly\n+ allocated :obj:`bytearray`.\n+\n+ \"\"\"\n+ if buf is None:\n+ buf = bytearray()\n+\n+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))\n+ return buf\n", "issue": "get_file_stream support\nnode-telegram-bot-api has added `getFileStream` method , which allows accessing the file stream without downloading it to a folder. It'll be useful.For example, changing a group photo from a photo uploaded by users.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram File.\"\"\"\nfrom os.path import basename\n\nfrom future.backports.urllib import parse as urllib_parse\n\nfrom telegram import TelegramObject\n\n\nclass File(TelegramObject):\n \"\"\"\n This object represents a file ready to be downloaded. The file can be downloaded with\n :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the\n link expires, a new one can be requested by calling getFile.\n\n Note:\n Maximum file size to download is 20 MB\n\n Attributes:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`str`): Optional. File size.\n file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.\n\n Args:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`int`, optional): Optional. File size, if known.\n file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.\n bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n \"\"\"\n\n def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):\n # Required\n self.file_id = str(file_id)\n\n # Optionals\n self.file_size = file_size\n self.file_path = file_path\n\n self.bot = bot\n\n self._id_attrs = (self.file_id,)\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n return cls(bot=bot, **data)\n\n def download(self, custom_path=None, out=None, timeout=None):\n \"\"\"\n Download this file. By default, the file is saved in the current working directory with its\n original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be\n saved to that path instead. If :attr:`out` is defined, the file contents will be saved to\n that object using the ``out.write`` method.\n\n Note:\n `custom_path` and `out` are mutually exclusive.\n\n Args:\n custom_path (:obj:`str`, optional): Custom path.\n out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if\n applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n\n Raises:\n ValueError: If both ``custom_path`` and ``out`` are passed.\n\n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n\n # Convert any UTF-8 char into a url encoded ASCII string.\n sres = urllib_parse.urlsplit(self.file_path)\n url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n\n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n\n else:\n if custom_path:\n filename = custom_path\n else:\n filename = basename(self.file_path)\n\n self.bot.request.download(url, filename, timeout=timeout)\n", "path": "telegram/files/file.py"}]} | 1,767 | 706 |
gh_patches_debug_32392 | rasdani/github-patches | git_diff | Qiskit__qiskit-2387 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`set_qiskit_logger()` is hard to use
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
A utility method [`set_qiskit_logger()`](https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.
It has room for improvement, namely:
- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file or module to log (see the sketch after this list).
- One cannot customize the LOG_LEVEL.
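A minimal sketch of the kind of control being asked for, using only the standard `logging` module; the `qiskit.transpiler` logger name and the `DEBUG` level are illustrative choices, not a proposed Qiskit API:
```python
import logging

# Same human-readable format as QISKIT_LOGGING_CONFIG below, but attached to a
# single subpackage and at a caller-chosen level instead of a global INFO.
handler = logging.StreamHandler()
handler.setFormatter(
    logging.Formatter("%(asctime)s:%(name)s:%(levelname)s: %(message)s")
)

transpiler_logger = logging.getLogger("qiskit.transpiler")
transpiler_logger.addHandler(handler)
transpiler_logger.setLevel(logging.DEBUG)
```
Swapping `StreamHandler` for `FileHandler` would cover the "log to a certain file" part of the request.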
</issue>
<code>
[start of qiskit/tools/logging.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Utilities for logging."""
16
17 import logging
18 from logging.config import dictConfig
19
20
21 class SimpleInfoFormatter(logging.Formatter):
22 """Custom Formatter that uses a simple format for INFO."""
23 _style_info = logging._STYLES['%'][0]('%(message)s')
24
25 def formatMessage(self, record):
26 if record.levelno == logging.INFO:
27 return self._style_info.format(record)
28 return logging.Formatter.formatMessage(self, record)
29
30
31 QISKIT_LOGGING_CONFIG = {
32 'version': 1,
33 'disable_existing_loggers': False,
34 'formatters': {
35 'f': {
36 '()': SimpleInfoFormatter,
37 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
38 },
39 },
40 'handlers': {
41 'h': {
42 'class': 'logging.StreamHandler',
43 'formatter': 'f'
44 }
45 },
46 'loggers': {
47 'qiskit': {
48 'handlers': ['h'],
49 'level': logging.INFO,
50 },
51 }
52 }
53
54
55 def set_qiskit_logger():
56 """Update 'qiskit' logger configuration using a SDK default one.
57
58 Update the configuration of the 'qiskit' logger using the default SDK
59 configuration provided by `QISKIT_LOGGING_CONFIG`:
60
61 * console logging using a custom format for levels != INFO.
62 * console logging with simple format for level INFO.
63 * set logger level to INFO.
64
65 Warning:
66 This function modifies the configuration of the standard logging system
67 for the 'qiskit.*' loggers, and might interfere with custom logger
68 configurations.
69 """
70 dictConfig(QISKIT_LOGGING_CONFIG)
71
72
73 def unset_qiskit_logger():
74 """Remove the handlers for the 'qiskit' logger."""
75 qiskit_logger = logging.getLogger('qiskit')
76 for handler in qiskit_logger.handlers:
77 qiskit_logger.removeHandler(handler)
78
[end of qiskit/tools/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py
deleted file mode 100644
--- a/qiskit/tools/logging.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Qiskit.
-#
-# (C) Copyright IBM 2017.
-#
-# This code is licensed under the Apache License, Version 2.0. You may
-# obtain a copy of this license in the LICENSE.txt file in the root directory
-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
-#
-# Any modifications or derivative works of this code must retain this
-# copyright notice, and modified files need to carry a notice indicating
-# that they have been altered from the originals.
-
-"""Utilities for logging."""
-
-import logging
-from logging.config import dictConfig
-
-
-class SimpleInfoFormatter(logging.Formatter):
- """Custom Formatter that uses a simple format for INFO."""
- _style_info = logging._STYLES['%'][0]('%(message)s')
-
- def formatMessage(self, record):
- if record.levelno == logging.INFO:
- return self._style_info.format(record)
- return logging.Formatter.formatMessage(self, record)
-
-
-QISKIT_LOGGING_CONFIG = {
- 'version': 1,
- 'disable_existing_loggers': False,
- 'formatters': {
- 'f': {
- '()': SimpleInfoFormatter,
- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
- },
- },
- 'handlers': {
- 'h': {
- 'class': 'logging.StreamHandler',
- 'formatter': 'f'
- }
- },
- 'loggers': {
- 'qiskit': {
- 'handlers': ['h'],
- 'level': logging.INFO,
- },
- }
-}
-
-
-def set_qiskit_logger():
- """Update 'qiskit' logger configuration using a SDK default one.
-
- Update the configuration of the 'qiskit' logger using the default SDK
- configuration provided by `QISKIT_LOGGING_CONFIG`:
-
- * console logging using a custom format for levels != INFO.
- * console logging with simple format for level INFO.
- * set logger level to INFO.
-
- Warning:
- This function modifies the configuration of the standard logging system
- for the 'qiskit.*' loggers, and might interfere with custom logger
- configurations.
- """
- dictConfig(QISKIT_LOGGING_CONFIG)
-
-
-def unset_qiskit_logger():
- """Remove the handlers for the 'qiskit' logger."""
- qiskit_logger = logging.getLogger('qiskit')
- for handler in qiskit_logger.handlers:
- qiskit_logger.removeHandler(handler)
| {"golden_diff": "diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py\ndeleted file mode 100644\n--- a/qiskit/tools/logging.py\n+++ /dev/null\n@@ -1,77 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\n-# This code is part of Qiskit.\n-#\n-# (C) Copyright IBM 2017.\n-#\n-# This code is licensed under the Apache License, Version 2.0. You may\n-# obtain a copy of this license in the LICENSE.txt file in the root directory\n-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n-#\n-# Any modifications or derivative works of this code must retain this\n-# copyright notice, and modified files need to carry a notice indicating\n-# that they have been altered from the originals.\n-\n-\"\"\"Utilities for logging.\"\"\"\n-\n-import logging\n-from logging.config import dictConfig\n-\n-\n-class SimpleInfoFormatter(logging.Formatter):\n- \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n- _style_info = logging._STYLES['%'][0]('%(message)s')\n-\n- def formatMessage(self, record):\n- if record.levelno == logging.INFO:\n- return self._style_info.format(record)\n- return logging.Formatter.formatMessage(self, record)\n-\n-\n-QISKIT_LOGGING_CONFIG = {\n- 'version': 1,\n- 'disable_existing_loggers': False,\n- 'formatters': {\n- 'f': {\n- '()': SimpleInfoFormatter,\n- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n- },\n- },\n- 'handlers': {\n- 'h': {\n- 'class': 'logging.StreamHandler',\n- 'formatter': 'f'\n- }\n- },\n- 'loggers': {\n- 'qiskit': {\n- 'handlers': ['h'],\n- 'level': logging.INFO,\n- },\n- }\n-}\n-\n-\n-def set_qiskit_logger():\n- \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n-\n- Update the configuration of the 'qiskit' logger using the default SDK\n- configuration provided by `QISKIT_LOGGING_CONFIG`:\n-\n- * console logging using a custom format for levels != INFO.\n- * console logging with simple format for level INFO.\n- * set logger level to INFO.\n-\n- Warning:\n- This function modifies the configuration of the standard logging system\n- for the 'qiskit.*' loggers, and might interfere with custom logger\n- configurations.\n- \"\"\"\n- dictConfig(QISKIT_LOGGING_CONFIG)\n-\n-\n-def unset_qiskit_logger():\n- \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n- qiskit_logger = logging.getLogger('qiskit')\n- for handler in qiskit_logger.handlers:\n- qiskit_logger.removeHandler(handler)\n", "issue": "`set_qiskit_logger()` is hard to use\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nA utility method `[set_qiskit_logger()]`(https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.\r\n\r\nIt has room for improvement, namely:\r\n- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file to or module to log.\r\n- One cannot customize the LOG_LEVEL.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utilities for logging.\"\"\"\n\nimport logging\nfrom logging.config import dictConfig\n\n\nclass SimpleInfoFormatter(logging.Formatter):\n \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n _style_info = logging._STYLES['%'][0]('%(message)s')\n\n def formatMessage(self, record):\n if record.levelno == logging.INFO:\n return self._style_info.format(record)\n return logging.Formatter.formatMessage(self, record)\n\n\nQISKIT_LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'f': {\n '()': SimpleInfoFormatter,\n 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n },\n },\n 'handlers': {\n 'h': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'f'\n }\n },\n 'loggers': {\n 'qiskit': {\n 'handlers': ['h'],\n 'level': logging.INFO,\n },\n }\n}\n\n\ndef set_qiskit_logger():\n \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n\n Update the configuration of the 'qiskit' logger using the default SDK\n configuration provided by `QISKIT_LOGGING_CONFIG`:\n\n * console logging using a custom format for levels != INFO.\n * console logging with simple format for level INFO.\n * set logger level to INFO.\n\n Warning:\n This function modifies the configuration of the standard logging system\n for the 'qiskit.*' loggers, and might interfere with custom logger\n configurations.\n \"\"\"\n dictConfig(QISKIT_LOGGING_CONFIG)\n\n\ndef unset_qiskit_logger():\n \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n qiskit_logger = logging.getLogger('qiskit')\n for handler in qiskit_logger.handlers:\n qiskit_logger.removeHandler(handler)\n", "path": "qiskit/tools/logging.py"}]} | 1,420 | 657 |
gh_patches_debug_6305 | rasdani/github-patches | git_diff | scipy__scipy-5920 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"master" fails to build with MKL
Recently, when I try to build the "master" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:
```python
ValueError: list.remove(x): x not in list
```
The error traces to [this block of code](https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32), for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.
I don't know what happened recently that caused this setup to break (it has never happened until now), but is anyone else getting this issue?
Setup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL
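Not necessarily the right long-term fix (whether the MSVC+MKL special case should be kept at all is a separate question), but for reference, a defensive variant of the linked block avoids the `ValueError` by checking membership first. It reuses the names from the `setup.py` listed below:
```python
# Sketch only: skip the removal when lsame.c was not globbed in the first place.
lsame_c = join(superlu_src, 'lsame.c')
if (os.name == 'nt'
        and ('FPATH' in os.environ or 'MKLROOT' in os.environ)
        and lsame_c in sources):
    # when using MSVC + MKL, lsame is already in MKL
    sources.remove(lsame_c)
```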
</issue>
<code>
[start of scipy/sparse/linalg/dsolve/setup.py]
1 #!/usr/bin/env python
2 from __future__ import division, print_function, absolute_import
3
4 from os.path import join, dirname
5 import sys
6 import os
7 import glob
8
9
10 def configuration(parent_package='',top_path=None):
11 from numpy.distutils.misc_util import Configuration
12 from numpy.distutils.system_info import get_info
13 from scipy._build_utils import get_sgemv_fix
14 from scipy._build_utils import numpy_nodepr_api
15
16 config = Configuration('dsolve',parent_package,top_path)
17 config.add_data_dir('tests')
18
19 lapack_opt = get_info('lapack_opt',notfound_action=2)
20 if sys.platform == 'win32':
21 superlu_defs = [('NO_TIMER',1)]
22 else:
23 superlu_defs = []
24 superlu_defs.append(('USE_VENDOR_BLAS',1))
25
26 superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
27
28 sources = list(glob.glob(join(superlu_src, '*.c')))
29 headers = list(glob.glob(join(superlu_src, '*.h')))
30 if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
31 # when using MSVC + MKL, lsame is already in MKL
32 sources.remove(join(superlu_src, 'lsame.c'))
33
34 config.add_library('superlu_src',
35 sources=sources,
36 macros=superlu_defs,
37 include_dirs=[superlu_src],
38 )
39
40 # Extension
41 ext_sources = ['_superlumodule.c',
42 '_superlu_utils.c',
43 '_superluobject.c']
44 ext_sources += get_sgemv_fix(lapack_opt)
45
46 config.add_extension('_superlu',
47 sources=ext_sources,
48 libraries=['superlu_src'],
49 depends=(sources + headers),
50 extra_info=lapack_opt,
51 **numpy_nodepr_api
52 )
53
54 return config
55
56 if __name__ == '__main__':
57 from numpy.distutils.core import setup
58 setup(**configuration(top_path='').todict())
59
[end of scipy/sparse/linalg/dsolve/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py
--- a/scipy/sparse/linalg/dsolve/setup.py
+++ b/scipy/sparse/linalg/dsolve/setup.py
@@ -27,9 +27,6 @@
sources = list(glob.glob(join(superlu_src, '*.c')))
headers = list(glob.glob(join(superlu_src, '*.h')))
- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
- # when using MSVC + MKL, lsame is already in MKL
- sources.remove(join(superlu_src, 'lsame.c'))
config.add_library('superlu_src',
sources=sources,
| {"golden_diff": "diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py\n--- a/scipy/sparse/linalg/dsolve/setup.py\n+++ b/scipy/sparse/linalg/dsolve/setup.py\n@@ -27,9 +27,6 @@\n \n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n- # when using MSVC + MKL, lsame is already in MKL\n- sources.remove(join(superlu_src, 'lsame.c'))\n \n config.add_library('superlu_src',\n sources=sources,\n", "issue": "\"master\" fails to build with MKL\nRecently, when I try to build the \"master\" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:\n\n``` .Python\nValueError: list.remove(x): x not in list\n```\n\nThe error traces to this block of code <a href=\"https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32\">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.\n\nI don't know what happened recently that caused this setup to break (it has never happened before until now), but is anyone else getting this issue?\n\nSetup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\nimport sys\nimport os\nimport glob\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n from scipy._build_utils import get_sgemv_fix\n from scipy._build_utils import numpy_nodepr_api\n\n config = Configuration('dsolve',parent_package,top_path)\n config.add_data_dir('tests')\n\n lapack_opt = get_info('lapack_opt',notfound_action=2)\n if sys.platform == 'win32':\n superlu_defs = [('NO_TIMER',1)]\n else:\n superlu_defs = []\n superlu_defs.append(('USE_VENDOR_BLAS',1))\n\n superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')\n\n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n # when using MSVC + MKL, lsame is already in MKL\n sources.remove(join(superlu_src, 'lsame.c'))\n\n config.add_library('superlu_src',\n sources=sources,\n macros=superlu_defs,\n include_dirs=[superlu_src],\n )\n\n # Extension\n ext_sources = ['_superlumodule.c',\n '_superlu_utils.c',\n '_superluobject.c']\n ext_sources += get_sgemv_fix(lapack_opt)\n\n config.add_extension('_superlu',\n sources=ext_sources,\n libraries=['superlu_src'],\n depends=(sources + headers),\n extra_info=lapack_opt,\n **numpy_nodepr_api\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/sparse/linalg/dsolve/setup.py"}]} | 1,295 | 171 |
gh_patches_debug_624 | rasdani/github-patches | git_diff | codespell-project__codespell-89 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Makefile is broken
The makefile is no longer working, since there is no longer a codespell.py
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2
3 # adapted from mne-python
4
5 import os
6 from os import path as op
7
8 try:
9 import setuptools # noqa to allow --develop
10 except Exception:
11 pass
12 from distutils.core import setup
13
14 from codespell_lib import __version__
15
16 DISTNAME = 'codespell'
17 DESCRIPTION = """Codespell"""
18 MAINTAINER = 'Lucas De Marchi'
19 MAINTAINER_EMAIL = '[email protected]'
20 URL = 'https://github.com/lucasdemarchi/codespell/'
21 LICENSE = 'GPL v2'
22 DOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'
23 with open('README.rst', 'r') as f:
24 LONG_DESCRIPTION = f.read()
25
26 if __name__ == "__main__":
27 if os.path.exists('MANIFEST'):
28 os.remove('MANIFEST')
29
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 include_package_data=True,
33 maintainer_email=MAINTAINER_EMAIL,
34 description=DESCRIPTION,
35 license=LICENSE,
36 url=URL,
37 version=__version__,
38 download_url=DOWNLOAD_URL,
39 long_description=LONG_DESCRIPTION,
40 zip_safe=False,
41 classifiers=['Intended Audience :: Developers',
42 'License :: OSI Approved',
43 'Programming Language :: Python',
44 'Topic :: Software Development',
45 'Operating System :: Microsoft :: Windows',
46 'Operating System :: POSIX',
47 'Operating System :: Unix',
48 'Operating System :: MacOS'],
49 platforms='any',
50 packages=[
51 'codespell_lib', 'codespell_lib.tests',
52 'codespell_lib.data',
53 ],
54 package_data={'codespell_lib': [
55 op.join('data', 'dictionary.txt'),
56 op.join('data', 'linux-kernel.exclude'),
57 ]},
58 scripts=['bin/codespell.py'])
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,4 +55,4 @@
op.join('data', 'dictionary.txt'),
op.join('data', 'linux-kernel.exclude'),
]},
- scripts=['bin/codespell.py'])
+ scripts=['bin/codespell'])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,4 +55,4 @@\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n- scripts=['bin/codespell.py'])\n+ scripts=['bin/codespell'])\n", "issue": "Makefile is broken\nThe makefile is no longer working, since there is no longer a codespell.py\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\n# adapted from mne-python\n\nimport os\nfrom os import path as op\n\ntry:\n import setuptools # noqa to allow --develop\nexcept Exception:\n pass\nfrom distutils.core import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/lucasdemarchi/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n packages=[\n 'codespell_lib', 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n scripts=['bin/codespell.py'])\n", "path": "setup.py"}]} | 1,053 | 77 |
gh_patches_debug_28536 | rasdani/github-patches | git_diff | systemd__mkosi-499 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include man page in Python package
When building the man page with
```
pandoc -t man -o mkosi.1 mkosi.md
```
the output looks rather unpleasing:
```
mkosi ‐ Build Legacy‐Free OS Images mkosi [options...] build
mkosi [options...] clean mkosi [options...] summary mkosi [op‐
tions...] shell [command line...] mkosi [options...] boot [com‐
mand line...] mkosi [options...] qemu mkosi is a tool for easily
building legacy‐free OS images. It’s a fancy wrapper around dnf
‐‐installroot, debootstrap, pacstrap and zypper that may generate
disk images with a number of bells and whistles. The following
output formats are supported: Raw GPT disk image, with ext4 as
[...]
```
We should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.
</issue>
<code>
[start of mkosi/__main__.py]
1 # SPDX-License-Identifier: LGPL-2.1+
2 # PYTHON_ARGCOMPLETE_OK
3 import os
4 import sys
5
6 from . import parse_args, complete_step, run_verb, die, MkosiException
7
8
9 try:
10 args = parse_args()
11
12 for job_name, a in args.items():
13 # Change working directory if --directory is passed
14 if a.directory:
15 work_dir = a.directory
16 if os.path.isdir(work_dir):
17 os.chdir(work_dir)
18 else:
19 die(f"Error: {work_dir} is not a directory!")
20 with complete_step(f"Processing {job_name}"):
21 run_verb(a)
22 except MkosiException:
23 sys.exit(1)
24
[end of mkosi/__main__.py]
[start of setup.py]
1 #!/usr/bin/python3
2 # SPDX-License-Identifier: LGPL-2.1+
3
4 from setuptools import setup, Command
5
6 class BuildManpage(Command):
7 description = ('builds the manpage')
8 user_options = []
9
10 def initialize_options(self):
11 pass
12 def finalize_options(self):
13 pass
14
15 def run(self):
16 self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
17
18
19 setup(
20 name="mkosi",
21 version="5",
22 description="Create legacy-free OS images",
23 url="https://github.com/systemd/mkosi",
24 maintainer="mkosi contributors",
25 maintainer_email="[email protected]",
26 license="LGPLv2+",
27 python_requires=">=3.6",
28 packages = ["mkosi"],
29 cmdclass = { "man": BuildManpage },
30 entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
31 )
32
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkosi/__main__.py b/mkosi/__main__.py
--- a/mkosi/__main__.py
+++ b/mkosi/__main__.py
@@ -6,18 +6,23 @@
from . import parse_args, complete_step, run_verb, die, MkosiException
-try:
- args = parse_args()
+def main() -> None:
+ try:
+ args = parse_args()
- for job_name, a in args.items():
- # Change working directory if --directory is passed
- if a.directory:
- work_dir = a.directory
- if os.path.isdir(work_dir):
- os.chdir(work_dir)
- else:
- die(f"Error: {work_dir} is not a directory!")
- with complete_step(f"Processing {job_name}"):
- run_verb(a)
-except MkosiException:
- sys.exit(1)
+ for job_name, a in args.items():
+ # Change working directory if --directory is passed
+ if a.directory:
+ work_dir = a.directory
+ if os.path.isdir(work_dir):
+ os.chdir(work_dir)
+ else:
+ die(f"Error: {work_dir} is not a directory!")
+ with complete_step(f"Processing {job_name}"):
+ run_verb(a)
+ except MkosiException:
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
pass
def run(self):
- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])
setup(
@@ -27,5 +27,6 @@
python_requires=">=3.6",
packages = ["mkosi"],
cmdclass = { "man": BuildManpage },
- entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
+ data_files = [('share/man/man1', ["man/mkosi.1"])],
+ entry_points = {"console_scripts": ["mkosi=mkosi.__main__:main"]},
)
| {"golden_diff": "diff --git a/mkosi/__main__.py b/mkosi/__main__.py\n--- a/mkosi/__main__.py\n+++ b/mkosi/__main__.py\n@@ -6,18 +6,23 @@\n from . import parse_args, complete_step, run_verb, die, MkosiException\n \n \n-try:\n- args = parse_args()\n+def main() -> None:\n+ try:\n+ args = parse_args()\n \n- for job_name, a in args.items():\n- # Change working directory if --directory is passed\n- if a.directory:\n- work_dir = a.directory\n- if os.path.isdir(work_dir):\n- os.chdir(work_dir)\n- else:\n- die(f\"Error: {work_dir} is not a directory!\")\n- with complete_step(f\"Processing {job_name}\"):\n- run_verb(a)\n-except MkosiException:\n- sys.exit(1)\n+ for job_name, a in args.items():\n+ # Change working directory if --directory is passed\n+ if a.directory:\n+ work_dir = a.directory\n+ if os.path.isdir(work_dir):\n+ os.chdir(work_dir)\n+ else:\n+ die(f\"Error: {work_dir} is not a directory!\")\n+ with complete_step(f\"Processing {job_name}\"):\n+ run_verb(a)\n+ except MkosiException:\n+ sys.exit(1)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,7 +13,7 @@\n pass\n \n def run(self):\n- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])\n \n \n setup(\n@@ -27,5 +27,6 @@\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n- entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n+ data_files = [('share/man/man1', [\"man/mkosi.1\"])],\n+ entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__:main\"]},\n )\n", "issue": "Include man page in Python package\nWhen building the man page with\r\n```\r\npandoc -t man -o mkosi.1 mkosi.md\r\n```\r\nthe output looks rather unpleasing:\r\n```\r\nmkosi \u2010 Build Legacy\u2010Free OS Images mkosi [options...] build\r\nmkosi [options...] clean mkosi [options...] summary mkosi [op\u2010\r\ntions...] shell [command line...] mkosi [options...] boot [com\u2010\r\nmand line...] mkosi [options...] qemu mkosi is a tool for easily\r\nbuilding legacy\u2010free OS images. It\u2019s a fancy wrapper around dnf\r\n\u2010\u2010installroot, debootstrap, pacstrap and zypper that may generate\r\ndisk images with a number of bells and whistles. The following\r\noutput formats are supported: Raw GPT disk image, with ext4 as\r\n[...]\r\n```\r\n\r\nWe should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\nimport os\nimport sys\n\nfrom . 
import parse_args, complete_step, run_verb, die, MkosiException\n\n\ntry:\n args = parse_args()\n\n for job_name, a in args.items():\n # Change working directory if --directory is passed\n if a.directory:\n work_dir = a.directory\n if os.path.isdir(work_dir):\n os.chdir(work_dir)\n else:\n die(f\"Error: {work_dir} is not a directory!\")\n with complete_step(f\"Processing {job_name}\"):\n run_verb(a)\nexcept MkosiException:\n sys.exit(1)\n", "path": "mkosi/__main__.py"}, {"content": "#!/usr/bin/python3\n# SPDX-License-Identifier: LGPL-2.1+\n\nfrom setuptools import setup, Command\n\nclass BuildManpage(Command):\n description = ('builds the manpage')\n user_options = []\n\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n\n def run(self):\n self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n\n\nsetup(\n name=\"mkosi\",\n version=\"5\",\n description=\"Create legacy-free OS images\",\n url=\"https://github.com/systemd/mkosi\",\n maintainer=\"mkosi contributors\",\n maintainer_email=\"[email protected]\",\n license=\"LGPLv2+\",\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n)\n", "path": "setup.py"}]} | 1,248 | 541 |
gh_patches_debug_419 | rasdani/github-patches | git_diff | bokeh__bokeh-8651 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
grid_axis_alignment_no_toolbar is broken in Windows (32-bit)
`examples/integration/layout/grid_axis_alignment_no_toolbar`:

All axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.
</issue>
<code>
[start of examples/integration/layout/grid_axis_alignment_no_toolbar.py]
1 from __future__ import absolute_import
2
3 import numpy as np
4
5 from bokeh.plotting import figure, save
6 from bokeh.layouts import gridplot
7
8 coeffs = [10**0, 10**3, 10**6, 10**9]
9 V = np.arange(10)
10
11 figs = []
12
13 for ycoeff in coeffs:
14 row = []
15 for xcoeff in coeffs:
16 fig = figure(plot_height=200, plot_width=200)
17 fig.xaxis[0].formatter.use_scientific = False
18 fig.yaxis[0].formatter.use_scientific = False
19 fig.xaxis[0].major_label_orientation = "vertical"
20 fig.yaxis[0].major_label_orientation = "horizontal"
21 fig.scatter(V*xcoeff, V*ycoeff)
22 row.append(fig)
23 figs.append(row)
24
25 grid = gridplot(figs, toolbar_location=None)
26
27 save(grid)
28
[end of examples/integration/layout/grid_axis_alignment_no_toolbar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py
+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
@@ -6,7 +6,7 @@
from bokeh.layouts import gridplot
coeffs = [10**0, 10**3, 10**6, 10**9]
-V = np.arange(10)
+V = np.arange(10, dtype="int64")
figs = []
| {"golden_diff": "diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n@@ -6,7 +6,7 @@\n from bokeh.layouts import gridplot\n \n coeffs = [10**0, 10**3, 10**6, 10**9]\n-V = np.arange(10)\n+V = np.arange(10, dtype=\"int64\")\n \n figs = []\n", "issue": "grid_axis_alignment_no_toolbar is broken in Windows (32-bit)\n`examples/integration/layout/grid_axis_alignment_no_toolbar`:\r\n\r\n\r\n\r\nAll axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10)\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n", "path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"}]} | 956 | 125 |
gh_patches_debug_17856 | rasdani/github-patches | git_diff | mdn__kuma-5636 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary SQL select for react home landing page
The React landing page view extracts the default filters every time, but the result is never used.
<img width="1502" alt="Screen Shot 2019-08-13 at 2 44 39 PM" src="https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png">
</issue>
<code>
[start of kuma/landing/views.py]
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.http import HttpResponse
5 from django.shortcuts import redirect, render
6 from django.views import static
7 from django.views.decorators.cache import never_cache
8 from django.views.generic import RedirectView
9
10 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
11 from kuma.core.utils import is_wiki
12 from kuma.feeder.models import Bundle
13 from kuma.feeder.sections import SECTION_HACKS
14 from kuma.search.models import Filter
15
16 from .utils import favicon_url
17
18
19 @shared_cache_control
20 def contribute_json(request):
21 return static.serve(request, 'contribute.json', document_root=settings.ROOT)
22
23
24 @shared_cache_control
25 def home(request):
26 """Home page."""
27 if is_wiki(request):
28 return render_home(request, 'landing/homepage.html')
29 return render_home(request, 'landing/react_homepage.html')
30
31
32 def render_home(request, template_name):
33 """Render the home page with the template named "template_name"."""
34 updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
35 default_filters = Filter.objects.default_filters()
36 context = {
37 'updates': updates,
38 'default_filters': default_filters,
39 }
40 return render(request, template_name, context)
41
42
43 @ensure_wiki_domain
44 @never_cache
45 def maintenance_mode(request):
46 if settings.MAINTENANCE_MODE:
47 return render(request, 'landing/maintenance-mode.html')
48 else:
49 return redirect('home')
50
51
52 @ensure_wiki_domain
53 @shared_cache_control
54 def promote_buttons(request):
55 """Bug 646192: MDN affiliate buttons"""
56 return render(request, 'landing/promote_buttons.html')
57
58
59 ROBOTS_ALLOWED_TXT = '''\
60 User-agent: *
61 Sitemap: https://developer.mozilla.org/sitemap.xml
62
63 Disallow: /admin/
64 Disallow: /api/
65 Disallow: /*/dashboards/*
66 Disallow: /*docs/feeds
67 Disallow: /*docs/templates
68 Disallow: /*docs*Template:
69 Disallow: /*docs/all
70 Disallow: /*docs/tag*
71 Disallow: /*docs/needs-review*
72 Disallow: /*docs/localization-tag*
73 Disallow: /*docs/with-errors
74 Disallow: /*docs/without-parent
75 Disallow: /*docs/top-level
76 Disallow: /*docs/new
77 Disallow: /*docs/get-documents
78 Disallow: /*docs/submit_akismet_spam
79 Disallow: /*docs/load*
80 Disallow: /*docs/Experiment:*
81 Disallow: /*$api
82 Disallow: /*$compare
83 Disallow: /*$revision
84 Disallow: /*$history
85 Disallow: /*$children
86 Disallow: /*$flag
87 Disallow: /*$locales
88 Disallow: /*$toc
89 Disallow: /*$move
90 Disallow: /*$quick-review
91 Disallow: /*$samples
92 Disallow: /*$revert
93 Disallow: /*$repair_breadcrumbs
94 Disallow: /*$delete
95 Disallow: /*$restore
96 Disallow: /*$purge
97 Disallow: /*$subscribe
98 Disallow: /*$subscribe_to_tree
99 Disallow: /*$vote
100 Disallow: /*docs.json
101 Disallow: /*docs/ckeditor_config.js
102 Disallow: /*/files/
103 Disallow: /media
104 Disallow: /*move-requested
105 Disallow: /*preview-wiki-content
106 Disallow: /*profiles*/edit
107 Disallow: /skins
108 Disallow: /*type=feed
109 Disallow: /*users/
110 ''' + '\n'.join('Disallow: /{locale}/search'.format(locale=locale)
111 for locale in settings.ENABLED_LOCALES)
112
113 ROBOTS_GO_AWAY_TXT = '''\
114 User-Agent: *
115 Disallow: /
116 '''
117
118
119 @shared_cache_control
120 def robots_txt(request):
121 """Serve robots.txt that allows or forbids robots."""
122 host = request.get_host()
123 if host in settings.ALLOW_ROBOTS_DOMAINS:
124 robots = ""
125 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
126 robots = ROBOTS_ALLOWED_TXT
127 else:
128 robots = ROBOTS_GO_AWAY_TXT
129 return HttpResponse(robots, content_type='text/plain')
130
131
132 class FaviconRedirect(RedirectView):
133 """Redirect to the favicon in the static img folder (bug 1402497)"""
134
135 def get_redirect_url(self, *args, **kwargs):
136 return favicon_url()
137
[end of kuma/landing/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -24,19 +24,16 @@
@shared_cache_control
def home(request):
"""Home page."""
+ context = {}
+ # Need for both wiki and react homepage
+ context['updates'] = list(
+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
+
+ # The default template name
+ template_name = 'landing/react_homepage.html'
if is_wiki(request):
- return render_home(request, 'landing/homepage.html')
- return render_home(request, 'landing/react_homepage.html')
-
-
-def render_home(request, template_name):
- """Render the home page with the template named "template_name"."""
- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
- default_filters = Filter.objects.default_filters()
- context = {
- 'updates': updates,
- 'default_filters': default_filters,
- }
+ template_name = 'landing/homepage.html'
+ context['default_filters'] = Filter.objects.default_filters()
return render(request, template_name, context)
| {"golden_diff": "diff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -24,19 +24,16 @@\n @shared_cache_control\n def home(request):\n \"\"\"Home page.\"\"\"\n+ context = {}\n+ # Need for both wiki and react homepage\n+ context['updates'] = list(\n+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n+\n+ # The default template name\n+ template_name = 'landing/react_homepage.html'\n if is_wiki(request):\n- return render_home(request, 'landing/homepage.html')\n- return render_home(request, 'landing/react_homepage.html')\n-\n-\n-def render_home(request, template_name):\n- \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n- default_filters = Filter.objects.default_filters()\n- context = {\n- 'updates': updates,\n- 'default_filters': default_filters,\n- }\n+ template_name = 'landing/homepage.html'\n+ context['default_filters'] = Filter.objects.default_filters()\n return render(request, template_name, context)\n", "issue": "Unnecessary SQL select for react home landing page\nThe React landing page view extracts the default filters. Every time but it's never used. \r\n<img width=\"1502\" alt=\"Screen Shot 2019-08-13 at 2 44 39 PM\" src=\"https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png\">\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, 'contribute.json', document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n if is_wiki(request):\n return render_home(request, 'landing/homepage.html')\n return render_home(request, 'landing/react_homepage.html')\n\n\ndef render_home(request, template_name):\n \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n default_filters = Filter.objects.default_filters()\n context = {\n 'updates': updates,\n 'default_filters': default_filters,\n }\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, 'landing/maintenance-mode.html')\n else:\n return redirect('home')\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, 'landing/promote_buttons.html')\n\n\nROBOTS_ALLOWED_TXT = '''\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: /*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: 
/*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n''' + '\\n'.join('Disallow: /{locale}/search'.format(locale=locale)\n for locale in settings.ENABLED_LOCALES)\n\nROBOTS_GO_AWAY_TXT = '''\\\nUser-Agent: *\nDisallow: /\n'''\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type='text/plain')\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}]} | 1,923 | 276 |
gh_patches_debug_9183 | rasdani/github-patches | git_diff | PaddlePaddle__models-492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
model/text_classification: after training a model on the built-in corpus, prediction fails as follows
```text
I1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1
Traceback (most recent call last):
File "infer.py", line 91, in <module>
batch_size=10)
File "infer.py", line 58, in infer
for idx, item in enumerate(test_reader):
TypeError: 'function' object is not iterable
```
The paddle image used is the latest tag from https://hub.docker.com/r/paddlepaddle/paddle/tags/, and the models repo is the latest download.
</issue>
<code>
[start of text_classification/infer.py]
1 import sys
2 import os
3 import gzip
4
5 import paddle.v2 as paddle
6
7 import reader
8 from network_conf import fc_net, convolution_net
9 from utils import logger, load_dict, load_reverse_dict
10
11
12 def infer(topology, data_dir, model_path, word_dict_path, label_dict_path,
13 batch_size):
14 def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):
15 probs = inferer.infer(input=test_batch, field=["value"])
16 assert len(probs) == len(test_batch)
17 for word_ids, prob in zip(test_batch, probs):
18 word_text = " ".join([ids_2_word[id] for id in word_ids[0]])
19 print("%s\t%s\t%s" % (ids_2_label[prob.argmax()],
20 " ".join(["{:0.4f}".format(p)
21 for p in prob]), word_text))
22
23 logger.info("begin to predict...")
24 use_default_data = (data_dir is None)
25
26 if use_default_data:
27 word_dict = paddle.dataset.imdb.word_dict()
28 word_reverse_dict = dict((value, key)
29 for key, value in word_dict.iteritems())
30 label_reverse_dict = {0: "positive", 1: "negative"}
31 test_reader = paddle.dataset.imdb.test(word_dict)
32 else:
33 assert os.path.exists(
34 word_dict_path), "the word dictionary file does not exist"
35 assert os.path.exists(
36 label_dict_path), "the label dictionary file does not exist"
37
38 word_dict = load_dict(word_dict_path)
39 word_reverse_dict = load_reverse_dict(word_dict_path)
40 label_reverse_dict = load_reverse_dict(label_dict_path)
41
42 test_reader = reader.test_reader(data_dir, word_dict)()
43
44 dict_dim = len(word_dict)
45 class_num = len(label_reverse_dict)
46 prob_layer = topology(dict_dim, class_num, is_infer=True)
47
48 # initialize PaddlePaddle
49 paddle.init(use_gpu=False, trainer_count=1)
50
51 # load the trained models
52 parameters = paddle.parameters.Parameters.from_tar(
53 gzip.open(model_path, "r"))
54 inferer = paddle.inference.Inference(
55 output_layer=prob_layer, parameters=parameters)
56
57 test_batch = []
58 for idx, item in enumerate(test_reader):
59 test_batch.append([item[0]])
60 if len(test_batch) == batch_size:
61 _infer_a_batch(inferer, test_batch, word_reverse_dict,
62 label_reverse_dict)
63 test_batch = []
64
65 if len(test_batch):
66 _infer_a_batch(inferer, test_batch, word_reverse_dict,
67 label_reverse_dict)
68 test_batch = []
69
70
71 if __name__ == "__main__":
72 model_path = "models/dnn_params_pass_00000.tar.gz"
73 assert os.path.exists(model_path), "the trained model does not exist."
74
75 nn_type = "dnn"
76 test_dir = None
77 word_dict = None
78 label_dict = None
79
80 if nn_type == "dnn":
81 topology = fc_net
82 elif nn_type == "cnn":
83 topology = convolution_net
84
85 infer(
86 topology=topology,
87 data_dir=test_dir,
88 word_dict_path=word_dict,
89 label_dict_path=label_dict,
90 model_path=model_path,
91 batch_size=10)
92
[end of text_classification/infer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/text_classification/infer.py b/text_classification/infer.py
--- a/text_classification/infer.py
+++ b/text_classification/infer.py
@@ -28,7 +28,7 @@
word_reverse_dict = dict((value, key)
for key, value in word_dict.iteritems())
label_reverse_dict = {0: "positive", 1: "negative"}
- test_reader = paddle.dataset.imdb.test(word_dict)
+ test_reader = paddle.dataset.imdb.test(word_dict)()
else:
assert os.path.exists(
word_dict_path), "the word dictionary file does not exist"
| {"golden_diff": "diff --git a/text_classification/infer.py b/text_classification/infer.py\n--- a/text_classification/infer.py\n+++ b/text_classification/infer.py\n@@ -28,7 +28,7 @@\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n- test_reader = paddle.dataset.imdb.test(word_dict)\n+ test_reader = paddle.dataset.imdb.test(word_dict)()\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n", "issue": "model/text_classification \u4f7f\u7528\u5185\u7f6e\u8bed\u6599\u96c6\u8bad\u7ec3\u6a21\u578b\u540e\uff0c\u9884\u6d4b\u65f6\u51fa\u9519\uff0c\u5177\u4f53\u5982\u4e0b\n```text\r\nI1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1 \r\nTraceback (most recent call last):\r\n File \"infer.py\", line 91, in <module>\r\n batch_size=10)\r\n File \"infer.py\", line 58, in infer\r\n for idx, item in enumerate(test_reader):\r\nTypeError: 'function' object is not iterable\r\n```\r\npaddle\u955c\u50cf\u7528\u7684\u662fhttps://hub.docker.com/r/paddlepaddle/paddle/tags/ latest\u7684\uff0cmodel\u4e0b\u8f7d\u7684\u6700\u65b0\u7684\n", "before_files": [{"content": "import sys\nimport os\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom network_conf import fc_net, convolution_net\nfrom utils import logger, load_dict, load_reverse_dict\n\n\ndef infer(topology, data_dir, model_path, word_dict_path, label_dict_path,\n batch_size):\n def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):\n probs = inferer.infer(input=test_batch, field=[\"value\"])\n assert len(probs) == len(test_batch)\n for word_ids, prob in zip(test_batch, probs):\n word_text = \" \".join([ids_2_word[id] for id in word_ids[0]])\n print(\"%s\\t%s\\t%s\" % (ids_2_label[prob.argmax()],\n \" \".join([\"{:0.4f}\".format(p)\n for p in prob]), word_text))\n\n logger.info(\"begin to predict...\")\n use_default_data = (data_dir is None)\n\n if use_default_data:\n word_dict = paddle.dataset.imdb.word_dict()\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n test_reader = paddle.dataset.imdb.test(word_dict)\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n assert os.path.exists(\n label_dict_path), \"the label dictionary file does not exist\"\n\n word_dict = load_dict(word_dict_path)\n word_reverse_dict = load_reverse_dict(word_dict_path)\n label_reverse_dict = load_reverse_dict(label_dict_path)\n\n test_reader = reader.test_reader(data_dir, word_dict)()\n\n dict_dim = len(word_dict)\n class_num = len(label_reverse_dict)\n prob_layer = topology(dict_dim, class_num, is_infer=True)\n\n # initialize PaddlePaddle\n paddle.init(use_gpu=False, trainer_count=1)\n\n # load the trained models\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(model_path, \"r\"))\n inferer = paddle.inference.Inference(\n output_layer=prob_layer, parameters=parameters)\n\n test_batch = []\n for idx, item in enumerate(test_reader):\n test_batch.append([item[0]])\n if len(test_batch) == batch_size:\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n if len(test_batch):\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n\nif __name__ == \"__main__\":\n model_path = \"models/dnn_params_pass_00000.tar.gz\"\n assert os.path.exists(model_path), 
\"the trained model does not exist.\"\n\n nn_type = \"dnn\"\n test_dir = None\n word_dict = None\n label_dict = None\n\n if nn_type == \"dnn\":\n topology = fc_net\n elif nn_type == \"cnn\":\n topology = convolution_net\n\n infer(\n topology=topology,\n data_dir=test_dir,\n word_dict_path=word_dict,\n label_dict_path=label_dict,\n model_path=model_path,\n batch_size=10)\n", "path": "text_classification/infer.py"}]} | 1,591 | 133 |
gh_patches_debug_30819 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-2322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use `show objects in schema` on Snowflake
### Describe the feature
dbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).
Caveats:
- Snowflake's docs indicate that only 10k records can be returned from one of these queries. I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.
 - The existing info schema query gets away with a case-insensitive lookup using `ilike`, but we'll need to quote and case the schema name exactly for this query to succeed.
- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.
- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue
</issue>
<code>
[start of plugins/snowflake/dbt/adapters/snowflake/impl.py]
1 from typing import Mapping, Any, Optional, List
2
3 import agate
4
5 from dbt.adapters.sql import SQLAdapter
6 from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
7 from dbt.adapters.snowflake import SnowflakeConnectionManager
8 from dbt.adapters.snowflake import SnowflakeRelation
9 from dbt.adapters.snowflake import SnowflakeColumn
10 from dbt.contracts.graph.manifest import Manifest
11 from dbt.exceptions import RuntimeException, DatabaseException
12 from dbt.utils import filter_null_values
13
14
15 class SnowflakeAdapter(SQLAdapter):
16 Relation = SnowflakeRelation
17 Column = SnowflakeColumn
18 ConnectionManager = SnowflakeConnectionManager
19
20 AdapterSpecificConfigs = frozenset(
21 {"transient", "cluster_by", "automatic_clustering", "secure",
22 "copy_grants", "snowflake_warehouse"}
23 )
24
25 @classmethod
26 def date_function(cls):
27 return "CURRENT_TIMESTAMP()"
28
29 @classmethod
30 def _catalog_filter_table(
31 cls, table: agate.Table, manifest: Manifest
32 ) -> agate.Table:
33 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
34 # the column names to their lowercased forms.
35 lowered = table.rename(
36 column_names=[c.lower() for c in table.column_names]
37 )
38 return super()._catalog_filter_table(lowered, manifest)
39
40 def _make_match_kwargs(self, database, schema, identifier):
41 quoting = self.config.quoting
42 if identifier is not None and quoting["identifier"] is False:
43 identifier = identifier.upper()
44
45 if schema is not None and quoting["schema"] is False:
46 schema = schema.upper()
47
48 if database is not None and quoting["database"] is False:
49 database = database.upper()
50
51 return filter_null_values(
52 {"identifier": identifier, "schema": schema, "database": database}
53 )
54
55 def _get_warehouse(self) -> str:
56 _, table = self.execute(
57 'select current_warehouse() as warehouse',
58 fetch=True
59 )
60 if len(table) == 0 or len(table[0]) == 0:
61 # can this happen?
62 raise RuntimeException(
63 'Could not get current warehouse: no results'
64 )
65 return str(table[0][0])
66
67 def _use_warehouse(self, warehouse: str):
68 """Use the given warehouse. Quotes are never applied."""
69 self.execute('use warehouse {}'.format(warehouse))
70
71 def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:
72 default_warehouse = self.config.credentials.warehouse
73 warehouse = config.get('snowflake_warehouse', default_warehouse)
74 if warehouse == default_warehouse or warehouse is None:
75 return None
76 previous = self._get_warehouse()
77 self._use_warehouse(warehouse)
78 return previous
79
80 def post_model_hook(
81 self, config: Mapping[str, Any], context: Optional[str]
82 ) -> None:
83 if context is not None:
84 self._use_warehouse(context)
85
86 def list_schemas(self, database: str) -> List[str]:
87 try:
88 results = self.execute_macro(
89 LIST_SCHEMAS_MACRO_NAME,
90 kwargs={'database': database}
91 )
92 except DatabaseException as exc:
93 msg = (
94 f'Database error while listing schemas in database '
95 f'"{database}"\n{exc}'
96 )
97 raise RuntimeException(msg)
98 # this uses 'show terse schemas in database', and the column name we
99 # want is 'name'
100
101 return [row['name'] for row in results]
102
[end of plugins/snowflake/dbt/adapters/snowflake/impl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -3,7 +3,10 @@
import agate
from dbt.adapters.sql import SQLAdapter
-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
+from dbt.adapters.sql.impl import (
+ LIST_SCHEMAS_MACRO_NAME,
+ LIST_RELATIONS_MACRO_NAME,
+)
from dbt.adapters.snowflake import SnowflakeConnectionManager
from dbt.adapters.snowflake import SnowflakeRelation
from dbt.adapters.snowflake import SnowflakeColumn
@@ -99,3 +102,43 @@
# want is 'name'
return [row['name'] for row in results]
+
+ def list_relations_without_caching(
+ self, information_schema, schema
+ ) -> List[SnowflakeRelation]:
+ kwargs = {'information_schema': information_schema, 'schema': schema}
+ try:
+ results = self.execute_macro(
+ LIST_RELATIONS_MACRO_NAME,
+ kwargs=kwargs
+ )
+ except DatabaseException as exc:
+ # if the schema doesn't exist, we just want to return.
+ # Alternatively, we could query the list of schemas before we start
+ # and skip listing the missing ones, which sounds expensive.
+ if 'Object does not exist' in str(exc):
+ return []
+ raise
+
+ relations = []
+ quote_policy = {
+ 'database': True,
+ 'schema': True,
+ 'identifier': True
+ }
+
+ columns = ['database_name', 'schema_name', 'name', 'kind']
+ for _database, _schema, _identifier, _type in results.select(columns):
+ try:
+ _type = self.Relation.get_relation_type(_type.lower())
+ except ValueError:
+ _type = self.Relation.External
+ relations.append(self.Relation.create(
+ database=_database,
+ schema=_schema,
+ identifier=_identifier,
+ quote_policy=quote_policy,
+ type=_type
+ ))
+
+ return relations
| {"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -3,7 +3,10 @@\n import agate\n \n from dbt.adapters.sql import SQLAdapter\n-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\n+from dbt.adapters.sql.impl import (\n+ LIST_SCHEMAS_MACRO_NAME,\n+ LIST_RELATIONS_MACRO_NAME,\n+)\n from dbt.adapters.snowflake import SnowflakeConnectionManager\n from dbt.adapters.snowflake import SnowflakeRelation\n from dbt.adapters.snowflake import SnowflakeColumn\n@@ -99,3 +102,43 @@\n # want is 'name'\n \n return [row['name'] for row in results]\n+\n+ def list_relations_without_caching(\n+ self, information_schema, schema\n+ ) -> List[SnowflakeRelation]:\n+ kwargs = {'information_schema': information_schema, 'schema': schema}\n+ try:\n+ results = self.execute_macro(\n+ LIST_RELATIONS_MACRO_NAME,\n+ kwargs=kwargs\n+ )\n+ except DatabaseException as exc:\n+ # if the schema doesn't exist, we just want to return.\n+ # Alternatively, we could query the list of schemas before we start\n+ # and skip listing the missing ones, which sounds expensive.\n+ if 'Object does not exist' in str(exc):\n+ return []\n+ raise\n+\n+ relations = []\n+ quote_policy = {\n+ 'database': True,\n+ 'schema': True,\n+ 'identifier': True\n+ }\n+\n+ columns = ['database_name', 'schema_name', 'name', 'kind']\n+ for _database, _schema, _identifier, _type in results.select(columns):\n+ try:\n+ _type = self.Relation.get_relation_type(_type.lower())\n+ except ValueError:\n+ _type = self.Relation.External\n+ relations.append(self.Relation.create(\n+ database=_database,\n+ schema=_schema,\n+ identifier=_identifier,\n+ quote_policy=quote_policy,\n+ type=_type\n+ ))\n+\n+ return relations\n", "issue": "Use `show objects in schema` on Snowflake\n### Describe the feature\r\ndbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).\r\n\r\nCaveats:\r\n - Snowflake's docs indicate that only 10k records can be returned from one of these queries. 
I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.\r\n - The existing info schema query gets away with a case-insensitive lookup using `ilike, but we'll need to quote and case the schema name exactly for this query to succeed.\r\n- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.\r\n- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue\n", "before_files": [{"content": "from typing import Mapping, Any, Optional, List\n\nimport agate\n\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\",\n \"copy_grants\", \"snowflake_warehouse\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]} | 1,757 | 514 |
gh_patches_debug_12748 | rasdani/github-patches | git_diff | searx__searx-233 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Requests does not follow on an HTTP 301
We already had an issue when kickass redirected from https to http, I seem to remember (or it was on another engine). The request wasn't followed, so we had an empty response with status_code 301 in the engine.
Anyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.
So why isn't 301 followed? Is there a way to force it?
</issue>
<code>
[start of searx/engines/kickass.py]
1 ## Kickass Torrent (Videos, Music, Files)
2 #
3 # @website https://kickass.so
4 # @provide-api no (nothing found)
5 #
6 # @using-api no
7 # @results HTML (using search portal)
8 # @stable yes (HTML can change)
9 # @parse url, title, content, seed, leech, magnetlink
10
11 from urlparse import urljoin
12 from cgi import escape
13 from urllib import quote
14 from lxml import html
15 from operator import itemgetter
16 from searx.engines.xpath import extract_text
17
18 # engine dependent config
19 categories = ['videos', 'music', 'files']
20 paging = True
21
22 # search-url
23 url = 'https://kickass.so/'
24 search_url = url + 'search/{search_term}/{pageno}/'
25
26 # specific xpath variables
27 magnet_xpath = './/a[@title="Torrent magnet link"]'
28 torrent_xpath = './/a[@title="Download torrent file"]'
29 content_xpath = './/span[@class="font11px lightgrey block"]'
30
31
32 # do search-request
33 def request(query, params):
34 params['url'] = search_url.format(search_term=quote(query),
35 pageno=params['pageno'])
36
37 # FIX: SSLError: hostname 'kickass.so'
38 # doesn't match either of '*.kickass.to', 'kickass.to'
39 params['verify'] = False
40
41 return params
42
43
44 # get response from search-request
45 def response(resp):
46 results = []
47
48 dom = html.fromstring(resp.text)
49
50 search_res = dom.xpath('//table[@class="data"]//tr')
51
52 # return empty array if nothing is found
53 if not search_res:
54 return []
55
56 # parse results
57 for result in search_res[1:]:
58 link = result.xpath('.//a[@class="cellMainLink"]')[0]
59 href = urljoin(url, link.attrib['href'])
60 title = extract_text(link)
61 content = escape(extract_text(result.xpath(content_xpath)))
62 seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
63 leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
64 filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
65 filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
66 files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
67
68 # convert seed to int if possible
69 if seed.isdigit():
70 seed = int(seed)
71 else:
72 seed = 0
73
74 # convert leech to int if possible
75 if leech.isdigit():
76 leech = int(leech)
77 else:
78 leech = 0
79
80 # convert filesize to byte if possible
81 try:
82 filesize = float(filesize)
83
84 # convert filesize to byte
85 if filesize_multiplier == 'TB':
86 filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
87 elif filesize_multiplier == 'GB':
88 filesize = int(filesize * 1024 * 1024 * 1024)
89 elif filesize_multiplier == 'MB':
90 filesize = int(filesize * 1024 * 1024)
91 elif filesize_multiplier == 'KB':
92 filesize = int(filesize * 1024)
93 except:
94 filesize = None
95
96 # convert files to int if possible
97 if files.isdigit():
98 files = int(files)
99 else:
100 files = None
101
102 magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
103
104 torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
105 torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
106
107 # append result
108 results.append({'url': href,
109 'title': title,
110 'content': content,
111 'seed': seed,
112 'leech': leech,
113 'filesize': filesize,
114 'files': files,
115 'magnetlink': magnetlink,
116 'torrentfile': torrentfileurl,
117 'template': 'torrent.html'})
118
119 # return results sorted by seeder
120 return sorted(results, key=itemgetter('seed'), reverse=True)
121
[end of searx/engines/kickass.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -20,7 +20,7 @@
paging = True
# search-url
-url = 'https://kickass.so/'
+url = 'https://kickass.to/'
search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
@@ -45,6 +45,11 @@
def response(resp):
results = []
+ # check if redirect comparing to the True value,
+ # because resp can be a Mock object, and any attribut name returns something.
+ if resp.is_redirect is True:
+ return results
+
dom = html.fromstring(resp.text)
search_res = dom.xpath('//table[@class="data"]//tr')
| {"golden_diff": "diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py\n--- a/searx/engines/kickass.py\n+++ b/searx/engines/kickass.py\n@@ -20,7 +20,7 @@\n paging = True\n \n # search-url\n-url = 'https://kickass.so/'\n+url = 'https://kickass.to/'\n search_url = url + 'search/{search_term}/{pageno}/'\n \n # specific xpath variables\n@@ -45,6 +45,11 @@\n def response(resp):\n results = []\n \n+ # check if redirect comparing to the True value,\n+ # because resp can be a Mock object, and any attribut name returns something.\n+ if resp.is_redirect is True:\n+ return results\n+\n dom = html.fromstring(resp.text)\n \n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n", "issue": "Requests does not follow on an HTTP 301\nWe had already an issue when kickass redirected from https to http I seem to remember. Or it was on another engine. And the request wasn't followed so we had an empty response with status_code 301 in the engine.\n\nAnyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.\n\nSo why isn't 301 followed ? Is there a way to force it ?\n\n", "before_files": [{"content": "## Kickass Torrent (Videos, Music, Files)\n#\n# @website https://kickass.so\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://kickass.so/'\nsearch_url = url + 'search/{search_term}/{pageno}/'\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Torrent magnet link\"]'\ntorrent_xpath = './/a[@title=\"Download torrent file\"]'\ncontent_xpath = './/span[@class=\"font11px lightgrey block\"]'\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(search_term=quote(query),\n pageno=params['pageno'])\n\n # FIX: SSLError: hostname 'kickass.so'\n # doesn't match either of '*.kickass.to', 'kickass.to'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//a[@class=\"cellMainLink\"]')[0]\n href = urljoin(url, link.attrib['href'])\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed = result.xpath('.//td[contains(@class, \"green\")]/text()')[0]\n leech = result.xpath('.//td[contains(@class, \"red\")]/text()')[0]\n filesize = result.xpath('.//td[contains(@class, \"nobr\")]/text()')[0]\n filesize_multiplier = result.xpath('.//td[contains(@class, \"nobr\")]//span/text()')[0]\n files = result.xpath('.//td[contains(@class, \"center\")][2]/text()')[0]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n # convert filesize to byte if possible\n try:\n filesize = float(filesize)\n\n # convert filesize to byte\n if filesize_multiplier == 'TB':\n filesize = int(filesize * 1024 * 1024 * 1024 
* 1024)\n elif filesize_multiplier == 'GB':\n filesize = int(filesize * 1024 * 1024 * 1024)\n elif filesize_multiplier == 'MB':\n filesize = int(filesize * 1024 * 1024)\n elif filesize_multiplier == 'KB':\n filesize = int(filesize * 1024)\n except:\n filesize = None\n\n # convert files to int if possible\n if files.isdigit():\n files = int(files)\n else:\n files = None\n\n magnetlink = result.xpath(magnet_xpath)[0].attrib['href']\n\n torrentfile = result.xpath(torrent_xpath)[0].attrib['href']\n torrentfileurl = quote(torrentfile, safe=\"%/:=&?~#+!$,;'@()*\")\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'filesize': filesize,\n 'files': files,\n 'magnetlink': magnetlink,\n 'torrentfile': torrentfileurl,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/kickass.py"}]} | 1,852 | 207 |
gh_patches_debug_21270 | rasdani/github-patches | git_diff | uccser__cs-unplugged-210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 'About' page content
Using content from existing website.
</issue>
<code>
[start of csunplugged/general/urls.py]
1 """URL routing for the general application."""
2
3 from django.conf.urls import url
4
5 from . import views
6
7 urlpatterns = [
8 url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
9 url(r'^$', views.GeneralIndexView.as_view(), name='home'),
10 ]
11
[end of csunplugged/general/urls.py]
[start of csunplugged/general/views.py]
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = 'general/index.html'
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = 'general/about.html'
17
18
19 def health_check(request):
20     """Return health check response for Google App Engine.
21
22 Returns a 200 HTTP response for Google App Engine to detect the system
23 is running.
24 """
25 return HttpResponse(status=200)
26
[end of csunplugged/general/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py
--- a/csunplugged/general/urls.py
+++ b/csunplugged/general/urls.py
@@ -6,5 +6,8 @@
urlpatterns = [
url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),
+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),
+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),
url(r'^$', views.GeneralIndexView.as_view(), name='home'),
]
diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -16,6 +16,24 @@
template_name = 'general/about.html'
+class GeneralContactView(TemplateView):
+ """View for the contact page that renders from a template."""
+
+ template_name = 'general/contact.html'
+
+
+class GeneralPeopleView(TemplateView):
+ """View for the people page that renders from a template."""
+
+ template_name = 'general/people.html'
+
+
+class GeneralPrinciplesView(TemplateView):
+ """View for the princples page that renders from a template."""
+
+ template_name = 'general/principles.html'
+
+
def health_check(request):
"""Return heath check response for Google App Engine.
| {"golden_diff": "diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py\n--- a/csunplugged/general/urls.py\n+++ b/csunplugged/general/urls.py\n@@ -6,5 +6,8 @@\n \n urlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),\n+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),\n+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n ]\ndiff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -16,6 +16,24 @@\n template_name = 'general/about.html'\n \n \n+class GeneralContactView(TemplateView):\n+ \"\"\"View for the contact page that renders from a template.\"\"\"\n+\n+ template_name = 'general/contact.html'\n+\n+\n+class GeneralPeopleView(TemplateView):\n+ \"\"\"View for the people page that renders from a template.\"\"\"\n+\n+ template_name = 'general/people.html'\n+\n+\n+class GeneralPrinciplesView(TemplateView):\n+ \"\"\"View for the princples page that renders from a template.\"\"\"\n+\n+ template_name = 'general/principles.html'\n+\n+\n def health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n", "issue": "Add 'About' page content\nUsing content from existing website.\n", "before_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = 'general/about.html'\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]} | 831 | 346 |
gh_patches_debug_12277 | rasdani/github-patches | git_diff | kserve__kserve-2899 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make poetry version plugin to update the pyproject.toml as well not just the build
/kind bug
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
Currently the poetry plugin updates the version during the build, install phases but does not update the pyproject.toml
This makes it harder to easily decipher the version of the package.
**What did you expect to happen:**
Running poetry build or install should set the version of the package in pyproject.toml
</issue>
<code>
[start of python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py]
1 import ast
2 import subprocess
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from cleo.io.io import IO
7 from poetry.plugins.plugin import Plugin
8 from poetry.poetry import Poetry
9 from poetry.core.utils.helpers import module_name
10
11
12 class VersionPlugin(Plugin): # type: ignore
13 def activate(self, poetry: Poetry, io: IO) -> None:
14 poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(
15 "tool", {}
16 ).get("poetry-version-plugin")
17 if poetry_version_config is None:
18 return
19 version_source = poetry_version_config.get("source")
20 version_file_path = poetry_version_config.get("file_path")
21 if not version_source:
22 message = (
23 "<b>poetry-version-plugin</b>: No <b>source</b> configuration found in "
24 "[tool.poetry-version-plugin] in pyproject.toml, not extracting "
25 "dynamic version"
26 )
27 io.write_error_line(message)
28 raise RuntimeError(message)
29
30 # Update version using file
31 if version_source == "file" and version_file_path:
32 with open(version_file_path, "r") as version_file:
33 version = version_file.read().strip()
34 poetry.package._set_version(version)
35 io.write_line(
36 "<b>poetry-version-plugin</b>: New package "
37 "version is updated from given file "
38 f"version is: <b>{version}</b>"
39 )
40
41 if version_source == "init":
42 packages = poetry.local_config.get("packages")
43 if packages:
44 if len(packages) == 1:
45 package_name = packages[0]["include"]
46 else:
47 message = (
48 "<b>poetry-version-plugin</b>: More than one package set, "
49 "cannot extract dynamic version"
50 )
51 io.write_error_line(message)
52 raise RuntimeError(message)
53 else:
54 package_name = module_name(poetry.package.name)
55 init_path = Path(package_name) / "__init__.py"
56 if not init_path.is_file():
57 message = (
58 "<b>poetry-version-plugin</b>: __init__.py file not found at "
59 f"{init_path} cannot extract dynamic version"
60 )
61 io.write_error_line(message)
62 raise RuntimeError(message)
63 else:
64 io.write_line(
65 "<b>poetry-version-plugin</b>: Using __init__.py file at "
66 f"{init_path} for dynamic version"
67 )
68 tree = ast.parse(init_path.read_text())
69 for el in tree.body:
70 if isinstance(el, ast.Assign):
71 if len(el.targets) == 1:
72 target = el.targets[0]
73 if isinstance(target, ast.Name):
74 if target.id == "__version__":
75 value_node = el.value
76 if isinstance(value_node, ast.Constant):
77 version = value_node.value
78 elif isinstance(value_node, ast.Str):
79 version = value_node.s
80 else: # pragma: nocover
81 # This is actually covered by tests, but can't be
82 # reported by Coverage
83 # Ref: https://github.com/nedbat/coveragepy/issues/198
84 continue
85 io.write_line(
86 "<b>poetry-version-plugin</b>: Setting package "
87 "dynamic version to __version__ "
88 f"variable from __init__.py: <b>{version}</b>"
89 )
90 poetry.package._set_version(version)
91 return
92 message = (
93 "<b>poetry-version-plugin</b>: No valid __version__ variable found "
94 "in __init__.py, cannot extract dynamic version"
95 )
96 io.write_error_line(message)
97 raise RuntimeError(message)
98 elif version_source == "git-tag":
99 result = subprocess.run(
100 ["git", "describe", "--exact-match", "--tags", "HEAD"],
101 stdin=subprocess.PIPE,
102 stdout=subprocess.PIPE,
103 universal_newlines=True,
104 )
105 if result.returncode == 0:
106 tag = result.stdout.strip()
107 io.write_line(
108 "<b>poetry-version-plugin</b>: Git tag found, setting "
109 f"dynamic version to: {tag}"
110 )
111 poetry.package._set_version(tag)
112 return
113 else:
114 message = (
115 "<b>poetry-version-plugin</b>: No Git tag found, not "
116 "extracting dynamic version"
117 )
118 io.write_error_line(message)
119 raise RuntimeError(message)
120
[end of python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
@@ -37,6 +37,13 @@
"version is updated from given file "
f"version is: <b>{version}</b>"
)
+ content: dict[str, Any] = poetry.file.read()
+ poetry_content = content["tool"]["poetry"]
+ poetry_content["version"] = version
+ poetry.file.write(content)
+ io.write_line(
+ "<b>poetry-version-plugin</b>: New version updated in toml file"
+ )
if version_source == "init":
packages = poetry.local_config.get("packages")
| {"golden_diff": "diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n@@ -37,6 +37,13 @@\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n+ content: dict[str, Any] = poetry.file.read()\n+ poetry_content = content[\"tool\"][\"poetry\"]\n+ poetry_content[\"version\"] = version\n+ poetry.file.write(content)\n+ io.write_line(\n+ \"<b>poetry-version-plugin</b>: New version updated in toml file\"\n+ )\n \n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n", "issue": "Make poetry version plugin to update the pyproject.toml as well not just the build\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nCurrently the poetry plugin updates the version during the build, install phases but does not update the pyproject.toml\r\nThis makes it harder to easily decipher the version of the package.\r\n\r\n**What did you expect to happen:**\r\nRunning poetry build or install should set the version of the package in pyproject.toml\r\n\r\n\n", "before_files": [{"content": "import ast\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom cleo.io.io import IO\nfrom poetry.plugins.plugin import Plugin\nfrom poetry.poetry import Poetry\nfrom poetry.core.utils.helpers import module_name\n\n\nclass VersionPlugin(Plugin): # type: ignore\n def activate(self, poetry: Poetry, io: IO) -> None:\n poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(\n \"tool\", {}\n ).get(\"poetry-version-plugin\")\n if poetry_version_config is None:\n return\n version_source = poetry_version_config.get(\"source\")\n version_file_path = poetry_version_config.get(\"file_path\")\n if not version_source:\n message = (\n \"<b>poetry-version-plugin</b>: No <b>source</b> configuration found in \"\n \"[tool.poetry-version-plugin] in pyproject.toml, not extracting \"\n \"dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n\n # Update version using file\n if version_source == \"file\" and version_file_path:\n with open(version_file_path, \"r\") as version_file:\n version = version_file.read().strip()\n poetry.package._set_version(version)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New package \"\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n\n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n if packages:\n if len(packages) == 1:\n package_name = packages[0][\"include\"]\n else:\n message = (\n \"<b>poetry-version-plugin</b>: More than one package set, \"\n \"cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n package_name = module_name(poetry.package.name)\n init_path = Path(package_name) / \"__init__.py\"\n if not init_path.is_file():\n message = (\n \"<b>poetry-version-plugin</b>: __init__.py file not found at \"\n f\"{init_path} cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n io.write_line(\n \"<b>poetry-version-plugin</b>: Using __init__.py file at \"\n f\"{init_path} for dynamic version\"\n )\n tree = ast.parse(init_path.read_text())\n for el in tree.body:\n if 
isinstance(el, ast.Assign):\n if len(el.targets) == 1:\n target = el.targets[0]\n if isinstance(target, ast.Name):\n if target.id == \"__version__\":\n value_node = el.value\n if isinstance(value_node, ast.Constant):\n version = value_node.value\n elif isinstance(value_node, ast.Str):\n version = value_node.s\n else: # pragma: nocover\n # This is actually covered by tests, but can't be\n # reported by Coverage\n # Ref: https://github.com/nedbat/coveragepy/issues/198\n continue\n io.write_line(\n \"<b>poetry-version-plugin</b>: Setting package \"\n \"dynamic version to __version__ \"\n f\"variable from __init__.py: <b>{version}</b>\"\n )\n poetry.package._set_version(version)\n return\n message = (\n \"<b>poetry-version-plugin</b>: No valid __version__ variable found \"\n \"in __init__.py, cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n elif version_source == \"git-tag\":\n result = subprocess.run(\n [\"git\", \"describe\", \"--exact-match\", \"--tags\", \"HEAD\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if result.returncode == 0:\n tag = result.stdout.strip()\n io.write_line(\n \"<b>poetry-version-plugin</b>: Git tag found, setting \"\n f\"dynamic version to: {tag}\"\n )\n poetry.package._set_version(tag)\n return\n else:\n message = (\n \"<b>poetry-version-plugin</b>: No Git tag found, not \"\n \"extracting dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n", "path": "python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py"}]} | 1,859 | 197 |
gh_patches_debug_3921 | rasdani/github-patches | git_diff | bokeh__bokeh-5427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
embed/animated fails with "did not find model"
`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.
</issue>
<code>
[start of examples/embed/animated.py]
1 """ To view this example, first start a Bokeh server:
2
3 bokeh serve --allow-websocket-origin=localhost:8000
4
5 And then load the example into the Bokeh server by
6 running the script:
7
8 python animated.py
9
10 in this directory. Finally, start a simple web server
11 by running:
12
13 python -m SimpleHTTPServer (python 2)
14
15 or
16
17 python -m http.server (python 3)
18
19 in this directory. Navigate to
20
21 http://localhost:8000/animated.html
22
23 """
24 from __future__ import print_function
25
26 from numpy import pi, cos, sin, linspace, roll
27
28 from bokeh.client import push_session
29 from bokeh.embed import autoload_server
30 from bokeh.plotting import figure, curdoc
31
32 M = 5
33 N = M*10 + 1
34 r_base = 8
35 theta = linspace(0, 2*pi, N)
36 r_x = linspace(0, 6*pi, N-1)
37 rmin = r_base - cos(r_x) - 1
38 rmax = r_base + sin(r_x) + 1
39
40 colors = ["FFFFCC", "#C7E9B4", "#7FCDBB", "#41B6C4", "#2C7FB8",
41 "#253494", "#2C7FB8", "#41B6C4", "#7FCDBB", "#C7E9B4"] * 5
42
43 # figure() function auto-adds the figure to curdoc()
44 p = figure(x_range=(-11, 11), y_range=(-11, 11))
45 r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],
46 fill_color=colors, line_color="white")
47
48 # open a session which will keep our local doc in sync with server
49 session = push_session(curdoc())
50
51 html = """
52 <html>
53 <head></head>
54 <body>
55 %s
56 </body>
57 </html>
58 """ % autoload_server(p, session_id=session.id)
59
60 with open("animated.html", "w+") as f:
61 f.write(html)
62
63 print(__doc__)
64
65 ds = r.data_source
66
67 def update():
68 rmin = roll(ds.data["inner_radius"], 1)
69 rmax = roll(ds.data["outer_radius"], -1)
70 ds.data.update(inner_radius=rmin, outer_radius=rmax)
71
72 curdoc().add_periodic_callback(update, 30)
73
74 session.loop_until_closed() # run forever
75
[end of examples/embed/animated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/embed/animated.py b/examples/embed/animated.py
--- a/examples/embed/animated.py
+++ b/examples/embed/animated.py
@@ -45,6 +45,9 @@
r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],
fill_color=colors, line_color="white")
+# add the plot to curdoc
+curdoc().add_root(p)
+
# open a session which will keep our local doc in sync with server
session = push_session(curdoc())
| {"golden_diff": "diff --git a/examples/embed/animated.py b/examples/embed/animated.py\n--- a/examples/embed/animated.py\n+++ b/examples/embed/animated.py\n@@ -45,6 +45,9 @@\n r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n \n+# add the plot to curdoc\n+curdoc().add_root(p)\n+\n # open a session which will keep our local doc in sync with server\n session = push_session(curdoc())\n", "issue": "embed/animated fails with \"did not find model\"\n`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.\n\n", "before_files": [{"content": "\"\"\" To view this example, first start a Bokeh server:\n\n bokeh serve --allow-websocket-origin=localhost:8000\n\nAnd then load the example into the Bokeh server by\nrunning the script:\n\n python animated.py\n\nin this directory. Finally, start a simple web server\nby running:\n\n python -m SimpleHTTPServer (python 2)\n\nor\n\n python -m http.server (python 3)\n\nin this directory. Navigate to\n\n http://localhost:8000/animated.html\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom numpy import pi, cos, sin, linspace, roll\n\nfrom bokeh.client import push_session\nfrom bokeh.embed import autoload_server\nfrom bokeh.plotting import figure, curdoc\n\nM = 5\nN = M*10 + 1\nr_base = 8\ntheta = linspace(0, 2*pi, N)\nr_x = linspace(0, 6*pi, N-1)\nrmin = r_base - cos(r_x) - 1\nrmax = r_base + sin(r_x) + 1\n\ncolors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n# figure() function auto-adds the figure to curdoc()\np = figure(x_range=(-11, 11), y_range=(-11, 11))\nr = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n\n# open a session which will keep our local doc in sync with server\nsession = push_session(curdoc())\n\nhtml = \"\"\"\n<html>\n <head></head>\n <body>\n %s\n </body>\n</html>\n\"\"\" % autoload_server(p, session_id=session.id)\n\nwith open(\"animated.html\", \"w+\") as f:\n f.write(html)\n\nprint(__doc__)\n\nds = r.data_source\n\ndef update():\n rmin = roll(ds.data[\"inner_radius\"], 1)\n rmax = roll(ds.data[\"outer_radius\"], -1)\n ds.data.update(inner_radius=rmin, outer_radius=rmax)\n\ncurdoc().add_periodic_callback(update, 30)\n\nsession.loop_until_closed() # run forever\n", "path": "examples/embed/animated.py"}]} | 1,274 | 125 |
gh_patches_debug_9670 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Internal server error @ fresh install
### Description
Get "Internal server error" with no description after a fresh install. Have I missed a step, or is this a bug?
### Steps to reproduce
...
### Webserver logs
```bash
Nothing shows up using 'docker-compose logs -f'
webserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)
webserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6
```
### Paperless-ngx version
1.8.0
### Host OS
CentOS 7.9 -fully patched with latest patches
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
none
### Other
...
</issue>
<code>
[start of gunicorn.conf.py]
1 import os
2
3 bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
4 workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
5 worker_class = "paperless.workers.ConfigurableWorker"
6 timeout = 120
7
8
9 def pre_fork(server, worker):
10 pass
11
12
13 def pre_exec(server):
14 server.log.info("Forked child, re-executing.")
15
16
17 def when_ready(server):
18 server.log.info("Server is ready. Spawning workers")
19
20
21 def worker_int(worker):
22 worker.log.info("worker received INT or QUIT signal")
23
24 ## get traceback info
25 import threading, sys, traceback
26
27 id2name = {th.ident: th.name for th in threading.enumerate()}
28 code = []
29 for threadId, stack in sys._current_frames().items():
30 code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId))
31 for filename, lineno, name, line in traceback.extract_stack(stack):
32 code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
33 if line:
34 code.append(" %s" % (line.strip()))
35 worker.log.debug("\n".join(code))
36
37
38 def worker_abort(worker):
39 worker.log.info("worker received SIGABRT signal")
40
[end of gunicorn.conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn.conf.py b/gunicorn.conf.py
--- a/gunicorn.conf.py
+++ b/gunicorn.conf.py
@@ -1,9 +1,17 @@
import os
+# See https://docs.gunicorn.org/en/stable/settings.html for
+# explanations of settings
+
bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
+
workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
worker_class = "paperless.workers.ConfigurableWorker"
timeout = 120
+preload_app = True
+
+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod
+worker_tmp_dir = "/dev/shm"
def pre_fork(server, worker):
| {"golden_diff": "diff --git a/gunicorn.conf.py b/gunicorn.conf.py\n--- a/gunicorn.conf.py\n+++ b/gunicorn.conf.py\n@@ -1,9 +1,17 @@\n import os\n \n+# See https://docs.gunicorn.org/en/stable/settings.html for\n+# explanations of settings\n+\n bind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n+\n workers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\n worker_class = \"paperless.workers.ConfigurableWorker\"\n timeout = 120\n+preload_app = True\n+\n+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod\n+worker_tmp_dir = \"/dev/shm\"\n \n \n def pre_fork(server, worker):\n", "issue": "[BUG] Internal server error @ fresh install\n### Description\n\nGet \"Internal server error\" with no description after a fresh install. Have I missed a step, or is this a bug?\n\n### Steps to reproduce\n\n...\n\n### Webserver logs\n\n```bash\nNothing shows up using 'docker-compose logs -f'\r\n\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6\n```\n\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nCentOS 7.9 -fully patched with latest patches\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nFirefox\n\n### Configuration changes\n\nnone\n\n### Other\n\n...\n", "before_files": [{"content": "import os\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}]} | 1,140 | 179 |
gh_patches_debug_31683 | rasdani/github-patches | git_diff | ivy-llc__ivy-19089 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bernoulli
</issue>
<code>
[start of ivy/functional/backends/paddle/experimental/random.py]
1 # global
2 from typing import Optional, Union, Sequence
3 import paddle
4
5 from ivy import with_unsupported_device_and_dtypes
6 from ivy.functional.backends.paddle import backend_version
7 from ivy.utils.exceptions import IvyNotImplementedException
8
9 # local
10 import ivy
11 from paddle.device import core
12
13 # dirichlet
14
15
16 @with_unsupported_device_and_dtypes(
17 {
18 "2.5.1 and below": {
19 "cpu": (
20 "int8",
21 "int16",
22 "uint8",
23 "float16",
24 "complex64",
25 "complex128",
26 "bool",
27 )
28 }
29 },
30 backend_version,
31 )
32 def dirichlet(
33 alpha: Union[paddle.Tensor, float, Sequence[float]],
34 /,
35 *,
36 size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
37 out: Optional[paddle.Tensor] = None,
38 seed: Optional[int] = None,
39 dtype: Optional[paddle.dtype] = None,
40 ) -> paddle.Tensor:
41 size = size if size is not None else len(alpha)
42 dtype = dtype if dtype is not None else paddle.float64
43 if seed is not None:
44 paddle.seed(seed)
45 res = paddle.to_tensor(
46 paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),
47 dtype=dtype,
48 )
49 return res
50
51
52 def beta(
53 alpha: Union[float, paddle.Tensor],
54 beta: Union[float, paddle.Tensor],
55 /,
56 *,
57 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
58 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
59 device: core.Place = None,
60 seed: Optional[int] = None,
61 out: Optional[paddle.Tensor] = None,
62 ) -> paddle.Tensor:
63 raise IvyNotImplementedException()
64
65
66 def gamma(
67 alpha: Union[float, paddle.Tensor],
68 beta: Union[float, paddle.Tensor],
69 /,
70 *,
71 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
72 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
73 device: core.Place = None,
74 seed: Optional[int] = None,
75 out: Optional[paddle.Tensor] = None,
76 ) -> paddle.Tensor:
77 raise IvyNotImplementedException()
78
79
80 def poisson(
81 lam: Union[float, paddle.Tensor],
82 *,
83 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
84 device: core.Place,
85 dtype: paddle.dtype,
86 seed: Optional[int] = None,
87 fill_value: Optional[Union[float, int]] = 0,
88 out: Optional[paddle.Tensor] = None,
89 ):
90 raise IvyNotImplementedException()
91
92
93 def bernoulli(
94 probs: Union[float, paddle.Tensor],
95 *,
96 logits: Union[float, paddle.Tensor] = None,
97 shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,
98 device: core.Place,
99 dtype: paddle.dtype,
100 seed: Optional[int] = None,
101 out: Optional[paddle.Tensor] = None,
102 ) -> paddle.Tensor:
103 raise IvyNotImplementedException()
104
[end of ivy/functional/backends/paddle/experimental/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py
--- a/ivy/functional/backends/paddle/experimental/random.py
+++ b/ivy/functional/backends/paddle/experimental/random.py
@@ -1,7 +1,7 @@
# global
from typing import Optional, Union, Sequence
import paddle
-
+from ivy.functional.backends.paddle.device import to_device
from ivy import with_unsupported_device_and_dtypes
from ivy.functional.backends.paddle import backend_version
from ivy.utils.exceptions import IvyNotImplementedException
@@ -9,6 +9,7 @@
# local
import ivy
from paddle.device import core
+from ivy import with_supported_device_and_dtypes
# dirichlet
@@ -90,6 +91,23 @@
raise IvyNotImplementedException()
+# bernoulli
+@with_supported_device_and_dtypes(
+ {
+ "2.5.0 and above": {
+ "cpu": ("float32", "float64"),
+ "gpu": ("bfloat16", "float16", "float32", "float64"),
+ },
+ "2.4.2 and below": {
+ "cpu": (
+ "float32",
+ "float64",
+ ),
+ "gpu": ("float16", "float32", "float64"),
+ },
+ },
+ backend_version,
+)
def bernoulli(
probs: Union[float, paddle.Tensor],
*,
@@ -100,4 +118,14 @@
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
- raise IvyNotImplementedException()
+ if seed is not None:
+ paddle.seed(seed)
+ if probs is not None:
+ probs = probs
+ elif logits is not None:
+ probs = ivy.softmax(logits)
+ probs = paddle.cast(probs, dtype)
+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs
+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))
+ sample = paddle.bernoulli(probs)
+ return to_device(sample, device)
| {"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py\n--- a/ivy/functional/backends/paddle/experimental/random.py\n+++ b/ivy/functional/backends/paddle/experimental/random.py\n@@ -1,7 +1,7 @@\n # global\n from typing import Optional, Union, Sequence\n import paddle\n-\n+from ivy.functional.backends.paddle.device import to_device\n from ivy import with_unsupported_device_and_dtypes\n from ivy.functional.backends.paddle import backend_version\n from ivy.utils.exceptions import IvyNotImplementedException\n@@ -9,6 +9,7 @@\n # local\n import ivy\n from paddle.device import core\n+from ivy import with_supported_device_and_dtypes\n \n # dirichlet\n \n@@ -90,6 +91,23 @@\n raise IvyNotImplementedException()\n \n \n+# bernoulli\n+@with_supported_device_and_dtypes(\n+ {\n+ \"2.5.0 and above\": {\n+ \"cpu\": (\"float32\", \"float64\"),\n+ \"gpu\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n+ },\n+ \"2.4.2 and below\": {\n+ \"cpu\": (\n+ \"float32\",\n+ \"float64\",\n+ ),\n+ \"gpu\": (\"float16\", \"float32\", \"float64\"),\n+ },\n+ },\n+ backend_version,\n+)\n def bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n@@ -100,4 +118,14 @@\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n ) -> paddle.Tensor:\n- raise IvyNotImplementedException()\n+ if seed is not None:\n+ paddle.seed(seed)\n+ if probs is not None:\n+ probs = probs\n+ elif logits is not None:\n+ probs = ivy.softmax(logits)\n+ probs = paddle.cast(probs, dtype)\n+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs\n+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))\n+ sample = paddle.bernoulli(probs)\n+ return to_device(sample, device)\n", "issue": "bernoulli\n\n", "before_files": [{"content": "# global\nfrom typing import Optional, Union, Sequence\nimport paddle\n\nfrom ivy import with_unsupported_device_and_dtypes\nfrom ivy.functional.backends.paddle import backend_version\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n# local\nimport ivy\nfrom paddle.device import core\n\n# dirichlet\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef dirichlet(\n alpha: Union[paddle.Tensor, float, Sequence[float]],\n /,\n *,\n size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n out: Optional[paddle.Tensor] = None,\n seed: Optional[int] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n size = size if size is not None else len(alpha)\n dtype = dtype if dtype is not None else paddle.float64\n if seed is not None:\n paddle.seed(seed)\n res = paddle.to_tensor(\n paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),\n dtype=dtype,\n )\n return res\n\n\ndef beta(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef gamma(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n 
seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef poisson(\n lam: Union[float, paddle.Tensor],\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n fill_value: Optional[Union[float, int]] = 0,\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\ndef bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n logits: Union[float, paddle.Tensor] = None,\n shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/random.py"}]} | 1,448 | 522 |
gh_patches_debug_27388 | rasdani/github-patches | git_diff | python-poetry__poetry-2787 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RecursionError when installing mkdocs-material
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Windows 10 Pro 1909 18363.719
- **Poetry version**: 1.0.5
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68
## Issue
Hi,
when running: `poetry install -E docs_material` I get the following error:
```
[RecursionError]
maximum recursion depth exceeded
```
There are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?
Thank you for poetry and your effort :)
</issue>
<code>
[start of poetry/utils/extras.py]
1 from typing import Iterator
2 from typing import List
3 from typing import Mapping
4 from typing import Sequence
5
6 from poetry.core.packages import Package
7 from poetry.utils.helpers import canonicalize_name
8
9
10 def get_extra_package_names(
11 packages, # type: Sequence[Package]
12 extras, # type: Mapping[str, List[str]]
13 extra_names, # type: Sequence[str]
14 ): # type: (...) -> Iterator[str]
15 """
16 Returns all package names required by the given extras.
17
18 :param packages: A collection of packages, such as from Repository.packages
19 :param extras: A mapping of `extras` names to lists of package names, as defined
20 in the `extras` section of `poetry.lock`.
21 :param extra_names: A list of strings specifying names of extra groups to resolve.
22 """
23 if not extra_names:
24 return []
25
26 # lookup for packages by name, faster than looping over packages repeatedly
27 packages_by_name = {package.name: package for package in packages}
28
29 # get and flatten names of packages we've opted into as extras
30 extra_package_names = [
31 canonicalize_name(extra_package_name)
32 for extra_name in extra_names
33 for extra_package_name in extras.get(extra_name, ())
34 ]
35
36 def _extra_packages(package_names):
37         """Recursively find dependencies for package names"""
38         # for each extra package name
39 for package_name in package_names:
40 # Find the actual Package object. A missing key indicates an implicit
41 # dependency (like setuptools), which should be ignored
42 package = packages_by_name.get(canonicalize_name(package_name))
43 if package:
44 yield package.name
45 # Recurse for dependencies
46 for dependency_package_name in _extra_packages(
47 dependency.name for dependency in package.requires
48 ):
49 yield dependency_package_name
50
51 return _extra_packages(extra_package_names)
52
[end of poetry/utils/extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py
--- a/poetry/utils/extras.py
+++ b/poetry/utils/extras.py
@@ -33,6 +33,9 @@
for extra_package_name in extras.get(extra_name, ())
]
+ # keep record of packages seen during recursion in order to avoid recursion error
+ seen_package_names = set()
+
def _extra_packages(package_names):
"""Recursively find dependencies for packages names"""
# for each extra pacakge name
@@ -41,11 +44,16 @@
# dependency (like setuptools), which should be ignored
package = packages_by_name.get(canonicalize_name(package_name))
if package:
- yield package.name
+ if package.name not in seen_package_names:
+ seen_package_names.add(package.name)
+ yield package.name
# Recurse for dependencies
for dependency_package_name in _extra_packages(
- dependency.name for dependency in package.requires
+ dependency.name
+ for dependency in package.requires
+ if dependency.name not in seen_package_names
):
+ seen_package_names.add(dependency_package_name)
yield dependency_package_name
return _extra_packages(extra_package_names)
| {"golden_diff": "diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py\n--- a/poetry/utils/extras.py\n+++ b/poetry/utils/extras.py\n@@ -33,6 +33,9 @@\n for extra_package_name in extras.get(extra_name, ())\n ]\n \n+ # keep record of packages seen during recursion in order to avoid recursion error\n+ seen_package_names = set()\n+\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n@@ -41,11 +44,16 @@\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n- yield package.name\n+ if package.name not in seen_package_names:\n+ seen_package_names.add(package.name)\n+ yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n- dependency.name for dependency in package.requires\n+ dependency.name\n+ for dependency in package.requires\n+ if dependency.name not in seen_package_names\n ):\n+ seen_package_names.add(dependency_package_name)\n yield dependency_package_name\n \n return _extra_packages(extra_package_names)\n", "issue": "RecursionError when installing mkdocs-material\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Windows 10 Pro 1909 18363.719\r\n- **Poetry version**: 1.0.5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68\r\n\r\n## Issue\r\nHi,\r\n\r\nwhen running: `poetry install -E docs_material` I get the following error:\r\n\r\n```\r\n[RecursionError]\r\nmaximum recursion depth exceeded\r\n```\r\n\r\nThere are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?\r\n\r\nThank you for poetry and your effort :)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Iterator\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Sequence\n\nfrom poetry.core.packages import Package\nfrom poetry.utils.helpers import canonicalize_name\n\n\ndef get_extra_package_names(\n packages, # type: Sequence[Package]\n extras, # type: Mapping[str, List[str]]\n extra_names, # type: Sequence[str]\n): # type: (...) 
-> Iterator[str]\n \"\"\"\n Returns all package names required by the given extras.\n\n :param packages: A collection of packages, such as from Repository.packages\n :param extras: A mapping of `extras` names to lists of package names, as defined\n in the `extras` section of `poetry.lock`.\n :param extra_names: A list of strings specifying names of extra groups to resolve.\n \"\"\"\n if not extra_names:\n return []\n\n # lookup for packages by name, faster than looping over packages repeatedly\n packages_by_name = {package.name: package for package in packages}\n\n # get and flatten names of packages we've opted into as extras\n extra_package_names = [\n canonicalize_name(extra_package_name)\n for extra_name in extra_names\n for extra_package_name in extras.get(extra_name, ())\n ]\n\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n for package_name in package_names:\n # Find the actual Package object. A missing key indicates an implicit\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n dependency.name for dependency in package.requires\n ):\n yield dependency_package_name\n\n return _extra_packages(extra_package_names)\n", "path": "poetry/utils/extras.py"}]} | 1,325 | 277 |
gh_patches_debug_230 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-2545 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Releasing 1.0
With #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.
This should consist of:
- [x] assembling changelog #2440
- [x] making sure new features are well documented
- [x] publishing beta release
- [x] test beta (perhaps by adding it to the z2jh chart)
- [ ] release 1.0 final
</issue>
<code>
[start of jupyterhub/_version.py]
1 """JupyterHub version info"""
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4
5 version_info = (
6 1,
7 0,
8 0,
9 "b2", # release (b1, rc1, or "" for final or dev)
10 # "dev", # dev or nothing
11 )
12
13 # pep 440 version: no dot before beta/rc, but before .dev
14 # 0.1.0rc1
15 # 0.1.0a1
16 # 0.1.0b1.dev
17 # 0.1.0.dev
18
19 __version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])
20
21
22 def _check_version(hub_version, singleuser_version, log):
23 """Compare Hub and single-user server versions"""
24 if not hub_version:
25 log.warning(
26 "Hub has no version header, which means it is likely < 0.8. Expected %s",
27 __version__,
28 )
29 return
30
31 if not singleuser_version:
32 log.warning(
33 "Single-user server has no version header, which means it is likely < 0.8. Expected %s",
34 __version__,
35 )
36 return
37
38 # compare minor X.Y versions
39 if hub_version != singleuser_version:
40 from distutils.version import LooseVersion as V
41
42 hub_major_minor = V(hub_version).version[:2]
43 singleuser_major_minor = V(singleuser_version).version[:2]
44 extra = ""
45 if singleuser_major_minor == hub_major_minor:
46 # patch-level mismatch or lower, log difference at debug-level
47 # because this should be fine
48 log_method = log.debug
49 else:
50 # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.
51 log_method = log.warning
52 extra = " This could cause failure to authenticate and result in redirect loops!"
53 log_method(
54 "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
55 hub_version,
56 singleuser_version,
57 )
58 else:
59 log.debug(
60 "jupyterhub and jupyterhub-singleuser both on version %s" % hub_version
61 )
62
[end of jupyterhub/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py
--- a/jupyterhub/_version.py
+++ b/jupyterhub/_version.py
@@ -6,7 +6,7 @@
1,
0,
0,
- "b2", # release (b1, rc1, or "" for final or dev)
+ # "b2", # release (b1, rc1, or "" for final or dev)
# "dev", # dev or nothing
)
| {"golden_diff": "diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py\n--- a/jupyterhub/_version.py\n+++ b/jupyterhub/_version.py\n@@ -6,7 +6,7 @@\n 1,\n 0,\n 0,\n- \"b2\", # release (b1, rc1, or \"\" for final or dev)\n+ # \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n )\n", "issue": "Releasing 1.0\nWith #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.\r\n\r\nThis should consist of:\r\n\r\n- [x] assembling changelog #2440\r\n- [x] making sure new features are well documented\r\n- [x] publishing beta release\r\n- [x] test beta (perhaps by adding it to the z2jh chart)\r\n- [ ] release 1.0 final\n", "before_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 0,\n 0,\n \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}]} | 1,258 | 118 |
gh_patches_debug_57772 | rasdani/github-patches | git_diff | celery__kombu-400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django Kombu app conflicts with Django app with Django 1.7
``` python
Traceback (most recent call last):
File "manage.py", line 13, in <module>
execute_from_command_line(sys.argv)
File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 385, in execute_from_command_line
utility.execute()
File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute
django.setup()
File "/.../.env/lib/python2.7/site-packages/django/__init__.py", line 21, in setup
apps.populate(settings.INSTALLED_APPS)
File "/.../.env/lib/python2.7/site-packages/django/apps/registry.py", line 89, in populate
"duplicates: %s" % app_config.label)
django.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django
```
More info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)
</issue>
<code>
[start of kombu/transport/django/__init__.py]
1 """Kombu transport using the Django database as a message store."""
2 from __future__ import absolute_import
3
4 from django.conf import settings
5 from django.core import exceptions as errors
6
7 from kombu.five import Empty
8 from kombu.transport import virtual
9 from kombu.utils.encoding import bytes_to_str
10 from kombu.utils.json import loads, dumps
11
12
13 from .models import Queue
14
15 try:
16 from django.apps import AppConfig
17 except ImportError: # pragma: no cover
18 pass
19 else:
20 class KombuAppConfig(AppConfig):
21 name = __name__
22 label = name.replace('.', '_')
23 verbose_name = 'Message queue'
24 default_app_config = 'kombu.transport.django.KombuAppConfig'
25
26 VERSION = (1, 0, 0)
27 __version__ = '.'.join(map(str, VERSION))
28
29 POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',
30 getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))
31
32
33 class Channel(virtual.Channel):
34
35 def _new_queue(self, queue, **kwargs):
36 Queue.objects.get_or_create(name=queue)
37
38 def _put(self, queue, message, **kwargs):
39 Queue.objects.publish(queue, dumps(message))
40
41 def basic_consume(self, queue, *args, **kwargs):
42 qinfo = self.state.bindings[queue]
43 exchange = qinfo[0]
44 if self.typeof(exchange).type == 'fanout':
45 return
46 super(Channel, self).basic_consume(queue, *args, **kwargs)
47
48 def _get(self, queue):
49 m = Queue.objects.fetch(queue)
50 if m:
51 return loads(bytes_to_str(m))
52 raise Empty()
53
54 def _size(self, queue):
55 return Queue.objects.size(queue)
56
57 def _purge(self, queue):
58 return Queue.objects.purge(queue)
59
60 def refresh_connection(self):
61 from django import db
62 db.close_connection()
63
64
65 class Transport(virtual.Transport):
66 Channel = Channel
67
68 default_port = 0
69 polling_interval = POLLING_INTERVAL
70 channel_errors = (
71 virtual.Transport.channel_errors + (
72 errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)
73 )
74 driver_type = 'sql'
75 driver_name = 'django'
76
77 def driver_version(self):
78 import django
79 return '.'.join(map(str, django.VERSION))
80
[end of kombu/transport/django/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py
--- a/kombu/transport/django/__init__.py
+++ b/kombu/transport/django/__init__.py
@@ -18,7 +18,7 @@
pass
else:
class KombuAppConfig(AppConfig):
- name = __name__
+ name = 'kombu.transport.django'
label = name.replace('.', '_')
verbose_name = 'Message queue'
default_app_config = 'kombu.transport.django.KombuAppConfig'
| {"golden_diff": "diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py\n--- a/kombu/transport/django/__init__.py\n+++ b/kombu/transport/django/__init__.py\n@@ -18,7 +18,7 @@\n pass\n else:\n class KombuAppConfig(AppConfig):\n- name = __name__\n+ name = 'kombu.transport.django'\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n", "issue": "Django Kombu app conflicts with Django app with Django 1.7\n``` python\nTraceback (most recent call last):\n File \"manage.py\", line 13, in <module>\n execute_from_command_line(sys.argv)\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 385, in execute_from_command_line\n utility.execute()\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 354, in execute\n django.setup()\n File \"/.../.env/lib/python2.7/site-packages/django/__init__.py\", line 21, in setup\n apps.populate(settings.INSTALLED_APPS)\n File \"/.../.env/lib/python2.7/site-packages/django/apps/registry.py\", line 89, in populate\n \"duplicates: %s\" % app_config.label)\ndjango.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django\n```\n\nMore info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)\n\n", "before_files": [{"content": "\"\"\"Kombu transport using the Django database as a message store.\"\"\"\nfrom __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core import exceptions as errors\n\nfrom kombu.five import Empty\nfrom kombu.transport import virtual\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\n\nfrom .models import Queue\n\ntry:\n from django.apps import AppConfig\nexcept ImportError: # pragma: no cover\n pass\nelse:\n class KombuAppConfig(AppConfig):\n name = __name__\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n\nVERSION = (1, 0, 0)\n__version__ = '.'.join(map(str, VERSION))\n\nPOLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',\n getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))\n\n\nclass Channel(virtual.Channel):\n\n def _new_queue(self, queue, **kwargs):\n Queue.objects.get_or_create(name=queue)\n\n def _put(self, queue, message, **kwargs):\n Queue.objects.publish(queue, dumps(message))\n\n def basic_consume(self, queue, *args, **kwargs):\n qinfo = self.state.bindings[queue]\n exchange = qinfo[0]\n if self.typeof(exchange).type == 'fanout':\n return\n super(Channel, self).basic_consume(queue, *args, **kwargs)\n\n def _get(self, queue):\n m = Queue.objects.fetch(queue)\n if m:\n return loads(bytes_to_str(m))\n raise Empty()\n\n def _size(self, queue):\n return Queue.objects.size(queue)\n\n def _purge(self, queue):\n return Queue.objects.purge(queue)\n\n def refresh_connection(self):\n from django import db\n db.close_connection()\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n default_port = 0\n polling_interval = POLLING_INTERVAL\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)\n )\n driver_type = 'sql'\n driver_name = 'django'\n\n def driver_version(self):\n import django\n return '.'.join(map(str, django.VERSION))\n", "path": "kombu/transport/django/__init__.py"}]} | 1,461 | 137 |
gh_patches_debug_32907 | rasdani/github-patches | git_diff | scikit-hep__pyhf-873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move metadata from setup.py to setup.cfg
# Description
@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing.
@henryiii Can you comment on how to deal with `project_urls`?
https://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 project_urls={
83 "Documentation": "https://scikit-hep.org/pyhf/",
84 "Source": "https://github.com/scikit-hep/pyhf",
85 "Tracker": "https://github.com/scikit-hep/pyhf/issues",
86 },
87 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
88 author_email='[email protected], [email protected], [email protected]',
89 license='Apache',
90 keywords='physics fitting numpy scipy tensorflow pytorch jax',
91 classifiers=[
92 "Development Status :: 4 - Beta",
93 "License :: OSI Approved :: Apache Software License",
94 "Intended Audience :: Science/Research",
95 "Topic :: Scientific/Engineering",
96 "Topic :: Scientific/Engineering :: Physics",
97 "Programming Language :: Python :: 3",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 "Programming Language :: Python :: 3.8",
101 ],
102 package_dir={'': 'src'},
103 packages=find_packages(where='src'),
104 include_package_data=True,
105 python_requires=">=3.6",
106 install_requires=[
107 'scipy', # requires numpy, which is required by pyhf and tensorflow
108 'click>=6.0', # for console scripts,
109 'tqdm', # for readxml
110 'jsonschema>=3.2.0', # for utils
111 'jsonpatch',
112 'pyyaml', # for parsing CLI equal-delimited options
113 ],
114 extras_require=extras_require,
115 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
116 dependency_links=[],
117 use_scm_version=lambda: {'local_scheme': lambda version: ''},
118 )
119
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,4 @@
-from setuptools import setup, find_packages
-from pathlib import Path
-
-this_directory = Path(__file__).parent.resolve()
-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
- long_description = readme_rst.read()
+from setuptools import setup
extras_require = {
'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
@@ -73,46 +68,6 @@
setup(
- name='pyhf',
- version='0.4.1',
- description='(partial) pure python histfactory implementation',
- long_description=long_description,
- long_description_content_type='text/x-rst',
- url='https://github.com/scikit-hep/pyhf',
- project_urls={
- "Documentation": "https://scikit-hep.org/pyhf/",
- "Source": "https://github.com/scikit-hep/pyhf",
- "Tracker": "https://github.com/scikit-hep/pyhf/issues",
- },
- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
- author_email='[email protected], [email protected], [email protected]',
- license='Apache',
- keywords='physics fitting numpy scipy tensorflow pytorch jax',
- classifiers=[
- "Development Status :: 4 - Beta",
- "License :: OSI Approved :: Apache Software License",
- "Intended Audience :: Science/Research",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Physics",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- ],
- package_dir={'': 'src'},
- packages=find_packages(where='src'),
- include_package_data=True,
- python_requires=">=3.6",
- install_requires=[
- 'scipy', # requires numpy, which is required by pyhf and tensorflow
- 'click>=6.0', # for console scripts,
- 'tqdm', # for readxml
- 'jsonschema>=3.2.0', # for utils
- 'jsonpatch',
- 'pyyaml', # for parsing CLI equal-delimited options
- ],
extras_require=extras_require,
- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
- dependency_links=[],
use_scm_version=lambda: {'local_scheme': lambda version: ''},
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,9 +1,4 @@\n-from setuptools import setup, find_packages\n-from pathlib import Path\n-\n-this_directory = Path(__file__).parent.resolve()\n-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n- long_description = readme_rst.read()\n+from setuptools import setup\n \n extras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n@@ -73,46 +68,6 @@\n \n \n setup(\n- name='pyhf',\n- version='0.4.1',\n- description='(partial) pure python histfactory implementation',\n- long_description=long_description,\n- long_description_content_type='text/x-rst',\n- url='https://github.com/scikit-hep/pyhf',\n- project_urls={\n- \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n- \"Source\": \"https://github.com/scikit-hep/pyhf\",\n- \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n- },\n- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n- author_email='[email protected], [email protected], [email protected]',\n- license='Apache',\n- keywords='physics fitting numpy scipy tensorflow pytorch jax',\n- classifiers=[\n- \"Development Status :: 4 - Beta\",\n- \"License :: OSI Approved :: Apache Software License\",\n- \"Intended Audience :: Science/Research\",\n- \"Topic :: Scientific/Engineering\",\n- \"Topic :: Scientific/Engineering :: Physics\",\n- \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n- ],\n- package_dir={'': 'src'},\n- packages=find_packages(where='src'),\n- include_package_data=True,\n- python_requires=\">=3.6\",\n- install_requires=[\n- 'scipy', # requires numpy, which is required by pyhf and tensorflow\n- 'click>=6.0', # for console scripts,\n- 'tqdm', # for readxml\n- 'jsonschema>=3.2.0', # for utils\n- 'jsonpatch',\n- 'pyyaml', # for parsing CLI equal-delimited options\n- ],\n extras_require=extras_require,\n- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n- dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n )\n", "issue": "Move metadata from setup.py to setup.cfg\n# Description\r\n\r\n@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. 
We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing.\r\n\r\n@henryiii Can you comment on how to deal with `project_urls`?\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n project_urls={\n \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n \"Source\": \"https://github.com/scikit-hep/pyhf\",\n \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n 
use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,914 | 618 |
gh_patches_debug_31047 | rasdani/github-patches | git_diff | conan-io__conan-center-index-11189 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0
### Package and Environment Details
* Package Name/Version: **libpciaccess/0.16**
* Operating System+version: **Fedora 34 x86_64**
* Compiler+version: **gcc 11.3.1**
* Conan version: **conan 1.49.0**
* Python version: **Python 3.9.13**
### Conan profile
```
Configuration for profile default:
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
[options]
[conf]
[build_requires]
[env]
```
### Steps to reproduce
Simply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.
Expected result:
libpciaccess/0.16 is successfully built and made available in conan cache.
Actual result:
Build fails with the following error:
```
libpciaccess/0.16:
libpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed
libpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646
ERROR: libpciaccess/0.16: Error in build() method, line 66
autotools = Autotools(self, build_script_folder=self._source_subfolder)
TypeError: __init__() got an unexpected keyword argument 'build_script_folder'
```
The problem appears to be that the changes merged in #11021 which reverts the fix introduced for #10909 in #10910 hasn't been updated in the conan center. Possibly because it reverts to a previous version?
### Logs
_No response_
</issue>
<code>
[start of recipes/libpciaccess/all/conanfile.py]
1 import os
2
3 from conan.tools.gnu import Autotools, AutotoolsToolchain
4 from conans import ConanFile, tools
5 from conans.errors import ConanInvalidConfiguration
6
7 required_conan_version = ">=1.33.0"
8
9
10 class LibPciAccessConan(ConanFile):
11 name = "libpciaccess"
12 description = "Generic PCI access library"
13 topics = ("pci", "xorg")
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://gitlab.freedesktop.org/xorg/lib/libpciaccess"
16 license = "MIT", "X11"
17
18 settings = "os", "arch", "compiler", "build_type"
19 options = {"shared": [True, False], "fPIC": [True, False]}
20 default_options = {"shared": False, "fPIC": True}
21
22 _source_subfolder = "source_subfolder"
23 _build_subfolder = "build_subfolder"
24
25 def validate(self):
26 def is_supported(settings):
27 if settings.os in ("Linux", "FreeBSD", "SunOS"):
28 return True
29 return settings.os == "Windows" and settings.get_safe("os.subsystem") == "cygwin"
30 if not is_supported(self.settings):
31 raise ConanInvalidConfiguration("Unsupported architecture.")
32
33 def config_options(self):
34 if self.settings.os == "Windows":
35 del self.options.fPIC
36
37 def configure(self):
38 if self.options.shared:
39 del self.options.fPIC
40 del self.settings.compiler.libcxx
41 del self.settings.compiler.cppstd
42
43 def build_requirements(self):
44 self.build_requires("pkgconf/1.7.4")
45 self.build_requires("xorg-macros/1.19.3")
46 self.build_requires("libtool/2.4.6")
47
48 def generate(self):
49 tc = AutotoolsToolchain(self)
50 tc.default_configure_install_args = True
51 tc.generate()
52
53 def source(self):
54 tools.get(**self.conan_data["sources"][self.version],
55 strip_root=True, destination=self._source_subfolder)
56
57 def build(self):
58 # autoreconf
59 self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"),
60 win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)
61
62 autotools = Autotools(self)
63 autotools.configure(build_script_folder=self._source_subfolder)
64 autotools.make()
65
66 def package(self):
67 self.copy(pattern="COPYING", dst="licenses",
68 src=self._source_subfolder)
69
70 autotools = Autotools(self)
71 autotools.install()
72
73 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
74 tools.remove_files_by_mask(os.path.join(
75 self.package_folder, "lib"), "*.la")
76
77 def package_info(self):
78 self.cpp_info.libs = tools.collect_libs(self)
79 self.cpp_info.set_property("pkg_config_name", "pciaccess")
80
[end of recipes/libpciaccess/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py
--- a/recipes/libpciaccess/all/conanfile.py
+++ b/recipes/libpciaccess/all/conanfile.py
@@ -1,10 +1,10 @@
import os
-
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
-required_conan_version = ">=1.33.0"
+
+required_conan_version = ">=1.44.0"
class LibPciAccessConan(ConanFile):
@@ -19,8 +19,9 @@
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
- _source_subfolder = "source_subfolder"
- _build_subfolder = "build_subfolder"
+ @property
+ def _source_subfolder(self):
+ return "source_subfolder"
def validate(self):
def is_supported(settings):
@@ -55,7 +56,6 @@
strip_root=True, destination=self._source_subfolder)
def build(self):
- # autoreconf
self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"),
win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)
@@ -75,5 +75,5 @@
self.package_folder, "lib"), "*.la")
def package_info(self):
- self.cpp_info.libs = tools.collect_libs(self)
+ self.cpp_info.libs = ["pciaccess"]
self.cpp_info.set_property("pkg_config_name", "pciaccess")
| {"golden_diff": "diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py\n--- a/recipes/libpciaccess/all/conanfile.py\n+++ b/recipes/libpciaccess/all/conanfile.py\n@@ -1,10 +1,10 @@\n import os\n-\n from conan.tools.gnu import Autotools, AutotoolsToolchain\n from conans import ConanFile, tools\n from conans.errors import ConanInvalidConfiguration\n \n-required_conan_version = \">=1.33.0\"\n+\n+required_conan_version = \">=1.44.0\"\n \n \n class LibPciAccessConan(ConanFile):\n@@ -19,8 +19,9 @@\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n \n- _source_subfolder = \"source_subfolder\"\n- _build_subfolder = \"build_subfolder\"\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n \n def validate(self):\n def is_supported(settings):\n@@ -55,7 +56,6 @@\n strip_root=True, destination=self._source_subfolder)\n \n def build(self):\n- # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n \n@@ -75,5 +75,5 @@\n self.package_folder, \"lib\"), \"*.la\")\n \n def package_info(self):\n- self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.libs = [\"pciaccess\"]\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "issue": "[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0\n### Package and Environment Details\r\n\r\n* Package Name/Version: **libpciaccess/0.16**\r\n* Operating System+version: **Fedora 34 x86_64**\r\n* Compiler+version: **gcc 11.3.1**\r\n* Conan version: **conan 1.49.0**\r\n* Python version: **Python 3.9.13**\r\n\r\n\r\n### Conan profile\r\n\r\n```\r\nConfiguration for profile default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=11\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\nSimply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.\r\n\r\nExpected result:\r\nlibpciaccess/0.16 is successfully built and made available in conan cache.\r\n\r\nActual result:\r\nBuild fails with the following error:\r\n\r\n```\r\nlibpciaccess/0.16: \r\nlibpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed\r\nlibpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646\r\nERROR: libpciaccess/0.16: Error in build() method, line 66\r\n\tautotools = Autotools(self, build_script_folder=self._source_subfolder)\r\n\tTypeError: __init__() got an unexpected keyword argument 'build_script_folder'\r\n\r\n```\r\nThe problem appears to be that the changes merged in #11021 which reverts the fix introduced for #10909 in #10910 hasn't been updated in the conan center. 
Possibly because it reverts to a previous version?\r\n\r\n### Logs\r\n\r\n_No response_\n", "before_files": [{"content": "import os\n\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibPciAccessConan(ConanFile):\n name = \"libpciaccess\"\n description = \"Generic PCI access library\"\n topics = (\"pci\", \"xorg\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://gitlab.freedesktop.org/xorg/lib/libpciaccess\"\n license = \"MIT\", \"X11\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def validate(self):\n def is_supported(settings):\n if settings.os in (\"Linux\", \"FreeBSD\", \"SunOS\"):\n return True\n return settings.os == \"Windows\" and settings.get_safe(\"os.subsystem\") == \"cygwin\"\n if not is_supported(self.settings):\n raise ConanInvalidConfiguration(\"Unsupported architecture.\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n self.build_requires(\"xorg-macros/1.19.3\")\n self.build_requires(\"libtool/2.4.6\")\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.default_configure_install_args = True\n tc.generate()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self._source_subfolder)\n\n def build(self):\n # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.configure(build_script_folder=self._source_subfolder)\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(\n self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "path": "recipes/libpciaccess/all/conanfile.py"}]} | 1,882 | 405 |
gh_patches_debug_2041 | rasdani/github-patches | git_diff | Pyomo__pyomo-2633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixed Vars unpickle as stale
## Summary
I'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.
### Steps to reproduce the issue
```
from pyomo.environ import *
import pickle
m = ConcreteModel()
m.x = Var(domain=Binary)
m.x.fix(1)
unpickle = pickle.loads(pickle.dumps(m))
m.x.pprint()
unpickle.x.pprint()
```
```
x : Size=1, Index=None
Key : Lower : Value : Upper : Fixed : Stale : Domain
None : 0 : 1 : 1 : True : False : Binary
x : Size=1, Index=None
Key : Lower : Value : Upper : Fixed : Stale : Domain
None : 0 : 1 : 1 : True : True : Binary
```
### Error Message
It seems like these models should be identical, even up to stale-ness, right?
### Information on your system
Pyomo version: main
Python version: 3.8
Operating system: linux
How Pyomo was installed (PyPI, conda, source): source
Solver (if applicable):
</issue>
<code>
[start of pyomo/core/staleflag.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # ___________________________________________________________________________
11
12 class _StaleFlagManager(object):
13 def __init__(self):
14 self._current = 0
15 self.mark_all_as_stale()
16
17 def stale_mapper(self, encode, value):
18 if encode:
19 return self.is_stale(value)
20 else:
21 if value:
22 return 0
23 else:
24 self.get_flag(0)
25
26 def _get_flag(self, current_flag):
27 """Return the current global stale flag value"""
28 return self._current
29
30 def _get_flag_delayed(self, current_flag):
31 """Implement the "delayed" advancement of the global stale flag value
32
33 This will continue to return the current value of the state flag
34 until the first non-stale variable is updated (that it, it is
35 passed the current stale flag when called). This allows for
36 updating stale variable values without incrementing the global
37 stale flag, but will mark everything as stale as soon as a
38 non-stale variable value is changed.
39
40 """
41 if current_flag == self._current:
42 self._current += 1
43 setattr(self, 'get_flag', getattr(self, '_get_flag'))
44 return self._current
45
46 def is_stale(self, val):
47 """Return ``True`` if the passed value indicated a stale variable"""
48 return val != self._current
49
50 def mark_all_as_stale(self, delayed=False):
51 """Advance the global stale flag, marking all variables as stale
52
53 This is generally called immediately before and after a batch
54 variable update (i.e. loading values from a solver result or
55 stored solution). Before the batch update
56 :meth:`mark_all_as_stale` is called with ``delayed=False``,
57 which immediately marks all variables as stale. After the batch
58 update, :meth:`mark_all_as_stale` is typically called with
59 ``delayed=True``. This allows additional stale variables to be
60 updated without advancing the global flag, but as soon as any
61 non-stale variable has its value changed, then the flag is
62 advanced and all other variables become stale.
63
64 """
65 if delayed:
66 setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))
67 else:
68 setattr(self, 'get_flag', getattr(self, '_get_flag'))
69 self._current += 1
70
71 StaleFlagManager = _StaleFlagManager()
72
[end of pyomo/core/staleflag.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py
--- a/pyomo/core/staleflag.py
+++ b/pyomo/core/staleflag.py
@@ -21,7 +21,7 @@
if value:
return 0
else:
- self.get_flag(0)
+ return self.get_flag(0)
def _get_flag(self, current_flag):
"""Return the current global stale flag value"""
| {"golden_diff": "diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py\n--- a/pyomo/core/staleflag.py\n+++ b/pyomo/core/staleflag.py\n@@ -21,7 +21,7 @@\n if value:\n return 0\n else:\n- self.get_flag(0)\n+ return self.get_flag(0)\n \n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n", "issue": "Fixed Vars unpickle as stale\n## Summary\r\n\r\nI'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.\r\n\r\n### Steps to reproduce the issue\r\n\r\n```\r\nfrom pyomo.environ import *\r\nimport pickle\r\n\r\nm = ConcreteModel()\r\nm.x = Var(domain=Binary)\r\nm.x.fix(1)\r\n\r\nunpickle = pickle.loads(pickle.dumps(m))\r\n\r\nm.x.pprint()\r\nunpickle.x.pprint()\r\n```\r\n\r\n```\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : False : Binary\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : True : Binary\r\n```\r\n\r\n### Error Message\r\n\r\nIt seems like these models should be identical, even up to stale-ness, right?\r\n\r\n### Information on your system\r\n\r\nPyomo version: main\r\nPython version: 3.8\r\nOperating system: linux\r\nHow Pyomo was installed (PyPI, conda, source): source \r\nSolver (if applicable):\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. 
After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py"}]} | 1,593 | 103 |
gh_patches_debug_11964 | rasdani/github-patches | git_diff | urllib3__urllib3-1439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'Failed to parse headers' warning logged when getting message/rfc822 content
I've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:
```py
class TestOkayHeaders(SocketDummyServerTestCase):
def _test_okay_header_parsing(self, header):
self.start_response_handler((
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 0\r\n'
) + header + b'\r\n\r\n'
)
pool = HTTPConnectionPool(self.host, self.port, retries=False)
self.addCleanup(pool.close)
with LogRecorder() as logs:
pool.request('GET', '/')
for record in logs:
assert 'Failed to parse headers' not in record.msg
def test_header_text_plain(self):
self._test_okay_header_parsing(b'Content-type: text/plain')
def test_header_message_rfc822(self):
self._test_okay_header_parsing(b'Content-type: message/rfc822')
```
The test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:
```py
Failed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]
Traceback (most recent call last):
File "/home/user/git/urllib3/src/urllib3/connectionpool.py", line 396, in _make_request
assert_header_parsing(httplib_response.msg)
File "/home/user/git/urllib3/src/urllib3/util/response.py", line 68, in assert_header_parsing
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
urllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]
```
While retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.
</issue>
<code>
[start of src/urllib3/util/response.py]
1 from __future__ import absolute_import
2 from ..packages.six.moves import http_client as httplib
3
4 from ..exceptions import HeaderParsingError
5
6
7 def is_fp_closed(obj):
8 """
9 Checks whether a given file-like object is closed.
10
11 :param obj:
12 The file-like object to check.
13 """
14
15 try:
16 # Check `isclosed()` first, in case Python3 doesn't set `closed`.
17 # GH Issue #928
18 return obj.isclosed()
19 except AttributeError:
20 pass
21
22 try:
23 # Check via the official file-like-object way.
24 return obj.closed
25 except AttributeError:
26 pass
27
28 try:
29 # Check if the object is a container for another file-like object that
30 # gets released on exhaustion (e.g. HTTPResponse).
31 return obj.fp is None
32 except AttributeError:
33 pass
34
35 raise ValueError("Unable to determine whether fp is closed.")
36
37
38 def assert_header_parsing(headers):
39 """
40 Asserts whether all headers have been successfully parsed.
41 Extracts encountered errors from the result of parsing headers.
42
43 Only works on Python 3.
44
45 :param headers: Headers to verify.
46 :type headers: `httplib.HTTPMessage`.
47
48 :raises urllib3.exceptions.HeaderParsingError:
49 If parsing errors are found.
50 """
51
52 # This will fail silently if we pass in the wrong kind of parameter.
53 # To make debugging easier add an explicit check.
54 if not isinstance(headers, httplib.HTTPMessage):
55 raise TypeError('expected httplib.Message, got {0}.'.format(
56 type(headers)))
57
58 defects = getattr(headers, 'defects', None)
59 get_payload = getattr(headers, 'get_payload', None)
60
61 unparsed_data = None
62 if get_payload: # Platform-specific: Python 3.
63 unparsed_data = get_payload()
64
65 if defects or unparsed_data:
66 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
67
68
69 def is_response_to_head(response):
70 """
71 Checks whether the request of a response has been a HEAD-request.
72 Handles the quirks of AppEngine.
73
74 :param conn:
75 :type conn: :class:`httplib.HTTPResponse`
76 """
77 # FIXME: Can we do this somehow without accessing private httplib _method?
78 method = response._method
79 if isinstance(method, int): # Platform-specific: Appengine
80 return method == 3
81 return method.upper() == 'HEAD'
82
[end of src/urllib3/util/response.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py
--- a/src/urllib3/util/response.py
+++ b/src/urllib3/util/response.py
@@ -59,8 +59,14 @@
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
- if get_payload: # Platform-specific: Python 3.
- unparsed_data = get_payload()
+ if get_payload:
+ # get_payload is actually email.message.Message.get_payload;
+ # we're only interested in the result if it's not a multipart message
+ if not headers.is_multipart():
+ payload = get_payload()
+
+ if isinstance(payload, (bytes, str)):
+ unparsed_data = payload
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
| {"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -59,8 +59,14 @@\n get_payload = getattr(headers, 'get_payload', None)\n \n unparsed_data = None\n- if get_payload: # Platform-specific: Python 3.\n- unparsed_data = get_payload()\n+ if get_payload:\n+ # get_payload is actually email.message.Message.get_payload;\n+ # we're only interested in the result if it's not a multipart message\n+ if not headers.is_multipart():\n+ payload = get_payload()\n+\n+ if isinstance(payload, (bytes, str)):\n+ unparsed_data = payload\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "'Failed to parse headers' warning logged when getting message/rfc822 content\nI've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:\r\n\r\n```py\r\nclass TestOkayHeaders(SocketDummyServerTestCase):\r\n\r\n def _test_okay_header_parsing(self, header):\r\n self.start_response_handler((\r\n b'HTTP/1.1 200 OK\\r\\n'\r\n b'Content-Length: 0\\r\\n'\r\n ) + header + b'\\r\\n\\r\\n'\r\n )\r\n\r\n pool = HTTPConnectionPool(self.host, self.port, retries=False)\r\n self.addCleanup(pool.close)\r\n\r\n with LogRecorder() as logs:\r\n pool.request('GET', '/')\r\n\r\n for record in logs:\r\n assert 'Failed to parse headers' not in record.msg\r\n\r\n def test_header_text_plain(self):\r\n self._test_okay_header_parsing(b'Content-type: text/plain')\r\n\r\n def test_header_message_rfc822(self):\r\n self._test_okay_header_parsing(b'Content-type: message/rfc822')\r\n```\r\n\r\nThe test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:\r\n\r\n```py\r\nFailed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\nTraceback (most recent call last):\r\n File \"/home/user/git/urllib3/src/urllib3/connectionpool.py\", line 396, in _make_request\r\n assert_header_parsing(httplib_response.msg)\r\n File \"/home/user/git/urllib3/src/urllib3/util/response.py\", line 68, in assert_header_parsing\r\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\r\nurllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\n```\r\n\r\nWhile retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError('expected httplib.Message, got {0}.'.format(\n type(headers)))\n\n defects = getattr(headers, 'defects', None)\n get_payload = getattr(headers, 'get_payload', None)\n\n unparsed_data = None\n if get_payload: # Platform-specific: Python 3.\n unparsed_data = get_payload()\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == 'HEAD'\n", "path": "src/urllib3/util/response.py"}]} | 1,755 | 205 |
gh_patches_debug_2822 | rasdani/github-patches | git_diff | StackStorm__st2-5092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove obsolete 'bin/st2-check-license'
The project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.
The task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.
This is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.
Bonus points to find other obsolete, outdated, irrelevant scripts in st2 core.
Help wanted!
</issue>
<code>
[start of st2common/setup.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2020 The StackStorm Authors.
3 # Copyright 2019 Extreme Networks, Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from __future__ import absolute_import
18 import os.path
19
20 from setuptools import setup, find_packages
21
22 from dist_utils import fetch_requirements
23 from dist_utils import apply_vagrant_workaround
24 from dist_utils import get_version_string
25
26 ST2_COMPONENT = 'st2common'
27 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
28 REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')
29 INIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')
30
31 install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
32
33 apply_vagrant_workaround()
34 setup(
35 name=ST2_COMPONENT,
36 version=get_version_string(INIT_FILE),
37 description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),
38 author='StackStorm',
39 author_email='[email protected]',
40 license='Apache License (2.0)',
41 url='https://stackstorm.com/',
42 install_requires=install_reqs,
43 dependency_links=dep_links,
44 test_suite=ST2_COMPONENT,
45 zip_safe=False,
46 include_package_data=True,
47 packages=find_packages(exclude=['setuptools', 'tests']),
48 scripts=[
49 'bin/st2-bootstrap-rmq',
50 'bin/st2-cleanup-db',
51 'bin/st2-register-content',
52 'bin/st2-purge-executions',
53 'bin/st2-purge-trigger-instances',
54 'bin/st2-run-pack-tests',
55 'bin/st2ctl',
56 'bin/st2-generate-symmetric-crypto-key',
57 'bin/st2-self-check',
58 'bin/st2-track-result',
59 'bin/st2-validate-pack-config',
60 'bin/st2-check-license',
61 'bin/st2-pack-install',
62 'bin/st2-pack-download',
63 'bin/st2-pack-setup-virtualenv'
64 ],
65 entry_points={
66 'st2common.metrics.driver': [
67 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',
68 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',
69 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'
70 ],
71 'st2common.rbac.backend': [
72 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'
73 ],
74 }
75 )
76
[end of st2common/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2common/setup.py b/st2common/setup.py
--- a/st2common/setup.py
+++ b/st2common/setup.py
@@ -57,7 +57,6 @@
'bin/st2-self-check',
'bin/st2-track-result',
'bin/st2-validate-pack-config',
- 'bin/st2-check-license',
'bin/st2-pack-install',
'bin/st2-pack-download',
'bin/st2-pack-setup-virtualenv'
| {"golden_diff": "diff --git a/st2common/setup.py b/st2common/setup.py\n--- a/st2common/setup.py\n+++ b/st2common/setup.py\n@@ -57,7 +57,6 @@\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n- 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n", "issue": "Remove obsolete 'bin/st2-check-license'\nThe project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.\r\n\r\nThe task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.\r\n\r\nThis is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.\r\nBonus points to find other obsolete, outdated, irrelevant scripts in st2 core.\r\nHelp wanted!\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py"}]} | 1,445 | 106 |
gh_patches_debug_16823 | rasdani/github-patches | git_diff | bids-standard__pybids-505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.9.4 Release
@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?
</issue>
<code>
[start of tools/prep_zenodo.py]
1 #!/usr/bin/env python3
2 import git
3 import json
4 from subprocess import run, PIPE, CalledProcessError
5 from pathlib import Path
6 from tempfile import TemporaryDirectory
7
8
9 def decommify(name):
10 return ' '.join(name.split(', ')[::-1])
11
12
13 # List of repositories whose commits should be counted as contributions
14 codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
15
16 # Last shablona commit
17 origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
18
19 git_root = Path(git.Repo('.', search_parent_directories=True).working_dir)
20 zenodo_file = git_root / '.zenodo.json'
21
22 zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}
23
24 orig_creators = zenodo.get('creators', [])
25 creator_map = {decommify(creator['name']): creator
26 for creator in orig_creators}
27
28 shortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)
29 counts = [line.split('\t', 1)[::-1]
30 for line in shortlog.stdout.decode().split('\n') if line]
31
32 # Get additional commit counts from dependencies
33 with TemporaryDirectory() as tmpdir:
34 tmppath = Path(tmpdir)
35 for repo in codependents:
36 try:
37 repo, ref = repo
38 except (TypeError, ValueError):
39 ref = None
40 repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
41 try:
42 clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)
43 except CalledProcessError as err:
44 raise RuntimeError("Could not clone {}".format(repo)) from err
45
46 if ref is None:
47 tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
48 # latest tag
49 ref = tag.stdout.decode().strip().rsplit('\n', 1)[1]
50
51 dep_shortlog = run(
52 ['git', '-C', repo_dir, 'shortlog', '-ns', ref],
53 stdout=PIPE)
54 counts.extend(line.split('\t', 1)[::-1]
55 for line in dep_shortlog.stdout.decode().split('\n')
56 if line)
57
58 commit_counts = {}
59 for committer, commits in counts:
60 commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)
61
62 # Stable sort:
63 # Number of commits in reverse order
64 # Ties broken by alphabetical order of first name
65 committers = [committer
66 for committer, _ in sorted(commit_counts.items(),
67 key=lambda x: (-x[1], x[0]))]
68
69 # Tal to the top
70 first_author = 'Tal Yarkoni'
71 if committers[0] != first_author:
72 committers.remove(first_author)
73 committers.insert(0, first_author)
74
75 creators = [
76 creator_map.get(committer, {'name': committer})
77 for committer in committers
78 ]
79
80 zenodo['creators'] = creators
81 zenodo_file.write_text(json.dumps(zenodo, indent=2) + '\n')
82
[end of tools/prep_zenodo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py
--- a/tools/prep_zenodo.py
+++ b/tools/prep_zenodo.py
@@ -9,6 +9,11 @@
def decommify(name):
return ' '.join(name.split(', ')[::-1])
+# Users who have asked not to be cited at this time
+# XXX We should add a shortlog since the most recent tag and explicitly note
+# that a blacklisted user has contributed again recently, and verify they still
+# do not want to be cited.
+blacklist = {'Cecile Madjar'}
# List of repositories whose commits should be counted as contributions
codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
@@ -75,6 +80,7 @@
creators = [
creator_map.get(committer, {'name': committer})
for committer in committers
+ if committer not in blacklist
]
zenodo['creators'] = creators
| {"golden_diff": "diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py\n--- a/tools/prep_zenodo.py\n+++ b/tools/prep_zenodo.py\n@@ -9,6 +9,11 @@\n def decommify(name):\n return ' '.join(name.split(', ')[::-1])\n \n+# Users who have asked not to be cited at this time\n+# XXX We should add a shortlog since the most recent tag and explicitly note\n+# that a blacklisted user has contributed again recently, and verify they still\n+# do not want to be cited.\n+blacklist = {'Cecile Madjar'}\n \n # List of repositories whose commits should be counted as contributions\n codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n@@ -75,6 +80,7 @@\n creators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n+ if committer not in blacklist\n ]\n \n zenodo['creators'] = creators\n", "issue": "0.9.4 Release\n@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2) + 
'\\n')\n", "path": "tools/prep_zenodo.py"}]} | 1,476 | 229 |
gh_patches_debug_26541 | rasdani/github-patches | git_diff | OpenMined__PySyft-5732 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add tests for domain's RoleManager
## Description
Add unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager`
## Type of Test
- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)
- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)
- [ ] **Regression test** (e.g. checking if adding or removing a module of code allows other systems to continue to function as intended)
- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)
- [ ] **Performance test** (e.g. checking to see how efficient a system is at performing the intended task)
- [ ] Other...
## Expected Behavior
Ideally, the tests should cover as many methods as possible and within reason.
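One possible shape for these tests is sketched below; the import paths and the mocked database are assumptions about the app's layout and may need adjusting:

```python
# Hedged sketch: exercise RoleManager.first()'s error path with a mocked database.
from unittest import mock

import pytest

from main.core.exceptions import RoleNotFoundError  # import path is an assumption
from main.core.manager.database_manager import DatabaseManager  # assumption
from main.core.manager.role_manager import RoleManager  # assumption


def test_first_raises_when_role_is_missing(monkeypatch):
    manager = RoleManager(database=mock.MagicMock())
    # Make the generic lookup report "no result" so the wrapper's error path runs.
    monkeypatch.setattr(DatabaseManager, "first", lambda self, **kwargs: None)
    with pytest.raises(RoleNotFoundError):
        manager.first(name="Missing Role")
```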
</issue>
<code>
[start of packages/grid/apps/domain/src/main/core/manager/role_manager.py]
1 # stdlib
2 from typing import List
3 from typing import Union
4
5 # grid relative
6 from ..database.tables.roles import Role
7 from ..exceptions import RoleNotFoundError
8 from .database_manager import DatabaseManager
9
10
11 class RoleManager(DatabaseManager):
12
13 schema = Role
14
15 def __init__(self, database):
16 self._schema = RoleManager.schema
17 self.db = database
18
19 @property
20 def user_role(self):
21 return self.first(name="User")
22
23 @property
24 def owner_role(self):
25 return self.first(name="Owner")
26
27 @property
28 def compliance_officer_role(self):
29 return self.first(name="Compliance Officer")
30
31 @property
32 def admin_role(self):
33 return self.first(name="Administrator")
34
35 @property
36 def common_roles(self):
37 return self.db.session.query(self._schema).filter_by(
38 can_triage_requests=False,
39 can_edit_settings=False,
40 can_create_users=False,
41 can_create_groups=False,
42 can_upload_data=False,
43 can_edit_roles=False,
44 can_manage_infrastructure=False,
45 )
46
47 @property
48 def org_roles(self):
49 return self.db.session.query(self._schema).except_(self.common_roles)
50
51 def first(self, **kwargs) -> Union[None, List]:
52 result = super().first(**kwargs)
53 if not result:
54 raise RoleNotFoundError
55 return result
56
57 def query(self, **kwargs) -> Union[None, List]:
58 results = super().query(**kwargs)
59 if len(results) == 0:
60 raise RoleNotFoundError
61 return results
62
63 def set(self, role_id, params):
64 if self.contain(id=role_id):
65 self.modify({"id": role_id}, params)
66 else:
67 raise RoleNotFoundError
68
[end of packages/grid/apps/domain/src/main/core/manager/role_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py
--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py
+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py
@@ -2,6 +2,8 @@
from typing import List
from typing import Union
+from flask_sqlalchemy import BaseQuery
+
# grid relative
from ..database.tables.roles import Role
from ..exceptions import RoleNotFoundError
@@ -32,8 +34,7 @@
def admin_role(self):
return self.first(name="Administrator")
- @property
- def common_roles(self):
+ def _common_roles(self) -> BaseQuery:
return self.db.session.query(self._schema).filter_by(
can_triage_requests=False,
can_edit_settings=False,
@@ -44,9 +45,13 @@
can_manage_infrastructure=False,
)
+ @property
+ def common_roles(self):
+ return self._common_roles().all()
+
@property
def org_roles(self):
- return self.db.session.query(self._schema).except_(self.common_roles)
+ return self.db.session.query(self._schema).except_(self._common_roles).all()
def first(self, **kwargs) -> Union[None, List]:
result = super().first(**kwargs)
| {"golden_diff": "diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n@@ -2,6 +2,8 @@\n from typing import List\n from typing import Union\n \n+from flask_sqlalchemy import BaseQuery\n+\n # grid relative\n from ..database.tables.roles import Role\n from ..exceptions import RoleNotFoundError\n@@ -32,8 +34,7 @@\n def admin_role(self):\n return self.first(name=\"Administrator\")\n \n- @property\n- def common_roles(self):\n+ def _common_roles(self) -> BaseQuery:\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n@@ -44,9 +45,13 @@\n can_manage_infrastructure=False,\n )\n \n+ @property\n+ def common_roles(self):\n+ return self._common_roles().all()\n+\n @property\n def org_roles(self):\n- return self.db.session.query(self._schema).except_(self.common_roles)\n+ return self.db.session.query(self._schema).except_(self._common_roles).all()\n \n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n", "issue": "Add tests for domain's RoleManager\n## Description\r\nAdd unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager`\r\n\r\n## Type of Test\r\n- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)\r\n- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)\r\n- [ ] **Regression test** (e.g. checking if by adding or removing a module of code allows other systems to continue to function as intended)\r\n- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)\r\n- [ ] **Performance test** (e.g. 
checking to see how efficient a system is as performing the intended task)\r\n- [ ] Other...\r\n\r\n## Expected Behavior\r\nIdeally, the tests should cover as many methods as possible and within reason.\r\n\n", "before_files": [{"content": "# stdlib\nfrom typing import List\nfrom typing import Union\n\n# grid relative\nfrom ..database.tables.roles import Role\nfrom ..exceptions import RoleNotFoundError\nfrom .database_manager import DatabaseManager\n\n\nclass RoleManager(DatabaseManager):\n\n schema = Role\n\n def __init__(self, database):\n self._schema = RoleManager.schema\n self.db = database\n\n @property\n def user_role(self):\n return self.first(name=\"User\")\n\n @property\n def owner_role(self):\n return self.first(name=\"Owner\")\n\n @property\n def compliance_officer_role(self):\n return self.first(name=\"Compliance Officer\")\n\n @property\n def admin_role(self):\n return self.first(name=\"Administrator\")\n\n @property\n def common_roles(self):\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n can_create_users=False,\n can_create_groups=False,\n can_upload_data=False,\n can_edit_roles=False,\n can_manage_infrastructure=False,\n )\n\n @property\n def org_roles(self):\n return self.db.session.query(self._schema).except_(self.common_roles)\n\n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n if not result:\n raise RoleNotFoundError\n return result\n\n def query(self, **kwargs) -> Union[None, List]:\n results = super().query(**kwargs)\n if len(results) == 0:\n raise RoleNotFoundError\n return results\n\n def set(self, role_id, params):\n if self.contain(id=role_id):\n self.modify({\"id\": role_id}, params)\n else:\n raise RoleNotFoundError\n", "path": "packages/grid/apps/domain/src/main/core/manager/role_manager.py"}]} | 1,260 | 309 |
gh_patches_debug_59199 | rasdani/github-patches | git_diff | Nitrate__Nitrate-380 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more information links to package
Add these links to `project_urls` argument of `setup.py`.
- Source Code: https://github.com/Nitrate/Nitrate
- Issue Tracker: https://github.com/Nitrate/Nitrate/issues
- Documentation: https://nitrate.readthedocs.io/
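
Roughly, the `setup()` call would gain something like this (a sketch; all existing arguments stay as they are):

```python
from setuptools import setup

setup(
    name="Nitrate",  # the real call keeps all of its existing arguments
    project_urls={
        "Source Code": "https://github.com/Nitrate/Nitrate",
        "Issue Tracker": "https://github.com/Nitrate/Nitrate/issues",
        "Documentation": "https://nitrate.readthedocs.io/",
    },
)
```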
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 with open('VERSION.txt', 'r') as f:
9 pkg_version = f.read().strip()
10
11
12 def get_long_description():
13 with open('README.rst', 'r') as f:
14 return f.read()
15
16
17 install_requires = [
18 'beautifulsoup4 >= 4.1.1',
19 'django >= 1.11,<3.0',
20 'django-contrib-comments == 1.8.0',
21 'django-tinymce == 2.7.0',
22 'django-uuslug == 1.1.8',
23 'html2text',
24 'odfpy >= 0.9.6',
25 'python-bugzilla',
26 'six',
27 'xmltodict',
28 'kobo == 0.9.0'
29 ]
30
31 if sys.version_info.major < 3:
32 install_requires += [
33 'enum34',
34 ]
35
36 extras_require = {
37 'mysql': ['PyMySQL == 0.9.2'],
38 'pgsql': ['psycopg2 == 2.7.5'],
39
40 # Required for tcms.core.contrib.auth.backends.KerberosBackend
41 'krbauth': [
42 'kerberos == 1.2.5'
43 ],
44
45 # Packages for building documentation
46 'docs': [
47 'Sphinx >= 1.1.2',
48 'sphinx_rtd_theme',
49 ],
50
51 # Necessary packages for running tests
52 'tests': [
53 'beautifulsoup4',
54 'coverage',
55 'factory_boy',
56 'flake8',
57 'mock',
58 'pytest',
59 'pytest-cov',
60 'pytest-django',
61 ],
62
63 # Contain tools that assists the development
64 'devtools': [
65 'django-debug-toolbar == 1.7',
66 'tox',
67 'django-extensions',
68 'pygraphviz',
69 'future-breakpoint',
70 ],
71
72 # Required packages required to run async tasks
73 'async': [
74 'celery == 4.2.0',
75 ]
76 }
77
78 setup(
79 name='Nitrate',
80 version=pkg_version,
81 description='Test Case Management System',
82 long_description=get_long_description(),
83 author='Nitrate Team',
84 maintainer='Chenxiong Qi',
85 maintainer_email='[email protected]',
86 url='https://github.com/Nitrate/Nitrate/',
87 license='GPLv2+',
88 keywords='test case',
89 install_requires=install_requires,
90 extras_require=extras_require,
91 packages=find_packages(),
92 include_package_data=True,
93 classifiers=[
94 'Framework :: Django',
95 'Framework :: Django :: 1.11',
96 'Framework :: Django :: 2.0',
97 'Framework :: Django :: 2.1',
98 'Intended Audience :: Developers',
99 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
100 'Programming Language :: Python :: 2',
101 'Programming Language :: Python :: 2.7',
102 'Programming Language :: Python :: 3',
103 'Programming Language :: Python :: 3.6',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: Software Development :: Quality Assurance',
106 'Topic :: Software Development :: Testing',
107 ],
108 )
109
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,4 +105,9 @@
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
+ project_urls={
+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
+ 'Source Code': 'https://github.com/Nitrate/Nitrate',
+ 'Documentation': 'https://nitrate.readthedocs.io/',
+ },
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,4 +105,9 @@\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n+ project_urls={\n+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n+ 'Source Code': 'https://github.com/Nitrate/Nitrate',\n+ 'Documentation': 'https://nitrate.readthedocs.io/',\n+ },\n )\n", "issue": "Add more information links to package\nAdd these links to `project_urls` argument of `setup.py`.\r\n\r\n- Source Code: https://github.com/Nitrate/Nitrate\r\n- Issue Tracker: https://github.com/Nitrate/Nitrate/issues\r\n- Documentation: https://nitrate.readthedocs.io/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]} | 1,541 | 117 |
gh_patches_debug_21139 | rasdani/github-patches | git_diff | conan-io__conan-3600 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Listing package content
Maybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)?
TIA
</issue>
<code>
[start of conans/client/local_file_getter.py]
1 import os
2
3 from conans.errors import NotFoundException
4 from conans.model.manifest import discarded_file
5 from conans.model.ref import PackageReference
6 from conans.util.files import load
7
8
9 def get_path(client_cache, conan_ref, package_id, path):
10 """
11 :param client_cache: Conan's client cache
12 :param conan_ref: Specified reference in the conan get command
13 :param package_id: Specified package id (can be None)
14 :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
15 :return: The real path in the local cache for the specified parameters
16 """
17 if package_id is None: # Get the file in the exported files
18 folder = client_cache.export(conan_ref)
19 else:
20 folder = client_cache.package(PackageReference(conan_ref, package_id))
21
22 abs_path = os.path.join(folder, path)
23 if not os.path.exists(abs_path):
24 raise NotFoundException("The specified path doesn't exist")
25 if os.path.isdir(abs_path):
26 return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])
27 else:
28 return load(abs_path)
29
[end of conans/client/local_file_getter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py
--- a/conans/client/local_file_getter.py
+++ b/conans/client/local_file_getter.py
@@ -11,13 +11,15 @@
:param client_cache: Conan's client cache
:param conan_ref: Specified reference in the conan get command
:param package_id: Specified package id (can be None)
- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
+ :param path: Path to a file, subfolder of exports (if only ref)
+ or package (if package_id defined)
:return: The real path in the local cache for the specified parameters
"""
if package_id is None: # Get the file in the exported files
folder = client_cache.export(conan_ref)
else:
- folder = client_cache.package(PackageReference(conan_ref, package_id))
+ folder = client_cache.package(PackageReference(conan_ref, package_id),
+ short_paths=None)
abs_path = os.path.join(folder, path)
if not os.path.exists(abs_path):
| {"golden_diff": "diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py\n--- a/conans/client/local_file_getter.py\n+++ b/conans/client/local_file_getter.py\n@@ -11,13 +11,15 @@\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n+ :param path: Path to a file, subfolder of exports (if only ref)\n+ or package (if package_id defined)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n- folder = client_cache.package(PackageReference(conan_ref, package_id))\n+ folder = client_cache.package(PackageReference(conan_ref, package_id),\n+ short_paths=None)\n \n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n", "issue": "Listing package content\nMaybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)?\r\nTIA\r\n\n", "before_files": [{"content": "import os\n\nfrom conans.errors import NotFoundException\nfrom conans.model.manifest import discarded_file\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import load\n\n\ndef get_path(client_cache, conan_ref, package_id, path):\n \"\"\"\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n folder = client_cache.package(PackageReference(conan_ref, package_id))\n\n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n raise NotFoundException(\"The specified path doesn't exist\")\n if os.path.isdir(abs_path):\n return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])\n else:\n return load(abs_path)\n", "path": "conans/client/local_file_getter.py"}]} | 901 | 271 |
gh_patches_debug_56202 | rasdani/github-patches | git_diff | svthalia__concrexit-3558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Separate promotion permissions in eventadmin inline from the main promotion perms
### What?
Currently, people need add/change_promotionrequest permission to make promotionrequests for their events. But with this permission they also get full access to all other promotionrequests. So we should make the inline in the eventadmin bypass that check. For all I care, anyone who can change an event can make a promotion request from the eventadmin (by virtue of their 'change_event' permission and being an organizer of the event), without having the add/change_promotionrequest permission, and thus without seeing the main Promotion Requests changelist page.
### Why?
<!-- A clear and concise motivation why we should consider implementing this. -->
Least privilege principle: many people should be allowed to request promotion for their own events, but don't need to be able to edit unrelated requests. And this way we can have promocie be able to bypass the requirements in #3529, without normal organizers being able to do the same.
### How?
Override has_xxx_permission() on the inline class. Read the InlineModelAdmin docs for guidance.
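
Concretely, something along these lines on the inline should be enough (a sketch; whether to return `True` unconditionally or re-check the parent event's permission is a judgement call):

```python
from django.contrib import admin

from promotion.models import PromotionRequest


class PromotionRequestInline(admin.StackedInline):
    model = PromotionRequest
    extra = 0

    # Bypass the global promotionrequest permissions for this inline only;
    # access is already gated by the event admin's own change permission.
    def has_add_permission(self, request, obj=None):
        return True

    def has_view_permission(self, request, obj=None):
        return True

    def has_change_permission(self, request, obj=None):
        return True

    def has_delete_permission(self, request, obj=None):
        return True
```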
</issue>
<code>
[start of website/events/admin/inlines.py]
1 from django.contrib import admin
2
3 from events import models
4 from pizzas.models import FoodEvent
5 from promotion.models import PromotionRequest
6
7 from .forms import RegistrationInformationFieldForm
8
9
10 class RegistrationInformationFieldInline(admin.TabularInline):
11 """The inline for registration information fields in the Event admin."""
12
13 form = RegistrationInformationFieldForm
14 extra = 0
15 model = models.RegistrationInformationField
16 ordering = ("_order",)
17
18 radio_fields = {"type": admin.VERTICAL}
19
20 def get_formset(self, request, obj=None, **kwargs):
21 formset = super().get_formset(request, obj, **kwargs)
22 if obj is not None:
23 count = obj.registrationinformationfield_set.count()
24 formset.form.declared_fields["order"].initial = count
25 return formset
26
27
28 class PizzaEventInline(admin.StackedInline):
29 """The inline for pizza events in the Event admin."""
30
31 model = FoodEvent
32 extra = 0
33 max_num = 1
34
35
36 class PromotionRequestInline(admin.StackedInline):
37 model = PromotionRequest
38 readonly_fields = (
39 "assigned_to",
40 "status",
41 "drive_folder",
42 )
43 extra = 0
44
[end of website/events/admin/inlines.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/admin/inlines.py b/website/events/admin/inlines.py
--- a/website/events/admin/inlines.py
+++ b/website/events/admin/inlines.py
@@ -39,5 +39,19 @@
"assigned_to",
"status",
"drive_folder",
+ "status_updated",
)
+
+ def has_add_permission(self, request, obj=None):
+ return True
+
+ def has_view_permission(self, request, obj=None):
+ return True
+
+ def has_change_permission(self, request, obj=None):
+ return True
+
+ def has_delete_permission(self, request, obj=None):
+ return True
+
extra = 0
| {"golden_diff": "diff --git a/website/events/admin/inlines.py b/website/events/admin/inlines.py\n--- a/website/events/admin/inlines.py\n+++ b/website/events/admin/inlines.py\n@@ -39,5 +39,19 @@\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n+ \"status_updated\",\n )\n+\n+ def has_add_permission(self, request, obj=None):\n+ return True\n+\n+ def has_view_permission(self, request, obj=None):\n+ return True\n+\n+ def has_change_permission(self, request, obj=None):\n+ return True\n+\n+ def has_delete_permission(self, request, obj=None):\n+ return True\n+\n extra = 0\n", "issue": "Separate promotion permissions in eventadmin inline from the main promotion perms\n### What?\r\nCurrently, people need add/change_promotionrequest permission to make promotionrequests for their events. But with this permission they also get full access to all other promotionrequests. So we should make the inline in the eventadmin bypass that check. For all I care, anyone who can change an event can make a promorquest from the eventadmin (by virtue of their 'change_event' permission and being an organizer of the event), without having the add/change_promotionrequest permission, and thus without seeing the main Promotion Requests changelist page.\r\n\r\n### Why?\r\n<!-- A clear and concise motivation why we should consider implementing this. -->\r\nLeast privilege principle: many people should be allowed to request promotion for their own events, but don't need to be able to edit unrelated requests. And this way we can have promocie be able to bypass the requirements in #3529, without normal organizers being able to do the same.\r\n\r\n### How?\r\nOverride has_xxx_permission() on the inline class. Read the inlinemodeladmin docs for guidance.\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom events import models\nfrom pizzas.models import FoodEvent\nfrom promotion.models import PromotionRequest\n\nfrom .forms import RegistrationInformationFieldForm\n\n\nclass RegistrationInformationFieldInline(admin.TabularInline):\n \"\"\"The inline for registration information fields in the Event admin.\"\"\"\n\n form = RegistrationInformationFieldForm\n extra = 0\n model = models.RegistrationInformationField\n ordering = (\"_order\",)\n\n radio_fields = {\"type\": admin.VERTICAL}\n\n def get_formset(self, request, obj=None, **kwargs):\n formset = super().get_formset(request, obj, **kwargs)\n if obj is not None:\n count = obj.registrationinformationfield_set.count()\n formset.form.declared_fields[\"order\"].initial = count\n return formset\n\n\nclass PizzaEventInline(admin.StackedInline):\n \"\"\"The inline for pizza events in the Event admin.\"\"\"\n\n model = FoodEvent\n extra = 0\n max_num = 1\n\n\nclass PromotionRequestInline(admin.StackedInline):\n model = PromotionRequest\n readonly_fields = (\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n )\n extra = 0\n", "path": "website/events/admin/inlines.py"}]} | 1,105 | 159 |
gh_patches_debug_32880 | rasdani/github-patches | git_diff | learningequality__kolibri-7214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Learner activity records partially not visible after upgrade to 0.14b3
# Observed Behaviour
Upgraded from 0.13.3 to 0.14b3. Learner activity records were partially not visible in Class Home -> Class activity and Reports. Downgraded back to 0.13.3 and they are all visible again.
# Expected behavior
All learner activity should be visible as prior to upgrade
# User-facing consequences
Confusion and fear of data loss.
# Errors and logs
None on screen
# Steps to reproduce
Upgrade from 0.13.3 to 0.14.0-b3 and check learner activity records.
# Context
Ubuntu 18.04.3
Package : 0.14.beta3 .deb
</issue>
<code>
[start of kolibri/core/query.py]
1 from django.db import connection
2 from django.db.models import Aggregate
3 from django.db.models import CharField
4 from django.db.models import IntegerField
5 from django.db.models import Subquery
6
7 try:
8 from django.contrib.postgres.aggregates import ArrayAgg
9
10 class NotNullArrayAgg(ArrayAgg):
11 def convert_value(self, value, expression, connection, context):
12 if not value:
13 return []
14 return filter(lambda x: x is not None, value)
15
16
17 except ImportError:
18 NotNullArrayAgg = None
19
20
21 class SQCount(Subquery):
22 # Include ALIAS at the end to support Postgres
23 template = "(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)"
24 output_field = IntegerField()
25
26
27 class SQSum(Subquery):
28 # Include ALIAS at the end to support Postgres
29 template = "(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)"
30 output_field = IntegerField()
31
32
33 class GroupConcatSubquery(Subquery):
34 template = "(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)"
35 output_field = CharField()
36
37 def as_postgresql(self, compiler, connection):
38 self.template = (
39 "(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)"
40 )
41 return super(GroupConcatSubquery, self).as_sql(compiler, connection)
42
43
44 class GroupConcat(Aggregate):
45 template = "GROUP_CONCAT(%(field)s)"
46 output_field = CharField()
47
48 def __init__(self, *args, **kwargs):
49 self.result_field = kwargs.pop("result_field", None)
50 super(GroupConcat, self).__init__(*args, **kwargs)
51
52 def convert_value(self, value, expression, connection, context):
53 if not value:
54 return []
55 results = value.split(",")
56 if self.result_field is not None:
57 return map(self.result_field.to_python, results)
58 return results
59
60
61 def get_source_field(model, field_path):
62 # Get the source field from the model so that we can properly coerce values
63 # this is necessary when we are using GroupConcat to return non-string fields.
64 paths = field_path.split("__")
65 while len(paths) > 1:
66 model = model._meta.get_field(paths.pop(0)).related_model
67 return model._meta.get_field(paths[0])
68
69
70 def annotate_array_aggregate(queryset, **kwargs):
71 if connection.vendor == "postgresql" and NotNullArrayAgg is not None:
72 return queryset.annotate(
73 **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}
74 )
75 model = queryset.model
76 # Call values on "pk" to insert a GROUP BY to ensure the GROUP CONCAT
77 # is called by row and not across the entire queryset.
78 return queryset.values("pk").annotate(
79 **{
80 target: GroupConcat(source, result_field=get_source_field(model, source))
81 for target, source in kwargs.items()
82 }
83 )
84
[end of kolibri/core/query.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/query.py b/kolibri/core/query.py
--- a/kolibri/core/query.py
+++ b/kolibri/core/query.py
@@ -8,10 +8,17 @@
from django.contrib.postgres.aggregates import ArrayAgg
class NotNullArrayAgg(ArrayAgg):
+ def __init__(self, *args, **kwargs):
+ self.result_field = kwargs.pop("result_field", None)
+ super(NotNullArrayAgg, self).__init__(*args, **kwargs)
+
def convert_value(self, value, expression, connection, context):
if not value:
return []
- return filter(lambda x: x is not None, value)
+ results = list(filter(lambda x: x is not None, value))
+ if self.result_field is not None:
+ return list(map(self.result_field.to_python, results))
+ return results
except ImportError:
@@ -54,7 +61,7 @@
return []
results = value.split(",")
if self.result_field is not None:
- return map(self.result_field.to_python, results)
+ return list(map(self.result_field.to_python, results))
return results
@@ -68,11 +75,16 @@
def annotate_array_aggregate(queryset, **kwargs):
+ model = queryset.model
if connection.vendor == "postgresql" and NotNullArrayAgg is not None:
return queryset.annotate(
- **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}
+ **{
+ target: NotNullArrayAgg(
+ source, result_field=get_source_field(model, source)
+ )
+ for target, source in kwargs.items()
+ }
)
- model = queryset.model
# Call values on "pk" to insert a GROUP BY to ensure the GROUP CONCAT
# is called by row and not across the entire queryset.
return queryset.values("pk").annotate(
| {"golden_diff": "diff --git a/kolibri/core/query.py b/kolibri/core/query.py\n--- a/kolibri/core/query.py\n+++ b/kolibri/core/query.py\n@@ -8,10 +8,17 @@\n from django.contrib.postgres.aggregates import ArrayAgg\n \n class NotNullArrayAgg(ArrayAgg):\n+ def __init__(self, *args, **kwargs):\n+ self.result_field = kwargs.pop(\"result_field\", None)\n+ super(NotNullArrayAgg, self).__init__(*args, **kwargs)\n+\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n- return filter(lambda x: x is not None, value)\n+ results = list(filter(lambda x: x is not None, value))\n+ if self.result_field is not None:\n+ return list(map(self.result_field.to_python, results))\n+ return results\n \n \n except ImportError:\n@@ -54,7 +61,7 @@\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n- return map(self.result_field.to_python, results)\n+ return list(map(self.result_field.to_python, results))\n return results\n \n \n@@ -68,11 +75,16 @@\n \n \n def annotate_array_aggregate(queryset, **kwargs):\n+ model = queryset.model\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n- **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}\n+ **{\n+ target: NotNullArrayAgg(\n+ source, result_field=get_source_field(model, source)\n+ )\n+ for target, source in kwargs.items()\n+ }\n )\n- model = queryset.model\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n", "issue": "Learner activity records partially not visible after upgrade to 0.14b3\n# Observed Behaviour\n\nUpgraded from 0.13.3 to 0.14b3. Learner activity records were partially not visible in Class Home -> Class activity and Reports. 
Downgraded back to 0.13.3 and they are all visible again.\n\n# Expected behavior\n\nAll learner activity should be visible as prior to upgrade\n\n\n# User-facing consequences\n\n Confusion and fear of data loss.\n\n# Errors and logs\n\nNone on screen\n\n# Steps to reproduce\n\nUpgrade from 0.13.3 to 0.14.0-b3 and check learner activity records.\n\n\n\n# Context\nUbuntu 18.04.3\nPackage : 0.14.beta3 .deb\n\n", "before_files": [{"content": "from django.db import connection\nfrom django.db.models import Aggregate\nfrom django.db.models import CharField\nfrom django.db.models import IntegerField\nfrom django.db.models import Subquery\n\ntry:\n from django.contrib.postgres.aggregates import ArrayAgg\n\n class NotNullArrayAgg(ArrayAgg):\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n return filter(lambda x: x is not None, value)\n\n\nexcept ImportError:\n NotNullArrayAgg = None\n\n\nclass SQCount(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass SQSum(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass GroupConcatSubquery(Subquery):\n template = \"(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = CharField()\n\n def as_postgresql(self, compiler, connection):\n self.template = (\n \"(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)\"\n )\n return super(GroupConcatSubquery, self).as_sql(compiler, connection)\n\n\nclass GroupConcat(Aggregate):\n template = \"GROUP_CONCAT(%(field)s)\"\n output_field = CharField()\n\n def __init__(self, *args, **kwargs):\n self.result_field = kwargs.pop(\"result_field\", None)\n super(GroupConcat, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n return map(self.result_field.to_python, results)\n return results\n\n\ndef get_source_field(model, field_path):\n # Get the source field from the model so that we can properly coerce values\n # this is necessary when we are using GroupConcat to return non-string fields.\n paths = field_path.split(\"__\")\n while len(paths) > 1:\n model = model._meta.get_field(paths.pop(0)).related_model\n return model._meta.get_field(paths[0])\n\n\ndef annotate_array_aggregate(queryset, **kwargs):\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}\n )\n model = queryset.model\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n **{\n target: GroupConcat(source, result_field=get_source_field(model, source))\n for target, source in kwargs.items()\n }\n )\n", "path": "kolibri/core/query.py"}]} | 1,541 | 435 |
gh_patches_debug_7473 | rasdani/github-patches | git_diff | praw-dev__praw-1327 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PRAW installed by pip is missing the `images` directory and its contents
## Issue Description
PRAW's video submit method uses a placeholder image as a video thumbnail when the user doesn't provide a thumbnail. Here's the relevant code:
https://github.com/praw-dev/praw/blob/54f8b3f998008b81988aac057d33fe38d5ac7739/praw/models/reddit/subreddit.py#L511-L514
That image is at [`praw/images/PRAW logo.png`](https://github.com/praw-dev/praw/blob/master/praw/images/PRAW%20logo.png). Unfortunately the current release on PyPI is missing the file and the entire `images` directory, so the `submit_video` method fails when another thumbnail isn't provided.
It isn't just the wheel on PyPI that is missing the image. The source tarball is as well.
I suspect that a change to [`MANIFEST.in`](https://github.com/praw-dev/praw/blob/master/MANIFEST.in) might solve this problem. Or maybe not, as [this Stack Overflow answer](https://stackoverflow.com/a/25964691/8033766) suggests that `MANIFEST.in` is just for Python 2.6 and earlier.
Adding an `__init__.py` to the `images` folder would probably [make `find_packages()` in `setup.py`](https://setuptools.readthedocs.io/en/latest/setuptools.html#using-find-packages) notice the folder, but this would be a mis-use of an `__init__.py` since that folder is not a Python package.
[This page](https://docs.python.org/3.3/distutils/setupscript.html#installing-additional-files) suggests using the `data_files` argument in `setup.py`, but I tried that and couldn't get it to work ([branch here](https://github.com/jarhill0/praw/tree/image-upload-fix), [PyPI testing push here](https://test.pypi.org/project/praw/)).
</issue>
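Besides the `MANIFEST.in` and `data_files` routes mentioned above, a common alternative is setuptools' `package_data`; the sketch below is illustrative only — the globs and package name are assumptions, not the project's actual fix.

```python
# Hypothetical sketch: declare non-Python files as package data so both the
# wheel and the sdist ship praw/images/. The exact patterns are assumptions.
from setuptools import find_packages, setup

setup(
    name="packaging-sketch",
    version="0.0.0",
    packages=find_packages(exclude=["tests", "tests.*"]),
    package_data={"praw": ["*.ini", "images/*.png"]},
)
```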
<code>
[start of setup.py]
1 """praw setup.py"""
2
3 import re
4 from codecs import open
5 from os import path
6 from setuptools import find_packages, setup
7
8
9 PACKAGE_NAME = "praw"
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
15
16 extras = {
17 "ci": ["coveralls"],
18 "dev": ["pre-commit"],
19 "lint": ["black", "flake8", "pydocstyle", "sphinx", "sphinx_rtd_theme"],
20 "test": [
21 "betamax >=0.8, <0.9",
22 "betamax-matchers >=0.3.0, <0.5",
23 "betamax-serializers >=0.2, <0.3",
24 "mock >=0.8",
25 "pytest >=2.7.3",
26 ],
27 }
28 extras["dev"] += extras["lint"] + extras["test"]
29
30 setup(
31 name=PACKAGE_NAME,
32 author="Bryce Boe",
33 author_email="[email protected]",
34 python_requires=">=3.5",
35 classifiers=[
36 "Development Status :: 5 - Production/Stable",
37 "Environment :: Console",
38 "Intended Audience :: Developers",
39 "License :: OSI Approved :: BSD License",
40 "Natural Language :: English",
41 "Operating System :: OS Independent",
42 "Programming Language :: Python",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.5",
45 "Programming Language :: Python :: 3.6",
46 "Programming Language :: Python :: 3.7",
47 "Programming Language :: Python :: 3.8",
48 "Topic :: Utilities",
49 ],
50 description=(
51 "PRAW, an acronym for `Python Reddit API Wrapper`, is a "
52 "python package that allows for simple access to "
53 "reddit's API."
54 ),
55 extras_require=extras,
56 install_requires=[
57 "prawcore >=1.0.1, <2.0",
58 "update_checker >=0.16",
59 "websocket-client >=0.54.0",
60 ],
61 keywords="reddit api wrapper",
62 license="Simplified BSD License",
63 long_description=README,
64 package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]},
65 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
66 url="https://praw.readthedocs.org/",
67 version=VERSION,
68 )
69
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,10 @@
keywords="reddit api wrapper",
license="Simplified BSD License",
long_description=README,
- package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]},
+ package_data={
+ "": ["LICENSE.txt"],
+ PACKAGE_NAME: ["*.ini", "images/*.jpg"],
+ },
packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
url="https://praw.readthedocs.org/",
version=VERSION,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,10 @@\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n- package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\"]},\n+ package_data={\n+ \"\": [\"LICENSE.txt\"],\n+ PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n+ },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n", "issue": "PRAW installed by pip is missing the `images` directory and its contents\n## Issue Description\r\n\r\nPRAW's video submit method uses a placeholder image as a video thumbnail when the user doesn't provide a thumbnail. Here's the relevant code:\r\n\r\nhttps://github.com/praw-dev/praw/blob/54f8b3f998008b81988aac057d33fe38d5ac7739/praw/models/reddit/subreddit.py#L511-L514\r\n\r\nThat image is at [`praw/images/PRAW logo.png`](https://github.com/praw-dev/praw/blob/master/praw/images/PRAW%20logo.png). Unfortunately the current release on PyPI is missing the file and the entire `images` directory, so the `submit_video` method fails when another thumbnail isn't provided.\r\n\r\nIt isn't just the wheel on PyPI that is missing the image. The source tarball is as well.\r\n\r\nI suspect that a change to [`MANIFEST.in`](https://github.com/praw-dev/praw/blob/master/MANIFEST.in) might solve this problem. Or maybe not, as [this Stack Overflow answer](https://stackoverflow.com/a/25964691/8033766) suggests that `MANIFEST.in` is just for Python 2.6 and earlier.\r\n\r\nAdding an `__init__.py` to the `images` folder would probably [make `find_packages()` in `setup.py`](https://setuptools.readthedocs.io/en/latest/setuptools.html#using-find-packages) notice the folder, but this would be a mis-use of an `__init__.py` since that folder is not a Python package.\r\n\r\n[This page](https://docs.python.org/3.3/distutils/setupscript.html#installing-additional-files) suggests using the `data_files` argument in `setup.py`, but I tried that and couldn't get it to work ([branch here](https://github.com/jarhill0/praw/tree/image-upload-fix), [PyPI testing push here](https://test.pypi.org/project/praw/)).\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\"black\", \"flake8\", \"pydocstyle\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"betamax-serializers >=0.2, <0.3\",\n \"mock >=0.8\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.0.1, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}]} | 1,694 | 140 |
gh_patches_debug_28202 | rasdani/github-patches | git_diff | open-mmlab__mmpose-493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Seed in sampler
https://github.com/open-mmlab/mmdetection/pull/4665
</issue>
<code>
[start of mmpose/datasets/samplers/distributed_sampler.py]
1 import torch
2 from torch.utils.data import DistributedSampler as _DistributedSampler
3
4
5 class DistributedSampler(_DistributedSampler):
6 """DistributedSampler inheriting from
7 `torch.utils.data.DistributedSampler`.
8
9 In pytorch of lower versions, there is no `shuffle` argument. This child
10 class will port one to DistributedSampler.
11 """
12
13 def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
14 super().__init__(dataset, num_replicas=num_replicas, rank=rank)
15 self.shuffle = shuffle
16
17 def __iter__(self):
18 """Deterministically shuffle based on epoch."""
19 if self.shuffle:
20 g = torch.Generator()
21 g.manual_seed(self.epoch)
22 indices = torch.randperm(len(self.dataset), generator=g).tolist()
23 else:
24 indices = torch.arange(len(self.dataset)).tolist()
25
26 # add extra samples to make it evenly divisible
27 indices += indices[:(self.total_size - len(indices))]
28 assert len(indices) == self.total_size
29
30 # subsample
31 indices = indices[self.rank:self.total_size:self.num_replicas]
32 assert len(indices) == self.num_samples
33 return iter(indices)
34
[end of mmpose/datasets/samplers/distributed_sampler.py]
[start of mmpose/datasets/builder.py]
1 import platform
2 import random
3 from functools import partial
4
5 import numpy as np
6 from mmcv.parallel import collate
7 from mmcv.runner import get_dist_info
8 from mmcv.utils import build_from_cfg
9 from mmcv.utils.parrots_wrapper import _get_dataloader
10
11 from .dataset_wrappers import RepeatDataset
12 from .registry import DATASETS
13 from .samplers import DistributedSampler
14
15 if platform.system() != 'Windows':
16 # https://github.com/pytorch/pytorch/issues/973
17 import resource
18 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
19 hard_limit = rlimit[1]
20 soft_limit = min(4096, hard_limit)
21 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
22
23
24 def build_dataset(cfg, default_args=None):
25 """Build a dataset from config dict.
26
27 Args:
28 cfg (dict): Config dict. It should at least contain the key "type".
29 default_args (dict, optional): Default initialization arguments.
30 Default: None.
31
32 Returns:
33 Dataset: The constructed dataset.
34 """
35 if cfg['type'] == 'RepeatDataset':
36 dataset = RepeatDataset(
37 build_dataset(cfg['dataset'], default_args), cfg['times'])
38 else:
39 dataset = build_from_cfg(cfg, DATASETS, default_args)
40 return dataset
41
42
43 def build_dataloader(dataset,
44 samples_per_gpu,
45 workers_per_gpu,
46 num_gpus=1,
47 dist=True,
48 shuffle=True,
49 seed=None,
50 drop_last=True,
51 pin_memory=True,
52 **kwargs):
53 """Build PyTorch DataLoader.
54
55 In distributed training, each GPU/process has a dataloader.
56 In non-distributed training, there is only one dataloader for all GPUs.
57
58 Args:
59 dataset (Dataset): A PyTorch dataset.
60 samples_per_gpu (int): Number of training samples on each GPU, i.e.,
61 batch size of each GPU.
62 workers_per_gpu (int): How many subprocesses to use for data loading
63 for each GPU.
64 num_gpus (int): Number of GPUs. Only used in non-distributed training.
65 dist (bool): Distributed training/test or not. Default: True.
66 shuffle (bool): Whether to shuffle the data at every epoch.
67 Default: True.
68 drop_last (bool): Whether to drop the last incomplete batch in epoch.
69 Default: True
70 pin_memory (bool): Whether to use pin_memory in DataLoader.
71 Default: True
72 kwargs: any keyword argument to be used to initialize DataLoader
73
74 Returns:
75 DataLoader: A PyTorch dataloader.
76 """
77 rank, world_size = get_dist_info()
78 if dist:
79 sampler = DistributedSampler(
80 dataset, world_size, rank, shuffle=shuffle)
81 shuffle = False
82 batch_size = samples_per_gpu
83 num_workers = workers_per_gpu
84 else:
85 sampler = None
86 batch_size = num_gpus * samples_per_gpu
87 num_workers = num_gpus * workers_per_gpu
88
89 init_fn = partial(
90 worker_init_fn, num_workers=num_workers, rank=rank,
91 seed=seed) if seed is not None else None
92
93 _, DataLoader = _get_dataloader()
94 data_loader = DataLoader(
95 dataset,
96 batch_size=batch_size,
97 sampler=sampler,
98 num_workers=num_workers,
99 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
100 pin_memory=pin_memory,
101 shuffle=shuffle,
102 worker_init_fn=init_fn,
103 drop_last=drop_last,
104 **kwargs)
105
106 return data_loader
107
108
109 def worker_init_fn(worker_id, num_workers, rank, seed):
110 """Init the random seed for various workers."""
111 # The seed of each worker equals to
112 # num_worker * rank + worker_id + user_seed
113 worker_seed = num_workers * rank + worker_id + seed
114 np.random.seed(worker_seed)
115 random.seed(worker_seed)
116
[end of mmpose/datasets/builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py
--- a/mmpose/datasets/builder.py
+++ b/mmpose/datasets/builder.py
@@ -77,7 +77,7 @@
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
- dataset, world_size, rank, shuffle=shuffle)
+ dataset, world_size, rank, shuffle=shuffle, seed=seed)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
diff --git a/mmpose/datasets/samplers/distributed_sampler.py b/mmpose/datasets/samplers/distributed_sampler.py
--- a/mmpose/datasets/samplers/distributed_sampler.py
+++ b/mmpose/datasets/samplers/distributed_sampler.py
@@ -10,15 +10,22 @@
class will port one to DistributedSampler.
"""
- def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank)
- self.shuffle = shuffle
+ def __init__(self,
+ dataset,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ seed=0):
+ super().__init__(
+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+ # for the compatibility from PyTorch 1.3+
+ self.seed = seed if seed is not None else 0
def __iter__(self):
"""Deterministically shuffle based on epoch."""
if self.shuffle:
g = torch.Generator()
- g.manual_seed(self.epoch)
+ g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
| {"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -77,7 +77,7 @@\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n- dataset, world_size, rank, shuffle=shuffle)\n+ dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\ndiff --git a/mmpose/datasets/samplers/distributed_sampler.py b/mmpose/datasets/samplers/distributed_sampler.py\n--- a/mmpose/datasets/samplers/distributed_sampler.py\n+++ b/mmpose/datasets/samplers/distributed_sampler.py\n@@ -10,15 +10,22 @@\n class will port one to DistributedSampler.\n \"\"\"\n \n- def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n- super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n- self.shuffle = shuffle\n+ def __init__(self,\n+ dataset,\n+ num_replicas=None,\n+ rank=None,\n+ shuffle=True,\n+ seed=0):\n+ super().__init__(\n+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n+ # for the compatibility from PyTorch 1.3+\n+ self.seed = seed if seed is not None else 0\n \n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n- g.manual_seed(self.epoch)\n+ g.manual_seed(self.epoch + self.seed)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n", "issue": "Seed in sampler\nhttps://github.com/open-mmlab/mmdetection/pull/4665\n", "before_files": [{"content": "import torch\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\n\nclass DistributedSampler(_DistributedSampler):\n \"\"\"DistributedSampler inheriting from\n `torch.utils.data.DistributedSampler`.\n\n In pytorch of lower versions, there is no `shuffle` argument. 
This child\n class will port one to DistributedSampler.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n self.shuffle = shuffle\n\n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n", "path": "mmpose/datasets/samplers/distributed_sampler.py"}, {"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .dataset_wrappers import RepeatDataset\nfrom .registry import DATASETS\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]} | 2,003 | 427 |
gh_patches_debug_33771 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_300 fails if there are more rules than one
**Describe the issue**
CKV_AWS_300 fails if more than one rule is defined in `aws_s3_bucket_lifecycle_configuration`
**Examples**
```
resource "aws_s3_bucket_lifecycle_configuration" "bucket" {
bucket = aws_s3_bucket.bucket.bucket
rule {
id = "id-1"
status = "Enabled"
abort_incomplete_multipart_upload {
days_after_initiation = 1
}
}
rule {
id = "id-2"
status = "Enabled"
noncurrent_version_expiration {
noncurrent_days = 1
}
}
}
```
**Version (please complete the following information):**
- Checkov Version 2.3.111
Fails with:
```
Check: CKV_AWS_300: "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
FAILED for resource: aws_s3_bucket_lifecycle_configuration.bucket
File: s3.tf:1-1
122 | resource "aws_s3_bucket_lifecycle_configuration" "bucket" {
123 | bucket = aws_s3_bucket.bucket.bucket
124 |
125 | rule {
126 | id = "id-1"
127 |
128 | abort_incomplete_multipart_upload {
129 | days_after_initiation = 1
130 | }
131 |
132 | status = "Enabled"
133 | }
134 |
135 | rule {
136 | id = "id-2"
137 | status = "Enabled"
138 |
139 | noncurrent_version_expiration {
140 | noncurrent_days = 1
141 | }
142 | }
143 | }
```
</issue>
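As a minimal standalone sketch of the expected behaviour (plain Python with assumed data shapes, not Checkov's real API), the check should pass as soon as any enabled, unfiltered rule sets `abort_incomplete_multipart_upload`:

```python
# Hedged sketch: PASS if at least one enabled, unfiltered rule aborts
# incomplete multipart uploads; the list-wrapped values mimic parsed HCL.
def lifecycle_aborts_incomplete_uploads(rules):
    return any(
        rule.get("abort_incomplete_multipart_upload")
        and rule.get("status") == ["Enabled"]
        and not rule.get("filter")
        for rule in rules
    )


rules = [
    {"id": ["id-1"], "status": ["Enabled"],
     "abort_incomplete_multipart_upload": [{"days_after_initiation": [1]}]},
    {"id": ["id-2"], "status": ["Enabled"],
     "noncurrent_version_expiration": [{"noncurrent_days": [1]}]},
]
print(lifecycle_aborts_incomplete_uploads(rules))  # True -> the reproduction template should pass
```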
<code>
[start of checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py]
1 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
2 from checkov.common.models.enums import CheckCategories, CheckResult
3
4
5 class S3AbortIncompleteUploads(BaseResourceCheck):
6 def __init__(self):
7 """
8 If you don't set this value in a lifecycle configuration you'll end up paying for s3
9 resources you never could use
10 """
11 name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
12 id = "CKV_AWS_300"
13 supported_resources = ('aws_s3_bucket_lifecycle_configuration',)
14 categories = (CheckCategories.GENERAL_SECURITY,)
15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
16
17 def scan_resource_conf(self, conf):
18 self.evaluated_keys = ["rule"]
19 rules = conf.get("rule")
20 if rules and isinstance(rules, list):
21 for idx_rule, rule in enumerate(rules):
22 if not rule.get("abort_incomplete_multipart_upload"):
23 self.evaluated_keys = [f"rule/[{idx_rule}]/"]
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26 return CheckResult.FAILED
27
28
29 check = S3AbortIncompleteUploads()
30
[end of checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
@@ -1,28 +1,36 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult
class S3AbortIncompleteUploads(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
"""
If you don't set this value in a lifecycle configuration you'll end up paying for s3
resources you never could use
"""
name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
id = "CKV_AWS_300"
- supported_resources = ('aws_s3_bucket_lifecycle_configuration',)
+ supported_resources = ("aws_s3_bucket_lifecycle_configuration",)
categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
self.evaluated_keys = ["rule"]
rules = conf.get("rule")
if rules and isinstance(rules, list):
for idx_rule, rule in enumerate(rules):
- if not rule.get("abort_incomplete_multipart_upload"):
- self.evaluated_keys = [f"rule/[{idx_rule}]/"]
- return CheckResult.FAILED
- return CheckResult.PASSED
+ if (
+ rule.get("abort_incomplete_multipart_upload")
+ and rule.get("status") == ["Enabled"]
+ and not rule.get("filter")
+ ):
+ self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"]
+ return CheckResult.PASSED
+
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n@@ -1,28 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckCategories, CheckResult\n \n \n class S3AbortIncompleteUploads(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n- supported_resources = ('aws_s3_bucket_lifecycle_configuration',)\n+ supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n- if not rule.get(\"abort_incomplete_multipart_upload\"):\n- self.evaluated_keys = [f\"rule/[{idx_rule}]/\"]\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n+ if (\n+ rule.get(\"abort_incomplete_multipart_upload\")\n+ and rule.get(\"status\") == [\"Enabled\"]\n+ and not rule.get(\"filter\")\n+ ):\n+ self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n+ return CheckResult.PASSED\n+\n return CheckResult.FAILED\n", "issue": "CKV_AWS_300 fails if there are more rules than one\n**Describe the issue**\r\nCKV_AWS_300 fails if there's more rules than one defined in `aws_s3_bucket_lifecycle_configuration`\r\n\r\n**Examples**\r\n```\r\nresource \"aws_s3_bucket_lifecycle_configuration\" \"bucket\" {\r\n bucket = aws_s3_bucket.bucket.bucket\r\n\r\n rule {\r\n id = \"id-1\"\r\n status = \"Enabled\"\r\n\r\n abort_incomplete_multipart_upload {\r\n days_after_initiation = 1\r\n }\r\n }\r\n\r\n rule {\r\n id = \"id-2\"\r\n status = \"Enabled\"\r\n\r\n noncurrent_version_expiration {\r\n noncurrent_days = 1\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.111\r\n\r\nFails with:\r\n\r\n```\r\nCheck: CKV_AWS_300: \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\r\n\tFAILED for resource: aws_s3_bucket_lifecycle_configuration.bucket\r\n\tFile: s3.tf:1-1\r\n\r\n\t\t122 | resource \"aws_s3_bucket_lifecycle_configuration\" \"bucket\" {\r\n\t\t123 | bucket = aws_s3_bucket.bucket.bucket\r\n\t\t124 | \r\n\t\t125 | rule {\r\n\t\t126 | id = \"id-1\"\r\n\t\t127 | \r\n\t\t128 | abort_incomplete_multipart_upload {\r\n\t\t129 | days_after_initiation = 1\r\n\t\t130 | }\r\n\t\t131 | \r\n\t\t132 | status = \"Enabled\"\r\n\t\t133 | }\r\n\t\t134 | \r\n\t\t135 | rule {\r\n\t\t136 | id = \"id-2\"\r\n\t\t137 | status = \"Enabled\"\r\n\t\t138 | \r\n\t\t139 | noncurrent_version_expiration {\r\n\t\t140 | noncurrent_days = 1\r\n\t\t141 | }\r\n\t\t142 | }\r\n\t\t143 | }\r\n```\n", "before_files": [{"content": "from 
checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self):\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = ('aws_s3_bucket_lifecycle_configuration',)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if not rule.get(\"abort_incomplete_multipart_upload\"):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}]} | 1,348 | 483 |
gh_patches_debug_22629 | rasdani/github-patches | git_diff | yt-project__yt-3613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: spurious log warning when saving a plot to png format
### Bug report
**Bug summary**
**Code for reproduction**
```python
import yt
yt.funcs.mylog.setLevel("warning")
ds = yt.load_sample("IsolatedGalaxy")
p = yt.SlicePlot(ds, "z", "density")
p.save("/tmp/test.png")
```
**Actual outcome**
```
yt : [WARNING ] 2021-10-20 11:50:44,393 Received two valid image formats '.png' (from `filename`) and 'png' (from `suffix`). The former is ignored.
```
**Expected outcome**
No log warning
</issue>
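A small self-contained illustration of the suspected mismatch (simplified, assumed logic rather than yt's actual code path): the extension taken from the filename keeps its leading dot while the default suffix is already normalised, so a direct comparison sees two "different" formats.

```python
import os

def normalize(ext: str) -> str:
    # strip a leading dot, mirroring an extension-normalising helper
    return ext[1:] if ext.startswith(".") else ext

_, psuffix = os.path.splitext("/tmp/test.png")   # ".png"
suffix = normalize("png")                        # "png"
print(psuffix != suffix)                         # True  -> warning fires spuriously
print(normalize(psuffix) != suffix)              # False -> both normalised, no warning
```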
<code>
[start of yt/visualization/_commons.py]
1 import os
2 import sys
3 from typing import Optional, Type
4
5 import matplotlib
6 from packaging.version import Version
7
8 from yt.utilities.logger import ytLogger as mylog
9
10 from ._mpl_imports import (
11 FigureCanvasAgg,
12 FigureCanvasBase,
13 FigureCanvasPdf,
14 FigureCanvasPS,
15 FigureCanvasSVG,
16 )
17
18 MPL_VERSION = Version(matplotlib.__version__)
19
20 DEFAULT_FONT_PROPERTIES = {
21 "family": "stixgeneral",
22 "size": 18,
23 }
24
25 if MPL_VERSION >= Version("3.4"):
26 DEFAULT_FONT_PROPERTIES["math_fontfamily"] = "cm"
27
28 SUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())
29 SUPPORTED_CANVAS_CLASSES = frozenset(
30 (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)
31 )
32
33
34 def get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:
35 s = normalize_extension_string(suffix)
36 if s not in SUPPORTED_FORMATS:
37 raise ValueError(f"Unsupported file format '{suffix}'.")
38 for cls in SUPPORTED_CANVAS_CLASSES:
39 if s in cls.get_supported_filetypes():
40 return cls
41 raise RuntimeError(
42 "Something went terribly wrong. "
43 f"File extension '{suffix}' is supposed to be supported "
44 "but no compatible backend was found."
45 )
46
47
48 def normalize_extension_string(s: str) -> str:
49 if sys.version_info < (3, 9):
50 if s.startswith("."):
51 return s[1:]
52 return s
53 else:
54 return s.removeprefix(".")
55
56
57 def validate_image_name(filename, suffix: Optional[str] = None) -> str:
58 """
59 Build a valid image filename with a specified extension (default to png).
60 The suffix parameter is ignored if the input filename has a valid extension already.
61 Otherwise, suffix is appended to the filename, replacing any existing extension.
62 """
63 name, psuffix = os.path.splitext(filename)
64 if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:
65 if suffix is not None:
66 suffix = normalize_extension_string(suffix)
67 if suffix in SUPPORTED_FORMATS and suffix != psuffix:
68 mylog.warning(
69 "Received two valid image formats '%s' (from `filename`) "
70 "and '%s' (from `suffix`). The former is ignored.",
71 psuffix,
72 suffix,
73 )
74 return f"{name}.{suffix}"
75 return str(filename)
76
77 if suffix is None:
78 suffix = ".png"
79
80 suffix = normalize_extension_string(suffix)
81
82 if suffix not in SUPPORTED_FORMATS:
83 raise ValueError(f"Unsupported file format '{suffix}'.")
84
85 return f"{filename}.{suffix}"
86
87
88 def get_canvas(figure, filename):
89
90 name, suffix = os.path.splitext(filename)
91
92 if not suffix:
93 raise ValueError(
94 f"Can not determine canvas class from filename '{filename}' "
95 f"without an extension."
96 )
97 return get_canvas_class(suffix)(figure)
98
[end of yt/visualization/_commons.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py
--- a/yt/visualization/_commons.py
+++ b/yt/visualization/_commons.py
@@ -61,9 +61,12 @@
Otherwise, suffix is appended to the filename, replacing any existing extension.
"""
name, psuffix = os.path.splitext(filename)
- if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:
- if suffix is not None:
- suffix = normalize_extension_string(suffix)
+ psuffix = normalize_extension_string(psuffix)
+
+ if suffix is not None:
+ suffix = normalize_extension_string(suffix)
+
+ if psuffix in SUPPORTED_FORMATS:
if suffix in SUPPORTED_FORMATS and suffix != psuffix:
mylog.warning(
"Received two valid image formats '%s' (from `filename`) "
@@ -75,9 +78,7 @@
return str(filename)
if suffix is None:
- suffix = ".png"
-
- suffix = normalize_extension_string(suffix)
+ suffix = "png"
if suffix not in SUPPORTED_FORMATS:
raise ValueError(f"Unsupported file format '{suffix}'.")
| {"golden_diff": "diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py\n--- a/yt/visualization/_commons.py\n+++ b/yt/visualization/_commons.py\n@@ -61,9 +61,12 @@\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n- if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:\n- if suffix is not None:\n- suffix = normalize_extension_string(suffix)\n+ psuffix = normalize_extension_string(psuffix)\n+\n+ if suffix is not None:\n+ suffix = normalize_extension_string(suffix)\n+\n+ if psuffix in SUPPORTED_FORMATS:\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n@@ -75,9 +78,7 @@\n return str(filename)\n \n if suffix is None:\n- suffix = \".png\"\n-\n- suffix = normalize_extension_string(suffix)\n+ suffix = \"png\"\n \n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n", "issue": "BUG: spurious log warning when saving a plot to png format\n### Bug report\r\n\r\n**Bug summary**\r\n\r\n**Code for reproduction**\r\n\r\n```python\r\nimport yt\r\n\r\nyt.funcs.mylog.setLevel(\"warning\")\r\n\r\nds = yt.load_sample(\"IsolatedGalaxy\")\r\np = yt.SlicePlot(ds, \"z\", \"density\")\r\np.save(\"/tmp/test.png\")\r\n```\r\n\r\n\r\n**Actual outcome**\r\n\r\n```\r\nyt : [WARNING ] 2021-10-20 11:50:44,393 Received two valid image formats '.png' (from `filename`) and 'png' (from `suffix`). The former is ignored.\r\n```\r\n\r\n**Expected outcome**\r\n\r\nNo log warning\n", "before_files": [{"content": "import os\nimport sys\nfrom typing import Optional, Type\n\nimport matplotlib\nfrom packaging.version import Version\n\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom ._mpl_imports import (\n FigureCanvasAgg,\n FigureCanvasBase,\n FigureCanvasPdf,\n FigureCanvasPS,\n FigureCanvasSVG,\n)\n\nMPL_VERSION = Version(matplotlib.__version__)\n\nDEFAULT_FONT_PROPERTIES = {\n \"family\": \"stixgeneral\",\n \"size\": 18,\n}\n\nif MPL_VERSION >= Version(\"3.4\"):\n DEFAULT_FONT_PROPERTIES[\"math_fontfamily\"] = \"cm\"\n\nSUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())\nSUPPORTED_CANVAS_CLASSES = frozenset(\n (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)\n)\n\n\ndef get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:\n s = normalize_extension_string(suffix)\n if s not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n for cls in SUPPORTED_CANVAS_CLASSES:\n if s in cls.get_supported_filetypes():\n return cls\n raise RuntimeError(\n \"Something went terribly wrong. 
\"\n f\"File extension '{suffix}' is supposed to be supported \"\n \"but no compatible backend was found.\"\n )\n\n\ndef normalize_extension_string(s: str) -> str:\n if sys.version_info < (3, 9):\n if s.startswith(\".\"):\n return s[1:]\n return s\n else:\n return s.removeprefix(\".\")\n\n\ndef validate_image_name(filename, suffix: Optional[str] = None) -> str:\n \"\"\"\n Build a valid image filename with a specified extension (default to png).\n The suffix parameter is ignored if the input filename has a valid extension already.\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:\n if suffix is not None:\n suffix = normalize_extension_string(suffix)\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n \"and '%s' (from `suffix`). The former is ignored.\",\n psuffix,\n suffix,\n )\n return f\"{name}.{suffix}\"\n return str(filename)\n\n if suffix is None:\n suffix = \".png\"\n\n suffix = normalize_extension_string(suffix)\n\n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n\n return f\"{filename}.{suffix}\"\n\n\ndef get_canvas(figure, filename):\n\n name, suffix = os.path.splitext(filename)\n\n if not suffix:\n raise ValueError(\n f\"Can not determine canvas class from filename '{filename}' \"\n f\"without an extension.\"\n )\n return get_canvas_class(suffix)(figure)\n", "path": "yt/visualization/_commons.py"}]} | 1,513 | 261 |
gh_patches_debug_5604 | rasdani/github-patches | git_diff | bokeh__bokeh-9682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOCUMENTATION] typo in texas.py
Superfluous parenthesis on [line 34 of texas.py](https://github.com/bokeh/bokeh/blob/aa60b9c9d554fbd349a21da37d616c5b8eda8c09/examples/plotting/file/texas.py#L34). Shows up in the hover tool tip.
Not sure whether you want a PR for something so small? So far I haven't found any other corrections that could be added in.
</issue>
<code>
[start of examples/plotting/file/texas.py]
1 from bokeh.io import show
2 from bokeh.models import LogColorMapper
3 from bokeh.palettes import Viridis6 as palette
4 from bokeh.plotting import figure
5 from bokeh.sampledata.unemployment import data as unemployment
6 from bokeh.sampledata.us_counties import data as counties
7
8 palette = tuple(reversed(palette))
9
10 counties = {
11 code: county for code, county in counties.items() if county["state"] == "tx"
12 }
13
14 county_xs = [county["lons"] for county in counties.values()]
15 county_ys = [county["lats"] for county in counties.values()]
16
17 county_names = [county['name'] for county in counties.values()]
18 county_rates = [unemployment[county_id] for county_id in counties]
19 color_mapper = LogColorMapper(palette=palette)
20
21 data=dict(
22 x=county_xs,
23 y=county_ys,
24 name=county_names,
25 rate=county_rates,
26 )
27
28 TOOLS = "pan,wheel_zoom,reset,hover,save"
29
30 p = figure(
31 title="Texas Unemployment, 2009", tools=TOOLS,
32 x_axis_location=None, y_axis_location=None,
33 tooltips=[
34 ("Name", "@name"), ("Unemployment rate)", "@rate%"), ("(Long, Lat)", "($x, $y)")
35 ])
36 p.grid.grid_line_color = None
37 p.hover.point_policy = "follow_mouse"
38
39 p.patches('x', 'y', source=data,
40 fill_color={'field': 'rate', 'transform': color_mapper},
41 fill_alpha=0.7, line_color="white", line_width=0.5)
42
43 show(p)
44
[end of examples/plotting/file/texas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting/file/texas.py b/examples/plotting/file/texas.py
--- a/examples/plotting/file/texas.py
+++ b/examples/plotting/file/texas.py
@@ -31,7 +31,7 @@
title="Texas Unemployment, 2009", tools=TOOLS,
x_axis_location=None, y_axis_location=None,
tooltips=[
- ("Name", "@name"), ("Unemployment rate)", "@rate%"), ("(Long, Lat)", "($x, $y)")
+ ("Name", "@name"), ("Unemployment rate", "@rate%"), ("(Long, Lat)", "($x, $y)")
])
p.grid.grid_line_color = None
p.hover.point_policy = "follow_mouse"
| {"golden_diff": "diff --git a/examples/plotting/file/texas.py b/examples/plotting/file/texas.py\n--- a/examples/plotting/file/texas.py\n+++ b/examples/plotting/file/texas.py\n@@ -31,7 +31,7 @@\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n- (\"Name\", \"@name\"), (\"Unemployment rate)\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n+ (\"Name\", \"@name\"), (\"Unemployment rate\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\n p.grid.grid_line_color = None\n p.hover.point_policy = \"follow_mouse\"\n", "issue": "[DOCUMENTATION] typo in texas.py\nSuperfluous parenthesis on [line 34 of texas.py](https://github.com/bokeh/bokeh/blob/aa60b9c9d554fbd349a21da37d616c5b8eda8c09/examples/plotting/file/texas.py#L34). Shows up in the hover tool tip.\r\n\r\nNot sure whether you want a PR for something so small? So far I haven't found any other corrections that could be added in.\n", "before_files": [{"content": "from bokeh.io import show\nfrom bokeh.models import LogColorMapper\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.unemployment import data as unemployment\nfrom bokeh.sampledata.us_counties import data as counties\n\npalette = tuple(reversed(palette))\n\ncounties = {\n code: county for code, county in counties.items() if county[\"state\"] == \"tx\"\n}\n\ncounty_xs = [county[\"lons\"] for county in counties.values()]\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n\ncounty_names = [county['name'] for county in counties.values()]\ncounty_rates = [unemployment[county_id] for county_id in counties]\ncolor_mapper = LogColorMapper(palette=palette)\n\ndata=dict(\n x=county_xs,\n y=county_ys,\n name=county_names,\n rate=county_rates,\n)\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n (\"Name\", \"@name\"), (\"Unemployment rate)\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\np.grid.grid_line_color = None\np.hover.point_policy = \"follow_mouse\"\n\np.patches('x', 'y', source=data,\n fill_color={'field': 'rate', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\nshow(p)\n", "path": "examples/plotting/file/texas.py"}]} | 1,091 | 167 |
gh_patches_debug_5488 | rasdani/github-patches | git_diff | iterative__dvc-7908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
config: option for `--jobs` value
Several DVC commands have a `--jobs` option that has default values (e.g., https://dvc.org/doc/commands-reference/gc).
Afaik there is no way to change the default value. Having the option to change it through `dvc config` would be useful.
Can you consider adding it?
Thanks
</issue>
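As a hedged sketch of the requested behaviour (names and precedence are assumptions, not DVC's implementation), a per-remote value could fall back to a core-level config entry before a built-in default:

```python
# Hypothetical precedence: remote setting > core config > built-in default.
def resolve_jobs(remote_conf: dict, core_conf: dict, builtin_default: int = 4) -> int:
    for source in (remote_conf, core_conf):
        value = source.get("jobs")
        if value:
            return int(value)
    return builtin_default

assert resolve_jobs({"jobs": 16}, {"jobs": 8}) == 16
assert resolve_jobs({}, {"jobs": 8}) == 8
assert resolve_jobs({}, {}) == 4
```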
<code>
[start of dvc/fs/__init__.py]
1 from urllib.parse import urlparse
2
3 # pylint: disable=unused-import
4 from dvc_objects.fs import utils # noqa: F401
5 from dvc_objects.fs import ( # noqa: F401
6 FS_MAP,
7 AzureFileSystem,
8 GDriveFileSystem,
9 GSFileSystem,
10 HDFSFileSystem,
11 HTTPFileSystem,
12 HTTPSFileSystem,
13 LocalFileSystem,
14 MemoryFileSystem,
15 OSSFileSystem,
16 S3FileSystem,
17 Schemes,
18 SSHFileSystem,
19 WebDAVFileSystem,
20 WebDAVSFileSystem,
21 WebHDFSFileSystem,
22 generic,
23 get_fs_cls,
24 system,
25 )
26 from dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401
27 from dvc_objects.fs.errors import ( # noqa: F401
28 AuthError,
29 ConfigError,
30 RemoteMissingDepsError,
31 )
32 from dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401
33 from dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401
34 from dvc_objects.fs.implementations.local import localfs # noqa: F401
35 from dvc_objects.fs.implementations.ssh import ( # noqa: F401
36 DEFAULT_PORT as DEFAULT_SSH_PORT,
37 )
38 from dvc_objects.fs.path import Path # noqa: F401
39
40 from .data import DataFileSystem # noqa: F401
41 from .dvc import DvcFileSystem # noqa: F401
42 from .git import GitFileSystem # noqa: F401
43
44 # pylint: enable=unused-import
45
46
47 def get_fs_config(repo, config, **kwargs):
48 name = kwargs.get("name")
49 if name:
50 try:
51 remote_conf = config["remote"][name.lower()]
52 except KeyError:
53 from dvc.config import RemoteNotFoundError
54
55 raise RemoteNotFoundError(f"remote '{name}' doesn't exist")
56 else:
57 remote_conf = kwargs
58 return _resolve_remote_refs(repo, config, remote_conf)
59
60
61 def _resolve_remote_refs(repo, config, remote_conf):
62 # Support for cross referenced remotes.
63 # This will merge the settings, shadowing base ref with remote_conf.
64 # For example, having:
65 #
66 # dvc remote add server ssh://localhost
67 # dvc remote modify server user root
68 # dvc remote modify server ask_password true
69 #
70 # dvc remote add images remote://server/tmp/pictures
71 # dvc remote modify images user alice
72 # dvc remote modify images ask_password false
73 # dvc remote modify images password asdf1234
74 #
75 # Results on a config dictionary like:
76 #
77 # {
78 # "url": "ssh://localhost/tmp/pictures",
79 # "user": "alice",
80 # "password": "asdf1234",
81 # "ask_password": False,
82 # }
83 parsed = urlparse(remote_conf["url"])
84 if parsed.scheme != "remote":
85 return remote_conf
86
87 base = get_fs_config(repo, config, name=parsed.netloc)
88 cls, _, _ = get_cloud_fs(repo, **base)
89 relpath = parsed.path.lstrip("/").replace("/", cls.sep)
90 url = cls.sep.join((base["url"], relpath))
91 return {**base, **remote_conf, "url": url}
92
93
94 def get_cloud_fs(repo, **kwargs):
95 from dvc.config import ConfigError as RepoConfigError
96 from dvc.config_schema import SCHEMA, Invalid
97
98 repo_config = repo.config if repo else {}
99 core_config = repo_config.get("core", {})
100
101 remote_conf = get_fs_config(repo, repo_config, **kwargs)
102 try:
103 remote_conf = SCHEMA["remote"][str](remote_conf)
104 except Invalid as exc:
105 raise RepoConfigError(str(exc)) from None
106
107 if "jobs" not in remote_conf:
108 jobs = core_config.get("jobs")
109 if jobs:
110 remote_conf["jobs"] = jobs
111
112 if "checksum_jobs" not in remote_conf:
113 checksum_jobs = core_config.get("checksum_jobs")
114 if checksum_jobs:
115 remote_conf["checksum_jobs"] = checksum_jobs
116
117 cls = get_fs_cls(remote_conf)
118
119 if cls == GDriveFileSystem and repo:
120 remote_conf["gdrive_credentials_tmp_dir"] = repo.tmp_dir
121
122 url = remote_conf.pop("url")
123 if issubclass(cls, WebDAVFileSystem):
124 # For WebDAVFileSystem, provided url is the base path itself, so it
125 # should be treated as being a root path.
126 fs_path = cls.root_marker
127 else:
128 fs_path = cls._strip_protocol(url) # pylint:disable=protected-access
129
130 extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access
131 conf = {**extras, **remote_conf} # remote config takes priority
132 return cls, conf, fs_path
133
[end of dvc/fs/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/fs/__init__.py b/dvc/fs/__init__.py
--- a/dvc/fs/__init__.py
+++ b/dvc/fs/__init__.py
@@ -104,11 +104,6 @@
except Invalid as exc:
raise RepoConfigError(str(exc)) from None
- if "jobs" not in remote_conf:
- jobs = core_config.get("jobs")
- if jobs:
- remote_conf["jobs"] = jobs
-
if "checksum_jobs" not in remote_conf:
checksum_jobs = core_config.get("checksum_jobs")
if checksum_jobs:
| {"golden_diff": "diff --git a/dvc/fs/__init__.py b/dvc/fs/__init__.py\n--- a/dvc/fs/__init__.py\n+++ b/dvc/fs/__init__.py\n@@ -104,11 +104,6 @@\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n \n- if \"jobs\" not in remote_conf:\n- jobs = core_config.get(\"jobs\")\n- if jobs:\n- remote_conf[\"jobs\"] = jobs\n-\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n", "issue": "config: option for `--jobs` value\nSeveral DVC commands have a `--jobs` option that has default values (e.g, https://dvc.org/doc/commands-reference/gc).\r\n\r\nAfaik there is no way to change the default value. Having the option to change it through `dvc config` would be useful.\r\n\r\nCan you consider adding it?\r\n\r\nThanks\n", "before_files": [{"content": "from urllib.parse import urlparse\n\n# pylint: disable=unused-import\nfrom dvc_objects.fs import utils # noqa: F401\nfrom dvc_objects.fs import ( # noqa: F401\n FS_MAP,\n AzureFileSystem,\n GDriveFileSystem,\n GSFileSystem,\n HDFSFileSystem,\n HTTPFileSystem,\n HTTPSFileSystem,\n LocalFileSystem,\n MemoryFileSystem,\n OSSFileSystem,\n S3FileSystem,\n Schemes,\n SSHFileSystem,\n WebDAVFileSystem,\n WebDAVSFileSystem,\n WebHDFSFileSystem,\n generic,\n get_fs_cls,\n system,\n)\nfrom dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401\nfrom dvc_objects.fs.errors import ( # noqa: F401\n AuthError,\n ConfigError,\n RemoteMissingDepsError,\n)\nfrom dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.local import localfs # noqa: F401\nfrom dvc_objects.fs.implementations.ssh import ( # noqa: F401\n DEFAULT_PORT as DEFAULT_SSH_PORT,\n)\nfrom dvc_objects.fs.path import Path # noqa: F401\n\nfrom .data import DataFileSystem # noqa: F401\nfrom .dvc import DvcFileSystem # noqa: F401\nfrom .git import GitFileSystem # noqa: F401\n\n# pylint: enable=unused-import\n\n\ndef get_fs_config(repo, config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n try:\n remote_conf = config[\"remote\"][name.lower()]\n except KeyError:\n from dvc.config import RemoteNotFoundError\n\n raise RemoteNotFoundError(f\"remote '{name}' doesn't exist\")\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, config, remote_conf)\n\n\ndef _resolve_remote_refs(repo, config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # \"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = get_fs_config(repo, config, name=parsed.netloc)\n cls, _, _ = get_cloud_fs(repo, **base)\n relpath = parsed.path.lstrip(\"/\").replace(\"/\", cls.sep)\n url = cls.sep.join((base[\"url\"], relpath))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_fs(repo, **kwargs):\n from dvc.config import ConfigError as 
RepoConfigError\n from dvc.config_schema import SCHEMA, Invalid\n\n repo_config = repo.config if repo else {}\n core_config = repo_config.get(\"core\", {})\n\n remote_conf = get_fs_config(repo, repo_config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n\n if \"jobs\" not in remote_conf:\n jobs = core_config.get(\"jobs\")\n if jobs:\n remote_conf[\"jobs\"] = jobs\n\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n remote_conf[\"checksum_jobs\"] = checksum_jobs\n\n cls = get_fs_cls(remote_conf)\n\n if cls == GDriveFileSystem and repo:\n remote_conf[\"gdrive_credentials_tmp_dir\"] = repo.tmp_dir\n\n url = remote_conf.pop(\"url\")\n if issubclass(cls, WebDAVFileSystem):\n # For WebDAVFileSystem, provided url is the base path itself, so it\n # should be treated as being a root path.\n fs_path = cls.root_marker\n else:\n fs_path = cls._strip_protocol(url) # pylint:disable=protected-access\n\n extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access\n conf = {**extras, **remote_conf} # remote config takes priority\n return cls, conf, fs_path\n", "path": "dvc/fs/__init__.py"}]} | 2,003 | 137 |
gh_patches_debug_34627 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update I1022 to only suggest sub if all values can be in the sub
### CloudFormation Lint Version
0.64.1
### What operating system are you using?
All
### Describe the bug
Original feedback provided by @iann0036. Translated to an issue for tracking.
```yaml
Fn::Join:
- ""
- - Fn::Select:
- 0
- Fn::Split:
- "/"
- !Ref MySubnet1CIDR
- !Ref MySubnetsCIDRSize
```
```
I1022: Prefer using Fn::Sub over Fn::Join with an empty delimiter
```
### Expected behavior
Currently the way to make this comply would be
```yaml
Fn::Sub:
- ${CIDR}${MySubnetsCIDRSize}
- CIDR:
Fn::Select:
- 0
- Fn::Split:
- "/"
- !Ref MySubnet1CIDR
```
which may not be as optimal
### Reproduction template
```yaml
Fn::Join:
- ""
- - Fn::Select:
- 0
- Fn::Split:
- "/"
- !Ref MySubnet1CIDR
- !Ref MySubnetsCIDRSize
```
</issue>
<code>
[start of src/cfnlint/rules/functions/SubNotJoin.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule, RuleMatch
6
7
8 class SubNotJoin(CloudFormationLintRule):
9 """Check if Join is being used with no join characters"""
10 id = 'I1022'
11 shortdesc = 'Use Sub instead of Join'
12 description = 'Prefer a sub instead of Join when using a join delimiter that is empty'
13 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
14 tags = ['functions', 'sub', 'join']
15
16 def match(self, cfn):
17 matches = []
18
19 join_objs = cfn.search_deep_keys('Fn::Join')
20
21 for join_obj in join_objs:
22 if isinstance(join_obj[-1], list):
23 join_operator = join_obj[-1][0]
24 if isinstance(join_operator, str):
25 if join_operator == '':
26 matches.append(RuleMatch(
27 join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))
28 return matches
29
[end of src/cfnlint/rules/functions/SubNotJoin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/functions/SubNotJoin.py b/src/cfnlint/rules/functions/SubNotJoin.py
--- a/src/cfnlint/rules/functions/SubNotJoin.py
+++ b/src/cfnlint/rules/functions/SubNotJoin.py
@@ -7,12 +7,34 @@
class SubNotJoin(CloudFormationLintRule):
"""Check if Join is being used with no join characters"""
+
id = 'I1022'
shortdesc = 'Use Sub instead of Join'
- description = 'Prefer a sub instead of Join when using a join delimiter that is empty'
+ description = (
+ 'Prefer a sub instead of Join when using a join delimiter that is empty'
+ )
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
tags = ['functions', 'sub', 'join']
+ def _check_element(self, element):
+ if isinstance(element, dict):
+ if len(element) == 1:
+ for key, value in element.items():
+ if key in ['Fn::Sub']:
+ if not isinstance(value, str):
+ return False
+ elif key not in ['Ref', 'Fn::GetAtt']:
+ return False
+
+ return True
+
+ def _check_elements(self, elements):
+ for element in elements:
+ if not self._check_element(element):
+ return False
+
+ return True
+
def match(self, cfn):
matches = []
@@ -21,8 +43,15 @@
for join_obj in join_objs:
if isinstance(join_obj[-1], list):
join_operator = join_obj[-1][0]
+ join_elements = join_obj[-1][1]
if isinstance(join_operator, str):
if join_operator == '':
- matches.append(RuleMatch(
- join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))
+ if isinstance(join_elements, list):
+ if self._check_elements(join_elements):
+ matches.append(
+ RuleMatch(
+ join_obj[0:-1],
+ 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',
+ )
+ )
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNotJoin.py b/src/cfnlint/rules/functions/SubNotJoin.py\n--- a/src/cfnlint/rules/functions/SubNotJoin.py\n+++ b/src/cfnlint/rules/functions/SubNotJoin.py\n@@ -7,12 +7,34 @@\n \n class SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n+\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n- description = 'Prefer a sub instead of Join when using a join delimiter that is empty'\n+ description = (\n+ 'Prefer a sub instead of Join when using a join delimiter that is empty'\n+ )\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n \n+ def _check_element(self, element):\n+ if isinstance(element, dict):\n+ if len(element) == 1:\n+ for key, value in element.items():\n+ if key in ['Fn::Sub']:\n+ if not isinstance(value, str):\n+ return False\n+ elif key not in ['Ref', 'Fn::GetAtt']:\n+ return False\n+\n+ return True\n+\n+ def _check_elements(self, elements):\n+ for element in elements:\n+ if not self._check_element(element):\n+ return False\n+\n+ return True\n+\n def match(self, cfn):\n matches = []\n \n@@ -21,8 +43,15 @@\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n+ join_elements = join_obj[-1][1]\n if isinstance(join_operator, str):\n if join_operator == '':\n- matches.append(RuleMatch(\n- join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))\n+ if isinstance(join_elements, list):\n+ if self._check_elements(join_elements):\n+ matches.append(\n+ RuleMatch(\n+ join_obj[0:-1],\n+ 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',\n+ )\n+ )\n return matches\n", "issue": "Update I1022 to only suggest sub if all values can be in the sub\n### CloudFormation Lint Version\n\n0.64.1\n\n### What operating system are you using?\n\nAll\n\n### Describe the bug\n\nOriginal feedback provided by @iann0036. Translated to an issue for tracking.\r\n\r\n```yaml\r\nFn::Join:\r\n - \"\"\r\n - - Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n - !Ref MySubnetsCIDRSize\r\n```\r\n\r\n```\r\nI1022: Prefer using Fn::Sub over Fn::Join with an empty delimiter\r\n```\n\n### Expected behavior\n\nCurrently the way to make this comply would be\r\n\r\n```yaml\r\nFn::Sub:\r\n - ${CIDR}${MySubnetsCIDRSize}\r\n - CIDR:\r\n Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n```\r\n\r\nwhich may not be as optimal \n\n### Reproduction template\n\n```yaml\r\nFn::Join:\r\n - \"\"\r\n - - Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n - !Ref MySubnetsCIDRSize\r\n ````\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n description = 'Prefer a sub instead of Join when using a join delimiter that is empty'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n\n def match(self, cfn):\n matches = []\n\n join_objs = cfn.search_deep_keys('Fn::Join')\n\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n if isinstance(join_operator, str):\n if join_operator == '':\n matches.append(RuleMatch(\n join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))\n return matches\n", "path": "src/cfnlint/rules/functions/SubNotJoin.py"}]} | 1,133 | 505 |
gh_patches_debug_1475 | rasdani/github-patches | git_diff | graspologic-org__graspologic-654 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Possible issue with direct import
```
import graspologic
dir(graspologic)
```
returns
```
['__builtins__',
'__cached__',
'__doc__',
'__file__',
'__loader__',
'__name__',
'__package__',
'__path__',
'__spec__',
'__version',
'__version__',
'graspologic',
'layouts',
'models',
'partition',
'plot',
'preprocessing',
'subgraph',
'version']
```
and is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).
Is this intentional?
</issue>
<code>
[start of graspologic/__init__.py]
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 import graspologic.align
5 import graspologic.cluster
6 import graspologic.datasets
7 import graspologic.embed
8 import graspologic.inference
9 import graspologic.layouts
10 import graspologic.models
11 import graspologic.partition
12 import graspologic.preprocessing
13 import graspologic.plot
14 import graspologic.simulations
15 import graspologic.subgraph
16 import graspologic.utils
17
18 from graspologic.version import __version
19
20 __version__ = __version()
21
[end of graspologic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/graspologic/__init__.py b/graspologic/__init__.py
--- a/graspologic/__init__.py
+++ b/graspologic/__init__.py
@@ -8,6 +8,7 @@
import graspologic.inference
import graspologic.layouts
import graspologic.models
+import graspologic.nominate
import graspologic.partition
import graspologic.preprocessing
import graspologic.plot
| {"golden_diff": "diff --git a/graspologic/__init__.py b/graspologic/__init__.py\n--- a/graspologic/__init__.py\n+++ b/graspologic/__init__.py\n@@ -8,6 +8,7 @@\n import graspologic.inference\n import graspologic.layouts\n import graspologic.models\n+import graspologic.nominate\n import graspologic.partition\n import graspologic.preprocessing\n import graspologic.plot\n", "issue": "[BUG] Possible issue with direct import\n```\r\nimport graspologic\r\ndir(graspologic)\r\n```\r\nreturns \r\n\r\n```\r\n['__builtins__',\r\n '__cached__',\r\n '__doc__',\r\n '__file__',\r\n '__loader__',\r\n '__name__',\r\n '__package__',\r\n '__path__',\r\n '__spec__',\r\n '__version',\r\n '__version__',\r\n 'graspologic',\r\n 'layouts',\r\n 'models',\r\n 'partition',\r\n 'plot',\r\n 'preprocessing',\r\n 'subgraph',\r\n 'version']\r\n```\r\n\r\nand is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).\r\nIs this intentional?\n[BUG] Possible issue with direct import\n```\r\nimport graspologic\r\ndir(graspologic)\r\n```\r\nreturns \r\n\r\n```\r\n['__builtins__',\r\n '__cached__',\r\n '__doc__',\r\n '__file__',\r\n '__loader__',\r\n '__name__',\r\n '__package__',\r\n '__path__',\r\n '__spec__',\r\n '__version',\r\n '__version__',\r\n 'graspologic',\r\n 'layouts',\r\n 'models',\r\n 'partition',\r\n 'plot',\r\n 'preprocessing',\r\n 'subgraph',\r\n 'version']\r\n```\r\n\r\nand is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).\r\nIs this intentional?\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n", "path": "graspologic/__init__.py"}]} | 932 | 89 |
gh_patches_debug_13790 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3620 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider wafflehouse is broken
During the global build at 2021-06-02-14-42-40, spider **wafflehouse** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/wafflehouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson))
</issue>
<code>
[start of locations/spiders/wafflehouse.py]
1 # -*- coding: utf-8 -*-
2 import json
3
4 import scrapy
5
6 from locations.hours import OpeningHours
7 from locations.items import GeojsonPointItem
8
9
10 class WaffleHouseSpider(scrapy.Spider):
11 name = "wafflehouse"
12 item_attributes = {"brand": "Waffle House", "brand_wikidata": "Q1701206"}
13 allowed_domains = ["wafflehouse.com"]
14 start_urls = [
15 "https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1"
16 ]
17
18 def parse(self, response):
19 for row in response.json()["markers"]:
20 url = "https://locations.wafflehouse.com/" + row["slug"]
21 yield scrapy.Request(url, callback=self.parse_store)
22
23 def parse_store(self, response):
24 data = json.loads(
25 response.xpath('//head/script[@type="application/ld+json"]/text()').get()
26 )
27
28 hours = OpeningHours()
29 specs = data.get("openingHoursSpecification", [])
30 if any({"validFrom", "validThrough"} <= spec.keys() for spec in specs):
31 # Giving opening hours for specific dates, abandon the whole proposal
32 pass
33 else:
34 for spec in specs:
35 for day in spec["dayOfWeek"]:
36 hours.add_range(
37 day[:2].capitalize(), spec["opens"], spec["closes"], "%I%p"
38 )
39
40 properties = {
41 "ref": data["@id"],
42 "lat": data["geo"]["latitude"],
43 "lon": data["geo"]["longitude"],
44 "website": response.url,
45 "name": data["name"],
46 "phone": data["telephone"],
47 "addr_full": data["address"]["streetAddress"],
48 "city": data["address"]["addressLocality"],
49 "state": data["address"]["addressRegion"],
50 "postcode": data["address"]["postalCode"],
51 "opening_hours": hours.as_opening_hours(),
52 }
53 yield GeojsonPointItem(**properties)
54
[end of locations/spiders/wafflehouse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/wafflehouse.py b/locations/spiders/wafflehouse.py
--- a/locations/spiders/wafflehouse.py
+++ b/locations/spiders/wafflehouse.py
@@ -44,10 +44,11 @@
"website": response.url,
"name": data["name"],
"phone": data["telephone"],
- "addr_full": data["address"]["streetAddress"],
+ "street_address": data["address"]["streetAddress"],
"city": data["address"]["addressLocality"],
"state": data["address"]["addressRegion"],
"postcode": data["address"]["postalCode"],
"opening_hours": hours.as_opening_hours(),
}
+
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/wafflehouse.py b/locations/spiders/wafflehouse.py\n--- a/locations/spiders/wafflehouse.py\n+++ b/locations/spiders/wafflehouse.py\n@@ -44,10 +44,11 @@\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n- \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"street_address\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n+\n yield GeojsonPointItem(**properties)\n", "issue": "Spider wafflehouse is broken\nDuring the global build at 2021-06-02-14-42-40, spider **wafflehouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/wafflehouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass WaffleHouseSpider(scrapy.Spider):\n name = \"wafflehouse\"\n item_attributes = {\"brand\": \"Waffle House\", \"brand_wikidata\": \"Q1701206\"}\n allowed_domains = [\"wafflehouse.com\"]\n start_urls = [\n \"https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1\"\n ]\n\n def parse(self, response):\n for row in response.json()[\"markers\"]:\n url = \"https://locations.wafflehouse.com/\" + row[\"slug\"]\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(\n response.xpath('//head/script[@type=\"application/ld+json\"]/text()').get()\n )\n\n hours = OpeningHours()\n specs = data.get(\"openingHoursSpecification\", [])\n if any({\"validFrom\", \"validThrough\"} <= spec.keys() for spec in specs):\n # Giving opening hours for specific dates, abandon the whole proposal\n pass\n else:\n for spec in specs:\n for day in spec[\"dayOfWeek\"]:\n hours.add_range(\n day[:2].capitalize(), spec[\"opens\"], spec[\"closes\"], \"%I%p\"\n )\n\n properties = {\n \"ref\": data[\"@id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wafflehouse.py"}]} | 1,355 | 165 |
gh_patches_debug_27561 | rasdani/github-patches | git_diff | huggingface__transformers-11746 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)
the command to reproduce:
cd huggingface-transformers/examples/pytorch/question-answering
python -m torch.distributed.launch --nproc_per_node=8 ./run_qa.py \
--model_name_or_path roberta-large \
--dataset_name squad \
--do_train --do_eval \
--per_device_train_batch_size 16 \
--per_device_eval_batch_size 256 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir test_result2/$trials --overwrite_output_dir \
--logging_dir test_result2/$trials/tensorboard --logging_first_step --logging_steps 50 \
--fp16
I tried adding "--max_eval_samples 10240"; this fixes the error, but the result is quite low (exact_match = 4.9414, f1 = 8.9784). When I ran with 1 GPU, the above command succeeded (exact_match = 88.5336, f1 = 94.3266).
The full error is: "File "./transformers/src/transformers/trainer_pt_utils.py", line 410, in _nested_set_tensors
i * slice_len : (i + 1) * slice_len
i * slice_len : (i + 1) * slice_len
ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)"
</issue>
<code>
[start of examples/pytorch/question-answering/trainer_qa.py]
1 # coding=utf-8
2 # Copyright 2020 The HuggingFace Team All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """
16 A subclass of `Trainer` specific to Question-Answering tasks
17 """
18
19 from transformers import Trainer, is_torch_tpu_available
20 from transformers.trainer_utils import PredictionOutput
21
22
23 if is_torch_tpu_available():
24 import torch_xla.core.xla_model as xm
25 import torch_xla.debug.metrics as met
26
27
28 class QuestionAnsweringTrainer(Trainer):
29 def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.eval_examples = eval_examples
32 self.post_process_function = post_process_function
33
34 def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):
35 eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
36 eval_dataloader = self.get_eval_dataloader(eval_dataset)
37 eval_examples = self.eval_examples if eval_examples is None else eval_examples
38
39 # Temporarily disable metric computation, we will do it in the loop here.
40 compute_metrics = self.compute_metrics
41 self.compute_metrics = None
42 try:
43 output = self.prediction_loop(
44 eval_dataloader,
45 description="Evaluation",
46 # No point gathering the predictions if there are no metrics, otherwise we defer to
47 # self.args.prediction_loss_only
48 prediction_loss_only=True if compute_metrics is None else None,
49 ignore_keys=ignore_keys,
50 )
51 finally:
52 self.compute_metrics = compute_metrics
53
54 if self.post_process_function is not None and self.compute_metrics is not None:
55 eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
56 metrics = self.compute_metrics(eval_preds)
57
58 self.log(metrics)
59 else:
60 metrics = {}
61
62 if self.args.tpu_metrics_debug or self.args.debug:
63 # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
64 xm.master_print(met.metrics_report())
65
66 self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
67 return metrics
68
69 def predict(self, predict_dataset, predict_examples, ignore_keys=None):
70 predict_dataloader = self.get_test_dataloader(predict_dataset)
71
72 # Temporarily disable metric computation, we will do it in the loop here.
73 compute_metrics = self.compute_metrics
74 self.compute_metrics = None
75 try:
76 output = self.prediction_loop(
77 predict_dataloader,
78 description="Prediction",
79 # No point gathering the predictions if there are no metrics, otherwise we defer to
80 # self.args.prediction_loss_only
81 prediction_loss_only=True if compute_metrics is None else None,
82 ignore_keys=ignore_keys,
83 )
84 finally:
85 self.compute_metrics = compute_metrics
86
87 if self.post_process_function is None or self.compute_metrics is None:
88 return output
89
90 predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
91 metrics = self.compute_metrics(predictions)
92
93 return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
94
[end of examples/pytorch/question-answering/trainer_qa.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/pytorch/question-answering/trainer_qa.py b/examples/pytorch/question-answering/trainer_qa.py
--- a/examples/pytorch/question-answering/trainer_qa.py
+++ b/examples/pytorch/question-answering/trainer_qa.py
@@ -39,8 +39,9 @@
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
- output = self.prediction_loop(
+ output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
@@ -72,8 +73,9 @@
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
- output = self.prediction_loop(
+ output = eval_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
| {"golden_diff": "diff --git a/examples/pytorch/question-answering/trainer_qa.py b/examples/pytorch/question-answering/trainer_qa.py\n--- a/examples/pytorch/question-answering/trainer_qa.py\n+++ b/examples/pytorch/question-answering/trainer_qa.py\n@@ -39,8 +39,9 @@\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n- output = self.prediction_loop(\n+ output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n@@ -72,8 +73,9 @@\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n- output = self.prediction_loop(\n+ output = eval_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n", "issue": "ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)\nthe command to reproduce:\r\ncd huggingface-transformers/examples/pytorch/question-answering\r\npython -m torch.distributed.launch --nproc_per_node=8 ./run_qa.py \\\r\n\t\t --model_name_or_path roberta-large \\\r\n\t\t --dataset_name squad \\\r\n\t\t --do_train --do_eval \\\r\n\t\t --per_device_train_batch_size 16 \\\r\n\t\t --per_device_eval_batch_size 256 \\\r\n\t\t --learning_rate 3e-5 \\\r\n\t\t --num_train_epochs 2 \\\r\n\t\t --max_seq_length 384 \\\r\n\t\t --doc_stride 128 \\\r\n\t\t --output_dir test_result2/$trials --overwrite_output_dir \\\r\n\t\t --logging_dir test_result2/$trials/tensorboard --logging_first_step --logging_steps 50 \\\r\n --fp16\r\n\r\n\r\n\r\ni tried add \"--max_eval_samples 10240\", this will fix the error, while the AUC result is quite low(exact_match = 4.9414, f1 = 8.9784). 
and when i ran with 1gpu, the above command can succeed(exact_match = 88.5336, f1 = 94.3266)\r\n\r\n\r\nthe full error is \"File \"./transformers/src/transformers/trainer_pt_utils.py\", line 410, in _nested_set_tensors\r\n i * slice_len : (i + 1) * slice_len\r\n i * slice_len : (i + 1) * slice_len\r\nValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)\"\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA subclass of `Trainer` specific to Question-Answering tasks\n\"\"\"\n\nfrom transformers import Trainer, is_torch_tpu_available\nfrom transformers.trainer_utils import PredictionOutput\n\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n\n\nclass QuestionAnsweringTrainer(Trainer):\n def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.eval_examples = eval_examples\n self.post_process_function = post_process_function\n\n def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):\n eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n eval_examples = self.eval_examples if eval_examples is None else eval_examples\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n try:\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is not None and self.compute_metrics is not None:\n eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)\n metrics = self.compute_metrics(eval_preds)\n\n self.log(metrics)\n else:\n metrics = {}\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n return metrics\n\n def predict(self, predict_dataset, predict_examples, ignore_keys=None):\n predict_dataloader = self.get_test_dataloader(predict_dataset)\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n try:\n output = self.prediction_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is 
None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is None or self.compute_metrics is None:\n return output\n\n predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, \"predict\")\n metrics = self.compute_metrics(predictions)\n\n return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)\n", "path": "examples/pytorch/question-answering/trainer_qa.py"}]} | 1,920 | 291 |
gh_patches_debug_16666 | rasdani/github-patches | git_diff | SeldonIO__MLServer-613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Compress requests / responses
Leverage `gzip` to compress requests / responses.
</issue>
<code>
[start of mlserver/rest/app.py]
1 from typing import Callable
2 from fastapi import FastAPI
3 from fastapi.responses import Response as FastAPIResponse
4 from fastapi.routing import APIRoute as FastAPIRoute
5 from fastapi.middleware.cors import CORSMiddleware
6 from starlette_exporter import PrometheusMiddleware
7
8 from .endpoints import Endpoints, ModelRepositoryEndpoints
9 from .requests import Request
10 from .responses import Response
11 from .errors import _EXCEPTION_HANDLERS
12
13 from ..settings import Settings
14 from ..handlers import DataPlane, ModelRepositoryHandlers
15
16
17 class APIRoute(FastAPIRoute):
18 """
19 Custom route to use our own Request handler.
20 """
21
22 def get_route_handler(self) -> Callable:
23 original_route_handler = super().get_route_handler()
24
25 async def custom_route_handler(request: Request) -> FastAPIResponse:
26 request = Request(request.scope, request.receive)
27 return await original_route_handler(request)
28
29 return custom_route_handler
30
31
32 def create_app(
33 settings: Settings,
34 data_plane: DataPlane,
35 model_repository_handlers: ModelRepositoryHandlers,
36 ) -> FastAPI:
37 endpoints = Endpoints(data_plane)
38 model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)
39
40 routes = [
41 # Model ready
42 APIRoute(
43 "/v2/models/{model_name}/ready",
44 endpoints.model_ready,
45 ),
46 APIRoute(
47 "/v2/models/{model_name}/versions/{model_version}/ready",
48 endpoints.model_ready,
49 ),
50 # Model infer
51 APIRoute(
52 "/v2/models/{model_name}/infer",
53 endpoints.infer,
54 methods=["POST"],
55 ),
56 APIRoute(
57 "/v2/models/{model_name}/versions/{model_version}/infer",
58 endpoints.infer,
59 methods=["POST"],
60 ),
61 # Model metadata
62 APIRoute(
63 "/v2/models/{model_name}",
64 endpoints.model_metadata,
65 ),
66 APIRoute(
67 "/v2/models/{model_name}/versions/{model_version}",
68 endpoints.model_metadata,
69 ),
70 # Liveness and readiness
71 APIRoute("/v2/health/live", endpoints.live),
72 APIRoute("/v2/health/ready", endpoints.ready),
73 # Server metadata
74 APIRoute(
75 "/v2",
76 endpoints.metadata,
77 ),
78 ]
79
80 routes += [
81 # Model Repository API
82 APIRoute(
83 "/v2/repository/index",
84 model_repository_endpoints.index,
85 methods=["POST"],
86 ),
87 APIRoute(
88 "/v2/repository/models/{model_name}/load",
89 model_repository_endpoints.load,
90 methods=["POST"],
91 ),
92 APIRoute(
93 "/v2/repository/models/{model_name}/unload",
94 model_repository_endpoints.unload,
95 methods=["POST"],
96 ),
97 ]
98
99 app = FastAPI(
100 debug=settings.debug,
101 routes=routes, # type: ignore
102 default_response_class=Response,
103 exception_handlers=_EXCEPTION_HANDLERS, # type: ignore
104 )
105
106 if settings.cors_settings is not None:
107 app.add_middleware(
108 CORSMiddleware,
109 allow_origins=settings.cors_settings.allow_origins,
110 allow_origin_regex=settings.cors_settings.allow_origin_regex,
111 allow_credentials=settings.cors_settings.allow_credentials,
112 allow_methods=settings.cors_settings.allow_methods,
113 allow_headers=settings.cors_settings.allow_headers,
114 max_age=settings.cors_settings.max_age,
115 )
116
117 if settings.metrics_endpoint:
118 app.add_middleware(
119 PrometheusMiddleware,
120 app_name="mlserver",
121 prefix="rest_server",
122 # TODO: Should we also exclude model's health endpoints?
123 skip_paths=[
124 settings.metrics_endpoint,
125 "/v2/health/live",
126 "/v2/health/ready",
127 ],
128 )
129
130 return app
131
[end of mlserver/rest/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/rest/app.py b/mlserver/rest/app.py
--- a/mlserver/rest/app.py
+++ b/mlserver/rest/app.py
@@ -3,6 +3,7 @@
from fastapi.responses import Response as FastAPIResponse
from fastapi.routing import APIRoute as FastAPIRoute
from fastapi.middleware.cors import CORSMiddleware
+from fastapi.middleware.gzip import GZipMiddleware
from starlette_exporter import PrometheusMiddleware
from .endpoints import Endpoints, ModelRepositoryEndpoints
@@ -103,6 +104,7 @@
exception_handlers=_EXCEPTION_HANDLERS, # type: ignore
)
+ app.add_middleware(GZipMiddleware)
if settings.cors_settings is not None:
app.add_middleware(
CORSMiddleware,
| {"golden_diff": "diff --git a/mlserver/rest/app.py b/mlserver/rest/app.py\n--- a/mlserver/rest/app.py\n+++ b/mlserver/rest/app.py\n@@ -3,6 +3,7 @@\n from fastapi.responses import Response as FastAPIResponse\n from fastapi.routing import APIRoute as FastAPIRoute\n from fastapi.middleware.cors import CORSMiddleware\n+from fastapi.middleware.gzip import GZipMiddleware\n from starlette_exporter import PrometheusMiddleware\n \n from .endpoints import Endpoints, ModelRepositoryEndpoints\n@@ -103,6 +104,7 @@\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n \n+ app.add_middleware(GZipMiddleware)\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n", "issue": "Compress requests / responses\nLeverage `gzip` to compress requests / responses.\n", "before_files": [{"content": "from typing import Callable\nfrom fastapi import FastAPI\nfrom fastapi.responses import Response as FastAPIResponse\nfrom fastapi.routing import APIRoute as FastAPIRoute\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom starlette_exporter import PrometheusMiddleware\n\nfrom .endpoints import Endpoints, ModelRepositoryEndpoints\nfrom .requests import Request\nfrom .responses import Response\nfrom .errors import _EXCEPTION_HANDLERS\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\n\n\nclass APIRoute(FastAPIRoute):\n \"\"\"\n Custom route to use our own Request handler.\n \"\"\"\n\n def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n\n async def custom_route_handler(request: Request) -> FastAPIResponse:\n request = Request(request.scope, request.receive)\n return await original_route_handler(request)\n\n return custom_route_handler\n\n\ndef create_app(\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n) -> FastAPI:\n endpoints = Endpoints(data_plane)\n model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)\n\n routes = [\n # Model ready\n APIRoute(\n \"/v2/models/{model_name}/ready\",\n endpoints.model_ready,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/ready\",\n endpoints.model_ready,\n ),\n # Model infer\n APIRoute(\n \"/v2/models/{model_name}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n # Model metadata\n APIRoute(\n \"/v2/models/{model_name}\",\n endpoints.model_metadata,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}\",\n endpoints.model_metadata,\n ),\n # Liveness and readiness\n APIRoute(\"/v2/health/live\", endpoints.live),\n APIRoute(\"/v2/health/ready\", endpoints.ready),\n # Server metadata\n APIRoute(\n \"/v2\",\n endpoints.metadata,\n ),\n ]\n\n routes += [\n # Model Repository API\n APIRoute(\n \"/v2/repository/index\",\n model_repository_endpoints.index,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/load\",\n model_repository_endpoints.load,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/unload\",\n model_repository_endpoints.unload,\n methods=[\"POST\"],\n ),\n ]\n\n app = FastAPI(\n debug=settings.debug,\n routes=routes, # type: ignore\n default_response_class=Response,\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n 
allow_origins=settings.cors_settings.allow_origins,\n allow_origin_regex=settings.cors_settings.allow_origin_regex,\n allow_credentials=settings.cors_settings.allow_credentials,\n allow_methods=settings.cors_settings.allow_methods,\n allow_headers=settings.cors_settings.allow_headers,\n max_age=settings.cors_settings.max_age,\n )\n\n if settings.metrics_endpoint:\n app.add_middleware(\n PrometheusMiddleware,\n app_name=\"mlserver\",\n prefix=\"rest_server\",\n # TODO: Should we also exclude model's health endpoints?\n skip_paths=[\n settings.metrics_endpoint,\n \"/v2/health/live\",\n \"/v2/health/ready\",\n ],\n )\n\n return app\n", "path": "mlserver/rest/app.py"}]} | 1,630 | 174 |
gh_patches_debug_26969 | rasdani/github-patches | git_diff | conda__conda-707 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add ability to keep retrying with a lock error
The yum installer (IIRC) has a nice feature that it will keep trying every 10 seconds or so if there is a lock error. This could be useful for conda.
</issue>
<code>
[start of conda/lock.py]
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 """
8 Tools for working with locks
9
10 A lock is just an empty directory. We use directories because this lets us use
11 the race condition-proof os.makedirs.
12
13 For now, there is one global lock for all of conda, because some things happen
14 globally (such as downloading packages).
15
16 We don't raise an error if the lock is named with the current PID
17 """
18
19 import os
20 from os.path import join
21 import glob
22
23
24 LOCKFN = '.conda_lock'
25
26
27 class Locked(object):
28 """
29 Context manager to handle locks.
30 """
31 def __init__(self, path):
32 self.path = path
33 self.end = "-" + str(os.getpid())
34 self.lock_path = join(self.path, LOCKFN + self.end)
35 self.pattern = join(self.path, LOCKFN + '-*')
36 self.remove = True
37
38 def __enter__(self):
39 files = glob.glob(self.pattern)
40 if files and not files[0].endswith(self.end):
41 # Keep the string "LOCKERROR" in this string so that external
42 # programs can look for it.
43 raise RuntimeError("""\
44 LOCKERROR: It looks like conda is already doing something.
45 The lock %s was found. Wait for it to finish before continuing.
46 If you are sure that conda is not running, remove it and try again.
47 You can also use: $ conda clean --lock""" % self.lock_path)
48
49 if not files:
50 try:
51 os.makedirs(self.lock_path)
52 except OSError:
53 pass
54 else: # PID lock already here --- someone else will remove it.
55 self.remove = False
56
57 def __exit__(self, exc_type, exc_value, traceback):
58 if self.remove:
59 for path in self.lock_path, self.path:
60 try:
61 os.rmdir(path)
62 except OSError:
63 pass
64
[end of conda/lock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/lock.py b/conda/lock.py
--- a/conda/lock.py
+++ b/conda/lock.py
@@ -19,7 +19,7 @@
import os
from os.path import join
import glob
-
+from time import sleep
LOCKFN = '.conda_lock'
@@ -36,15 +36,28 @@
self.remove = True
def __enter__(self):
- files = glob.glob(self.pattern)
- if files and not files[0].endswith(self.end):
- # Keep the string "LOCKERROR" in this string so that external
- # programs can look for it.
- raise RuntimeError("""\
-LOCKERROR: It looks like conda is already doing something.
-The lock %s was found. Wait for it to finish before continuing.
-If you are sure that conda is not running, remove it and try again.
-You can also use: $ conda clean --lock""" % self.lock_path)
+ retries = 10
+ # Keep the string "LOCKERROR" in this string so that external
+ # programs can look for it.
+ lockstr = ("""\
+ LOCKERROR: It looks like conda is already doing something.
+ The lock %s was found. Wait for it to finish before continuing.
+ If you are sure that conda is not running, remove it and try again.
+ You can also use: $ conda clean --lock""" % self.lock_path)
+ sleeptime = 1
+ while retries:
+ files = glob.glob(self.pattern)
+ if files and not files[0].endswith(self.end):
+ print(lockstr)
+ print("Sleeping for %s seconds" % sleeptime)
+ sleep(sleeptime)
+ sleeptime *= 2
+ retries -= 1
+ else:
+ break
+ else:
+ print("Exceeded max retries, giving up")
+ raise RuntimeError(lockstr)
if not files:
try:
| {"golden_diff": "diff --git a/conda/lock.py b/conda/lock.py\n--- a/conda/lock.py\n+++ b/conda/lock.py\n@@ -19,7 +19,7 @@\n import os\n from os.path import join\n import glob\n-\n+from time import sleep\n \n LOCKFN = '.conda_lock'\n \n@@ -36,15 +36,28 @@\n self.remove = True\n \n def __enter__(self):\n- files = glob.glob(self.pattern)\n- if files and not files[0].endswith(self.end):\n- # Keep the string \"LOCKERROR\" in this string so that external\n- # programs can look for it.\n- raise RuntimeError(\"\"\"\\\n-LOCKERROR: It looks like conda is already doing something.\n-The lock %s was found. Wait for it to finish before continuing.\n-If you are sure that conda is not running, remove it and try again.\n-You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n+ retries = 10\n+ # Keep the string \"LOCKERROR\" in this string so that external\n+ # programs can look for it.\n+ lockstr = (\"\"\"\\\n+ LOCKERROR: It looks like conda is already doing something.\n+ The lock %s was found. Wait for it to finish before continuing.\n+ If you are sure that conda is not running, remove it and try again.\n+ You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n+ sleeptime = 1\n+ while retries:\n+ files = glob.glob(self.pattern)\n+ if files and not files[0].endswith(self.end):\n+ print(lockstr)\n+ print(\"Sleeping for %s seconds\" % sleeptime)\n+ sleep(sleeptime)\n+ sleeptime *= 2\n+ retries -= 1\n+ else:\n+ break\n+ else:\n+ print(\"Exceeded max retries, giving up\")\n+ raise RuntimeError(lockstr)\n \n if not files:\n try:\n", "issue": "Add ability to keep retrying with a lock error\nThe yum installer (IIRC) has a nice feature that it will keep trying every 10 seconds or so if there is a lock error. This could be useful for conda. \n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\n\nimport os\nfrom os.path import join\nimport glob\n\n\nLOCKFN = '.conda_lock'\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = join(self.path, LOCKFN + self.end)\n self.pattern = join(self.path, LOCKFN + '-*')\n self.remove = True\n\n def __enter__(self):\n files = glob.glob(self.pattern)\n if files and not files[0].endswith(self.end):\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n raise RuntimeError(\"\"\"\\\nLOCKERROR: It looks like conda is already doing something.\nThe lock %s was found. 
Wait for it to finish before continuing.\nIf you are sure that conda is not running, remove it and try again.\nYou can also use: $ conda clean --lock\"\"\" % self.lock_path)\n\n if not files:\n try:\n os.makedirs(self.lock_path)\n except OSError:\n pass\n else: # PID lock already here --- someone else will remove it.\n self.remove = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.remove:\n for path in self.lock_path, self.path:\n try:\n os.rmdir(path)\n except OSError:\n pass\n", "path": "conda/lock.py"}]} | 1,162 | 452 |
gh_patches_debug_58136 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4730 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No "moderation tasks" filter in participatory budget (one phase)
**URL:** https://meinberlin-dev.liqd.net/projekte/module/burgerhaushalt/?mode=list (list view)
or https://meinberlin-dev.liqd.net/dashboard/projects/burgerhaushalt-spandau/basic/ (dashboard)
**user:** Moderator, Admin
**expected behaviour:** When using participatory budget with one phase, I want to be able to set up moderation tasks for the discussion of ideas and to filter ideas with an "open moderation tasks" filter
**behaviour:** There is no "moderation tasks" filter in the list view of ideas in participatory budget (one phase), nor is there the possibility to create moderation tasks in the project dashboard
**important screensize:** no
**device & browser:** Mac/Windows Chrome, Edge Firefox, Iphone, Samsung Galaxy 20
</issue>
<code>
[start of meinberlin/apps/moderationtasks/dashboard.py]
1 from django.utils.translation import gettext_lazy as _
2
3 from adhocracy4.dashboard import ModuleFormSetComponent
4 from adhocracy4.dashboard import components
5
6 from . import forms
7
8
9 class ModerationTasksComponent(ModuleFormSetComponent):
10 identifier = 'moderation_tasks'
11 weight = 15
12 label = _('Moderation Tasks')
13
14 form_title = _('Edit moderation tasks')
15 form_class = forms.ModerationTasksFormSet
16 form_template_name = \
17 'meinberlin_moderationtasks/moderation_tasks_form.html'
18
19 def is_effective(self, module):
20 return module.blueprint_type in ['PB1', 'PB2', 'PB3']
21
22
23 components.register_module(ModerationTasksComponent())
24
[end of meinberlin/apps/moderationtasks/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/moderationtasks/dashboard.py b/meinberlin/apps/moderationtasks/dashboard.py
--- a/meinberlin/apps/moderationtasks/dashboard.py
+++ b/meinberlin/apps/moderationtasks/dashboard.py
@@ -17,7 +17,7 @@
'meinberlin_moderationtasks/moderation_tasks_form.html'
def is_effective(self, module):
- return module.blueprint_type in ['PB1', 'PB2', 'PB3']
+ return module.blueprint_type in ['PB', 'PB2', 'PB3']
components.register_module(ModerationTasksComponent())
| {"golden_diff": "diff --git a/meinberlin/apps/moderationtasks/dashboard.py b/meinberlin/apps/moderationtasks/dashboard.py\n--- a/meinberlin/apps/moderationtasks/dashboard.py\n+++ b/meinberlin/apps/moderationtasks/dashboard.py\n@@ -17,7 +17,7 @@\n 'meinberlin_moderationtasks/moderation_tasks_form.html'\n \n def is_effective(self, module):\n- return module.blueprint_type in ['PB1', 'PB2', 'PB3']\n+ return module.blueprint_type in ['PB', 'PB2', 'PB3']\n \n \n components.register_module(ModerationTasksComponent())\n", "issue": "No \"moderation tasks\" filter in participatory budget (one phase)\n**URL:** https://meinberlin-dev.liqd.net/projekte/module/burgerhaushalt/?mode=list (list view)\r\nor https://meinberlin-dev.liqd.net/dashboard/projects/burgerhaushalt-spandau/basic/ (dashboard)\r\n**user:** Moderator, Admin\r\n**expected behaviour:** When using participatory budget with one phase i want to be able to set up moderation tasks for the discussion of ideas and want to filter ideas with an filter \"open moderationtasks\"\r\n**behaviour:** There is no filter \"moderation tasks\" in the list view of ideas in participatory budget (one phase) nor is there the possibility to create moderation tasks in the dashboard of the project\r\n**important screensize:** no\r\n**device & browser:** Mac/Windows Chrome, Edge Firefox, Iphone, Samsung Galaxy 20\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import ModuleFormSetComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import forms\n\n\nclass ModerationTasksComponent(ModuleFormSetComponent):\n identifier = 'moderation_tasks'\n weight = 15\n label = _('Moderation Tasks')\n\n form_title = _('Edit moderation tasks')\n form_class = forms.ModerationTasksFormSet\n form_template_name = \\\n 'meinberlin_moderationtasks/moderation_tasks_form.html'\n\n def is_effective(self, module):\n return module.blueprint_type in ['PB1', 'PB2', 'PB3']\n\n\ncomponents.register_module(ModerationTasksComponent())\n", "path": "meinberlin/apps/moderationtasks/dashboard.py"}]} | 930 | 140 |
gh_patches_debug_30055 | rasdani/github-patches | git_diff | pytorch__torchdynamo-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make torchdynamo not import third party package in `skipfiles.py`
@xuzhao9 in https://github.com/facebookresearch/torchdynamo/issues/107#issuecomment-1095681515 found that the following line makes alexnet 18% slower:
https://github.com/jansel/torchdynamo/blob/bf90b8cdbacf35944fa8c12185b1823dc5cb90bb/torchdynamo/skipfiles.py#L123
It seems that importing "networkx", "omegaconf", "onnx", "pandas", and "sklearn" causes performance issues.
TorchDynamo is only importing these modules to find the filename, which is also a bit wasteful. We should rewrite `skipfiles.py` to use [find_spec](https://docs.python.org/3/library/importlib.html#importlib.abc.PathEntryFinder.find_spec) instead, so we don't need to import unused packages.
Also, I think we can cut down the list of modules in skipfiles dramatically. Most of those were added when TorchDynamo didn't automatically skip backends and supported much less of python, so likely many (most?) can be removed.
</issue>
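A rough sketch of the `find_spec` approach suggested above (a hedged illustration, not the repository's actual implementation; the helper name is invented):

```python
import importlib.util


def module_origin(import_name: str):
    """Return a module's source file location without importing it (hypothetical helper)."""
    spec = importlib.util.find_spec(import_name)  # locates the module but does not execute it
    if spec is None or spec.origin is None:
        return None  # not installed, or no single-file origin (e.g. a namespace package)
    return spec.origin


# For example, module_origin("pandas") yields the path to pandas/__init__.py
# without paying the import cost the issue measured.
```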
<code>
[start of torchdynamo/skipfiles.py]
1 import abc
2 import collections
3 import contextlib
4 import copy
5 import copyreg
6 import dataclasses
7 import enum
8 import functools
9 import importlib
10 import inspect
11 import linecache
12 import logging
13 import multiprocessing
14 import operator
15 import os
16 import posixpath
17 import random
18 import re
19 import selectors
20 import signal
21 import tempfile
22 import threading
23 import tokenize
24 import traceback
25 import types
26 import typing
27 import unittest
28 import weakref
29
30 import _collections_abc
31 import _weakrefset
32 import torch
33
34
35 def _module_dir(m: types.ModuleType):
36 return re.sub(r"__init__.py$", "", m.__file__)
37
38
39 SKIP_DIRS = [
40 # torch.*
41 _module_dir(torch),
42 # torchdynamo.*
43 os.path.dirname(__file__) + "/",
44 "<frozen importlib",
45 "<__array_function__ internals>",
46 ] + [
47 # skip some standard libs
48 _module_dir(m)
49 for m in (
50 abc,
51 collections,
52 contextlib,
53 copy,
54 copyreg,
55 dataclasses,
56 enum,
57 functools,
58 importlib,
59 inspect,
60 linecache,
61 logging,
62 multiprocessing,
63 operator,
64 os,
65 posixpath,
66 random,
67 re,
68 selectors,
69 signal,
70 tempfile,
71 threading,
72 tokenize,
73 traceback,
74 types,
75 typing,
76 unittest,
77 weakref,
78 _collections_abc,
79 _weakrefset,
80 )
81 ]
82 SKIP_DIRS_RE = None # set in add() below
83 FILENAME_ALLOWLIST = {
84 torch.nn.Sequential.__init__.__code__.co_filename,
85 }
86
87
88 def add(module: types.ModuleType):
89 assert isinstance(module, types.ModuleType)
90 global SKIP_DIRS_RE
91 name = module.__file__
92 if name is None:
93 return
94 SKIP_DIRS.append(_module_dir(module))
95 SKIP_DIRS_RE = re.compile(f"^({'|'.join(map(re.escape, SKIP_DIRS))})")
96
97
98 def check(filename, allow_torch=False):
99 """Should skip this file?"""
100 if filename is None:
101 return True
102 if filename in FILENAME_ALLOWLIST:
103 return False
104 if allow_torch and is_torch(filename):
105 return False
106 return bool(SKIP_DIRS_RE.match(filename))
107
108
109 # skip common third party libs
110 for _name in (
111 "functorch",
112 "intel_extension_for_pytorch",
113 "networkx",
114 "numpy",
115 "omegaconf",
116 "onnx",
117 "onnxruntime",
118 "onnx_tf",
119 "pandas",
120 "sklearn",
121 "tabulate",
122 "tensorflow",
123 "tensorrt",
124 "torch2trt",
125 "tqdm",
126 "tree",
127 "tvm",
128 "fx2trt_oss",
129 ):
130 try:
131 add(importlib.import_module(_name))
132 except (ImportError, TypeError):
133 pass
134
135
136 def is_torch_inline_allowed(filename):
137 return filename.startswith(_module_dir(torch.nn)) or filename.startswith(
138 _module_dir(torch.distributions)
139 )
140
141
142 def is_torch(filename):
143 return filename.startswith(_module_dir(torch))
144
[end of torchdynamo/skipfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchdynamo/skipfiles.py b/torchdynamo/skipfiles.py
--- a/torchdynamo/skipfiles.py
+++ b/torchdynamo/skipfiles.py
@@ -32,8 +32,12 @@
import torch
+def _strip_init_py(s):
+ return re.sub(r"__init__.py$", "", s)
+
+
def _module_dir(m: types.ModuleType):
- return re.sub(r"__init__.py$", "", m.__file__)
+ return _strip_init_py(m.__file__)
SKIP_DIRS = [
@@ -79,22 +83,32 @@
_weakrefset,
)
]
-SKIP_DIRS_RE = None # set in add() below
FILENAME_ALLOWLIST = {
torch.nn.Sequential.__init__.__code__.co_filename,
}
+SKIP_DIRS_RE = None
-def add(module: types.ModuleType):
- assert isinstance(module, types.ModuleType)
+def _recompile_re():
global SKIP_DIRS_RE
- name = module.__file__
- if name is None:
- return
- SKIP_DIRS.append(_module_dir(module))
SKIP_DIRS_RE = re.compile(f"^({'|'.join(map(re.escape, SKIP_DIRS))})")
+def add(import_name: str):
+ if isinstance(import_name, types.ModuleType):
+ return add(import_name.__name__)
+ assert isinstance(import_name, str)
+ module_spec = importlib.util.find_spec(import_name)
+ if not module_spec:
+ return
+ origin = module_spec.origin
+ if origin is None:
+ return
+ global SKIP_DIRS_RE
+ SKIP_DIRS.append(_strip_init_py(origin))
+ _recompile_re()
+
+
def check(filename, allow_torch=False):
"""Should skip this file?"""
if filename is None:
@@ -127,10 +141,9 @@
"tvm",
"fx2trt_oss",
):
- try:
- add(importlib.import_module(_name))
- except (ImportError, TypeError):
- pass
+ add(_name)
+
+_recompile_re()
def is_torch_inline_allowed(filename):
| {"golden_diff": "diff --git a/torchdynamo/skipfiles.py b/torchdynamo/skipfiles.py\n--- a/torchdynamo/skipfiles.py\n+++ b/torchdynamo/skipfiles.py\n@@ -32,8 +32,12 @@\n import torch\n \n \n+def _strip_init_py(s):\n+ return re.sub(r\"__init__.py$\", \"\", s)\n+\n+\n def _module_dir(m: types.ModuleType):\n- return re.sub(r\"__init__.py$\", \"\", m.__file__)\n+ return _strip_init_py(m.__file__)\n \n \n SKIP_DIRS = [\n@@ -79,22 +83,32 @@\n _weakrefset,\n )\n ]\n-SKIP_DIRS_RE = None # set in add() below\n FILENAME_ALLOWLIST = {\n torch.nn.Sequential.__init__.__code__.co_filename,\n }\n+SKIP_DIRS_RE = None\n \n \n-def add(module: types.ModuleType):\n- assert isinstance(module, types.ModuleType)\n+def _recompile_re():\n global SKIP_DIRS_RE\n- name = module.__file__\n- if name is None:\n- return\n- SKIP_DIRS.append(_module_dir(module))\n SKIP_DIRS_RE = re.compile(f\"^({'|'.join(map(re.escape, SKIP_DIRS))})\")\n \n \n+def add(import_name: str):\n+ if isinstance(import_name, types.ModuleType):\n+ return add(import_name.__name__)\n+ assert isinstance(import_name, str)\n+ module_spec = importlib.util.find_spec(import_name)\n+ if not module_spec:\n+ return\n+ origin = module_spec.origin\n+ if origin is None:\n+ return\n+ global SKIP_DIRS_RE\n+ SKIP_DIRS.append(_strip_init_py(origin))\n+ _recompile_re()\n+\n+\n def check(filename, allow_torch=False):\n \"\"\"Should skip this file?\"\"\"\n if filename is None:\n@@ -127,10 +141,9 @@\n \"tvm\",\n \"fx2trt_oss\",\n ):\n- try:\n- add(importlib.import_module(_name))\n- except (ImportError, TypeError):\n- pass\n+ add(_name)\n+\n+_recompile_re()\n \n \n def is_torch_inline_allowed(filename):\n", "issue": "Make torchdynamo not import third party package in `skipfiles.py`\n@xuzhao9 in https://github.com/facebookresearch/torchdynamo/issues/107#issuecomment-1095681515 found that the following line makes alexnet 18% slower: \r\n\r\nhttps://github.com/jansel/torchdynamo/blob/bf90b8cdbacf35944fa8c12185b1823dc5cb90bb/torchdynamo/skipfiles.py#L123\r\n\r\nIt seems importing: \"networkx\", \"omegaconf\", \"onnx\", \"pandas\", and \"sklearn\" cause performance issues.\r\n\r\nTorchDynamo is only importing these modules to find the filename, which is also a bit wasteful. We should rewrite `skipfiles.py` to use [find_spec](https://docs.python.org/3/library/importlib.html#importlib.abc.PathEntryFinder.find_spec) instead, so we don't need to import unused packages.\r\n\r\nAlso, I think we can cut down the list of modules in skipfiles dramatically. Most of those were added when TorchDynamo didn't automatically skip backends and supported much less of python, so likely many (most?) 
can be removed.\r\n\n", "before_files": [{"content": "import abc\nimport collections\nimport contextlib\nimport copy\nimport copyreg\nimport dataclasses\nimport enum\nimport functools\nimport importlib\nimport inspect\nimport linecache\nimport logging\nimport multiprocessing\nimport operator\nimport os\nimport posixpath\nimport random\nimport re\nimport selectors\nimport signal\nimport tempfile\nimport threading\nimport tokenize\nimport traceback\nimport types\nimport typing\nimport unittest\nimport weakref\n\nimport _collections_abc\nimport _weakrefset\nimport torch\n\n\ndef _module_dir(m: types.ModuleType):\n return re.sub(r\"__init__.py$\", \"\", m.__file__)\n\n\nSKIP_DIRS = [\n # torch.*\n _module_dir(torch),\n # torchdynamo.*\n os.path.dirname(__file__) + \"/\",\n \"<frozen importlib\",\n \"<__array_function__ internals>\",\n] + [\n # skip some standard libs\n _module_dir(m)\n for m in (\n abc,\n collections,\n contextlib,\n copy,\n copyreg,\n dataclasses,\n enum,\n functools,\n importlib,\n inspect,\n linecache,\n logging,\n multiprocessing,\n operator,\n os,\n posixpath,\n random,\n re,\n selectors,\n signal,\n tempfile,\n threading,\n tokenize,\n traceback,\n types,\n typing,\n unittest,\n weakref,\n _collections_abc,\n _weakrefset,\n )\n]\nSKIP_DIRS_RE = None # set in add() below\nFILENAME_ALLOWLIST = {\n torch.nn.Sequential.__init__.__code__.co_filename,\n}\n\n\ndef add(module: types.ModuleType):\n assert isinstance(module, types.ModuleType)\n global SKIP_DIRS_RE\n name = module.__file__\n if name is None:\n return\n SKIP_DIRS.append(_module_dir(module))\n SKIP_DIRS_RE = re.compile(f\"^({'|'.join(map(re.escape, SKIP_DIRS))})\")\n\n\ndef check(filename, allow_torch=False):\n \"\"\"Should skip this file?\"\"\"\n if filename is None:\n return True\n if filename in FILENAME_ALLOWLIST:\n return False\n if allow_torch and is_torch(filename):\n return False\n return bool(SKIP_DIRS_RE.match(filename))\n\n\n# skip common third party libs\nfor _name in (\n \"functorch\",\n \"intel_extension_for_pytorch\",\n \"networkx\",\n \"numpy\",\n \"omegaconf\",\n \"onnx\",\n \"onnxruntime\",\n \"onnx_tf\",\n \"pandas\",\n \"sklearn\",\n \"tabulate\",\n \"tensorflow\",\n \"tensorrt\",\n \"torch2trt\",\n \"tqdm\",\n \"tree\",\n \"tvm\",\n \"fx2trt_oss\",\n):\n try:\n add(importlib.import_module(_name))\n except (ImportError, TypeError):\n pass\n\n\ndef is_torch_inline_allowed(filename):\n return filename.startswith(_module_dir(torch.nn)) or filename.startswith(\n _module_dir(torch.distributions)\n )\n\n\ndef is_torch(filename):\n return filename.startswith(_module_dir(torch))\n", "path": "torchdynamo/skipfiles.py"}]} | 1,810 | 486 |
gh_patches_debug_31 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1456 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AWS::AutoScaling::AutoScalingGroup MaxInstanceLifetime Validation
*cfn-lint version: 0.29.2*
*Description of issue.*
When using the parameter `MaxInstanceLifetime` for `AWS::AutoScaling::AutoScalingGroup` we are hit with the following lint error:
```
$ cfn-lint templates/proj/rgs/rgs_autoscale_stretch_elb.yml
E3002 Invalid Property Resources/autoscalegroup/Properties/MaxInstanceLifetime
templates/proj/rgs/rgs_autoscale_stretch_elb.yml:194:7
```
The template which leads to the error:
```
[...]
autoscalegroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
AvailabilityZones: !Ref AvailabilityZones
Cooldown: '300'
HealthCheckGracePeriod: !Ref GracePeriod
HealthCheckType: ELB
MaxSize: !Ref MaxSize
MinSize: !Ref MinSize
MaxInstanceLifetime: !Ref MaxInstanceLifetime
VPCZoneIdentifier: !Ref EC2SubnetIDs
TargetGroupARNs:
- !Ref elbtargetgroup
LaunchConfigurationName: !Ref launchconfiguration
Tags: [...]
PropagateAtLaunch: true
TerminationPolicies:
- Default
[..]
```
It seems the parameter is currently not supported by cfn-lint; it would be cool to see support for it.
</issue>
<code>
[start of src/cfnlint/version.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 __version__ = '0.29.3'
7
[end of src/cfnlint/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py
--- a/src/cfnlint/version.py
+++ b/src/cfnlint/version.py
@@ -3,4 +3,4 @@
SPDX-License-Identifier: MIT-0
"""
-__version__ = '0.29.3'
+__version__ = '0.29.4'
| {"golden_diff": "diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py\n--- a/src/cfnlint/version.py\n+++ b/src/cfnlint/version.py\n@@ -3,4 +3,4 @@\n SPDX-License-Identifier: MIT-0\n \"\"\"\n \n-__version__ = '0.29.3'\n+__version__ = '0.29.4'\n", "issue": "AWS::AutoScaling::AutoScalingGroup MaxInstanceLifetime Validation\n*cfn-lint version: 0.29.2*\r\n\r\n*Description of issue.*\r\n\r\nWhen using the parameter `MaxInstanceLifetime` for `AWS::AutoScaling::AutoScalingGroup` we are hit with the following lint error:\r\n\r\n```\r\n$ cfn-lint templates/proj/rgs/rgs_autoscale_stretch_elb.yml\r\nE3002 Invalid Property Resources/autoscalegroup/Properties/MaxInstanceLifetime\r\ntemplates/proj/rgs/rgs_autoscale_stretch_elb.yml:194:7\r\n```\r\n\r\nThe template which leads to the error:\r\n\r\n```\r\n[...]\r\n\r\n autoscalegroup:\r\n Type: AWS::AutoScaling::AutoScalingGroup\r\n Properties:\r\n AvailabilityZones: !Ref AvailabilityZones\r\n Cooldown: '300'\r\n HealthCheckGracePeriod: !Ref GracePeriod\r\n HealthCheckType: ELB\r\n MaxSize: !Ref MaxSize\r\n MinSize: !Ref MinSize\r\n MaxInstanceLifetime: !Ref MaxInstanceLifetime\r\n VPCZoneIdentifier: !Ref EC2SubnetIDs\r\n TargetGroupARNs:\r\n - !Ref elbtargetgroup\r\n LaunchConfigurationName: !Ref launchconfiguration\r\n Tags: [...]\r\n PropagateAtLaunch: true\r\n TerminationPolicies:\r\n - Default\r\n\r\n[..]\r\n```\r\n\r\nIt seems the parameter is currently not supported by cfn-lint, would be cool to see support for it.\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.29.3'\n", "path": "src/cfnlint/version.py"}]} | 912 | 83 |
gh_patches_debug_55 | rasdani/github-patches | git_diff | emissary-ingress__emissary-23 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Users need statsd support
Ambassador needs to be able to send stats off to statsd, whatever statsd the user wants to use.
</issue>
<code>
[start of ambassador/VERSION.py]
1 # Don't change this line without also changing .bumpversion.cfg
2 Version = "0.5.0"
3
[end of ambassador/VERSION.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ambassador/VERSION.py b/ambassador/VERSION.py
--- a/ambassador/VERSION.py
+++ b/ambassador/VERSION.py
@@ -1,2 +1,2 @@
# Don't change this line without also changing .bumpversion.cfg
-Version = "0.5.0"
+Version = "0.5.1"
| {"golden_diff": "diff --git a/ambassador/VERSION.py b/ambassador/VERSION.py\n--- a/ambassador/VERSION.py\n+++ b/ambassador/VERSION.py\n@@ -1,2 +1,2 @@\n # Don't change this line without also changing .bumpversion.cfg\n-Version = \"0.5.0\"\n+Version = \"0.5.1\"\n", "issue": "Users need statsd support\nAmbassador needs to be able to send stats off to statsd, whatever statsd the user wants to use.\n", "before_files": [{"content": "# Don't change this line without also changing .bumpversion.cfg\nVersion = \"0.5.0\"\n", "path": "ambassador/VERSION.py"}]} | 590 | 81 |
gh_patches_debug_38263 | rasdani/github-patches | git_diff | microsoft__MLOS-573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't expose all params as shell environment variables by default
_Originally posted by @bpkroth in https://github.com/microsoft/MLOS/pull/557#discussion_r1374921396_
</issue>
<code>
[start of mlos_bench/mlos_bench/environments/script_env.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Base scriptable benchmark environment.
7 """
8
9 import abc
10 import logging
11 import re
12 from typing import Dict, Iterable, Optional
13
14 from mlos_bench.environments.base_environment import Environment
15 from mlos_bench.services.base_service import Service
16 from mlos_bench.tunables.tunable import TunableValue
17 from mlos_bench.tunables.tunable_groups import TunableGroups
18
19 from mlos_bench.util import try_parse_val
20
21 _LOG = logging.getLogger(__name__)
22
23
24 class ScriptEnv(Environment, metaclass=abc.ABCMeta):
25 """
26 Base Environment that runs scripts for setup/run/teardown.
27 """
28
29 _RE_INVALID = re.compile(r"[^a-zA-Z0-9_]")
30
31 def __init__(self,
32 *,
33 name: str,
34 config: dict,
35 global_config: Optional[dict] = None,
36 tunables: Optional[TunableGroups] = None,
37 service: Optional[Service] = None):
38 """
39 Create a new environment for script execution.
40
41 Parameters
42 ----------
43 name: str
44 Human-readable name of the environment.
45 config : dict
46 Free-format dictionary that contains the benchmark environment
47 configuration. Each config must have at least the `tunable_params`
48 and the `const_args` sections. It must also have at least one of
49 the following parameters: {`setup`, `run`, `teardown`}.
50 Additional parameters:
51 * `shell_env_params` - an array of parameters to pass to the script
52 as shell environment variables, and
53 * `shell_env_params_rename` - a dictionary of {to: from} mappings
54 of the script parameters. If not specified, replace all
55 non-alphanumeric characters with underscores.
56 If neither `shell_env_params` nor `shell_env_params_rename` are specified,
57 pass *all* parameters to the script.
58 global_config : dict
59 Free-format dictionary of global parameters (e.g., security credentials)
60 to be mixed in into the "const_args" section of the local config.
61 tunables : TunableGroups
62 A collection of tunable parameters for *all* environments.
63 service: Service
64 An optional service object (e.g., providing methods to
65 deploy or reboot a VM, etc.).
66 """
67 super().__init__(name=name, config=config, global_config=global_config,
68 tunables=tunables, service=service)
69
70 self._script_setup = self.config.get("setup")
71 self._script_run = self.config.get("run")
72 self._script_teardown = self.config.get("teardown")
73
74 self._shell_env_params: Optional[Iterable[str]] = self.config.get("shell_env_params")
75 self._shell_env_params_rename: Dict[str, str] = self.config.get("shell_env_params_rename", {})
76
77 results_stdout_pattern = self.config.get("results_stdout_pattern")
78 self._results_stdout_pattern: Optional[re.Pattern[str]] = \
79 re.compile(results_stdout_pattern) if results_stdout_pattern else None
80
81 def _get_env_params(self) -> Dict[str, str]:
82 """
83 Get the *shell* environment parameters to be passed to the script.
84
85 Returns
86 -------
87 env_params : Dict[str, str]
88 Parameters to pass as *shell* environment variables into the script.
89 This is usually a subset of `_params` with some possible conversions.
90 """
91 rename: Dict[str, str] # {to: from} mapping of the script parameters.
92 if self._shell_env_params is None:
93 if self._shell_env_params_rename:
94 # Only rename specified - use it.
95 rename = self._shell_env_params_rename.copy()
96 else:
97 # FIXME: We should not be exposing all params by default.
98 # Neither `shell_env_params` nor rename are specified - use all params.
99 rename = {self._RE_INVALID.sub("_", key): key for key in self._params}
100 else:
101 # Use `shell_env_params` and rename if specified.
102 rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
103 rename.update(self._shell_env_params_rename)
104
105 return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}
106
107 def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:
108 """
109 Extract the results from the stdout of the script.
110
111 Parameters
112 ----------
113 stdout : str
114 The stdout of the script.
115
116 Returns
117 -------
118 results : Dict[str, TunableValue]
119 A dictionary of results extracted from the stdout.
120 """
121 if not self._results_stdout_pattern:
122 return {}
123 _LOG.debug("Extract regex: '%s' from: '%s'", self._results_stdout_pattern, stdout)
124 return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}
125
[end of mlos_bench/mlos_bench/environments/script_env.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlos_bench/mlos_bench/environments/script_env.py b/mlos_bench/mlos_bench/environments/script_env.py
--- a/mlos_bench/mlos_bench/environments/script_env.py
+++ b/mlos_bench/mlos_bench/environments/script_env.py
@@ -54,7 +54,7 @@
of the script parameters. If not specified, replace all
non-alphanumeric characters with underscores.
If neither `shell_env_params` nor `shell_env_params_rename` are specified,
- pass *all* parameters to the script.
+ *no* additional shell parameters will be passed to the script.
global_config : dict
Free-format dictionary of global parameters (e.g., security credentials)
to be mixed in into the "const_args" section of the local config.
@@ -71,7 +71,7 @@
self._script_run = self.config.get("run")
self._script_teardown = self.config.get("teardown")
- self._shell_env_params: Optional[Iterable[str]] = self.config.get("shell_env_params")
+ self._shell_env_params: Iterable[str] = self.config.get("shell_env_params", [])
self._shell_env_params_rename: Dict[str, str] = self.config.get("shell_env_params_rename", {})
results_stdout_pattern = self.config.get("results_stdout_pattern")
@@ -88,20 +88,8 @@
Parameters to pass as *shell* environment variables into the script.
This is usually a subset of `_params` with some possible conversions.
"""
- rename: Dict[str, str] # {to: from} mapping of the script parameters.
- if self._shell_env_params is None:
- if self._shell_env_params_rename:
- # Only rename specified - use it.
- rename = self._shell_env_params_rename.copy()
- else:
- # FIXME: We should not be exposing all params by default.
- # Neither `shell_env_params` nor rename are specified - use all params.
- rename = {self._RE_INVALID.sub("_", key): key for key in self._params}
- else:
- # Use `shell_env_params` and rename if specified.
- rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
- rename.update(self._shell_env_params_rename)
-
+ rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
+ rename.update(self._shell_env_params_rename)
return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}
def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:
| {"golden_diff": "diff --git a/mlos_bench/mlos_bench/environments/script_env.py b/mlos_bench/mlos_bench/environments/script_env.py\n--- a/mlos_bench/mlos_bench/environments/script_env.py\n+++ b/mlos_bench/mlos_bench/environments/script_env.py\n@@ -54,7 +54,7 @@\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n- pass *all* parameters to the script.\n+ *no* additional shell parameters will be passed to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n@@ -71,7 +71,7 @@\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n \n- self._shell_env_params: Optional[Iterable[str]] = self.config.get(\"shell_env_params\")\n+ self._shell_env_params: Iterable[str] = self.config.get(\"shell_env_params\", [])\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n \n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n@@ -88,20 +88,8 @@\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n- rename: Dict[str, str] # {to: from} mapping of the script parameters.\n- if self._shell_env_params is None:\n- if self._shell_env_params_rename:\n- # Only rename specified - use it.\n- rename = self._shell_env_params_rename.copy()\n- else:\n- # FIXME: We should not be exposing all params by default.\n- # Neither `shell_env_params` nor rename are specified - use all params.\n- rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._params}\n- else:\n- # Use `shell_env_params` and rename if specified.\n- rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n- rename.update(self._shell_env_params_rename)\n-\n+ rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n+ rename.update(self._shell_env_params_rename)\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n \n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n", "issue": "Don't expose all params as shell environment variables by default\n_Originally posted by @bpkroth in https://github.com/microsoft/MLOS/pull/557#discussion_r1374921396_\r\n \n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nBase scriptable benchmark environment.\n\"\"\"\n\nimport abc\nimport logging\nimport re\nfrom typing import Dict, Iterable, Optional\n\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\nfrom mlos_bench.util import try_parse_val\n\n_LOG = logging.getLogger(__name__)\n\n\nclass ScriptEnv(Environment, metaclass=abc.ABCMeta):\n \"\"\"\n Base Environment that runs scripts for setup/run/teardown.\n \"\"\"\n\n _RE_INVALID = re.compile(r\"[^a-zA-Z0-9_]\")\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for script execution.\n\n Parameters\n ----------\n name: str\n 
Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. Each config must have at least the `tunable_params`\n and the `const_args` sections. It must also have at least one of\n the following parameters: {`setup`, `run`, `teardown`}.\n Additional parameters:\n * `shell_env_params` - an array of parameters to pass to the script\n as shell environment variables, and\n * `shell_env_params_rename` - a dictionary of {to: from} mappings\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n pass *all* parameters to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n self._script_setup = self.config.get(\"setup\")\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n\n self._shell_env_params: Optional[Iterable[str]] = self.config.get(\"shell_env_params\")\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n\n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n self._results_stdout_pattern: Optional[re.Pattern[str]] = \\\n re.compile(results_stdout_pattern) if results_stdout_pattern else None\n\n def _get_env_params(self) -> Dict[str, str]:\n \"\"\"\n Get the *shell* environment parameters to be passed to the script.\n\n Returns\n -------\n env_params : Dict[str, str]\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n rename: Dict[str, str] # {to: from} mapping of the script parameters.\n if self._shell_env_params is None:\n if self._shell_env_params_rename:\n # Only rename specified - use it.\n rename = self._shell_env_params_rename.copy()\n else:\n # FIXME: We should not be exposing all params by default.\n # Neither `shell_env_params` nor rename are specified - use all params.\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._params}\n else:\n # Use `shell_env_params` and rename if specified.\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n rename.update(self._shell_env_params_rename)\n\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n\n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n \"\"\"\n Extract the results from the stdout of the script.\n\n Parameters\n ----------\n stdout : str\n The stdout of the script.\n\n Returns\n -------\n results : Dict[str, TunableValue]\n A dictionary of results extracted from the stdout.\n \"\"\"\n if not self._results_stdout_pattern:\n return {}\n _LOG.debug(\"Extract regex: '%s' from: '%s'\", self._results_stdout_pattern, stdout)\n return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}\n", "path": "mlos_bench/mlos_bench/environments/script_env.py"}]} | 1,938 | 600 |
gh_patches_debug_7557 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ModuleNotFoundError: No module named 'plasmapy.classes' on plasmapy import
On importing freshly installed plasmapy into a new environment:
(plasmapy) [~]$ python
Python 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import plasmapy
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/dominik/.anaconda3/envs/plasmapy/lib/python3.6/site-packages/plasmapy/__init__.py", line 8, in <module>
from .classes import Plasma
ModuleNotFoundError: No module named 'plasmapy.classes'
The goal of this one is being able to import plasmapy. At all.
The issue likely lies in `plasmapy/__init__.py`.
To quote @cadair's words of encouragement on this bugfixing journey, *packaging is a special kind of hell*. 
</issue>
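On the packaging point the issue hints at, a common setuptools pattern is to discover subpackages automatically rather than listing only the top-level package; the following is an illustrative sketch under that assumption, not necessarily the project's eventual fix:

```python
from setuptools import setup, find_packages

setup(
    name="plasmapy",
    packages=find_packages(),  # also collects subpackages such as plasmapy.classes
    # ... remaining metadata as in the project's setup.py ...
)
```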
<code>
[start of setup.py]
1 from setuptools import setup
2
3
4 # Package metadata
5 metadata = {}
6 with open('plasmapy/_metadata.py', 'r') as metadata_file:
7 exec(metadata_file.read(), metadata)
8
9 # Requirements
10 with open('requirements/base.txt', 'r') as req_file:
11 requirements = req_file.read().splitlines()
12
13 setup(name=metadata['name'],
14 version=metadata['version'],
15 description="Python package for plasma physics",
16 requires=requirements,
17 install_requires=requirements,
18 provides=[metadata['name']],
19 author=metadata['author'],
20 author_email="[email protected]", # until we get an email address
21 license="BSD",
22 url="https://github.com/PlasmaPy/PlasmaPy", # until we make a webpage
23 long_description=metadata['description'],
24 keywords=['plasma', 'plasma physics', 'science'],
25 classifiers=[
26 'Intended Audience :: Science/Research',
27 'License :: OSI Approved :: BSD License',
28 'Operating System :: OS Independent',
29 'Programming Language :: Python :: 3 :: Only',
30 'Programming Language :: Python :: 3.6',
31 'Topic :: Scientific/Engineering :: Physics',
32 'Topic :: Scientific/Engineering :: Astronomy',
33 'Development Status :: 2 - Pre-Alpha',
34 ],
35 packages=["plasmapy"],
36 zip_safe=False,
37 use_2to3=False,
38 python_requires='>=3.6',
39 )
40
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-from setuptools import setup
+from setuptools import setup, find_packages
# Package metadata
@@ -32,7 +32,7 @@
'Topic :: Scientific/Engineering :: Astronomy',
'Development Status :: 2 - Pre-Alpha',
],
- packages=["plasmapy"],
+ packages=find_packages(),
zip_safe=False,
use_2to3=False,
python_requires='>=3.6',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,4 @@\n-from setuptools import setup\n+from setuptools import setup, find_packages\n \n \n # Package metadata\n@@ -32,7 +32,7 @@\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Development Status :: 2 - Pre-Alpha',\n ],\n- packages=[\"plasmapy\"],\n+ packages=find_packages(),\n zip_safe=False,\n use_2to3=False,\n python_requires='>=3.6',\n", "issue": "ModuleNotFoundError: No module named 'plasmapy.classes' on plasmapy import\nOn importing freshly installed plasmapy into a new environment:\r\n\r\n (plasmapy) [~]$ python\r\n Python 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32) \r\n [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux\r\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n >>> import plasmapy\r\n Traceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/dominik/.anaconda3/envs/plasmapy/lib/python3.6/site-packages/plasmapy/__init__.py\", line 8, in <module>\r\n from .classes import Plasma\r\n ModuleNotFoundError: No module named 'plasmapy.classes'\r\n\r\nThe goal of this one is being able to import plasmapy. At all.\r\n\r\nThe issue likely lies in `plasmapy/__init__.py`. \r\n\r\nTo quote @cadair 's words of encouragement on this bugfixing journey, *packaging is a special kind of hell*. \n", "before_files": [{"content": "from setuptools import setup\n\n\n# Package metadata\nmetadata = {}\nwith open('plasmapy/_metadata.py', 'r') as metadata_file:\n exec(metadata_file.read(), metadata)\n\n# Requirements\nwith open('requirements/base.txt', 'r') as req_file:\n requirements = req_file.read().splitlines()\n\nsetup(name=metadata['name'],\n version=metadata['version'],\n description=\"Python package for plasma physics\",\n requires=requirements,\n install_requires=requirements,\n provides=[metadata['name']],\n author=metadata['author'],\n author_email=\"[email protected]\", # until we get an email address\n license=\"BSD\",\n url=\"https://github.com/PlasmaPy/PlasmaPy\", # until we make a webpage\n long_description=metadata['description'],\n keywords=['plasma', 'plasma physics', 'science'],\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Development Status :: 2 - Pre-Alpha',\n ],\n packages=[\"plasmapy\"],\n zip_safe=False,\n use_2to3=False,\n python_requires='>=3.6',\n )\n", "path": "setup.py"}]} | 1,202 | 123 |
gh_patches_debug_14227 | rasdani/github-patches | git_diff | castorini__pyserini-1626 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error for SPLADE on-the-fly encoding with pytorch
command used:
```bash
python -m pyserini.search.lucene --threads 12 --batch-size 128 \
--index msmarco-v1-passage-splade-pp-ed \
--topics msmarco-passage-dev-subset \
--encoder naver/splade-cocondenser-ensembledistil \
--output run.msmarco-v1-passage.splade-pp-ed-pytorch.dev.txt \
--hits 1000 --impact
```
error message:
> ...
> File "/home/arthur/workplace/pyserini/pyserini/encode/_splade.py", line 28, in encode
> raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
> NameError: name 'batch_token_ids' is not defined
</issue>
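Judging from the encoder code included below, the failing line references names that `encode` never defines; a hedged guess at the intended call, reusing the tensor that is actually computed, looks like this fragment:

```python
# Sketch of the tail of SpladeQueryEncoder.encode (fragment, for illustration only):
batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()
raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)
return self._get_encoded_query_token_wight_dicts(raw_weights)[0]
```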
<code>
[start of pyserini/encode/_splade.py]
1 import torch
2 from transformers import AutoModelForMaskedLM, AutoTokenizer
3 import numpy as np
4
5 from pyserini.encode import QueryEncoder
6
7
8 class SpladeQueryEncoder(QueryEncoder):
9 def __init__(self, model_name_or_path, tokenizer_name=None, device='cpu'):
10 self.device = device
11 self.model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)
12 self.model.to(self.device)
13 self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name or model_name_or_path)
14 self.reverse_voc = {v: k for k, v in self.tokenizer.vocab.items()}
15 self.weight_range = 5
16 self.quant_range = 256
17
18 def encode(self, text, max_length=256, **kwargs):
19 inputs = self.tokenizer([text], max_length=max_length, padding='longest',
20 truncation=True, add_special_tokens=True,
21 return_tensors='pt').to(self.device)
22 input_ids = inputs['input_ids']
23 input_attention = inputs['attention_mask']
24 batch_logits = self.model(input_ids)['logits']
25 batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))
26 * input_attention.unsqueeze(-1), dim=1)
27 batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()
28 raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
29 return self._get_encoded_query_token_wight_dicts(raw_weights)[0]
30
31 def _output_to_weight_dicts(self, batch_aggregated_logits):
32 to_return = []
33 for aggregated_logits in batch_aggregated_logits:
34 col = np.nonzero(aggregated_logits)[0]
35 weights = aggregated_logits[col]
36 d = {self.reverse_voc[k]: float(v) for k, v in zip(list(col), list(weights))}
37 to_return.append(d)
38 return to_return
39
40 def _get_encoded_query_token_wight_dicts(self, tok_weights):
41 to_return = []
42 for _tok_weight in tok_weights:
43 _weights = {}
44 for token, weight in _tok_weight.items():
45 weight_quanted = round(weight / self.weight_range * self.quant_range)
46 _weights[token] = weight_quanted
47 to_return.append(_weights)
48 return to_return
49
[end of pyserini/encode/_splade.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyserini/encode/_splade.py b/pyserini/encode/_splade.py
--- a/pyserini/encode/_splade.py
+++ b/pyserini/encode/_splade.py
@@ -25,7 +25,7 @@
batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))
* input_attention.unsqueeze(-1), dim=1)
batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()
- raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
+ raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)
return self._get_encoded_query_token_wight_dicts(raw_weights)[0]
def _output_to_weight_dicts(self, batch_aggregated_logits):
| {"golden_diff": "diff --git a/pyserini/encode/_splade.py b/pyserini/encode/_splade.py\n--- a/pyserini/encode/_splade.py\n+++ b/pyserini/encode/_splade.py\n@@ -25,7 +25,7 @@\n batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))\n * input_attention.unsqueeze(-1), dim=1)\n batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()\n- raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)\n+ raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)\n return self._get_encoded_query_token_wight_dicts(raw_weights)[0]\n \n def _output_to_weight_dicts(self, batch_aggregated_logits):\n", "issue": "Error for SPLADE on-the-fly encoding with pytorch \ncommand used:\r\n```bash\r\npython -m pyserini.search.lucene --threads 12 --batch-size 128 \\\r\n --index msmarco-v1-passage-splade-pp-ed \\\r\n --topics msmarco-passage-dev-subset \\\r\n --encoder naver/splade-cocondenser-ensembledistil \\\r\n --output run.msmarco-v1-passage.splade-pp-ed-pytorch.dev.txt \\\r\n --hits 1000 --impact\r\n```\r\n\r\nerror message:\r\n> ...\r\n> File \"/home/arthur/workplace/pyserini/pyserini/encode/_splade.py\", line 28, in encode\r\n> raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)\r\n> NameError: name 'batch_token_ids' is not defined\r\n\n", "before_files": [{"content": "import torch\nfrom transformers import AutoModelForMaskedLM, AutoTokenizer\nimport numpy as np\n\nfrom pyserini.encode import QueryEncoder\n\n\nclass SpladeQueryEncoder(QueryEncoder):\n def __init__(self, model_name_or_path, tokenizer_name=None, device='cpu'):\n self.device = device\n self.model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n self.model.to(self.device)\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name or model_name_or_path)\n self.reverse_voc = {v: k for k, v in self.tokenizer.vocab.items()}\n self.weight_range = 5\n self.quant_range = 256\n\n def encode(self, text, max_length=256, **kwargs):\n inputs = self.tokenizer([text], max_length=max_length, padding='longest',\n truncation=True, add_special_tokens=True,\n return_tensors='pt').to(self.device)\n input_ids = inputs['input_ids']\n input_attention = inputs['attention_mask']\n batch_logits = self.model(input_ids)['logits']\n batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))\n * input_attention.unsqueeze(-1), dim=1)\n batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()\n raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)\n return self._get_encoded_query_token_wight_dicts(raw_weights)[0]\n\n def _output_to_weight_dicts(self, batch_aggregated_logits):\n to_return = []\n for aggregated_logits in batch_aggregated_logits:\n col = np.nonzero(aggregated_logits)[0]\n weights = aggregated_logits[col]\n d = {self.reverse_voc[k]: float(v) for k, v in zip(list(col), list(weights))}\n to_return.append(d)\n return to_return\n\n def _get_encoded_query_token_wight_dicts(self, tok_weights):\n to_return = []\n for _tok_weight in tok_weights:\n _weights = {}\n for token, weight in _tok_weight.items():\n weight_quanted = round(weight / self.weight_range * self.quant_range)\n _weights[token] = weight_quanted\n to_return.append(_weights)\n return to_return\n", "path": "pyserini/encode/_splade.py"}]} | 1,311 | 174 |
gh_patches_debug_24023 | rasdani/github-patches | git_diff | apache__airflow-13371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AirflowMacroPluginRemovedRule fails on non-python files
**Apache Airflow version**: 1.10.14
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):
**Environment**:
- **Cloud provider or hardware configuration**: X
- **OS** (e.g. from /etc/os-release): X
- **Kernel** (e.g. `uname -a`): X
- **Install tools**: X
- **Others**: X
**What happened**:
The `AirflowMacroPluginRemovedRule` seems unable to process non-standard python files (e.g. `.xlsx`) and chokes out with an unhelpful error message:
```python
========================================================================================================================================================== STATUS ==========================================================================================================================================================
Check for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow", line 37, in <module>
args.func(args)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 88, in run
all_problems = check_upgrade(formatter, rules)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 37, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 52, in check
problems.extend(self._check_file(file_path))
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 42, in _check_file
for line_number, line in enumerate(file_pointer, 1):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte
```
**What you expected to happen**:
I expected the macro to skip over files it could not process/understand
**How to reproduce it**:
Add an `.xlsx` or other binary document to the DAGs folder and run the upgrade check.
**Suggested resolution**:
I think it's fine to fail out on these files (it led us to add certain items to the `.airflowignore` which should have been there anyway), but I had to modify the upgrade rule directly to tell me _which_ files were the problem. A more helpful error message here, and possibly a message prompting users to add said files to their `.airflowignore`, would be ideal.
</issue>
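One way to express the reporter's suggested resolution (skip or clearly flag files the rule cannot decode) is sketched below; it assumes the `_check_file` structure shown in the code that follows and is illustrative rather than the project's chosen fix:

```python
def _check_file(self, file_path):
    problems = []
    class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
    try:
        with open(file_path, "r") as file_pointer:
            for line_number, line in enumerate(file_pointer, 1):
                if class_name_to_check in line:
                    problems.append(self._change_info(file_path, line_number))
    except UnicodeDecodeError:
        # Name the offending file instead of aborting the whole upgrade check.
        problems.append("Unable to read file {}: not a text file?".format(file_path))
    return problems
```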
<code>
[start of airflow/upgrade/rules/airflow_macro_plugin_removed.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 from __future__ import absolute_import
19
20 from airflow import conf
21 from airflow.upgrade.rules.base_rule import BaseRule
22 from airflow.utils.dag_processing import list_py_file_paths
23
24
25 class AirflowMacroPluginRemovedRule(BaseRule):
26
27 title = "Remove airflow.AirflowMacroPlugin class"
28
29 description = "The airflow.AirflowMacroPlugin class has been removed."
30
31 MACRO_PLUGIN_CLASS = "airflow.AirflowMacroPlugin"
32
33 def _change_info(self, file_path, line_number):
34 return "{} will be removed. Affected file: {} (line {})".format(
35 self.MACRO_PLUGIN_CLASS, file_path, line_number
36 )
37
38 def _check_file(self, file_path):
39 problems = []
40 class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
41 with open(file_path, "r") as file_pointer:
42 for line_number, line in enumerate(file_pointer, 1):
43 if class_name_to_check in line:
44 problems.append(self._change_info(file_path, line_number))
45 return problems
46
47 def check(self):
48 dag_folder = conf.get("core", "dags_folder")
49 file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
50 problems = []
51 for file_path in file_paths:
52 problems.extend(self._check_file(file_path))
53 return problems
54
[end of airflow/upgrade/rules/airflow_macro_plugin_removed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/upgrade/rules/airflow_macro_plugin_removed.py b/airflow/upgrade/rules/airflow_macro_plugin_removed.py
--- a/airflow/upgrade/rules/airflow_macro_plugin_removed.py
+++ b/airflow/upgrade/rules/airflow_macro_plugin_removed.py
@@ -39,9 +39,12 @@
problems = []
class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
with open(file_path, "r") as file_pointer:
- for line_number, line in enumerate(file_pointer, 1):
- if class_name_to_check in line:
- problems.append(self._change_info(file_path, line_number))
+ try:
+ for line_number, line in enumerate(file_pointer, 1):
+ if class_name_to_check in line:
+ problems.append(self._change_info(file_path, line_number))
+ except UnicodeDecodeError:
+ problems.append("Unable to read python file {}".format(file_path))
return problems
def check(self):
@@ -49,5 +52,7 @@
file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
problems = []
for file_path in file_paths:
+ if not file_path.endswith(".py"):
+ continue
problems.extend(self._check_file(file_path))
return problems
| {"golden_diff": "diff --git a/airflow/upgrade/rules/airflow_macro_plugin_removed.py b/airflow/upgrade/rules/airflow_macro_plugin_removed.py\n--- a/airflow/upgrade/rules/airflow_macro_plugin_removed.py\n+++ b/airflow/upgrade/rules/airflow_macro_plugin_removed.py\n@@ -39,9 +39,12 @@\n problems = []\n class_name_to_check = self.MACRO_PLUGIN_CLASS.split(\".\")[-1]\n with open(file_path, \"r\") as file_pointer:\n- for line_number, line in enumerate(file_pointer, 1):\n- if class_name_to_check in line:\n- problems.append(self._change_info(file_path, line_number))\n+ try:\n+ for line_number, line in enumerate(file_pointer, 1):\n+ if class_name_to_check in line:\n+ problems.append(self._change_info(file_path, line_number))\n+ except UnicodeDecodeError:\n+ problems.append(\"Unable to read python file {}\".format(file_path))\n return problems\n \n def check(self):\n@@ -49,5 +52,7 @@\n file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)\n problems = []\n for file_path in file_paths:\n+ if not file_path.endswith(\".py\"):\n+ continue\n problems.extend(self._check_file(file_path))\n return problems\n", "issue": "AirflowMacroPluginRemovedRule fails on non-python files\n**Apache Airflow version**: 1.10.14\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):\r\n\r\n**Environment**:\r\n\r\n- **Cloud provider or hardware configuration**: X\r\n- **OS** (e.g. from /etc/os-release): X\r\n- **Kernel** (e.g. `uname -a`): X\r\n- **Install tools**: X\r\n- **Others**: X\r\n\r\n**What happened**:\r\n\r\nThe `AirflowMacroPluginRemovedRule` seems unable to process non-standard python files (e.g. `.xlsx`) and chokes out with an unhelpful error message.:\r\n\r\n```python\r\n========================================================================================================================================================== STATUS ==========================================================================================================================================================\r\n\r\nCheck for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS\r\nTraceback (most recent call last):\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py\", line 88, in run\r\n all_problems = check_upgrade(formatter, rules)\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py\", line 37, in check_upgrade\r\n rule_status = RuleStatus.from_rule(rule)\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py\", line 44, in from_rule\r\n result = rule.check()\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py\", line 52, in check\r\n problems.extend(self._check_file(file_path))\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py\", line 42, in _check_file\r\n for line_number, line in enumerate(file_pointer, 1):\r\n File 
\"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py\", line 322, in decode\r\n (result, consumed) = self._buffer_decode(data, self.errors, final)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte\r\n```\r\n\r\n**What you expected to happen**:\r\n\r\nI expected the macro to skip over files it could not process/understand\r\n\r\n**How to reproduce it**:\r\n\r\nAdd an `.xlsx` or other binary document to the DAGs folder and run the upgrade check.\r\n\r\n\r\n**Suggested resolution**:\r\n\r\nI think it's fine to fail out on these files (it led us to add certain items to the `.airflowignore` which should have been there anyway) but I had to modify the upgrade rule directly to tell me _which_ files were the problem. A more helpful error message here, and possibly a message prompting users to add said files to their `.airflowignore` would be ideal.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\n\nfrom airflow import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.dag_processing import list_py_file_paths\n\n\nclass AirflowMacroPluginRemovedRule(BaseRule):\n\n title = \"Remove airflow.AirflowMacroPlugin class\"\n\n description = \"The airflow.AirflowMacroPlugin class has been removed.\"\n\n MACRO_PLUGIN_CLASS = \"airflow.AirflowMacroPlugin\"\n\n def _change_info(self, file_path, line_number):\n return \"{} will be removed. Affected file: {} (line {})\".format(\n self.MACRO_PLUGIN_CLASS, file_path, line_number\n )\n\n def _check_file(self, file_path):\n problems = []\n class_name_to_check = self.MACRO_PLUGIN_CLASS.split(\".\")[-1]\n with open(file_path, \"r\") as file_pointer:\n for line_number, line in enumerate(file_pointer, 1):\n if class_name_to_check in line:\n problems.append(self._change_info(file_path, line_number))\n return problems\n\n def check(self):\n dag_folder = conf.get(\"core\", \"dags_folder\")\n file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)\n problems = []\n for file_path in file_paths:\n problems.extend(self._check_file(file_path))\n return problems\n", "path": "airflow/upgrade/rules/airflow_macro_plugin_removed.py"}]} | 1,849 | 294 |
gh_patches_debug_9399 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bplan template dates saved but not shown in Dashboard
URL: https://mein.berlin.de/dashboard/projects/erweiterung-mauerpark-bebauungsplan-3-64-im-bezirk/bplan/
user: initiator
expected behaviour: the date and time I entered are still shown after saving the form
behaviour: the dates are no longer shown after saving; there is no error message; I can still publish the project, and the date is shown correctly on the project tile
device & browser: desktop, Mac, Chrome version 76.0.3809.132 (Official Build) (64-bit)
Importance: relevant bug, fix before next release
</issue>
<code>
[start of meinberlin/apps/bplan/forms.py]
1 from django import forms
2
3 from meinberlin.apps.extprojects.forms import ExternalProjectCreateForm
4 from meinberlin.apps.extprojects.forms import ExternalProjectForm
5
6 from . import models
7
8
9 class StatementForm(forms.ModelForm):
10 class Meta:
11 model = models.Statement
12 fields = ['name', 'email', 'statement',
13 'street_number', 'postal_code_city']
14
15
16 class BplanProjectCreateForm(ExternalProjectCreateForm):
17
18 class Meta:
19 model = models.Bplan
20 fields = ['name', 'description', 'tile_image', 'tile_image_copyright']
21
22
23 class BplanProjectForm(ExternalProjectForm):
24
25 class Meta:
26 model = models.Bplan
27 fields = ['name', 'identifier', 'url', 'description', 'tile_image',
28 'tile_image_copyright', 'is_archived', 'office_worker_email']
29 required_for_project_publish = ['name', 'url', 'description',
30 'office_worker_email']
31
[end of meinberlin/apps/bplan/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py
--- a/meinberlin/apps/bplan/forms.py
+++ b/meinberlin/apps/bplan/forms.py
@@ -25,6 +25,7 @@
class Meta:
model = models.Bplan
fields = ['name', 'identifier', 'url', 'description', 'tile_image',
- 'tile_image_copyright', 'is_archived', 'office_worker_email']
+ 'tile_image_copyright', 'is_archived', 'office_worker_email',
+ 'start_date', 'end_date']
required_for_project_publish = ['name', 'url', 'description',
'office_worker_email']
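For readability, the field list that results from the change above would look as follows (assembled from the diff, not copied from the repository). A Django `ModelForm` only renders and saves the fields named in `Meta.fields`, which is why the dashboard form dropped the dates before `start_date` and `end_date` were listed here:

```python
class BplanProjectForm(ExternalProjectForm):

    class Meta:
        model = models.Bplan
        fields = ['name', 'identifier', 'url', 'description', 'tile_image',
                  'tile_image_copyright', 'is_archived', 'office_worker_email',
                  'start_date', 'end_date']
        required_for_project_publish = ['name', 'url', 'description',
                                        'office_worker_email']
```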
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py\n--- a/meinberlin/apps/bplan/forms.py\n+++ b/meinberlin/apps/bplan/forms.py\n@@ -25,6 +25,7 @@\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n- 'tile_image_copyright', 'is_archived', 'office_worker_email']\n+ 'tile_image_copyright', 'is_archived', 'office_worker_email',\n+ 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n", "issue": "bplan template dates saved but not shown in Dashboard\nURL: https://mein.berlin.de/dashboard/projects/erweiterung-mauerpark-bebauungsplan-3-64-im-bezirk/bplan/\r\nuser: initiator\r\nexpected behaviour: date and time that I have entered are still shown after saving form\r\nbehaviour: dates are no longer shown after saving, no error message, I can still publish the project and date is shown correctly on project tile\r\ndevice & browser: Desktop, mac, chrome Version 76.0.3809.132 (Offizieller Build) (64-Bit)\r\nImportance: relevant bug, fix before next release\n", "before_files": [{"content": "from django import forms\n\nfrom meinberlin.apps.extprojects.forms import ExternalProjectCreateForm\nfrom meinberlin.apps.extprojects.forms import ExternalProjectForm\n\nfrom . import models\n\n\nclass StatementForm(forms.ModelForm):\n class Meta:\n model = models.Statement\n fields = ['name', 'email', 'statement',\n 'street_number', 'postal_code_city']\n\n\nclass BplanProjectCreateForm(ExternalProjectCreateForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'description', 'tile_image', 'tile_image_copyright']\n\n\nclass BplanProjectForm(ExternalProjectForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived', 'office_worker_email']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n", "path": "meinberlin/apps/bplan/forms.py"}]} | 946 | 158 |
gh_patches_debug_62942 | rasdani/github-patches | git_diff | great-expectations__great_expectations-3803 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use a cleaner solution for non-truncating division in Python 2
Prefer `from __future__ import division` to `1.*x/y`
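A quick illustration of the difference under Python 2 semantics (this snippet is not from the repository):

```python
from __future__ import division  # must appear at the top of the module

x, y = 3, 2
print(x / y)    # 1.5 -> true division; no need for the 1. * x / y workaround
print(x // y)   # 1   -> floor division is still available where truncation is wanted
# without the __future__ import, x / y evaluates to 1 for two ints on Python 2
```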
</issue>
<code>
[start of great_expectations/core/usage_statistics/anonymizers/anonymizer.py]
1 import logging
2 from hashlib import md5
3 from typing import Optional
4
5 from great_expectations.util import load_class
6
7 logger = logging.getLogger(__name__)
8
9
10 class Anonymizer:
11 """Anonymize string names in an optionally-consistent way."""
12
13 def __init__(self, salt=None):
14 if salt is not None and not isinstance(salt, str):
15 logger.error("invalid salt: must provide a string. Setting a random salt.")
16 salt = None
17 if salt is None:
18 import secrets
19
20 self._salt = secrets.token_hex(8)
21 else:
22 self._salt = salt
23
24 @property
25 def salt(self):
26 return self._salt
27
28 def anonymize(self, string_):
29 if string_ is None:
30 return None
31
32 if not isinstance(string_, str):
33 raise TypeError(
34 f"""The type of the "string_" argument must be a string (Python "str"). The type given is
35 "{str(type(string_))}", which is illegal.
36 """
37 )
38 salted = self._salt + string_
39 return md5(salted.encode("utf-8")).hexdigest()
40
41 def anonymize_object_info(
42 self,
43 anonymized_info_dict,
44 ge_classes,
45 object_=None,
46 object_class=None,
47 object_config=None,
48 runtime_environment=None,
49 ) -> dict:
50 assert (
51 object_ or object_class or object_config
52 ), "Must pass either object_ or object_class or object_config."
53
54 if runtime_environment is None:
55 runtime_environment = {}
56
57 object_class_name: Optional[str] = None
58 try:
59 if object_class is None and object_ is not None:
60 object_class = object_.__class__
61 elif object_class is None and object_config is not None:
62 object_class_name = object_config.get("class_name")
63 object_module_name = object_config.get(
64 "module_name"
65 ) or runtime_environment.get("module_name")
66 object_class = load_class(object_class_name, object_module_name)
67 object_class_name = object_class.__name__
68
69 for ge_class in ge_classes:
70 if issubclass(object_class, ge_class):
71 anonymized_info_dict["parent_class"] = ge_class.__name__
72 if not object_class == ge_class:
73 anonymized_info_dict["anonymized_class"] = self.anonymize(
74 object_class_name
75 )
76 break
77
78 if not anonymized_info_dict.get("parent_class"):
79 anonymized_info_dict["parent_class"] = "__not_recognized__"
80 anonymized_info_dict["anonymized_class"] = self.anonymize(
81 object_class_name
82 )
83 except AttributeError:
84 anonymized_info_dict["parent_class"] = "__not_recognized__"
85 anonymized_info_dict["anonymized_class"] = self.anonymize(object_class_name)
86
87 return anonymized_info_dict
88
89 @staticmethod
90 def _is_parent_class_recognized(
91 classes_to_check,
92 object_=None,
93 object_class=None,
94 object_config=None,
95 ) -> Optional[str]:
96 """
97 Check if the parent class is a subclass of any core GE class.
98 This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.
99 Returns:
100 The name of the parent class found, or None if no parent class was found
101 """
102 assert (
103 object_ or object_class or object_config
104 ), "Must pass either object_ or object_class or object_config."
105 try:
106 if object_class is None and object_ is not None:
107 object_class = object_.__class__
108 elif object_class is None and object_config is not None:
109 object_class_name = object_config.get("class_name")
110 object_module_name = object_config.get("module_name")
111 object_class = load_class(object_class_name, object_module_name)
112
113 for class_to_check in classes_to_check:
114 if issubclass(object_class, class_to_check):
115 return class_to_check.__name__
116
117 return None
118
119 except AttributeError:
120 return None
121
[end of great_expectations/core/usage_statistics/anonymizers/anonymizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py
--- a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py
+++ b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py
@@ -35,6 +35,7 @@
"{str(type(string_))}", which is illegal.
"""
)
+
salted = self._salt + string_
return md5(salted.encode("utf-8")).hexdigest()
| {"golden_diff": "diff --git a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py\n--- a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py\n+++ b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py\n@@ -35,6 +35,7 @@\n \"{str(type(string_))}\", which is illegal.\n \"\"\"\n )\n+\n salted = self._salt + string_\n return md5(salted.encode(\"utf-8\")).hexdigest()\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom hashlib import md5\nfrom typing import Optional\n\nfrom great_expectations.util import load_class\n\nlogger = logging.getLogger(__name__)\n\n\nclass Anonymizer:\n \"\"\"Anonymize string names in an optionally-consistent way.\"\"\"\n\n def __init__(self, salt=None):\n if salt is not None and not isinstance(salt, str):\n logger.error(\"invalid salt: must provide a string. Setting a random salt.\")\n salt = None\n if salt is None:\n import secrets\n\n self._salt = secrets.token_hex(8)\n else:\n self._salt = salt\n\n @property\n def salt(self):\n return self._salt\n\n def anonymize(self, string_):\n if string_ is None:\n return None\n\n if not isinstance(string_, str):\n raise TypeError(\n f\"\"\"The type of the \"string_\" argument must be a string (Python \"str\"). The type given is\n\"{str(type(string_))}\", which is illegal.\n \"\"\"\n )\n salted = self._salt + string_\n return md5(salted.encode(\"utf-8\")).hexdigest()\n\n def anonymize_object_info(\n self,\n anonymized_info_dict,\n ge_classes,\n object_=None,\n object_class=None,\n object_config=None,\n runtime_environment=None,\n ) -> dict:\n assert (\n object_ or object_class or object_config\n ), \"Must pass either object_ or object_class or object_config.\"\n\n if runtime_environment is None:\n runtime_environment = {}\n\n object_class_name: Optional[str] = None\n try:\n if object_class is None and object_ is not None:\n object_class = object_.__class__\n elif object_class is None and object_config is not None:\n object_class_name = object_config.get(\"class_name\")\n object_module_name = object_config.get(\n \"module_name\"\n ) or runtime_environment.get(\"module_name\")\n object_class = load_class(object_class_name, object_module_name)\n object_class_name = object_class.__name__\n\n for ge_class in ge_classes:\n if issubclass(object_class, ge_class):\n anonymized_info_dict[\"parent_class\"] = ge_class.__name__\n if not object_class == ge_class:\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(\n object_class_name\n )\n break\n\n if not anonymized_info_dict.get(\"parent_class\"):\n anonymized_info_dict[\"parent_class\"] = \"__not_recognized__\"\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(\n object_class_name\n )\n except AttributeError:\n anonymized_info_dict[\"parent_class\"] = \"__not_recognized__\"\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(object_class_name)\n\n return anonymized_info_dict\n\n @staticmethod\n def _is_parent_class_recognized(\n classes_to_check,\n object_=None,\n object_class=None,\n object_config=None,\n ) -> Optional[str]:\n \"\"\"\n Check if the parent class is a subclass of any core GE class.\n This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. 
These anonymizers define and provide the core GE classes_to_check.\n Returns:\n The name of the parent class found, or None if no parent class was found\n \"\"\"\n assert (\n object_ or object_class or object_config\n ), \"Must pass either object_ or object_class or object_config.\"\n try:\n if object_class is None and object_ is not None:\n object_class = object_.__class__\n elif object_class is None and object_config is not None:\n object_class_name = object_config.get(\"class_name\")\n object_module_name = object_config.get(\"module_name\")\n object_class = load_class(object_class_name, object_module_name)\n\n for class_to_check in classes_to_check:\n if issubclass(object_class, class_to_check):\n return class_to_check.__name__\n\n return None\n\n except AttributeError:\n return None\n", "path": "great_expectations/core/usage_statistics/anonymizers/anonymizer.py"}]} | 1,724 | 125 |
gh_patches_debug_35438 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1881 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider public_storage is broken
During the global build at 2021-05-26-14-42-23, spider **public_storage** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/public_storage.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson))
</issue>
<code>
[start of locations/spiders/public_storage.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7
8 class PublicStorageSpider(scrapy.Spider):
9 name = "public_storage"
10 item_attributes = { 'brand': "Public Storage" }
11 allowed_domains = ["www.publicstorage.com"]
12 start_urls = (
13 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',
14 )
15
16 def parse(self, response):
17 data = json.loads(response.body_as_unicode())
18
19 for store in data['response']['properties']['property']:
20 lat, lon = map(float, store['lat_long'].split(', '))
21 properties = {
22 "ref": store.get('property_id'),
23 "opening_hours": '; '.join(response.xpath('//time[@itemprop="openingHours"]/@datetime').extract()),
24 "addr_full": store.get('address'),
25 "city": store.get('city'),
26 "state": store.get('state'),
27 "postcode": store.get('zip'),
28 "lat": lat,
29 "lon": lon,
30 }
31
32 yield GeojsonPointItem(**properties)
33
[end of locations/spiders/public_storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py
--- a/locations/spiders/public_storage.py
+++ b/locations/spiders/public_storage.py
@@ -3,6 +3,7 @@
import json
from locations.items import GeojsonPointItem
+from locations.hours import OpeningHours
class PublicStorageSpider(scrapy.Spider):
@@ -10,23 +11,45 @@
item_attributes = { 'brand': "Public Storage" }
allowed_domains = ["www.publicstorage.com"]
start_urls = (
- 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',
+ 'https://www.publicstorage.com/sitemap_plp.xml',
)
def parse(self, response):
- data = json.loads(response.body_as_unicode())
-
- for store in data['response']['properties']['property']:
- lat, lon = map(float, store['lat_long'].split(', '))
- properties = {
- "ref": store.get('property_id'),
- "opening_hours": '; '.join(response.xpath('//time[@itemprop="openingHours"]/@datetime').extract()),
- "addr_full": store.get('address'),
- "city": store.get('city'),
- "state": store.get('state'),
- "postcode": store.get('zip'),
- "lat": lat,
- "lon": lon,
- }
-
- yield GeojsonPointItem(**properties)
+ response.selector.remove_namespaces()
+ city_urls = response.xpath('//url/loc/text()').extract()
+ for path in city_urls:
+ yield scrapy.Request(
+ path.strip(),
+ callback=self.parse_store,
+ )
+
+ def parse_hours(self, hours):
+ opening_hours = OpeningHours()
+
+ for hour in hours:
+ for day in hour['dayOfWeek']:
+ opening_hours.add_range(
+ day=day[:2],
+ open_time=hour["opens"],
+ close_time=hour["closes"],
+ )
+
+ return opening_hours.as_opening_hours()
+
+ def parse_store(self, response):
+ data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
+ data = data['@graph'][0]
+
+ properties = {
+ "ref": data['@id'],
+ "opening_hours": self.parse_hours(data['openingHoursSpecification']),
+ "addr_full": data['address']['streetAddress'],
+ "city": data['address']['addressLocality'],
+ "state": data['address']['addressRegion'],
+ "postcode": data['address']['postalCode'],
+ "phone": data['telephone'],
+ "lat": data['geo']['latitude'],
+ "lon": data['geo']['longitude'],
+ }
+
+ yield GeojsonPointItem(**properties)
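The rewritten spider walks the sitemap and reads the schema.org JSON-LD block on each store page; opening hours come from its `openingHoursSpecification` entries. A small sketch of how one such entry feeds `OpeningHours` (field names follow the `parse_hours` code above; the values are invented):

```python
from locations.hours import OpeningHours

hour = {                              # hypothetical openingHoursSpecification entry
    "dayOfWeek": ["Monday", "Tuesday"],
    "opens": "10:00",
    "closes": "22:00",
}

opening_hours = OpeningHours()
for day in hour["dayOfWeek"]:
    opening_hours.add_range(day=day[:2], open_time=hour["opens"], close_time=hour["closes"])

print(opening_hours.as_opening_hours())   # e.g. "Mo-Tu 10:00-22:00"
```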
| {"golden_diff": "diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py\n--- a/locations/spiders/public_storage.py\n+++ b/locations/spiders/public_storage.py\n@@ -3,6 +3,7 @@\n import json\n \n from locations.items import GeojsonPointItem\n+from locations.hours import OpeningHours\n \n \n class PublicStorageSpider(scrapy.Spider):\n@@ -10,23 +11,45 @@\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n- 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',\n+ 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n \n def parse(self, response):\n- data = json.loads(response.body_as_unicode())\n-\n- for store in data['response']['properties']['property']:\n- lat, lon = map(float, store['lat_long'].split(', '))\n- properties = {\n- \"ref\": store.get('property_id'),\n- \"opening_hours\": '; '.join(response.xpath('//time[@itemprop=\"openingHours\"]/@datetime').extract()),\n- \"addr_full\": store.get('address'),\n- \"city\": store.get('city'),\n- \"state\": store.get('state'),\n- \"postcode\": store.get('zip'),\n- \"lat\": lat,\n- \"lon\": lon,\n- }\n-\n- yield GeojsonPointItem(**properties)\n+ response.selector.remove_namespaces()\n+ city_urls = response.xpath('//url/loc/text()').extract()\n+ for path in city_urls:\n+ yield scrapy.Request(\n+ path.strip(),\n+ callback=self.parse_store,\n+ )\n+\n+ def parse_hours(self, hours):\n+ opening_hours = OpeningHours()\n+\n+ for hour in hours:\n+ for day in hour['dayOfWeek']:\n+ opening_hours.add_range(\n+ day=day[:2],\n+ open_time=hour[\"opens\"],\n+ close_time=hour[\"closes\"],\n+ )\n+\n+ return opening_hours.as_opening_hours()\n+\n+ def parse_store(self, response):\n+ data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n+ data = data['@graph'][0]\n+\n+ properties = {\n+ \"ref\": data['@id'],\n+ \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n+ \"addr_full\": data['address']['streetAddress'],\n+ \"city\": data['address']['addressLocality'],\n+ \"state\": data['address']['addressRegion'],\n+ \"postcode\": data['address']['postalCode'],\n+ \"phone\": data['telephone'],\n+ \"lat\": data['geo']['latitude'],\n+ \"lon\": data['geo']['longitude'],\n+ }\n+\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider public_storage is broken\nDuring the global build at 2021-05-26-14-42-23, spider **public_storage** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/public_storage.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',\n )\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data['response']['properties']['property']:\n lat, lon = map(float, store['lat_long'].split(', '))\n 
properties = {\n \"ref\": store.get('property_id'),\n \"opening_hours\": '; '.join(response.xpath('//time[@itemprop=\"openingHours\"]/@datetime').extract()),\n \"addr_full\": store.get('address'),\n \"city\": store.get('city'),\n \"state\": store.get('state'),\n \"postcode\": store.get('zip'),\n \"lat\": lat,\n \"lon\": lon,\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}]} | 1,040 | 657 |
gh_patches_debug_28889 | rasdani/github-patches | git_diff | piskvorky__gensim-968 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lsi distributed fail
Hi,
I've got a problem with the distributed LSI. When I executed the example:
https://radimrehurek.com/gensim/dist_lsi.html
First I configured the server (environment variables), then I ran the server, worker and dispatcher,
all without errors. But when I executed the code, I got this failure:

Why does this happen? How can I solve it?
Thank you in advance.
</issue>
<code>
[start of gensim/models/lsi_worker.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2010 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 USAGE: %(program)s
9
10 Worker ("slave") process used in computing distributed LSI. Run this script \
11 on every node in your cluster. If you wish, you may even run it multiple times \
12 on a single machine, to make better use of multiple cores (just beware that \
13 memory footprint increases accordingly).
14
15 Example: python -m gensim.models.lsi_worker
16 """
17
18
19 from __future__ import with_statement
20 import os, sys, logging
21 import threading
22 import tempfile
23 try:
24 import Queue
25 except ImportError:
26 import queue as Queue
27 import Pyro4
28 from gensim.models import lsimodel
29 from gensim import utils
30
31 logger = logging.getLogger('gensim.models.lsi_worker')
32
33
34 SAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)
35
36
37
38 class Worker(object):
39 def __init__(self):
40 self.model = None
41
42
43 def initialize(self, myid, dispatcher, **model_params):
44 self.lock_update = threading.Lock()
45 self.jobsdone = 0 # how many jobs has this worker completed?
46 self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
47 self.dispatcher = dispatcher
48 self.finished = False
49 logger.info("initializing worker #%s" % myid)
50 self.model = lsimodel.LsiModel(**model_params)
51
52
53 @Pyro4.oneway
54 def requestjob(self):
55 """
56 Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.
57 """
58 if self.model is None:
59 raise RuntimeError("worker must be initialized before receiving jobs")
60
61 job = None
62 while job is None and not self.finished:
63 try:
64 job = self.dispatcher.getjob(self.myid)
65 except Queue.Empty:
66 # no new job: try again, unless we're finished with all work
67 continue
68 if job is not None:
69 logger.info("worker #%s received job #%i" % (self.myid, self.jobsdone))
70 self.processjob(job)
71 self.dispatcher.jobdone(self.myid)
72 else:
73 logger.info("worker #%i stopping asking for jobs" % self.myid)
74
75
76 @utils.synchronous('lock_update')
77 def processjob(self, job):
78 self.model.add_documents(job)
79 self.jobsdone += 1
80 if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
81 fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
82 self.model.save(fname)
83
84
85 @utils.synchronous('lock_update')
86 def getstate(self):
87 logger.info("worker #%i returning its state after %s jobs" %
88 (self.myid, self.jobsdone))
89 assert isinstance(self.model.projection, lsimodel.Projection)
90 self.finished = True
91 return self.model.projection
92
93
94 @utils.synchronous('lock_update')
95 def reset(self):
96 logger.info("resetting worker #%i" % self.myid)
97 self.model.projection = self.model.projection.empty_like()
98 self.finished = False
99
100
101 @Pyro4.oneway
102 def exit(self):
103 logger.info("terminating worker #%i" % self.myid)
104 os._exit(0)
105 #endclass Worker
106
107
108
109 def main():
110 logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
111 logger.info("running %s" % " ".join(sys.argv))
112
113 program = os.path.basename(sys.argv[0])
114 # make sure we have enough cmd line parameters
115 if len(sys.argv) < 1:
116 print(globals()["__doc__"] % locals())
117 sys.exit(1)
118
119 utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)
120
121 logger.info("finished running %s" % program)
122
123
124
125 if __name__ == '__main__':
126 main()
127
[end of gensim/models/lsi_worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gensim/models/lsi_worker.py b/gensim/models/lsi_worker.py
--- a/gensim/models/lsi_worker.py
+++ b/gensim/models/lsi_worker.py
@@ -39,7 +39,7 @@
def __init__(self):
self.model = None
-
+ @Pyro4.expose
def initialize(self, myid, dispatcher, **model_params):
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
@@ -49,7 +49,7 @@
logger.info("initializing worker #%s" % myid)
self.model = lsimodel.LsiModel(**model_params)
-
+ @Pyro4.expose
@Pyro4.oneway
def requestjob(self):
"""
@@ -81,7 +81,7 @@
fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
self.model.save(fname)
-
+ @Pyro4.expose
@utils.synchronous('lock_update')
def getstate(self):
logger.info("worker #%i returning its state after %s jobs" %
@@ -90,7 +90,7 @@
self.finished = True
return self.model.projection
-
+ @Pyro4.expose
@utils.synchronous('lock_update')
def reset(self):
logger.info("resetting worker #%i" % self.myid)
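The change adds `@Pyro4.expose` to every method the dispatcher calls remotely. Recent Pyro4 releases no longer expose object members by default, so without the decorator the remote `initialize`/`requestjob` calls fail even though the worker registers cleanly — which matches the behaviour described in the issue. A minimal sketch of the idea, outside gensim:

```python
import Pyro4

@Pyro4.expose                 # exposing the class makes its public methods remotely callable
class EchoWorker(object):     # hypothetical example class, not part of gensim
    def ping(self):
        return "pong"

# Typical server side (commented out so the sketch stays import-only):
# daemon = Pyro4.Daemon()
# uri = daemon.register(EchoWorker())
# daemon.requestLoop()
```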
| {"golden_diff": "diff --git a/gensim/models/lsi_worker.py b/gensim/models/lsi_worker.py\n--- a/gensim/models/lsi_worker.py\n+++ b/gensim/models/lsi_worker.py\n@@ -39,7 +39,7 @@\n def __init__(self):\n self.model = None\n \n-\n+ @Pyro4.expose\n def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n@@ -49,7 +49,7 @@\n logger.info(\"initializing worker #%s\" % myid)\n self.model = lsimodel.LsiModel(**model_params)\n \n-\n+ @Pyro4.expose\n @Pyro4.oneway\n def requestjob(self):\n \"\"\"\n@@ -81,7 +81,7 @@\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)\n \n-\n+ @Pyro4.expose\n @utils.synchronous('lock_update')\n def getstate(self):\n logger.info(\"worker #%i returning its state after %s jobs\" %\n@@ -90,7 +90,7 @@\n self.finished = True\n return self.model.projection\n \n-\n+ @Pyro4.expose\n @utils.synchronous('lock_update')\n def reset(self):\n logger.info(\"resetting worker #%i\" % self.myid)\n", "issue": "Lsi distributed fail\nHi, \nI've got a problem with the lsi distributed. When i executed the example:\n\nhttps://radimrehurek.com/gensim/dist_lsi.html\n\nFirst configure the server (enviroment variables), then i run the server, worker and dispatcher.\n\nAnd all without errros. But when i executed the code. I have this fail:\n\n\nWhy does this happens? How can i solve?\n\nThank you in advance.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nUSAGE: %(program)s\n\n Worker (\"slave\") process used in computing distributed LSI. Run this script \\\non every node in your cluster. 
If you wish, you may even run it multiple times \\\non a single machine, to make better use of multiple cores (just beware that \\\nmemory footprint increases accordingly).\n\nExample: python -m gensim.models.lsi_worker\n\"\"\"\n\n\nfrom __future__ import with_statement\nimport os, sys, logging\nimport threading\nimport tempfile\ntry:\n import Queue\nexcept ImportError:\n import queue as Queue\nimport Pyro4\nfrom gensim.models import lsimodel\nfrom gensim import utils\n\nlogger = logging.getLogger('gensim.models.lsi_worker')\n\n\nSAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)\n\n\n\nclass Worker(object):\n def __init__(self):\n self.model = None\n\n\n def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\" % myid)\n self.model = lsimodel.LsiModel(**model_params)\n\n\n @Pyro4.oneway\n def requestjob(self):\n \"\"\"\n Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.\n \"\"\"\n if self.model is None:\n raise RuntimeError(\"worker must be initialized before receiving jobs\")\n\n job = None\n while job is None and not self.finished:\n try:\n job = self.dispatcher.getjob(self.myid)\n except Queue.Empty:\n # no new job: try again, unless we're finished with all work\n continue\n if job is not None:\n logger.info(\"worker #%s received job #%i\" % (self.myid, self.jobsdone))\n self.processjob(job)\n self.dispatcher.jobdone(self.myid)\n else:\n logger.info(\"worker #%i stopping asking for jobs\" % self.myid)\n\n\n @utils.synchronous('lock_update')\n def processjob(self, job):\n self.model.add_documents(job)\n self.jobsdone += 1\n if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)\n\n\n @utils.synchronous('lock_update')\n def getstate(self):\n logger.info(\"worker #%i returning its state after %s jobs\" %\n (self.myid, self.jobsdone))\n assert isinstance(self.model.projection, lsimodel.Projection)\n self.finished = True\n return self.model.projection\n\n\n @utils.synchronous('lock_update')\n def reset(self):\n logger.info(\"resetting worker #%i\" % self.myid)\n self.model.projection = self.model.projection.empty_like()\n self.finished = False\n\n\n @Pyro4.oneway\n def exit(self):\n logger.info(\"terminating worker #%i\" % self.myid)\n os._exit(0)\n#endclass Worker\n\n\n\ndef main():\n logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n logger.info(\"running %s\" % \" \".join(sys.argv))\n\n program = os.path.basename(sys.argv[0])\n # make sure we have enough cmd line parameters\n if len(sys.argv) < 1:\n print(globals()[\"__doc__\"] % locals())\n sys.exit(1)\n\n utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)\n\n logger.info(\"finished running %s\" % program)\n\n\n\nif __name__ == '__main__':\n main()\n", "path": "gensim/models/lsi_worker.py"}]} | 1,886 | 326 |
gh_patches_debug_20388 | rasdani/github-patches | git_diff | vnpy__vnpy-1500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ubuntu CTP import problem
## Environment
* Operating system: Ubuntu 18.04
* Anaconda version: Python 3.7, 64-bit
* vn.py version: DEV-2.0.1 branch, 20190313 (download date)
## Issue type
One of three: Bug
## Expected program behaviour
```
from vnpy.gateway.ctp import ctp_gateway  # import succeeds
```
## Actual program behaviour
```
from vnpy.gateway.ctp.ctp_gateway import CtpGateway
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/vnpy/vnpy/vnpy/gateway/ctp/__init__.py", line 1, in <module>
from .ctp_gateway import CtpGateway
File "/home/vnpy/vnpy/vnpy/gateway/ctp/ctp_gateway.py", line 6, in <module>
from vnpy.api.ctp import (
File "/home/vnpy/vnpy/vnpy/api/ctp/__init__.py", line 1, in <module>
from .vnctpmd import MdApi
ModuleNotFoundError: No module named 'vnpy.api.ctp.vnctpmd'
```
## Steps to reproduce
```
# remove the oes extension module from setup.py
git clone -b v2.0.1-DEV https://github.com/vnpy/vnpy
cd vnpy
vim setup.py   # just delete the oes-related code
chmod +x install.sh && ./install.sh
# the installation completes normally
```
For bug-type issues, please provide concrete reproduction steps and a screenshot of the error.
</issue>
<code>
[start of setup.py]
1 import ast
2 import platform
3 import re
4
5 from setuptools import Extension, find_packages, setup
6
7 with open("vnpy/__init__.py", "rb") as f:
8 version_line = re.search(
9 r"__version__\s+=\s+(.*)", f.read().decode("utf-8")
10 ).group(1)
11 version = str(ast.literal_eval(version_line))
12
13 if platform.uname().system == "Windows":
14 compiler_flags = ["/MP", "/std:c++17", # standard
15 "/O2", "/Ob2", "/Oi", "/Ot", "/Oy", "/GL", # Optimization
16 "/wd4819" # 936 code page
17 ]
18 extra_link_args = []
19 else:
20 compiler_flags = ["-std=c++17",
21 "-Wno-delete-incomplete", "-Wno-sign-compare",
22 ]
23 extra_link_args = ["-lstdc++"]
24
25 vnctpmd = Extension("vnpy.api.ctp.vnctpmd",
26 [
27 "vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp",
28 ],
29 include_dirs=["vnpy/api/ctp/include", "vnpy/api/ctp/vnctp", ],
30 define_macros=[],
31 undef_macros=[],
32 library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],
33 libraries=["thostmduserapi", "thosttraderapi", ],
34 extra_compile_args=compiler_flags,
35 extra_link_args=extra_link_args,
36 depends=[],
37 runtime_library_dirs=["vnpy/api/ctp"],
38 language="cpp",
39 )
40 vnctptd = Extension("vnpy.api.ctp.vnctptd",
41 [
42 "vnpy/api/ctp/vnctp/vnctptd/vnctptd.cpp",
43 ],
44 include_dirs=["vnpy/api/ctp/include", "vnpy/api/ctp/vnctp", ],
45 define_macros=[],
46 undef_macros=[],
47 library_dirs=["vnpy/api/ctp/libs", "vnpy/api/ctp"],
48 libraries=["thostmduserapi", "thosttraderapi", ],
49 extra_compile_args=compiler_flags,
50 extra_link_args=extra_link_args,
51 runtime_library_dirs=["vnpy/api/ctp"],
52 depends=[],
53 language="cpp",
54 )
55 vnoes = Extension("vnpy.api.oes.vnoes",
56 [
57 "vnpy/api/oes/vnoes/generated_files/classes_1.cpp",
58 "vnpy/api/oes/vnoes/generated_files/classes_2.cpp",
59 "vnpy/api/oes/vnoes/generated_files/module.cpp",
60 ],
61 include_dirs=["vnpy/api/oes/include", "vnpy/api/oes/vnoes", ],
62 define_macros=[("BRIGAND_NO_BOOST_SUPPORT", "1")],
63 undef_macros=[],
64 library_dirs=["vnpy/api/oes/libs"],
65 libraries=["oes_api"],
66 extra_compile_args=compiler_flags,
67 extra_link_args=extra_link_args,
68 depends=[],
69 language="cpp",
70 )
71
72 if platform.uname().system == "Windows":
73 # use pre-built pyd for windows ( support python 3.7 only )
74 ext_modules = []
75 else:
76 ext_modules = [vnctptd, vnctpmd, vnoes]
77
78 pkgs = find_packages()
79
80 setup(
81 name="vnpy",
82 version=version,
83 include_package_data=True,
84 packages=pkgs,
85 package_data={"": [
86 "*.json", "*.md", "*.ico", "*.ini",
87 "*.dll", "*.so", "*.pyd"
88 ]},
89 install_requires=[],
90 ext_modules=ext_modules
91 )
92
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,7 @@
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
depends=[],
- runtime_library_dirs=["vnpy/api/ctp"],
+ runtime_library_dirs=["$ORIGIN"],
language="cpp",
)
vnctptd = Extension("vnpy.api.ctp.vnctptd",
@@ -48,7 +48,7 @@
libraries=["thostmduserapi", "thosttraderapi", ],
extra_compile_args=compiler_flags,
extra_link_args=extra_link_args,
- runtime_library_dirs=["vnpy/api/ctp"],
+ runtime_library_dirs=["$ORIGIN"],
depends=[],
language="cpp",
)
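`runtime_library_dirs` becomes the extension's run-time search path (RPATH). A literal path such as `vnpy/api/ctp` is resolved relative to whatever directory Python happens to be started from, while `$ORIGIN` is expanded by the ELF dynamic linker to the directory containing the compiled module itself, so the bundled `libthostmduserapi.so`/`libthosttraderapi.so` sitting next to `vnctpmd`/`vnctptd` are found regardless of the working directory. A stripped-down sketch of the same idea (module name and sources are hypothetical):

```python
from setuptools import Extension

ext = Extension(
    "pkg.native",                        # hypothetical extension module
    sources=["src/native.cpp"],
    libraries=["thostmduserapi", "thosttraderapi"],
    library_dirs=["vendor/libs"],        # where the .so files live at build time
    runtime_library_dirs=["$ORIGIN"],    # at run time, look next to pkg/native.*.so
)
```

After building, `readelf -d` on the resulting `.so` should list `$ORIGIN` under its `RPATH`/`RUNPATH` entry.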
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,7 +34,7 @@\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n- runtime_library_dirs=[\"vnpy/api/ctp\"],\n+ runtime_library_dirs=[\"$ORIGIN\"],\n language=\"cpp\",\n )\n vnctptd = Extension(\"vnpy.api.ctp.vnctptd\",\n@@ -48,7 +48,7 @@\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n- runtime_library_dirs=[\"vnpy/api/ctp\"],\n+ runtime_library_dirs=[\"$ORIGIN\"],\n depends=[],\n language=\"cpp\",\n )\n", "issue": "ubuntu\u3000 ctp\u5bfc\u5165\u95ee\u9898\n## \u73af\u5883\r\n\r\n* \u64cd\u4f5c\u7cfb\u7edf: Ubuntu 18.04\r\n* Anaconda\u7248\u672c: Python 3.7 64\u4f4d\r\n* vn.py\u7248\u672c: DEV-2.0.1 branch 20190313\uff08\u4e0b\u8f7d\u65e5\u671f\uff09\r\n\r\n## Issue\u7c7b\u578b\r\n\u4e09\u9009\u4e00\uff1aBu\uff47\r\n\r\n## \u9884\u671f\u7a0b\u5e8f\u884c\u4e3a\r\n```\r\nfrom vnpy.gateway.ctp import ctp_gateway\u5bfc\u5165\u6210\u529f\r\n## \u5b9e\u9645\u7a0b\u5e8f\u884c\u4e3a\r\n'''from vnpy.gateway.ctp.ctp_gateway import CtpGateWay\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/vnpy/vnpy/vnpy/gateway/ctp/__init__.py\", line 1, in <module>\r\n from .ctp_gateway import CtpGateway\r\n File \"/home/vnpy/vnpy/vnpy/gateway/ctp/ctp_gateway.py\", line 6, in <module>\r\n from vnpy.api.ctp import (\r\n File \"/home/vnpy/vnpy/vnpy/api/ctp/__init__.py\", line 1, in <module>\r\n from .vnctpmd import MdApi\r\nModuleNotFoundError: No module named 'vnpy.api.ctp.vnctpmd'\r\n```\r\n\r\n## \u91cd\u73b0\u6b65\u9aa4\r\n\r\n```\r\n\u5220\u9664setup\u4e0b\u9762\u7684oes\u5b89\u88c5\u6a21\u5757 \r\ngit clone -b v2.0.1-DEV https://github.com/vnpy/vnpy\r\ncd vnpy\r\nvim setup.py #\u5177\u4f53\u5220\u9664\u5220\u9664\u76f8\u5173\u4ee3\u7801\u5373\u53ef \r\nchmod +x install.sh && ./install.sh \r\n# \u5b89\u88c5\u4f1a\u6b63\u5e38\u8fdb\u884c \r\n```\r\n\r\n\u9488\u5bf9Bug\u7c7b\u578bIssue\uff0c\u8bf7\u63d0\u4f9b\u5177\u4f53\u91cd\u73b0\u6b65\u9aa4\u4ee5\u53ca\u62a5\u9519\u622a\u56fe\r\n\r\n\n", "before_files": [{"content": "import ast\nimport platform\nimport re\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"vnpy/__init__.py\", \"rb\") as f:\n version_line = re.search(\n r\"__version__\\s+=\\s+(.*)\", f.read().decode(\"utf-8\")\n ).group(1)\n version = str(ast.literal_eval(version_line))\n\nif platform.uname().system == \"Windows\":\n compiler_flags = [\"/MP\", \"/std:c++17\", # standard\n \"/O2\", \"/Ob2\", \"/Oi\", \"/Ot\", \"/Oy\", \"/GL\", # Optimization\n \"/wd4819\" # 936 code page\n ]\n extra_link_args = []\nelse:\n compiler_flags = [\"-std=c++17\",\n \"-Wno-delete-incomplete\", \"-Wno-sign-compare\",\n ]\n extra_link_args = [\"-lstdc++\"]\n\nvnctpmd = Extension(\"vnpy.api.ctp.vnctpmd\",\n [\n \"vnpy/api/ctp/vnctp/vnctpmd/vnctpmd.cpp\",\n ],\n include_dirs=[\"vnpy/api/ctp/include\", \"vnpy/api/ctp/vnctp\", ],\n define_macros=[],\n undef_macros=[],\n library_dirs=[\"vnpy/api/ctp/libs\", \"vnpy/api/ctp\"],\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n runtime_library_dirs=[\"vnpy/api/ctp\"],\n language=\"cpp\",\n )\nvnctptd = Extension(\"vnpy.api.ctp.vnctptd\",\n [\n \"vnpy/api/ctp/vnctp/vnctptd/vnctptd.cpp\",\n ],\n include_dirs=[\"vnpy/api/ctp/include\", \"vnpy/api/ctp/vnctp\", ],\n define_macros=[],\n undef_macros=[],\n 
library_dirs=[\"vnpy/api/ctp/libs\", \"vnpy/api/ctp\"],\n libraries=[\"thostmduserapi\", \"thosttraderapi\", ],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n runtime_library_dirs=[\"vnpy/api/ctp\"],\n depends=[],\n language=\"cpp\",\n )\nvnoes = Extension(\"vnpy.api.oes.vnoes\",\n [\n \"vnpy/api/oes/vnoes/generated_files/classes_1.cpp\",\n \"vnpy/api/oes/vnoes/generated_files/classes_2.cpp\",\n \"vnpy/api/oes/vnoes/generated_files/module.cpp\",\n ],\n include_dirs=[\"vnpy/api/oes/include\", \"vnpy/api/oes/vnoes\", ],\n define_macros=[(\"BRIGAND_NO_BOOST_SUPPORT\", \"1\")],\n undef_macros=[],\n library_dirs=[\"vnpy/api/oes/libs\"],\n libraries=[\"oes_api\"],\n extra_compile_args=compiler_flags,\n extra_link_args=extra_link_args,\n depends=[],\n language=\"cpp\",\n )\n\nif platform.uname().system == \"Windows\":\n # use pre-built pyd for windows ( support python 3.7 only )\n ext_modules = []\nelse:\n ext_modules = [vnctptd, vnctpmd, vnoes]\n\npkgs = find_packages()\n\nsetup(\n name=\"vnpy\",\n version=version,\n include_package_data=True,\n packages=pkgs,\n package_data={\"\": [\n \"*.json\", \"*.md\", \"*.ico\", \"*.ini\",\n \"*.dll\", \"*.so\", \"*.pyd\"\n ]},\n install_requires=[],\n ext_modules=ext_modules\n)\n", "path": "setup.py"}]} | 1,868 | 179 |
gh_patches_debug_35150 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2973 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider ljsilvers is broken
During the global build at 2021-06-02-14-42-40, spider **ljsilvers** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/ljsilvers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson))
Long John Silver's
http://www.ljsilvers.com/
(location search box top right)
</issue>
<code>
[start of locations/spiders/ljsilvers.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8
9 class LjsilversSpider(scrapy.Spider):
10 name = "ljsilvers"
11 item_attributes = { 'brand': "Long John Silver's", 'brand_wikidata': "Q1535221" }
12 allowed_domains = ["ljsilvers.com"]
13 start_urls = (
14 'http://www.ljsilvers.com/locator?postalcode=76010',
15 )
16
17 def parse(self, response):
18 data = response.body_as_unicode()
19 base_data = re.search(r'dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">', data, re.DOTALL).group(1)
20 detail_matches = re.findall(r'\((.*?)\)', base_data, re.DOTALL)
21
22 for detail_match in detail_matches:
23 key_values = re.findall(r'(.*?)\s=>\s(.*)', detail_match)
24 props = {}
25
26 for key_value in key_values:
27 key = key_value[0].strip()
28 value = key_value[1].strip()
29
30 if key == '[storeID]':
31 props['ref'] = value
32 if key == '[address]':
33 props['addr_full'] = value
34 if key == '[city]':
35 props['city'] = value
36 if key == '[state]':
37 props['state'] = value
38 if key == '[zip]':
39 props['postcode'] = value
40 if key == '[phone_number]':
41 props['phone'] = value
42 if key == '[latitude]':
43 props['lat'] = value
44 if key == '[longitude]':
45 props['lon'] = value
46
47 yield GeojsonPointItem(**props)
48
[end of locations/spiders/ljsilvers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/ljsilvers.py b/locations/spiders/ljsilvers.py
--- a/locations/spiders/ljsilvers.py
+++ b/locations/spiders/ljsilvers.py
@@ -1,47 +1,32 @@
# -*- coding: utf-8 -*-
import scrapy
-import json
-import re
from locations.items import GeojsonPointItem
class LjsilversSpider(scrapy.Spider):
name = "ljsilvers"
- item_attributes = { 'brand': "Long John Silver's", 'brand_wikidata': "Q1535221" }
+ item_attributes = {"brand": "Long John Silver's", "brand_wikidata": "Q1535221"}
allowed_domains = ["ljsilvers.com"]
start_urls = (
- 'http://www.ljsilvers.com/locator?postalcode=76010',
+ "https://viewer.blipstar.com/searchdbnew?uid=2483677&lat=45&lng=-103&value=10000",
)
def parse(self, response):
- data = response.body_as_unicode()
- base_data = re.search(r'dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">', data, re.DOTALL).group(1)
- detail_matches = re.findall(r'\((.*?)\)', base_data, re.DOTALL)
-
- for detail_match in detail_matches:
- key_values = re.findall(r'(.*?)\s=>\s(.*)', detail_match)
- props = {}
-
- for key_value in key_values:
- key = key_value[0].strip()
- value = key_value[1].strip()
-
- if key == '[storeID]':
- props['ref'] = value
- if key == '[address]':
- props['addr_full'] = value
- if key == '[city]':
- props['city'] = value
- if key == '[state]':
- props['state'] = value
- if key == '[zip]':
- props['postcode'] = value
- if key == '[phone_number]':
- props['phone'] = value
- if key == '[latitude]':
- props['lat'] = value
- if key == '[longitude]':
- props['lon'] = value
-
- yield GeojsonPointItem(**props)
+ for row in response.json():
+ if row.keys() == {"fulltotal", "total", "units"}:
+ continue
+ addr = scrapy.Selector(text=row["a"])
+ properties = {
+ "name": row["n"],
+ "ref": row["bpid"],
+ "lat": row["lat"],
+ "lon": row["lng"],
+ "addr_full": addr.xpath("//p/text()").extract_first(),
+ "city": addr.css(".storecity ::text").extract_first(),
+ "state": addr.css(".storestate ::text").extract_first(),
+ "postcode": addr.css(".storepostalcode ::text").extract_first(),
+ "country": row["c"],
+ "phone": row.get("p"),
+ }
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/ljsilvers.py b/locations/spiders/ljsilvers.py\n--- a/locations/spiders/ljsilvers.py\n+++ b/locations/spiders/ljsilvers.py\n@@ -1,47 +1,32 @@\n # -*- coding: utf-8 -*-\n import scrapy\n-import json\n-import re\n \n from locations.items import GeojsonPointItem\n \n \n class LjsilversSpider(scrapy.Spider):\n name = \"ljsilvers\"\n- item_attributes = { 'brand': \"Long John Silver's\", 'brand_wikidata': \"Q1535221\" }\n+ item_attributes = {\"brand\": \"Long John Silver's\", \"brand_wikidata\": \"Q1535221\"}\n allowed_domains = [\"ljsilvers.com\"]\n start_urls = (\n- 'http://www.ljsilvers.com/locator?postalcode=76010',\n+ \"https://viewer.blipstar.com/searchdbnew?uid=2483677&lat=45&lng=-103&value=10000\",\n )\n \n def parse(self, response):\n- data = response.body_as_unicode()\n- base_data = re.search(r'dataout\\s--Array\\s\\((.*)\\)\\s\\s--><style type=\"text/css\">', data, re.DOTALL).group(1)\n- detail_matches = re.findall(r'\\((.*?)\\)', base_data, re.DOTALL)\n-\n- for detail_match in detail_matches:\n- key_values = re.findall(r'(.*?)\\s=>\\s(.*)', detail_match)\n- props = {}\n-\n- for key_value in key_values:\n- key = key_value[0].strip()\n- value = key_value[1].strip()\n-\n- if key == '[storeID]':\n- props['ref'] = value\n- if key == '[address]':\n- props['addr_full'] = value\n- if key == '[city]':\n- props['city'] = value\n- if key == '[state]':\n- props['state'] = value\n- if key == '[zip]':\n- props['postcode'] = value\n- if key == '[phone_number]':\n- props['phone'] = value\n- if key == '[latitude]':\n- props['lat'] = value\n- if key == '[longitude]':\n- props['lon'] = value\n-\n- yield GeojsonPointItem(**props)\n+ for row in response.json():\n+ if row.keys() == {\"fulltotal\", \"total\", \"units\"}:\n+ continue\n+ addr = scrapy.Selector(text=row[\"a\"])\n+ properties = {\n+ \"name\": row[\"n\"],\n+ \"ref\": row[\"bpid\"],\n+ \"lat\": row[\"lat\"],\n+ \"lon\": row[\"lng\"],\n+ \"addr_full\": addr.xpath(\"//p/text()\").extract_first(),\n+ \"city\": addr.css(\".storecity ::text\").extract_first(),\n+ \"state\": addr.css(\".storestate ::text\").extract_first(),\n+ \"postcode\": addr.css(\".storepostalcode ::text\").extract_first(),\n+ \"country\": row[\"c\"],\n+ \"phone\": row.get(\"p\"),\n+ }\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider ljsilvers is broken\nDuring the global build at 2021-06-02-14-42-40, spider **ljsilvers** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/ljsilvers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ljsilvers.geojson))\nLong John Silver's\nhttp://www.ljsilvers.com/\r\n\r\n(location search box top right)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LjsilversSpider(scrapy.Spider):\n name = \"ljsilvers\"\n item_attributes = { 'brand': \"Long John Silver's\", 'brand_wikidata': \"Q1535221\" }\n allowed_domains = [\"ljsilvers.com\"]\n start_urls = (\n 'http://www.ljsilvers.com/locator?postalcode=76010',\n )\n\n def parse(self, response):\n data = response.body_as_unicode()\n base_data = re.search(r'dataout\\s--Array\\s\\((.*)\\)\\s\\s--><style type=\"text/css\">', data, re.DOTALL).group(1)\n detail_matches = re.findall(r'\\((.*?)\\)', base_data, 
re.DOTALL)\n\n for detail_match in detail_matches:\n key_values = re.findall(r'(.*?)\\s=>\\s(.*)', detail_match)\n props = {}\n\n for key_value in key_values:\n key = key_value[0].strip()\n value = key_value[1].strip()\n\n if key == '[storeID]':\n props['ref'] = value\n if key == '[address]':\n props['addr_full'] = value\n if key == '[city]':\n props['city'] = value\n if key == '[state]':\n props['state'] = value\n if key == '[zip]':\n props['postcode'] = value\n if key == '[phone_number]':\n props['phone'] = value\n if key == '[latitude]':\n props['lat'] = value\n if key == '[longitude]':\n props['lon'] = value\n\n yield GeojsonPointItem(**props)\n", "path": "locations/spiders/ljsilvers.py"}]} | 1,238 | 739 |
gh_patches_debug_1832 | rasdani/github-patches | git_diff | conan-io__conan-center-index-18494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] clickhouse-cpp/*: fPIC option is not respected
In the recipe file, the fPIC option is always removed during the configure stage, which can lead to a non-working static library.
</issue>
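For illustration, below is a minimal sketch of the conventional Conan 2 pattern for honouring fPIC: drop the option only for shared builds (and on Windows) instead of unconditionally. It mirrors the change shown in the diff further below; the class name and option set are generic placeholders, not the recipe's full contents.

```python
from conan import ConanFile


class ExampleRecipe(ConanFile):
    name = "example"
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}

    def config_options(self):
        # fPIC has no meaning on Windows, so remove the option there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Drop fPIC only for shared builds; static builds keep the user's
        # choice so position-independent code is actually emitted.
        if self.options.shared:
            self.options.rm_safe("fPIC")
```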
<code>
[start of recipes/clickhouse-cpp/all/conanfile.py]
1 from conan import ConanFile
2 from conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout
3 from conan.tools.files import copy, get
4 from conan.tools.build import check_min_cppstd
5 from conan.errors import ConanInvalidConfiguration
6 from conan.tools.scm import Version
7 import os
8
9 required_conan_version = ">=1.53.0"
10
11 class ClickHouseCppConan(ConanFile):
12 name = "clickhouse-cpp"
13 homepage = "https://github.com/ClickHouse/clickhouse-cpp"
14 url = "https://github.com/conan-io/conan-center-index"
15 description = "ClickHouse C++ API"
16 license = "Apache-2.0"
17 topics = ("database", "db", "clickhouse")
18 settings = "os", "arch", "compiler", "build_type"
19 options = {
20 "shared": [True, False],
21 "fPIC": [True, False],
22 "enable_benchmark": [True, False],
23 "with_openssl": [True, False]
24 }
25 default_options = {
26 "shared": False,
27 "fPIC": True,
28 "enable_benchmark": False,
29 "with_openssl": False
30 }
31
32 def requirements(self):
33
34 self.requires("lz4/1.9.4")
35
36 self.requires("abseil/20230125.3", transitive_headers=True)
37
38 self.requires("cityhash/cci.20130801")
39 if self.options.with_openssl:
40 self.requires("openssl/[>=1.1 <4]")
41
42 def build_requirements(self):
43 if self.options.enable_benchmark:
44 self.requires("benchmark/1.8.0")
45
46 @property
47 def _min_cppstd(self):
48 return "17"
49
50 @property
51 def _compilers_minimum_version(self):
52 return {
53 "Visual Studio": "15",
54 "msvc": "191",
55 "gcc": "7",
56 "clang": "6",
57 }
58
59 @property
60 def _requires_compiler_rt(self):
61 return self.settings.compiler == "clang" and (( self.settings.compiler.libcxx in ["libstdc++", "libstdc++11"] and not self.options.shared) or self.settings.compiler.libcxx == "libc++" )
62
63 def validate(self):
64 if self.settings.compiler.get_safe("cppstd"):
65 check_min_cppstd(self, self._min_cppstd)
66 minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
67 if minimum_version and Version(self.settings.compiler.version) < minimum_version:
68 raise ConanInvalidConfiguration(f"{self.ref} requires C++17, which your compiler does not support.")
69 if self.settings.os == "Windows" and self.options.shared:
70 raise ConanInvalidConfiguration("f{self.ref} does not support shared library on Windows.")
71 # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226
72
73 def config_options(self):
74 if self.settings.os == "Windows":
75 del self.options.fPIC
76
77 def configure(self):
78 self.options.rm_safe("fPIC")
79
80 def layout(self):
81 cmake_layout(self, src_folder="src")
82
83 def source(self):
84 get(self, **self.conan_data["sources"][self.version],
85 destination=self.source_folder, strip_root=True)
86
87 def generate(self):
88 tc = CMakeToolchain(self)
89 tc.variables["BUILD_BENCHMARK"] = self.options.enable_benchmark
90 tc.cache_variables["BUILD_SHARED_LIBS"] = self.options.shared
91 tc.variables["WITH_OPENSSL"] = self.options.with_openssl
92 tc.cache_variables["WITH_SYSTEM_ABSEIL"] = True
93 tc.cache_variables["WITH_SYSTEM_LZ4"] = True
94 tc.cache_variables["WITH_SYSTEM_CITYHASH"] = True
95 tc.generate()
96
97 cd = CMakeDeps(self)
98 cd.generate()
99
100 def build(self):
101 cmake = CMake(self)
102 cmake.configure()
103 cmake.build()
104
105 def package(self):
106 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
107 cmake = CMake(self)
108 cmake.install()
109
110 def package_info(self):
111 self.cpp_info.libs.append("clickhouse-cpp-lib")
112 self.cpp_info.set_property("cmake_target_name", "clickhouse-cpp-lib::clickhouse-cpp-lib")
113
114 if self._requires_compiler_rt:
115 ldflags = ["--rtlib=compiler-rt"]
116 self.cpp_info.exelinkflags = ldflags
117 self.cpp_info.sharedlinkflags = ldflags
118 self.cpp_info.system_libs.append("gcc_s")
119
120 self.cpp_info.filenames["cmake_find_package"] = "clickhouse-cpp"
121 self.cpp_info.filenames["cmake_find_package_multi"] = "clickhouse-cpp"
122 self.cpp_info.names["cmake_find_package"] = "clickhouse-cpp-lib"
123 self.cpp_info.names["cmake_find_package_multi"] = "clickhouse-cpp-lib"
124
125 if self.settings.os == 'Windows':
126 self.cpp_info.system_libs = ['ws2_32', 'wsock32']
127
[end of recipes/clickhouse-cpp/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/clickhouse-cpp/all/conanfile.py b/recipes/clickhouse-cpp/all/conanfile.py
--- a/recipes/clickhouse-cpp/all/conanfile.py
+++ b/recipes/clickhouse-cpp/all/conanfile.py
@@ -75,7 +75,8 @@
del self.options.fPIC
def configure(self):
- self.options.rm_safe("fPIC")
+ if self.options.shared:
+ self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
| {"golden_diff": "diff --git a/recipes/clickhouse-cpp/all/conanfile.py b/recipes/clickhouse-cpp/all/conanfile.py\n--- a/recipes/clickhouse-cpp/all/conanfile.py\n+++ b/recipes/clickhouse-cpp/all/conanfile.py\n@@ -75,7 +75,8 @@\n del self.options.fPIC\n \n def configure(self):\n- self.options.rm_safe(\"fPIC\")\n+ if self.options.shared:\n+ self.options.rm_safe(\"fPIC\")\n \n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n", "issue": "[package] clickhouse-cpp/*: fPIC option is not respected\nIn the recipe file fPIC option is always removed during configure stage, which can lead to not working static library.\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass ClickHouseCppConan(ConanFile):\n name = \"clickhouse-cpp\"\n homepage = \"https://github.com/ClickHouse/clickhouse-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"ClickHouse C++ API\"\n license = \"Apache-2.0\"\n topics = (\"database\", \"db\", \"clickhouse\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_benchmark\": [True, False],\n \"with_openssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_benchmark\": False,\n \"with_openssl\": False\n }\n\n def requirements(self):\n\n self.requires(\"lz4/1.9.4\")\n\n self.requires(\"abseil/20230125.3\", transitive_headers=True)\n\n self.requires(\"cityhash/cci.20130801\")\n if self.options.with_openssl:\n self.requires(\"openssl/[>=1.1 <4]\")\n\n def build_requirements(self):\n if self.options.enable_benchmark:\n self.requires(\"benchmark/1.8.0\")\n\n @property\n def _min_cppstd(self):\n return \"17\"\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n \"gcc\": \"7\",\n \"clang\": \"6\",\n }\n\n @property\n def _requires_compiler_rt(self):\n return self.settings.compiler == \"clang\" and (( self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"] and not self.options.shared) or self.settings.compiler.libcxx == \"libc++\" )\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, self._min_cppstd)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(f\"{self.ref} requires C++17, which your compiler does not support.\")\n if self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"f{self.ref} does not support shared library on Windows.\")\n # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_BENCHMARK\"] = self.options.enable_benchmark\n 
tc.cache_variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.cache_variables[\"WITH_SYSTEM_ABSEIL\"] = True\n tc.cache_variables[\"WITH_SYSTEM_LZ4\"] = True\n tc.cache_variables[\"WITH_SYSTEM_CITYHASH\"] = True\n tc.generate()\n\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs.append(\"clickhouse-cpp-lib\")\n self.cpp_info.set_property(\"cmake_target_name\", \"clickhouse-cpp-lib::clickhouse-cpp-lib\")\n\n if self._requires_compiler_rt:\n ldflags = [\"--rtlib=compiler-rt\"]\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.system_libs.append(\"gcc_s\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"clickhouse-cpp\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"clickhouse-cpp\"\n self.cpp_info.names[\"cmake_find_package\"] = \"clickhouse-cpp-lib\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"clickhouse-cpp-lib\"\n\n if self.settings.os == 'Windows':\n self.cpp_info.system_libs = ['ws2_32', 'wsock32']\n", "path": "recipes/clickhouse-cpp/all/conanfile.py"}]} | 2,002 | 128 |
gh_patches_debug_14691 | rasdani/github-patches | git_diff | google__timesketch-406 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pylint not present in requirements.txt
Not pinning the version of Pylint makes our build a bit non-deterministic. Pylint's behavior can change between versions and break our build.
</issue>
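As a sketch of one way to keep the build deterministic: pin tools in requirements.txt and have setup.py read that file, so a single pinned list drives both pip and setuptools. This is only an illustration under assumed file and helper names; the repository's actual change (shown in the diff below) uses pip's requirement parser instead.

```python
from pathlib import Path

from setuptools import setup


def read_requirements(path="requirements.txt"):
    """Return the non-comment, non-blank lines of a pinned requirements file."""
    lines = Path(path).read_text(encoding="utf-8").splitlines()
    return [line.strip() for line in lines if line.strip() and not line.startswith("#")]


setup(
    name="example",
    version="0.0.1",
    install_requires=read_requirements(),
)
```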
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright 2015 Google Inc. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """This is the setup file for the project. The standard setup rules apply:
16
17 python setup.py build
18 sudo python setup.py install
19 """
20
21 import os.path
22 import sys
23 import time
24
25 from setuptools import find_packages
26 from setuptools import setup
27
28 timesketch_version = u'20170721'
29
30 timesketch_description = (
31 u'Timesketch is a web based tool for collaborative forensic timeline '
32 u'analysis. Using sketches you and your collaborators can easily organize '
33 u'timelines and analyze them all at the same time. Add meaning to '
34 u'your raw data with rich annotations, comments, tags and stars.')
35
36 def check_before_upload():
37 """Warn user if frontend build is not present or is not recent.
38
39 Make sure that .js and .css bundles included in the PyPI package are up to
40 date.
41
42 Raises:
43 UserWarning
44 """
45 this_dir = os.path.dirname(__file__)
46 frontend_dist_dir = os.path.join(
47 this_dir, 'timesketch', 'ui', 'static', 'dist',
48 )
49 js = os.path.join(frontend_dist_dir, 'bundle.js')
50 css = os.path.join(frontend_dist_dir, 'bundle.css')
51 if not (os.path.isfile(js) and os.path.isfile(css)):
52 raise UserWarning(
53 "Build the frontend before uploading to PyPI!"
54 + " (see docs/Developers-Guide.md)"
55 )
56 mtime = min(os.path.getmtime(js), os.path.getmtime(css))
57 if time.time() - mtime > 180:
58 raise UserWarning(
59 "Frontend build is older than 3 minutes, please rebuild!"
60 + " (see docs/Developers-Guide.md)"
61 )
62
63 if 'upload' in sys.argv:
64 check_before_upload()
65
66 setup(
67 name=u'timesketch',
68 version=timesketch_version,
69 description=u'Digital forensic timeline analysis',
70 long_description=timesketch_description,
71 license=u'Apache License, Version 2.0',
72 url=u'http://www.timesketch.org/',
73 maintainer=u'Timesketch development team',
74 maintainer_email=u'[email protected]',
75 classifiers=[
76 u'Development Status :: 4 - Beta',
77 u'Environment :: Web Environment',
78 u'Operating System :: OS Independent',
79 u'Programming Language :: Python',
80 ],
81 data_files=[(u'share/timesketch', [u'timesketch.conf'])],
82 packages=find_packages(),
83 include_package_data=True,
84 zip_safe=False,
85 scripts=[u'tsctl'],
86 install_requires=frozenset([
87 u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',
88 u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',
89 u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',
90 u'neo4jrestclient', u'python-dateutil'
91 ]))
92
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,8 @@
from setuptools import find_packages
from setuptools import setup
+from pip.req import parse_requirements
+from pip.download import PipSession
timesketch_version = u'20170721'
@@ -83,9 +85,7 @@
include_package_data=True,
zip_safe=False,
scripts=[u'tsctl'],
- install_requires=frozenset([
- u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',
- u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',
- u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',
- u'neo4jrestclient', u'python-dateutil'
- ]))
+ install_requires=[str(req.req) for req in parse_requirements(
+ "requirements.txt", session=PipSession(),
+ )],
+)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,6 +24,8 @@\n \n from setuptools import find_packages\n from setuptools import setup\n+from pip.req import parse_requirements\n+from pip.download import PipSession\n \n timesketch_version = u'20170721'\n \n@@ -83,9 +85,7 @@\n include_package_data=True,\n zip_safe=False,\n scripts=[u'tsctl'],\n- install_requires=frozenset([\n- u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',\n- u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',\n- u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',\n- u'neo4jrestclient', u'python-dateutil'\n- ]))\n+ install_requires=[str(req.req) for req in parse_requirements(\n+ \"requirements.txt\", session=PipSession(),\n+ )],\n+)\n", "issue": "Pylint not present in requirements.txt\nNot pinning version of Pylint makes our build a bit non-deterministic. Pylint's behavior can change between versions and break our build.\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project. The standard setup rules apply:\n\n python setup.py build\n sudo python setup.py install\n\"\"\"\n\nimport os.path\nimport sys\nimport time\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\ntimesketch_version = u'20170721'\n\ntimesketch_description = (\n u'Timesketch is a web based tool for collaborative forensic timeline '\n u'analysis. Using sketches you and your collaborators can easily organize '\n u'timelines and analyze them all at the same time. 
Add meaning to '\n u'your raw data with rich annotations, comments, tags and stars.')\n\ndef check_before_upload():\n \"\"\"Warn user if frontend build is not present or is not recent.\n\n Make sure that .js and .css bundles included in the PyPI package are up to\n date.\n\n Raises:\n UserWarning\n \"\"\"\n this_dir = os.path.dirname(__file__)\n frontend_dist_dir = os.path.join(\n this_dir, 'timesketch', 'ui', 'static', 'dist',\n )\n js = os.path.join(frontend_dist_dir, 'bundle.js')\n css = os.path.join(frontend_dist_dir, 'bundle.css')\n if not (os.path.isfile(js) and os.path.isfile(css)):\n raise UserWarning(\n \"Build the frontend before uploading to PyPI!\"\n + \" (see docs/Developers-Guide.md)\"\n )\n mtime = min(os.path.getmtime(js), os.path.getmtime(css))\n if time.time() - mtime > 180:\n raise UserWarning(\n \"Frontend build is older than 3 minutes, please rebuild!\"\n + \" (see docs/Developers-Guide.md)\"\n )\n\nif 'upload' in sys.argv:\n check_before_upload()\n\nsetup(\n name=u'timesketch',\n version=timesketch_version,\n description=u'Digital forensic timeline analysis',\n long_description=timesketch_description,\n license=u'Apache License, Version 2.0',\n url=u'http://www.timesketch.org/',\n maintainer=u'Timesketch development team',\n maintainer_email=u'[email protected]',\n classifiers=[\n u'Development Status :: 4 - Beta',\n u'Environment :: Web Environment',\n u'Operating System :: OS Independent',\n u'Programming Language :: Python',\n ],\n data_files=[(u'share/timesketch', [u'timesketch.conf'])],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n scripts=[u'tsctl'],\n install_requires=frozenset([\n u'Flask', u'Flask-Login', u'Flask-script', u'Flask-SQLAlchemy',\n u'Flask-Bcrypt', u'Flask-RESTful', u'Flask-WTF', u'Flask-Migrate',\n u'SQLAlchemy', u'celery', u'redis', u'blinker', u'elasticsearch',\n u'neo4jrestclient', u'python-dateutil'\n ]))\n", "path": "setup.py"}]} | 1,565 | 255 |
gh_patches_debug_18530 | rasdani/github-patches | git_diff | pulp__pulpcore-5377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Task cleanup must not delete content nor artifacts
Deleting content or artifacts outside of orphan cleanup is breaking the rules.
And no, we cannot get away with that.
</issue>
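A hedged sketch of the guard this rule implies: when walking a canceled task's created resources, skip anything that is Content or an Artifact so only genuinely incomplete, non-content resources are ever deleted. Model names follow pulpcore's own app; treat this as an outline of the idea rather than the final patch, which appears below.

```python
from pulpcore.app.models import Artifact, Content


def _incomplete_non_content_resources(task):
    """Yield created resources of a canceled task that are safe to delete."""
    for model in (r.content_object for r in task.created_resources.all()):
        # Content and Artifacts may only ever be removed by orphan cleanup.
        if isinstance(model, (Artifact, Content)):
            continue
        # Resources without a `complete` flag, or already complete ones, are kept.
        if getattr(model, "complete", True):
            continue
        yield model
```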
<code>
[start of pulpcore/tasking/util.py]
1 import logging
2 from gettext import gettext as _
3
4 from django.db import transaction
5 from django.db import connection
6
7 from pulpcore.app.models import Task
8 from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
9
10 _logger = logging.getLogger(__name__)
11
12
13 def cancel(task_id):
14 """
15 Cancel the task that is represented by the given task_id.
16
17 This method cancels only the task with given task_id, not the spawned tasks. This also updates
18 task's state to either 'canceled' or 'canceling'.
19
20 Args:
21 task_id (str): The ID of the task you wish to cancel
22
23 Raises:
24 rest_framework.exceptions.NotFound: If a task with given task_id does not exist
25 """
26 task_status = Task.objects.get(pk=task_id)
27
28 if task_status.state in TASK_FINAL_STATES:
29 # If the task is already done, just stop
30 _logger.debug(
31 "Task [{task_id}] already in a final state: {state}".format(
32 task_id=task_id, state=task_status.state
33 )
34 )
35 return task_status
36
37 _logger.info(_("Canceling task: {id}").format(id=task_id))
38
39 task = task_status
40 # This is the only valid transition without holding the task lock
41 rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(
42 state=TASK_STATES.CANCELING
43 )
44 # Notify the worker that might be running that task and other workers to clean up
45 with connection.cursor() as cursor:
46 cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk),))
47 cursor.execute("NOTIFY pulp_worker_wakeup")
48 if rows == 1:
49 task.refresh_from_db()
50 return task
51
52
53 def _delete_incomplete_resources(task):
54 """
55 Delete all incomplete created-resources on a canceled task.
56
57 Args:
58 task (Task): A task.
59 """
60 if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:
61 raise RuntimeError(_("Task must be canceled."))
62 for model in (r.content_object for r in task.created_resources.all()):
63 try:
64 if model.complete:
65 continue
66 except AttributeError:
67 continue
68 try:
69 with transaction.atomic():
70 model.delete()
71 except Exception as error:
72 _logger.error(_("Delete created resource, failed: {}").format(str(error)))
73
[end of pulpcore/tasking/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/tasking/util.py b/pulpcore/tasking/util.py
--- a/pulpcore/tasking/util.py
+++ b/pulpcore/tasking/util.py
@@ -4,7 +4,7 @@
from django.db import transaction
from django.db import connection
-from pulpcore.app.models import Task
+from pulpcore.app.models import Artifact, Content, Task
from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
_logger = logging.getLogger(__name__)
@@ -60,6 +60,8 @@
if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:
raise RuntimeError(_("Task must be canceled."))
for model in (r.content_object for r in task.created_resources.all()):
+ if isinstance(model, (Artifact, Content)):
+ continue
try:
if model.complete:
continue
| {"golden_diff": "diff --git a/pulpcore/tasking/util.py b/pulpcore/tasking/util.py\n--- a/pulpcore/tasking/util.py\n+++ b/pulpcore/tasking/util.py\n@@ -4,7 +4,7 @@\n from django.db import transaction\n from django.db import connection\n \n-from pulpcore.app.models import Task\n+from pulpcore.app.models import Artifact, Content, Task\n from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n \n _logger = logging.getLogger(__name__)\n@@ -60,6 +60,8 @@\n if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:\n raise RuntimeError(_(\"Task must be canceled.\"))\n for model in (r.content_object for r in task.created_resources.all()):\n+ if isinstance(model, (Artifact, Content)):\n+ continue\n try:\n if model.complete:\n continue\n", "issue": "Task cleanup must not delete content nor artifacts\nDeleting content or artifacts outside of orphan cleanup is breaking the rules.\r\nAnd no, we cannot get away with that.\r\n\n", "before_files": [{"content": "import logging\nfrom gettext import gettext as _\n\nfrom django.db import transaction\nfrom django.db import connection\n\nfrom pulpcore.app.models import Task\nfrom pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n\n_logger = logging.getLogger(__name__)\n\n\ndef cancel(task_id):\n \"\"\"\n Cancel the task that is represented by the given task_id.\n\n This method cancels only the task with given task_id, not the spawned tasks. This also updates\n task's state to either 'canceled' or 'canceling'.\n\n Args:\n task_id (str): The ID of the task you wish to cancel\n\n Raises:\n rest_framework.exceptions.NotFound: If a task with given task_id does not exist\n \"\"\"\n task_status = Task.objects.get(pk=task_id)\n\n if task_status.state in TASK_FINAL_STATES:\n # If the task is already done, just stop\n _logger.debug(\n \"Task [{task_id}] already in a final state: {state}\".format(\n task_id=task_id, state=task_status.state\n )\n )\n return task_status\n\n _logger.info(_(\"Canceling task: {id}\").format(id=task_id))\n\n task = task_status\n # This is the only valid transition without holding the task lock\n rows = Task.objects.filter(pk=task.pk, state__in=TASK_INCOMPLETE_STATES).update(\n state=TASK_STATES.CANCELING\n )\n # Notify the worker that might be running that task and other workers to clean up\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT pg_notify('pulp_worker_cancel', %s)\", (str(task.pk),))\n cursor.execute(\"NOTIFY pulp_worker_wakeup\")\n if rows == 1:\n task.refresh_from_db()\n return task\n\n\ndef _delete_incomplete_resources(task):\n \"\"\"\n Delete all incomplete created-resources on a canceled task.\n\n Args:\n task (Task): A task.\n \"\"\"\n if task.state not in [TASK_STATES.CANCELED, TASK_STATES.CANCELING]:\n raise RuntimeError(_(\"Task must be canceled.\"))\n for model in (r.content_object for r in task.created_resources.all()):\n try:\n if model.complete:\n continue\n except AttributeError:\n continue\n try:\n with transaction.atomic():\n model.delete()\n except Exception as error:\n _logger.error(_(\"Delete created resource, failed: {}\").format(str(error)))\n", "path": "pulpcore/tasking/util.py"}]} | 1,233 | 191 |
gh_patches_debug_11633 | rasdani/github-patches | git_diff | pypi__warehouse-1181 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors in celery don't get sent to Sentry
</issue>
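Since the issue statement is terse, here is a hedged sketch of the usual way to route Celery task errors to Sentry with the raven client: register Raven's Celery signal handlers when the worker initialises. The DSN and client construction are placeholders; the project's actual fix (in the diff below) reuses the client already stored in the Pyramid registry.

```python
from celery.signals import celeryd_init
from raven import Client
from raven.contrib.celery import register_logger_signal, register_signal


@celeryd_init.connect
def _configure_sentry(*args, **kwargs):
    client = Client(dsn="https://key@sentry.example/1")  # placeholder DSN
    # Forward ERROR-level log records emitted inside tasks to Sentry.
    register_logger_signal(client)
    # Forward unhandled task exceptions (task_failure signal) to Sentry.
    register_signal(client)
```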
<code>
[start of warehouse/celery.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import celery.backends
14
15 # We need to trick Celery into supporting rediss:// URLs which is how redis-py
16 # signals that you should use Redis with TLS.
17 celery.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
18
19 from celery import Celery, Task
20 from celery.backends.redis import RedisBackend as _RedisBackend
21 from celery.signals import celeryd_init
22 from pyramid import scripting
23 from pyramid.threadlocal import get_current_request
24
25 from warehouse.config import Environment, configure
26
27
28 @celeryd_init.connect
29 def _configure_celery(*args, **kwargs):
30 configure()
31
32
33 class TLSRedisBackend(_RedisBackend):
34
35 def _params_from_url(self, url, defaults):
36 params = super()._params_from_url(url, defaults)
37 params.update({"connection_class": self.redis.SSLConnection})
38 return params
39
40
41 class WarehouseTask(Task):
42
43 abstract = True
44
45 def __call__(self, *args, **kwargs):
46 registry = self.app.pyramid_config.registry
47 pyramid_env = scripting.prepare(registry=registry)
48
49 try:
50 return super().__call__(pyramid_env["request"], *args, **kwargs)
51 finally:
52 pyramid_env["closer"]()
53
54 def apply_async(self, *args, **kwargs):
55 # The API design of Celery makes this threadlocal pretty impossible to
56 # avoid :(
57 request = get_current_request()
58
59 # If for whatever reason we were unable to get a request we'll just
60 # skip this and call the original method to send this immediately.
61 if request is None or not hasattr(request, "tm"):
62 return super().apply_async(*args, **kwargs)
63
64 # This will break things that expect to get an AsyncResult because
65 # we're no longer going to be returning an async result from this when
66 # called from within a request, response cycle. Ideally we shouldn't be
67 # waiting for responses in a request/response cycle anyways though.
68 request.tm.get().addAfterCommitHook(
69 self._after_commit_hook,
70 args=args,
71 kws=kwargs,
72 )
73
74 def _after_commit_hook(self, success, *args, **kwargs):
75 if success:
76 super().apply_async(*args, **kwargs)
77
78
79 app = Celery("warehouse")
80 app.Task = WarehouseTask
81
82
83 task = app.task
84
85
86 def includeme(config):
87 s = config.registry.settings
88 app.pyramid_config = config
89 app.conf.update(
90 BROKER_URL=s["celery.broker_url"],
91 BROKER_USE_SSL=s["warehouse.env"] == Environment.production,
92 CELERY_DISABLE_RATE_LIMITS=True,
93 CELERY_RESULT_BACKEND=s["celery.result_url"],
94 CELERY_RESULT_SERIALIZER="json",
95 CELERY_TASK_SERIALIZER="json",
96 CELERY_ACCEPT_CONTENT=["json", "msgpack"],
97 CELERY_MESSAGE_COMPRESSION="gzip",
98 CELERY_QUEUE_HA_POLICY="all",
99 )
100
[end of warehouse/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/celery.py b/warehouse/celery.py
--- a/warehouse/celery.py
+++ b/warehouse/celery.py
@@ -21,13 +21,16 @@
from celery.signals import celeryd_init
from pyramid import scripting
from pyramid.threadlocal import get_current_request
+from raven.contrib.celery import register_signal, register_logger_signal
from warehouse.config import Environment, configure
@celeryd_init.connect
def _configure_celery(*args, **kwargs):
- configure()
+ config = configure()
+ register_logger_signal(config.registry["raven.client"])
+ register_signal(config.registry["raven.client"])
class TLSRedisBackend(_RedisBackend):
| {"golden_diff": "diff --git a/warehouse/celery.py b/warehouse/celery.py\n--- a/warehouse/celery.py\n+++ b/warehouse/celery.py\n@@ -21,13 +21,16 @@\n from celery.signals import celeryd_init\n from pyramid import scripting\n from pyramid.threadlocal import get_current_request\n+from raven.contrib.celery import register_signal, register_logger_signal\n \n from warehouse.config import Environment, configure\n \n \n @celeryd_init.connect\n def _configure_celery(*args, **kwargs):\n- configure()\n+ config = configure()\n+ register_logger_signal(config.registry[\"raven.client\"])\n+ register_signal(config.registry[\"raven.client\"])\n \n \n class TLSRedisBackend(_RedisBackend):\n", "issue": "Errors in celery don't get sent to Sentry\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport celery.backends\n\n# We need to trick Celery into supporting rediss:// URLs which is how redis-py\n# signals that you should use Redis with TLS.\ncelery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n\nfrom celery import Celery, Task\nfrom celery.backends.redis import RedisBackend as _RedisBackend\nfrom celery.signals import celeryd_init\nfrom pyramid import scripting\nfrom pyramid.threadlocal import get_current_request\n\nfrom warehouse.config import Environment, configure\n\n\n@celeryd_init.connect\ndef _configure_celery(*args, **kwargs):\n configure()\n\n\nclass TLSRedisBackend(_RedisBackend):\n\n def _params_from_url(self, url, defaults):\n params = super()._params_from_url(url, defaults)\n params.update({\"connection_class\": self.redis.SSLConnection})\n return params\n\n\nclass WarehouseTask(Task):\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n registry = self.app.pyramid_config.registry\n pyramid_env = scripting.prepare(registry=registry)\n\n try:\n return super().__call__(pyramid_env[\"request\"], *args, **kwargs)\n finally:\n pyramid_env[\"closer\"]()\n\n def apply_async(self, *args, **kwargs):\n # The API design of Celery makes this threadlocal pretty impossible to\n # avoid :(\n request = get_current_request()\n\n # If for whatever reason we were unable to get a request we'll just\n # skip this and call the original method to send this immediately.\n if request is None or not hasattr(request, \"tm\"):\n return super().apply_async(*args, **kwargs)\n\n # This will break things that expect to get an AsyncResult because\n # we're no longer going to be returning an async result from this when\n # called from within a request, response cycle. 
Ideally we shouldn't be\n # waiting for responses in a request/response cycle anyways though.\n request.tm.get().addAfterCommitHook(\n self._after_commit_hook,\n args=args,\n kws=kwargs,\n )\n\n def _after_commit_hook(self, success, *args, **kwargs):\n if success:\n super().apply_async(*args, **kwargs)\n\n\napp = Celery(\"warehouse\")\napp.Task = WarehouseTask\n\n\ntask = app.task\n\n\ndef includeme(config):\n s = config.registry.settings\n app.pyramid_config = config\n app.conf.update(\n BROKER_URL=s[\"celery.broker_url\"],\n BROKER_USE_SSL=s[\"warehouse.env\"] == Environment.production,\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_RESULT_BACKEND=s[\"celery.result_url\"],\n CELERY_RESULT_SERIALIZER=\"json\",\n CELERY_TASK_SERIALIZER=\"json\",\n CELERY_ACCEPT_CONTENT=[\"json\", \"msgpack\"],\n CELERY_MESSAGE_COMPRESSION=\"gzip\",\n CELERY_QUEUE_HA_POLICY=\"all\",\n )\n", "path": "warehouse/celery.py"}]} | 1,499 | 159 |
gh_patches_debug_29413 | rasdani/github-patches | git_diff | freedomofpress__securedrop-7045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
determine post-upgrade failure-mode for a SHA-1-signed submission key
## Description
After #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.
After #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.
What will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?
## Possible approaches
| Option | Documentation changes | Code changes | Implication |
| --- | --- | --- | --- |
| Fail open, but log | optional | ✓ | Admin must monitor logs and/or OSSEC alerts. |
| Fail open, but document | ✓ | ✗ | Admin must monitor release notes or check documentation. |
| Fail closed | optional | ✓[1] | Admin can contact us for help. |
**Notes:**
1. @legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.
</issue>
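To make the "fail closed" option concrete, here is a rough sketch of a startup check that refuses to run if the configured submission key cannot be read or fails validation. The function names and the validation call are assumptions for illustration; the shape matches the startup validation added in the diff further below.

```python
import sys


def validate_submission_key(encryption_mgr, logger) -> None:
    """Exit at startup if the configured submission (journalist) key is unusable."""
    try:
        public_key = encryption_mgr.get_journalist_public_key()
    except Exception as error:
        logger.error("Unable to read journalist public key: %s", error)
        sys.exit(1)
    try:
        # Assumed validator: rejects keys redwood refuses to encrypt to,
        # e.g. keys carrying SHA-1 self-signatures.
        import redwood
        redwood.is_valid_public_key(public_key)
    except Exception as error:
        logger.error("Journalist public key is not valid: %s", error)
        sys.exit(1)
```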
<code>
[start of securedrop/journalist.py]
1 from encryption import EncryptionManager, GpgKeyNotFoundError
2 from execution import asynchronous
3 from journalist_app import create_app
4 from models import Source
5 from sdconfig import SecureDropConfig
6
7 config = SecureDropConfig.get_current()
8 # app is imported by journalist.wsgi
9 app = create_app(config)
10
11
12 @asynchronous
13 def prime_keycache() -> None:
14 """Pre-load the source public keys into Redis."""
15 with app.app_context():
16 encryption_mgr = EncryptionManager.get_default()
17 for source in Source.query.filter_by(pending=False, deleted_at=None).all():
18 try:
19 encryption_mgr.get_source_public_key(source.filesystem_id)
20 except GpgKeyNotFoundError:
21 pass
22
23
24 prime_keycache()
25
26
27 if __name__ == "__main__": # pragma: no cover
28 debug = getattr(config, "env", "prod") != "prod"
29 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
30 app.run(debug=debug, host="0.0.0.0", port=8081)
31
[end of securedrop/journalist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -1,9 +1,13 @@
+import sys
+
from encryption import EncryptionManager, GpgKeyNotFoundError
from execution import asynchronous
from journalist_app import create_app
from models import Source
from sdconfig import SecureDropConfig
+import redwood
+
config = SecureDropConfig.get_current()
# app is imported by journalist.wsgi
app = create_app(config)
@@ -21,10 +25,28 @@
pass
-prime_keycache()
+def validate_journalist_key() -> None:
+ """Verify the journalist PGP key is valid"""
+ encryption_mgr = EncryptionManager.get_default()
+ # First check that we can read it
+ try:
+ journalist_key = encryption_mgr.get_journalist_public_key()
+ except Exception as e:
+ print(f"ERROR: Unable to read journalist public key: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Unable to read journalist public key: {e}")
+ sys.exit(1)
+ # And then what we read is valid
+ try:
+ redwood.is_valid_public_key(journalist_key)
+ except redwood.RedwoodError as e:
+ print(f"ERROR: Journalist public key is not valid: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Journalist public key is not valid: {e}")
+ sys.exit(1)
if __name__ == "__main__": # pragma: no cover
+ validate_journalist_key()
+ prime_keycache()
debug = getattr(config, "env", "prod") != "prod"
# nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
app.run(debug=debug, host="0.0.0.0", port=8081)
| {"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -1,9 +1,13 @@\n+import sys\n+\n from encryption import EncryptionManager, GpgKeyNotFoundError\n from execution import asynchronous\n from journalist_app import create_app\n from models import Source\n from sdconfig import SecureDropConfig\n \n+import redwood\n+\n config = SecureDropConfig.get_current()\n # app is imported by journalist.wsgi\n app = create_app(config)\n@@ -21,10 +25,28 @@\n pass\n \n \n-prime_keycache()\n+def validate_journalist_key() -> None:\n+ \"\"\"Verify the journalist PGP key is valid\"\"\"\n+ encryption_mgr = EncryptionManager.get_default()\n+ # First check that we can read it\n+ try:\n+ journalist_key = encryption_mgr.get_journalist_public_key()\n+ except Exception as e:\n+ print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n+ sys.exit(1)\n+ # And then what we read is valid\n+ try:\n+ redwood.is_valid_public_key(journalist_key)\n+ except redwood.RedwoodError as e:\n+ print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\": # pragma: no cover\n+ validate_journalist_key()\n+ prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "issue": "determine post-upgrade failure-mode for a SHA-1-signed submission key\n## Description\r\n\r\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\r\n\r\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\r\n\r\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\r\n\r\n## Possible approaches\r\n\r\n| Option | Documentation changes | Code changes | Implication |\r\n| --- | --- | --- | --- |\r\n| Fail open, but log | optional | \u2713 | Admin must monitor logs and/or OSSEC alerts. |\r\n| Fail open, but document | \u2713 | \u2717 | Admin must monitor release notes or check documentation. |\r\n| Fail closed | optional | \u2713[1] | Admin can contact us for help. |\r\n\r\n**Notes:**\r\n1. 
@legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\n", "before_files": [{"content": "from encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\nprime_keycache()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}]} | 1,075 | 441 |
gh_patches_debug_21071 | rasdani/github-patches | git_diff | netbox-community__netbox-14608 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Datasources stuck in sync when using git + ssh from ./manage.py syncdatasource
### NetBox version
v3.6.1
### Python version
3.11
### Steps to Reproduce
In Data Sources
Add
Name: test
Type: git
URL: [email protected]:netbox-community/netbox.git
Create
docker compose exec netbox ./manage.py syncdatasource test
### Expected Behavior
This usually leads to some sort of SSH prompt or failure, and I would expect the exception to set the status to failed so that sync can be attempted again.
I'm not sure exactly how NetBox works, but looking at one of the exceptions...
core.exceptions.SyncError: Fetching remote data failed (HangupException):
class SyncError(Exception):
pass
Does this mean the status is never reset correctly, leaving it stuck as 'syncing'?
### Observed Behavior
datasource.status = syncing in nbshell
'syncing' in gui
The Sync option is now greyed out, and the status cannot be reset without manually setting it in nbshell:
for d in DataSource.objects.filter(status='syncing'):
d.status = 'failed'
d.save()
</issue>
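A hedged sketch of the failure handling the report suggests: wrap the sync call so that any exception marks the data source as failed, rather than leaving it stranded in 'syncing', before the error is re-raised. Model and choice names follow NetBox's core app; treat this as an outline of the diff shown later, not the exact patch.

```python
from core.choices import DataSourceStatusChoices
from core.models import DataSource


def sync_datasource(datasource):
    """Run a sync and never leave the row stranded in the 'syncing' state."""
    try:
        datasource.sync()
    except Exception:
        # Persist the failed state so the UI's Sync button is re-enabled.
        DataSource.objects.filter(pk=datasource.pk).update(
            status=DataSourceStatusChoices.FAILED
        )
        raise
```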
<code>
[start of netbox/core/management/commands/syncdatasource.py]
1 from django.core.management.base import BaseCommand, CommandError
2
3 from core.models import DataSource
4
5
6 class Command(BaseCommand):
7 help = "Synchronize a data source from its remote upstream"
8
9 def add_arguments(self, parser):
10 parser.add_argument('name', nargs='*', help="Data source(s) to synchronize")
11 parser.add_argument(
12 "--all", action='store_true', dest='sync_all',
13 help="Synchronize all data sources"
14 )
15
16 def handle(self, *args, **options):
17
18 # Find DataSources to sync
19 if options['sync_all']:
20 datasources = DataSource.objects.all()
21 elif options['name']:
22 datasources = DataSource.objects.filter(name__in=options['name'])
23 # Check for invalid names
24 found_names = {ds['name'] for ds in datasources.values('name')}
25 if invalid_names := set(options['name']) - found_names:
26 raise CommandError(f"Invalid data source names: {', '.join(invalid_names)}")
27 else:
28 raise CommandError(f"Must specify at least one data source, or set --all.")
29
30 if len(options['name']) > 1:
31 self.stdout.write(f"Syncing {len(datasources)} data sources.")
32
33 for i, datasource in enumerate(datasources, start=1):
34 self.stdout.write(f"[{i}] Syncing {datasource}... ", ending='')
35 self.stdout.flush()
36 datasource.sync()
37 self.stdout.write(datasource.get_status_display())
38 self.stdout.flush()
39
40 if len(options['name']) > 1:
41 self.stdout.write(f"Finished.")
42
[end of netbox/core/management/commands/syncdatasource.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/core/management/commands/syncdatasource.py b/netbox/core/management/commands/syncdatasource.py
--- a/netbox/core/management/commands/syncdatasource.py
+++ b/netbox/core/management/commands/syncdatasource.py
@@ -1,5 +1,6 @@
from django.core.management.base import BaseCommand, CommandError
+from core.choices import DataSourceStatusChoices
from core.models import DataSource
@@ -33,9 +34,13 @@
for i, datasource in enumerate(datasources, start=1):
self.stdout.write(f"[{i}] Syncing {datasource}... ", ending='')
self.stdout.flush()
- datasource.sync()
- self.stdout.write(datasource.get_status_display())
- self.stdout.flush()
+ try:
+ datasource.sync()
+ self.stdout.write(datasource.get_status_display())
+ self.stdout.flush()
+ except Exception as e:
+ DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED)
+ raise e
if len(options['name']) > 1:
self.stdout.write(f"Finished.")
| {"golden_diff": "diff --git a/netbox/core/management/commands/syncdatasource.py b/netbox/core/management/commands/syncdatasource.py\n--- a/netbox/core/management/commands/syncdatasource.py\n+++ b/netbox/core/management/commands/syncdatasource.py\n@@ -1,5 +1,6 @@\n from django.core.management.base import BaseCommand, CommandError\n \n+from core.choices import DataSourceStatusChoices\n from core.models import DataSource\n \n \n@@ -33,9 +34,13 @@\n for i, datasource in enumerate(datasources, start=1):\n self.stdout.write(f\"[{i}] Syncing {datasource}... \", ending='')\n self.stdout.flush()\n- datasource.sync()\n- self.stdout.write(datasource.get_status_display())\n- self.stdout.flush()\n+ try:\n+ datasource.sync()\n+ self.stdout.write(datasource.get_status_display())\n+ self.stdout.flush()\n+ except Exception as e:\n+ DataSource.objects.filter(pk=datasource.pk).update(status=DataSourceStatusChoices.FAILED)\n+ raise e\n \n if len(options['name']) > 1:\n self.stdout.write(f\"Finished.\")\n", "issue": "Datasources stuck in sync when using git + ssh from ./manage.py syncdatasource\n### NetBox version\n\nv3.6.1\n\n### Python version\n\n3.11\n\n### Steps to Reproduce\n\nIn Data Sources\r\nAdd\r\nName: test\r\nType: git\r\nURL: [email protected]:netbox-community/netbox.git\r\nCreate\r\n\r\ndocker compose exec netbox ./manage.py syncdatasource test\r\n\r\n\r\n\r\n\n\n### Expected Behavior\n\nUsually leads to some sort of ssh question or failure, and I would expect the exception to set the status to failed, and then be able to hit sync again.\r\n\r\nI'm not sure exactly how NetBox works, but looking at one of the exceptions...\r\ncore.exceptions.SyncError: Fetching remote data failed (HangupException): \r\n\r\nclass SyncError(Exception):\r\n pass\r\n\r\nDoes this mean the status is not being reset correctly due to the status being left as syncing?\r\n\r\n\n\n### Observed Behavior\n\ndatasource.status = syncing in nbshell\r\n'syncing' in gui\r\nSync option is now greyed out and cannot reset status without manually setting it in nbshell:\r\n\r\nfor d in DataSource.objects.filter(status='syncing'):\r\n d.status = 'failed'\r\n d.save()\r\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand, CommandError\n\nfrom core.models import DataSource\n\n\nclass Command(BaseCommand):\n help = \"Synchronize a data source from its remote upstream\"\n\n def add_arguments(self, parser):\n parser.add_argument('name', nargs='*', help=\"Data source(s) to synchronize\")\n parser.add_argument(\n \"--all\", action='store_true', dest='sync_all',\n help=\"Synchronize all data sources\"\n )\n\n def handle(self, *args, **options):\n\n # Find DataSources to sync\n if options['sync_all']:\n datasources = DataSource.objects.all()\n elif options['name']:\n datasources = DataSource.objects.filter(name__in=options['name'])\n # Check for invalid names\n found_names = {ds['name'] for ds in datasources.values('name')}\n if invalid_names := set(options['name']) - found_names:\n raise CommandError(f\"Invalid data source names: {', '.join(invalid_names)}\")\n else:\n raise CommandError(f\"Must specify at least one data source, or set --all.\")\n\n if len(options['name']) > 1:\n self.stdout.write(f\"Syncing {len(datasources)} data sources.\")\n\n for i, datasource in enumerate(datasources, start=1):\n self.stdout.write(f\"[{i}] Syncing {datasource}... 
\", ending='')\n self.stdout.flush()\n datasource.sync()\n self.stdout.write(datasource.get_status_display())\n self.stdout.flush()\n\n if len(options['name']) > 1:\n self.stdout.write(f\"Finished.\")\n", "path": "netbox/core/management/commands/syncdatasource.py"}]} | 1,220 | 250 |
gh_patches_debug_27449 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Function to handle deleting schemas
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users might want to delete schemas. We don't currently support this.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
A function that handles deleting of schemas in the database. We should raise an error if there is anything outside of the schema referencing the schema.
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
This should be in the `db` module.
</issue>
<code>
[start of db/schemas.py]
1 import logging
2 import warnings
3 from sqlalchemy.schema import CreateSchema
4 from sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table
5
6 from db import types
7
8 logger = logging.getLogger(__name__)
9
10 TYPES_SCHEMA = types.base.SCHEMA
11
12 EXCLUDED_SCHEMATA = [TYPES_SCHEMA, "information_schema"]
13
14
15 def get_schema_name_from_oid(oid, engine):
16 return reflect_schema(engine, oid=oid)["name"]
17
18
19 def get_schema_oid_from_name(name, engine):
20 return reflect_schema(engine, name=name)["oid"]
21
22
23 def reflect_schema(engine, name=None, oid=None):
24 # If we have both arguments, the behavior is undefined.
25 try:
26 assert name is None or oid is None
27 except AssertionError as e:
28 logger.error("ERROR: Only one of 'name' or 'oid' can be given!")
29 raise e
30 metadata = MetaData()
31 with warnings.catch_warnings():
32 warnings.filterwarnings("ignore", message="Did not recognize type")
33 pg_namespace = Table("pg_namespace", metadata, autoload_with=engine)
34 sel = (
35 select(pg_namespace.c.oid, pg_namespace.c.nspname.label("name"))
36 .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))
37 )
38 with engine.begin() as conn:
39 schema_info = conn.execute(sel).fetchone()
40 return schema_info
41
42
43 def get_mathesar_schemas(engine):
44 return [schema for schema, _ in get_mathesar_schemas_with_oids(engine)]
45
46
47 def get_mathesar_schemas_with_oids(engine):
48 metadata = MetaData()
49 with warnings.catch_warnings():
50 warnings.filterwarnings("ignore", message="Did not recognize type")
51 pg_namespace = Table("pg_namespace", metadata, autoload_with=engine)
52 sel = (
53 select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)
54 .where(
55 and_(
56 *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],
57 not_(pg_namespace.c.nspname.like("pg_%"))
58 )
59 )
60 )
61 with engine.begin() as conn:
62 result = conn.execute(sel).fetchall()
63 return result
64
65
66 def get_all_schemas(engine):
67 inspector = inspect(engine)
68 # We don't need to exclude system schemas (i.e., starting with "pg_")
69 # since Inspector.get_schema_names already excludes them. Thus, this
70 # function actually gets all non-pg-reserved schemas.
71 return inspector.get_schema_names()
72
73
74 def create_schema(schema, engine):
75 """
76 This method creates a Postgres schema.
77 """
78 if schema not in get_all_schemas(engine):
79 with engine.begin() as connection:
80 connection.execute(CreateSchema(schema))
81
[end of db/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/schemas.py b/db/schemas.py
--- a/db/schemas.py
+++ b/db/schemas.py
@@ -1,7 +1,11 @@
import logging
import warnings
-from sqlalchemy.schema import CreateSchema
+from sqlalchemy.schema import CreateSchema, DropSchema
from sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table
+from sqlalchemy.exc import InternalError
+from sqlalchemy.schema import DDLElement
+from sqlalchemy.ext import compiler
+from psycopg2.errors import DependentObjectsStillExist
from db import types
@@ -78,3 +82,42 @@
if schema not in get_all_schemas(engine):
with engine.begin() as connection:
connection.execute(CreateSchema(schema))
+
+
+def delete_schema(schema, engine, cascade=False, if_exists=False):
+ """
+ This method deletes a Postgres schema.
+ """
+ if if_exists and schema not in get_all_schemas(engine):
+ return
+
+ with engine.begin() as connection:
+ try:
+ connection.execute(DropSchema(schema, cascade=cascade))
+ except InternalError as e:
+ if isinstance(e.orig, DependentObjectsStillExist):
+ raise e.orig
+ else:
+ raise e
+
+
+class RenameSchema(DDLElement):
+ def __init__(self, schema, rename_to):
+ self.schema = schema
+ self.rename_to = rename_to
+
+
[email protected](RenameSchema)
+def compile_rename_schema(element, compiler, **_):
+ return "ALTER SCHEMA %s RENAME TO %s" % (
+ element.schema,
+ element.rename_to
+ )
+
+
+def rename_schema(schema, engine, rename_to):
+ """
+ This method renames a Postgres schema.
+ """
+ with engine.begin() as connection:
+ connection.execute(RenameSchema(schema, rename_to))
| {"golden_diff": "diff --git a/db/schemas.py b/db/schemas.py\n--- a/db/schemas.py\n+++ b/db/schemas.py\n@@ -1,7 +1,11 @@\n import logging\n import warnings\n-from sqlalchemy.schema import CreateSchema\n+from sqlalchemy.schema import CreateSchema, DropSchema\n from sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table\n+from sqlalchemy.exc import InternalError\n+from sqlalchemy.schema import DDLElement\n+from sqlalchemy.ext import compiler\n+from psycopg2.errors import DependentObjectsStillExist\n \n from db import types\n \n@@ -78,3 +82,42 @@\n if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n+\n+\n+def delete_schema(schema, engine, cascade=False, if_exists=False):\n+ \"\"\"\n+ This method deletes a Postgres schema.\n+ \"\"\"\n+ if if_exists and schema not in get_all_schemas(engine):\n+ return\n+\n+ with engine.begin() as connection:\n+ try:\n+ connection.execute(DropSchema(schema, cascade=cascade))\n+ except InternalError as e:\n+ if isinstance(e.orig, DependentObjectsStillExist):\n+ raise e.orig\n+ else:\n+ raise e\n+\n+\n+class RenameSchema(DDLElement):\n+ def __init__(self, schema, rename_to):\n+ self.schema = schema\n+ self.rename_to = rename_to\n+\n+\[email protected](RenameSchema)\n+def compile_rename_schema(element, compiler, **_):\n+ return \"ALTER SCHEMA %s RENAME TO %s\" % (\n+ element.schema,\n+ element.rename_to\n+ )\n+\n+\n+def rename_schema(schema, engine, rename_to):\n+ \"\"\"\n+ This method renames a Postgres schema.\n+ \"\"\"\n+ with engine.begin() as connection:\n+ connection.execute(RenameSchema(schema, rename_to))\n", "issue": "Function to handle deleting schemas\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers might want to delete schemas. We don't currently support this.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nA function that handles deleting of schemas in the database. 
We should raise an error if there is anything outside of the schema referencing the schema.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\nThis should be in the `db` module.\n", "before_files": [{"content": "import logging\nimport warnings\nfrom sqlalchemy.schema import CreateSchema\nfrom sqlalchemy import inspect, MetaData, select, and_, not_, or_, Table\n\nfrom db import types\n\nlogger = logging.getLogger(__name__)\n\nTYPES_SCHEMA = types.base.SCHEMA\n\nEXCLUDED_SCHEMATA = [TYPES_SCHEMA, \"information_schema\"]\n\n\ndef get_schema_name_from_oid(oid, engine):\n return reflect_schema(engine, oid=oid)[\"name\"]\n\n\ndef get_schema_oid_from_name(name, engine):\n return reflect_schema(engine, name=name)[\"oid\"]\n\n\ndef reflect_schema(engine, name=None, oid=None):\n # If we have both arguments, the behavior is undefined.\n try:\n assert name is None or oid is None\n except AssertionError as e:\n logger.error(\"ERROR: Only one of 'name' or 'oid' can be given!\")\n raise e\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.oid, pg_namespace.c.nspname.label(\"name\"))\n .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))\n )\n with engine.begin() as conn:\n schema_info = conn.execute(sel).fetchone()\n return schema_info\n\n\ndef get_mathesar_schemas(engine):\n return [schema for schema, _ in get_mathesar_schemas_with_oids(engine)]\n\n\ndef get_mathesar_schemas_with_oids(engine):\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)\n .where(\n and_(\n *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],\n not_(pg_namespace.c.nspname.like(\"pg_%\"))\n )\n )\n )\n with engine.begin() as conn:\n result = conn.execute(sel).fetchall()\n return result\n\n\ndef get_all_schemas(engine):\n inspector = inspect(engine)\n # We don't need to exclude system schemas (i.e., starting with \"pg_\")\n # since Inspector.get_schema_names already excludes them. Thus, this\n # function actually gets all non-pg-reserved schemas.\n return inspector.get_schema_names()\n\n\ndef create_schema(schema, engine):\n \"\"\"\n This method creates a Postgres schema.\n \"\"\"\n if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n", "path": "db/schemas.py"}]} | 1,401 | 422 |
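The schemas fix above builds on SQLAlchemy DDL constructs. A stripped-down sketch of just the drop path, assuming a PostgreSQL engine (the `drop_schema` wrapper name is illustrative): without `cascade=True`, PostgreSQL itself refuses to drop a schema that still contains objects, which is the error-raising behaviour the issue asks for.

```python
# Minimal sketch of dropping a schema with SQLAlchemy's DropSchema construct.
from sqlalchemy import create_engine
from sqlalchemy.schema import DropSchema

def drop_schema(engine, name, cascade=False):
    # With cascade=False the database raises if anything still lives in the schema.
    with engine.begin() as conn:
        conn.execute(DropSchema(name, cascade=cascade))

# engine = create_engine("postgresql://user:pass@localhost/mydb")  # placeholder DSN
# drop_schema(engine, "obsolete_schema", cascade=True)
```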
gh_patches_debug_4070 | rasdani/github-patches | git_diff | scrapy__scrapy-4033 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
may be 'accessible'?
in the function [request_fingerprint](https://github.com/scrapy/scrapy/blob/master/scrapy/utils/request.py) ,‘accesible’ may be ‘accessible’ in comments. OCD XD..
</issue>
<code>
[start of scrapy/utils/request.py]
1 """
2 This module provides some useful functions for working with
3 scrapy.http.Request objects
4 """
5
6 from __future__ import print_function
7 import hashlib
8 import weakref
9 from six.moves.urllib.parse import urlunparse
10
11 from w3lib.http import basic_auth_header
12 from scrapy.utils.python import to_bytes, to_native_str
13
14 from w3lib.url import canonicalize_url
15 from scrapy.utils.httpobj import urlparse_cached
16
17
18 _fingerprint_cache = weakref.WeakKeyDictionary()
19 def request_fingerprint(request, include_headers=None):
20 """
21 Return the request fingerprint.
22
23 The request fingerprint is a hash that uniquely identifies the resource the
24 request points to. For example, take the following two urls:
25
26 http://www.example.com/query?id=111&cat=222
27 http://www.example.com/query?cat=222&id=111
28
29 Even though those are two different URLs both point to the same resource
30 and are equivalent (ie. they should return the same response).
31
32 Another example are cookies used to store session ids. Suppose the
33 following page is only accesible to authenticated users:
34
35 http://www.example.com/members/offers.html
36
37 Lot of sites use a cookie to store the session id, which adds a random
38 component to the HTTP Request and thus should be ignored when calculating
39 the fingerprint.
40
41 For this reason, request headers are ignored by default when calculating
42 the fingeprint. If you want to include specific headers use the
43 include_headers argument, which is a list of Request headers to include.
44
45 """
46 if include_headers:
47 include_headers = tuple(to_bytes(h.lower())
48 for h in sorted(include_headers))
49 cache = _fingerprint_cache.setdefault(request, {})
50 if include_headers not in cache:
51 fp = hashlib.sha1()
52 fp.update(to_bytes(request.method))
53 fp.update(to_bytes(canonicalize_url(request.url)))
54 fp.update(request.body or b'')
55 if include_headers:
56 for hdr in include_headers:
57 if hdr in request.headers:
58 fp.update(hdr)
59 for v in request.headers.getlist(hdr):
60 fp.update(v)
61 cache[include_headers] = fp.hexdigest()
62 return cache[include_headers]
63
64
65 def request_authenticate(request, username, password):
66 """Autenticate the given request (in place) using the HTTP basic access
67 authentication mechanism (RFC 2617) and the given username and password
68 """
69 request.headers['Authorization'] = basic_auth_header(username, password)
70
71
72 def request_httprepr(request):
73 """Return the raw HTTP representation (as bytes) of the given request.
74 This is provided only for reference since it's not the actual stream of
75 bytes that will be send when performing the request (that's controlled
76 by Twisted).
77 """
78 parsed = urlparse_cached(request)
79 path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
80 s = to_bytes(request.method) + b" " + to_bytes(path) + b" HTTP/1.1\r\n"
81 s += b"Host: " + to_bytes(parsed.hostname or b'') + b"\r\n"
82 if request.headers:
83 s += request.headers.to_string() + b"\r\n"
84 s += b"\r\n"
85 s += request.body
86 return s
87
88
89 def referer_str(request):
90 """ Return Referer HTTP header suitable for logging. """
91 referrer = request.headers.get('Referer')
92 if referrer is None:
93 return referrer
94 return to_native_str(referrer, errors='replace')
95
[end of scrapy/utils/request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/request.py b/scrapy/utils/request.py
--- a/scrapy/utils/request.py
+++ b/scrapy/utils/request.py
@@ -30,7 +30,7 @@
and are equivalent (ie. they should return the same response).
Another example are cookies used to store session ids. Suppose the
- following page is only accesible to authenticated users:
+ following page is only accessible to authenticated users:
http://www.example.com/members/offers.html
| {"golden_diff": "diff --git a/scrapy/utils/request.py b/scrapy/utils/request.py\n--- a/scrapy/utils/request.py\n+++ b/scrapy/utils/request.py\n@@ -30,7 +30,7 @@\n and are equivalent (ie. they should return the same response).\n \n Another example are cookies used to store session ids. Suppose the\n- following page is only accesible to authenticated users:\n+ following page is only accessible to authenticated users:\n \n http://www.example.com/members/offers.html\n", "issue": "may be 'accessible'?\nin the function [request_fingerprint](https://github.com/scrapy/scrapy/blob/master/scrapy/utils/request.py) \uff0c\u2018accesible\u2019 may be \u2018accessible\u2019 in comments. OCD XD..\r\n\n", "before_files": [{"content": "\"\"\"\nThis module provides some useful functions for working with\nscrapy.http.Request objects\n\"\"\"\n\nfrom __future__ import print_function\nimport hashlib\nimport weakref\nfrom six.moves.urllib.parse import urlunparse\n\nfrom w3lib.http import basic_auth_header\nfrom scrapy.utils.python import to_bytes, to_native_str\n\nfrom w3lib.url import canonicalize_url\nfrom scrapy.utils.httpobj import urlparse_cached\n\n\n_fingerprint_cache = weakref.WeakKeyDictionary()\ndef request_fingerprint(request, include_headers=None):\n \"\"\"\n Return the request fingerprint.\n\n The request fingerprint is a hash that uniquely identifies the resource the\n request points to. For example, take the following two urls:\n\n http://www.example.com/query?id=111&cat=222\n http://www.example.com/query?cat=222&id=111\n\n Even though those are two different URLs both point to the same resource\n and are equivalent (ie. they should return the same response).\n\n Another example are cookies used to store session ids. Suppose the\n following page is only accesible to authenticated users:\n\n http://www.example.com/members/offers.html\n\n Lot of sites use a cookie to store the session id, which adds a random\n component to the HTTP Request and thus should be ignored when calculating\n the fingerprint.\n\n For this reason, request headers are ignored by default when calculating\n the fingeprint. 
If you want to include specific headers use the\n include_headers argument, which is a list of Request headers to include.\n\n \"\"\"\n if include_headers:\n include_headers = tuple(to_bytes(h.lower())\n for h in sorted(include_headers))\n cache = _fingerprint_cache.setdefault(request, {})\n if include_headers not in cache:\n fp = hashlib.sha1()\n fp.update(to_bytes(request.method))\n fp.update(to_bytes(canonicalize_url(request.url)))\n fp.update(request.body or b'')\n if include_headers:\n for hdr in include_headers:\n if hdr in request.headers:\n fp.update(hdr)\n for v in request.headers.getlist(hdr):\n fp.update(v)\n cache[include_headers] = fp.hexdigest()\n return cache[include_headers]\n\n\ndef request_authenticate(request, username, password):\n \"\"\"Autenticate the given request (in place) using the HTTP basic access\n authentication mechanism (RFC 2617) and the given username and password\n \"\"\"\n request.headers['Authorization'] = basic_auth_header(username, password)\n\n\ndef request_httprepr(request):\n \"\"\"Return the raw HTTP representation (as bytes) of the given request.\n This is provided only for reference since it's not the actual stream of\n bytes that will be send when performing the request (that's controlled\n by Twisted).\n \"\"\"\n parsed = urlparse_cached(request)\n path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))\n s = to_bytes(request.method) + b\" \" + to_bytes(path) + b\" HTTP/1.1\\r\\n\"\n s += b\"Host: \" + to_bytes(parsed.hostname or b'') + b\"\\r\\n\"\n if request.headers:\n s += request.headers.to_string() + b\"\\r\\n\"\n s += b\"\\r\\n\"\n s += request.body\n return s\n\n\ndef referer_str(request):\n \"\"\" Return Referer HTTP header suitable for logging. \"\"\"\n referrer = request.headers.get('Referer')\n if referrer is None:\n return referrer\n return to_native_str(referrer, errors='replace')\n", "path": "scrapy/utils/request.py"}]} | 1,538 | 110 |
gh_patches_debug_13920 | rasdani/github-patches | git_diff | searx__searx-1135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Yacy results crash
Getting:
Engines cannot retrieve results:
yacy (unexpected crash)
> ERROR:searx.search:engine yacy : exception : 'url'
> Traceback (most recent call last):
> File "/home/leo/searx/searx/search.py", line 118, in search_one_request_safe
> search_results = search_one_request(engine, query, request_params, start_time, timeout_limit)
> File "/home/leo/searx/searx/search.py", line 110, in search_one_request
> return engine.response(response)
> File "/home/leo/searx/searx/engines/yacy.py", line 80, in response
> results.append({'url': result['url'],
> KeyError: 'url'
</issue>
<code>
[start of searx/engines/yacy.py]
1 # Yacy (Web, Images, Videos, Music, Files)
2 #
3 # @website http://yacy.net
4 # @provide-api yes
5 # (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
6 #
7 # @using-api yes
8 # @results JSON
9 # @stable yes
10 # @parse (general) url, title, content, publishedDate
11 # @parse (images) url, title, img_src
12 #
13 # @todo parse video, audio and file results
14
15 from json import loads
16 from dateutil import parser
17 from searx.url_utils import urlencode
18
19 from searx.utils import html_to_text
20
21 # engine dependent config
22 categories = ['general', 'images'] # TODO , 'music', 'videos', 'files'
23 paging = True
24 language_support = True
25 number_of_results = 5
26
27 # search-url
28 base_url = 'http://localhost:8090'
29 search_url = '/yacysearch.json?{query}'\
30 '&startRecord={offset}'\
31 '&maximumRecords={limit}'\
32 '&contentdom={search_type}'\
33 '&resource=global'
34
35 # yacy specific type-definitions
36 search_types = {'general': 'text',
37 'images': 'image',
38 'files': 'app',
39 'music': 'audio',
40 'videos': 'video'}
41
42
43 # do search-request
44 def request(query, params):
45 offset = (params['pageno'] - 1) * number_of_results
46 search_type = search_types.get(params.get('category'), '0')
47
48 params['url'] = base_url +\
49 search_url.format(query=urlencode({'query': query}),
50 offset=offset,
51 limit=number_of_results,
52 search_type=search_type)
53
54 params['url'] += '&lr=lang_' + params['language'].split('-')[0]
55
56 return params
57
58
59 # get response from search-request
60 def response(resp):
61 results = []
62
63 raw_search_results = loads(resp.text)
64
65 # return empty array if there are no results
66 if not raw_search_results:
67 return []
68
69 search_results = raw_search_results.get('channels', [])
70
71 if len(search_results) == 0:
72 return []
73
74 for result in search_results[0].get('items', []):
75 # parse image results
76 if result.get('image'):
77 # append result
78 results.append({'url': result['url'],
79 'title': result['title'],
80 'content': '',
81 'img_src': result['image'],
82 'template': 'images.html'})
83
84 # parse general results
85 else:
86 publishedDate = parser.parse(result['pubDate'])
87
88 # append result
89 results.append({'url': result['link'],
90 'title': result['title'],
91 'content': html_to_text(result['description']),
92 'publishedDate': publishedDate})
93
94 # TODO parse video, audio and file results
95
96 # return results
97 return results
98
[end of searx/engines/yacy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -74,8 +74,17 @@
for result in search_results[0].get('items', []):
# parse image results
if result.get('image'):
+
+ result_url = ''
+ if 'url' in result:
+ result_url = result['url']
+ elif 'link' in result:
+ result_url = result['link']
+ else:
+ continue
+
# append result
- results.append({'url': result['url'],
+ results.append({'url': result_url,
'title': result['title'],
'content': '',
'img_src': result['image'],
| {"golden_diff": "diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py\n--- a/searx/engines/yacy.py\n+++ b/searx/engines/yacy.py\n@@ -74,8 +74,17 @@\n for result in search_results[0].get('items', []):\n # parse image results\n if result.get('image'):\n+\n+ result_url = ''\n+ if 'url' in result:\n+ result_url = result['url']\n+ elif 'link' in result:\n+ result_url = result['link']\n+ else:\n+ continue\n+\n # append result\n- results.append({'url': result['url'],\n+ results.append({'url': result_url,\n 'title': result['title'],\n 'content': '',\n 'img_src': result['image'],\n", "issue": "Yacy results crash\nGetting:\r\nEngines cannot retrieve results:\r\nyacy (unexpected crash)\r\n\r\n> ERROR:searx.search:engine yacy : exception : 'url'\r\n> Traceback (most recent call last):\r\n> File \"/home/leo/searx/searx/search.py\", line 118, in search_one_request_safe\r\n> search_results = search_one_request(engine, query, request_params, start_time, timeout_limit)\r\n> File \"/home/leo/searx/searx/search.py\", line 110, in search_one_request\r\n> return engine.response(response)\r\n> File \"/home/leo/searx/searx/engines/yacy.py\", line 80, in response\r\n> results.append({'url': result['url'],\r\n> KeyError: 'url'\n", "before_files": [{"content": "# Yacy (Web, Images, Videos, Music, Files)\n#\n# @website http://yacy.net\n# @provide-api yes\n# (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)\n#\n# @using-api yes\n# @results JSON\n# @stable yes\n# @parse (general) url, title, content, publishedDate\n# @parse (images) url, title, img_src\n#\n# @todo parse video, audio and file results\n\nfrom json import loads\nfrom dateutil import parser\nfrom searx.url_utils import urlencode\n\nfrom searx.utils import html_to_text\n\n# engine dependent config\ncategories = ['general', 'images'] # TODO , 'music', 'videos', 'files'\npaging = True\nlanguage_support = True\nnumber_of_results = 5\n\n# search-url\nbase_url = 'http://localhost:8090'\nsearch_url = '/yacysearch.json?{query}'\\\n '&startRecord={offset}'\\\n '&maximumRecords={limit}'\\\n '&contentdom={search_type}'\\\n '&resource=global'\n\n# yacy specific type-definitions\nsearch_types = {'general': 'text',\n 'images': 'image',\n 'files': 'app',\n 'music': 'audio',\n 'videos': 'video'}\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n search_type = search_types.get(params.get('category'), '0')\n\n params['url'] = base_url +\\\n search_url.format(query=urlencode({'query': query}),\n offset=offset,\n limit=number_of_results,\n search_type=search_type)\n\n params['url'] += '&lr=lang_' + params['language'].split('-')[0]\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n raw_search_results = loads(resp.text)\n\n # return empty array if there are no results\n if not raw_search_results:\n return []\n\n search_results = raw_search_results.get('channels', [])\n\n if len(search_results) == 0:\n return []\n\n for result in search_results[0].get('items', []):\n # parse image results\n if result.get('image'):\n # append result\n results.append({'url': result['url'],\n 'title': result['title'],\n 'content': '',\n 'img_src': result['image'],\n 'template': 'images.html'})\n\n # parse general results\n else:\n publishedDate = parser.parse(result['pubDate'])\n\n # append result\n results.append({'url': result['link'],\n 'title': result['title'],\n 'content': html_to_text(result['description']),\n 'publishedDate': publishedDate})\n\n # TODO parse video, 
audio and file results\n\n # return results\n return results\n", "path": "searx/engines/yacy.py"}]} | 1,556 | 190 |
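The yacy fix above is a defensive-lookup change; the same idea in isolation (the helper name is illustrative, not part of searx):

```python
# The KeyError in the issue comes from assuming every image result has 'url'.
# The fix falls back to 'link' and skips results that carry neither key.
def extract_url(result):
    for key in ("url", "link"):
        if key in result:
            return result[key]
    return None  # callers skip results with no usable URL

assert extract_url({"url": "https://a.example"}) == "https://a.example"
assert extract_url({"link": "https://b.example"}) == "https://b.example"
assert extract_url({"title": "no url at all"}) is None
```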
gh_patches_debug_12988 | rasdani/github-patches | git_diff | elastic__ecs-1488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`doc_values` parameter not set in Beats artifact
Certain fields have `index: false` and `doc_values: false` in their ECS definition, like `event.original`:
https://github.com/elastic/ecs/blob/master/schemas/event.yml#L577-L599
When `doc_values: false` is defined in the field definition, it's not being added to the maintained Beats fields YAML artifact:
https://github.com/elastic/ecs/blob/master/generated/beats/fields.ecs.yml#L1737-L1750
</issue>
<code>
[start of scripts/generators/beats.py]
1 from os.path import join
2 from collections import OrderedDict
3 from generators import ecs_helpers
4
5
6 def generate(ecs_nested, ecs_version, out_dir):
7 # Load temporary allowlist for default_fields workaround.
8 df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')
9
10 # base first
11 beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])
12
13 allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']
14 # other fieldsets
15 for fieldset_name in sorted(ecs_nested):
16 if 'base' == fieldset_name:
17 continue
18 fieldset = ecs_nested[fieldset_name]
19
20 # Handle when `root:true`
21 if fieldset.get('root', False):
22 beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))
23 continue
24
25 beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)
26 beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])
27 beats_fields.append(beats_field)
28
29 beats_file = OrderedDict()
30 beats_file['key'] = 'ecs'
31 beats_file['title'] = 'ECS'
32 beats_file['description'] = 'ECS Fields.'
33 beats_file['fields'] = beats_fields
34
35 write_beats_yaml(beats_file, ecs_version, out_dir)
36
37
38 def fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):
39 allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
40 'ignore_above', 'multi_fields', 'format', 'input_format',
41 'output_format', 'output_precision', 'description',
42 'example', 'enabled', 'index', 'path', 'scaling_factor']
43 multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
44
45 fields = []
46 for nested_field_name in source_fields:
47 ecs_field = source_fields[nested_field_name]
48 beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)
49 if '' == fieldset_prefix:
50 contextual_name = nested_field_name
51 else:
52 contextual_name = '.'.join(nested_field_name.split('.')[1:])
53
54 cleaned_multi_fields = []
55 if 'multi_fields' in ecs_field:
56 for mf in ecs_field['multi_fields']:
57 # Set default_field if necessary. Avoid adding the key if the parent
58 # field already is marked with default_field: false.
59 if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:
60 mf['default_field'] = False
61 cleaned_multi_fields.append(
62 ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))
63 beats_field['multi_fields'] = cleaned_multi_fields
64
65 beats_field['name'] = contextual_name
66
67 if not ecs_field['flat_name'] in df_allowlist:
68 beats_field['default_field'] = False
69
70 fields.append(beats_field)
71 return sorted(fields, key=lambda x: x['name'])
72
73 # Helpers
74
75
76 def write_beats_yaml(beats_file, ecs_version, out_dir):
77 ecs_helpers.make_dirs(join(out_dir, 'beats'))
78 warning = file_header().format(version=ecs_version)
79 ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)
80
81
82 # Templates
83
84
85 def file_header():
86 return """
87 # WARNING! Do not edit this file directly, it was generated by the ECS project,
88 # based on ECS version {version}.
89 # Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.
90
91 """.lstrip()
92
[end of scripts/generators/beats.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py
--- a/scripts/generators/beats.py
+++ b/scripts/generators/beats.py
@@ -39,7 +39,8 @@
allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
'ignore_above', 'multi_fields', 'format', 'input_format',
'output_format', 'output_precision', 'description',
- 'example', 'enabled', 'index', 'path', 'scaling_factor']
+ 'example', 'enabled', 'index', 'doc_values', 'path',
+ 'scaling_factor']
multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
fields = []
| {"golden_diff": "diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py\n--- a/scripts/generators/beats.py\n+++ b/scripts/generators/beats.py\n@@ -39,7 +39,8 @@\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n- 'example', 'enabled', 'index', 'path', 'scaling_factor']\n+ 'example', 'enabled', 'index', 'doc_values', 'path',\n+ 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n \n fields = []\n", "issue": "`doc_values` parameter not set in Beats artifact \nCertain fields have `index: false` and `doc_values: false` in their ECS definition, like `event.original`:\r\n\r\nhttps://github.com/elastic/ecs/blob/master/schemas/event.yml#L577-L599\r\n\r\nWhen `doc_values: false` is defined in the field definition, it's not being added to the maintained Beats fields YAML artifact:\r\n\r\nhttps://github.com/elastic/ecs/blob/master/generated/beats/fields.ecs.yml#L1737-L1750\n", "before_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary allowlist for default_fields workaround.\n df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n # Handle when `root:true`\n if fieldset.get('root', False):\n beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))\n continue\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'path', 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = '.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. 
Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_allowlist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return \"\"\"\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n\"\"\".lstrip()\n", "path": "scripts/generators/beats.py"}]} | 1,659 | 176 |
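The one-line ECS fix works because the generator only copies allow-listed keys, so any property missing from the list is silently dropped from the generated Beats fields file. A small self-contained illustration (the `copy_allowed` helper is a stand-in, not the real `ecs_helpers` API):

```python
def copy_allowed(source, allowed_keys):
    return {k: source[k] for k in allowed_keys if k in source}

field = {"name": "event.original", "index": False, "doc_values": False}
print(copy_allowed(field, ["name", "index"]))                # doc_values silently lost
print(copy_allowed(field, ["name", "index", "doc_values"]))  # doc_values preserved
```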
gh_patches_debug_8516 | rasdani/github-patches | git_diff | iterative__dvc-10005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exp save: Short option for --message is -M, but for dvc exp run it is -m
It would be nice if the short options of `dvc exp run` and `dvc exp save` for specifying a commit message would be identical. Also, best to use the same options as one would use for `git commit`, i.e., `-m` instead of `-M`.
```
usage: dvc experiments save [-h] [-q | -v] [-f] [--json] [-n <name>] [-I <path>] [-M MESSAGE]
Save current workspace as an experiment.
Documentation: <https://man.dvc.org/exp/save>
options:
-h, --help show this help message and exit
-q, --quiet Be quiet.
-v, --verbose Be verbose.
-f, --force Replace experiment if it already exists.
--json Show output in JSON format.
-n <name>, --name <name>
Human-readable experiment name. If not specified, a name will be auto-generated.
-I <path>, --include-untracked <path>
List of untracked paths to include in the experiment.
-M MESSAGE, --message MESSAGE
Custom commit message to use when committing the experiment.
```
DVC CLI v3.22.1
</issue>
<code>
[start of dvc/commands/experiments/save.py]
1 import argparse
2 import logging
3
4 from dvc.cli.command import CmdBase
5 from dvc.cli.utils import append_doc_link
6 from dvc.exceptions import DvcException
7 from dvc.ui import ui
8
9 logger = logging.getLogger(__name__)
10
11
12 class CmdExperimentsSave(CmdBase):
13 def run(self):
14 try:
15 ref = self.repo.experiments.save(
16 name=self.args.name,
17 force=self.args.force,
18 include_untracked=self.args.include_untracked,
19 message=self.args.message,
20 )
21 except DvcException:
22 logger.exception("failed to save experiment")
23 return 1
24
25 if self.args.json:
26 ui.write_json({"ref": ref})
27 else:
28 name = self.repo.experiments.get_exact_name([ref])[ref]
29 ui.write(f"Experiment has been saved as: {name}")
30
31 return 0
32
33
34 def add_parser(experiments_subparsers, parent_parser):
35 EXPERIMENTS_SAVE_HELP = "Save current workspace as an experiment."
36 save_parser = experiments_subparsers.add_parser(
37 "save",
38 parents=[parent_parser],
39 description=append_doc_link(EXPERIMENTS_SAVE_HELP, "exp/save"),
40 help=EXPERIMENTS_SAVE_HELP,
41 formatter_class=argparse.RawDescriptionHelpFormatter,
42 )
43 save_parser.add_argument(
44 "-f",
45 "--force",
46 action="store_true",
47 default=False,
48 help="Replace experiment if it already exists.",
49 )
50 save_parser.add_argument(
51 "--json",
52 action="store_true",
53 default=False,
54 help="Show output in JSON format.",
55 )
56 save_parser.add_argument(
57 "-n",
58 "--name",
59 default=None,
60 help=(
61 "Human-readable experiment name. If not specified, a name will "
62 "be auto-generated."
63 ),
64 metavar="<name>",
65 )
66 save_parser.add_argument(
67 "-I",
68 "--include-untracked",
69 action="append",
70 default=[],
71 help="List of untracked paths to include in the experiment.",
72 metavar="<path>",
73 )
74 save_parser.add_argument(
75 "-M",
76 "--message",
77 type=str,
78 default=None,
79 help="Custom commit message to use when committing the experiment.",
80 )
81 save_parser.set_defaults(func=CmdExperimentsSave)
82
[end of dvc/commands/experiments/save.py]
[start of dvc/commands/experiments/exec_run.py]
1 import logging
2
3 from dvc.cli.command import CmdBaseNoRepo
4
5 logger = logging.getLogger(__name__)
6
7
8 class CmdExecutorRun(CmdBaseNoRepo):
9 """Run an experiment executor."""
10
11 def run(self):
12 from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo
13 from dvc.utils.serialize import load_json
14
15 info = ExecutorInfo.from_dict(load_json(self.args.infofile))
16 BaseExecutor.reproduce(
17 info=info,
18 rev="",
19 queue=None,
20 log_level=logger.getEffectiveLevel(),
21 infofile=self.args.infofile,
22 copy_paths=self.args.copy_paths,
23 message=self.args.message,
24 )
25 return 0
26
27
28 def add_parser(experiments_subparsers, parent_parser):
29 EXEC_RUN_HELP = "Run an experiment executor."
30 exec_run_parser = experiments_subparsers.add_parser(
31 "exec-run",
32 parents=[parent_parser],
33 description=EXEC_RUN_HELP,
34 add_help=False,
35 )
36 exec_run_parser.add_argument(
37 "--infofile",
38 help="Path to executor info file",
39 default=None,
40 )
41 exec_run_parser.add_argument(
42 "-C",
43 "--copy-paths",
44 action="append",
45 default=[],
46 help=(
47 "List of ignored or untracked paths to copy into the temp directory."
48 " Only used if `--temp` or `--queue` is specified."
49 ),
50 )
51 exec_run_parser.add_argument(
52 "-M",
53 "--message",
54 type=str,
55 default=None,
56 help="Custom commit message to use when committing the experiment.",
57 )
58 exec_run_parser.set_defaults(func=CmdExecutorRun)
59
[end of dvc/commands/experiments/exec_run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/commands/experiments/exec_run.py b/dvc/commands/experiments/exec_run.py
--- a/dvc/commands/experiments/exec_run.py
+++ b/dvc/commands/experiments/exec_run.py
@@ -49,7 +49,7 @@
),
)
exec_run_parser.add_argument(
- "-M",
+ "-m",
"--message",
type=str,
default=None,
diff --git a/dvc/commands/experiments/save.py b/dvc/commands/experiments/save.py
--- a/dvc/commands/experiments/save.py
+++ b/dvc/commands/experiments/save.py
@@ -72,7 +72,7 @@
metavar="<path>",
)
save_parser.add_argument(
- "-M",
+ "-m",
"--message",
type=str,
default=None,
| {"golden_diff": "diff --git a/dvc/commands/experiments/exec_run.py b/dvc/commands/experiments/exec_run.py\n--- a/dvc/commands/experiments/exec_run.py\n+++ b/dvc/commands/experiments/exec_run.py\n@@ -49,7 +49,7 @@\n ),\n )\n exec_run_parser.add_argument(\n- \"-M\",\n+ \"-m\",\n \"--message\",\n type=str,\n default=None,\ndiff --git a/dvc/commands/experiments/save.py b/dvc/commands/experiments/save.py\n--- a/dvc/commands/experiments/save.py\n+++ b/dvc/commands/experiments/save.py\n@@ -72,7 +72,7 @@\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n- \"-M\",\n+ \"-m\",\n \"--message\",\n type=str,\n default=None,\n", "issue": "exp save: Short option for --message is -M, but for dvc exp run it is -m\nIt would be nice if the short options of `dvc exp run` and `dvc exp save` for specifying a commit message would be identical. Also, best to use the same options as one would use for `git commit`, i.e., `-m` instead of `-M`.\r\n\r\n```\r\nusage: dvc experiments save [-h] [-q | -v] [-f] [--json] [-n <name>] [-I <path>] [-M MESSAGE]\r\n\r\nSave current workspace as an experiment.\r\nDocumentation: <https://man.dvc.org/exp/save>\r\n\r\noptions:\r\n -h, --help show this help message and exit\r\n -q, --quiet Be quiet.\r\n -v, --verbose Be verbose.\r\n -f, --force Replace experiment if it already exists.\r\n --json Show output in JSON format.\r\n -n <name>, --name <name>\r\n Human-readable experiment name. If not specified, a name will be auto-generated.\r\n -I <path>, --include-untracked <path>\r\n List of untracked paths to include in the experiment.\r\n -M MESSAGE, --message MESSAGE\r\n Custom commit message to use when committing the experiment.\r\n```\r\n\r\nDVC CLI v3.22.1\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.cli.command import CmdBase\nfrom dvc.cli.utils import append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.ui import ui\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExperimentsSave(CmdBase):\n def run(self):\n try:\n ref = self.repo.experiments.save(\n name=self.args.name,\n force=self.args.force,\n include_untracked=self.args.include_untracked,\n message=self.args.message,\n )\n except DvcException:\n logger.exception(\"failed to save experiment\")\n return 1\n\n if self.args.json:\n ui.write_json({\"ref\": ref})\n else:\n name = self.repo.experiments.get_exact_name([ref])[ref]\n ui.write(f\"Experiment has been saved as: {name}\")\n\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXPERIMENTS_SAVE_HELP = \"Save current workspace as an experiment.\"\n save_parser = experiments_subparsers.add_parser(\n \"save\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_SAVE_HELP, \"exp/save\"),\n help=EXPERIMENTS_SAVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n save_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Replace experiment if it already exists.\",\n )\n save_parser.add_argument(\n \"--json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n save_parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable experiment name. 
If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"<name>\",\n )\n save_parser.add_argument(\n \"-I\",\n \"--include-untracked\",\n action=\"append\",\n default=[],\n help=\"List of untracked paths to include in the experiment.\",\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n \"-M\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n save_parser.set_defaults(func=CmdExperimentsSave)\n", "path": "dvc/commands/experiments/save.py"}, {"content": "import logging\n\nfrom dvc.cli.command import CmdBaseNoRepo\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExecutorRun(CmdBaseNoRepo):\n \"\"\"Run an experiment executor.\"\"\"\n\n def run(self):\n from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo\n from dvc.utils.serialize import load_json\n\n info = ExecutorInfo.from_dict(load_json(self.args.infofile))\n BaseExecutor.reproduce(\n info=info,\n rev=\"\",\n queue=None,\n log_level=logger.getEffectiveLevel(),\n infofile=self.args.infofile,\n copy_paths=self.args.copy_paths,\n message=self.args.message,\n )\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXEC_RUN_HELP = \"Run an experiment executor.\"\n exec_run_parser = experiments_subparsers.add_parser(\n \"exec-run\",\n parents=[parent_parser],\n description=EXEC_RUN_HELP,\n add_help=False,\n )\n exec_run_parser.add_argument(\n \"--infofile\",\n help=\"Path to executor info file\",\n default=None,\n )\n exec_run_parser.add_argument(\n \"-C\",\n \"--copy-paths\",\n action=\"append\",\n default=[],\n help=(\n \"List of ignored or untracked paths to copy into the temp directory.\"\n \" Only used if `--temp` or `--queue` is specified.\"\n ),\n )\n exec_run_parser.add_argument(\n \"-M\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n exec_run_parser.set_defaults(func=CmdExecutorRun)\n", "path": "dvc/commands/experiments/exec_run.py"}]} | 1,948 | 185 |
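The DVC change is purely a short-flag spelling swap; argparse maps the short and long spellings to the same destination either way, so the fix only aligns `dvc exp save`/`exec-run` with `dvc exp run` and `git commit -m`. A quick sketch of the behaviour being standardised:

```python
import argparse

parser = argparse.ArgumentParser(prog="dvc experiments save")
parser.add_argument("-m", "--message", type=str, default=None,
                    help="Custom commit message to use when committing the experiment.")

print(parser.parse_args(["-m", "tuned lr"]).message)         # -> tuned lr
print(parser.parse_args(["--message", "tuned lr"]).message)  # -> tuned lr
```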
gh_patches_debug_22467 | rasdani/github-patches | git_diff | pre-commit__pre-commit-400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stashed changes lost if hook fails with non-UTF-8 diff containing trailing whitespace
Hi,
A colleague almost lost all the changes she was working on after launching a `git commit` (with zero files added); `pre-commit` crashed without restoring its [patch](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/staged_files_only.py#L15).
Here is the terminal message she got:
```
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
An unexpected error has occurred: CalledProcessError: Command: ['git', 'apply', 'C:\\Users\\toto\\.pre-commit\\patch1471341002']
```
This seems very similar to a past solved issue:
https://github.com/pre-commit/pre-commit/issues/176
I think it had to do with CRLF conversion.
I'm going to try to reproduce this.
</issue>
<code>
[start of pre_commit/staged_files_only.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import time
7
8 from pre_commit.util import CalledProcessError
9
10
11 logger = logging.getLogger('pre_commit')
12
13
14 @contextlib.contextmanager
15 def staged_files_only(cmd_runner):
16 """Clear any unstaged changes from the git working directory inside this
17 context.
18
19 Args:
20 cmd_runner - PrefixedCommandRunner
21 """
22 # Determine if there are unstaged files
23 retcode, diff_stdout_binary, _ = cmd_runner.run(
24 [
25 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',
26 '--no-color',
27 ],
28 retcode=None,
29 encoding=None,
30 )
31 if retcode and diff_stdout_binary.strip():
32 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
33 logger.warning('Unstaged files detected.')
34 logger.info(
35 'Stashing unstaged files to {0}.'.format(patch_filename),
36 )
37 # Save the current unstaged changes as a patch
38 with io.open(patch_filename, 'wb') as patch_file:
39 patch_file.write(diff_stdout_binary)
40
41 # Clear the working directory of unstaged changes
42 cmd_runner.run(['git', 'checkout', '--', '.'])
43 try:
44 yield
45 finally:
46 # Try to apply the patch we saved
47 try:
48 cmd_runner.run(['git', 'apply', patch_filename])
49 except CalledProcessError:
50 logger.warning(
51 'Stashed changes conflicted with hook auto-fixes... '
52 'Rolling back fixes...'
53 )
54 # We failed to apply the patch, presumably due to fixes made
55 # by hooks.
56 # Roll back the changes made by hooks.
57 cmd_runner.run(['git', 'checkout', '--', '.'])
58 cmd_runner.run(['git', 'apply', patch_filename])
59 logger.info('Restored changes from {0}.'.format(patch_filename))
60 else:
61 # There weren't any staged files so we don't need to do anything
62 # special
63 yield
64
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -45,7 +45,7 @@
finally:
# Try to apply the patch we saved
try:
- cmd_runner.run(['git', 'apply', patch_filename])
+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
except CalledProcessError:
logger.warning(
'Stashed changes conflicted with hook auto-fixes... '
@@ -55,7 +55,7 @@
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(['git', 'checkout', '--', '.'])
- cmd_runner.run(['git', 'apply', patch_filename])
+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
logger.info('Restored changes from {0}.'.format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -45,7 +45,7 @@\n finally:\n # Try to apply the patch we saved\n try:\n- cmd_runner.run(['git', 'apply', patch_filename])\n+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n@@ -55,7 +55,7 @@\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n- cmd_runner.run(['git', 'apply', patch_filename])\n+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n", "issue": "Stashed changes lost if hook fails with non-UTF-8 diff containing trailing whitespace\nHi,\n\nA colleague almost lost all the changes she was working on after launching a `git commit` (with zero file added) and `pre-commit` crashing without restoring its [patch](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/staged_files_only.py#L15).\n\nHere is the terminal message she got:\n\n```\n[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...\nAn unexpected error has occurred: CalledProcessError: Command: ['git', 'apply', 'C:\\\\Users\\\\toto\\\\.pre-commit\\\\patch1471341002']\n```\n\nThis seems very similar to a past solved issue:\nhttps://github.com/pre-commit/pre-commit/issues/176\n\nI think it had to do with CRLF conversion.\nI'm going to try to reproduce this.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n [\n 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',\n '--no-color',\n ],\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,292 | 232 |
gh_patches_debug_65236 | rasdani/github-patches | git_diff | streamlink__streamlink-5698 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.btv: No playable streams found
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Your Streamlink version (6.4.2+1.g7e722ec1) is up to date!
### Description
The plug-in does not display video. It displays errors shown in the logs below.
### Debug log
```text
streamlink --loglevel=debug "https://btvplus.bg/live/" best
[cli][debug] OS: Linux-6.2.0-35-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.12
[cli][debug] OpenSSL: OpenSSL 3.0.2 15 Mar 2022
[cli][debug] Streamlink: 6.4.2+1.g7e722ec1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.8.0
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.10.3
[cli][debug] typing-extensions: 4.7.1
[cli][debug] urllib3: 1.26.16
[cli][debug] websocket-client: 1.2.3
[cli][debug] Arguments:
[cli][debug] url=https://btvplus.bg/live/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/
[cli][info] Available streams: live (worst, best)
[cli][info] Opening stream: live (hls)
[cli][info] Starting player: /usr/bin/vlc
[stream.hls][debug] Reloading playlist
[cli][debug] Pre-buffering 8192 bytes
[stream.hls][error] Attempted to play a variant playlist, use 'hls://https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8' instead
[stream.segmented][debug] Closing worker thread
[stream.segmented][debug] Closing writer thread
[cli][error] Try 1/1: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']> (No data returned from stream)
error: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']>, tried 1 times, exiting
[cli][info] Closing currently open stream...
```
</issue>
<code>
[start of src/streamlink/plugins/btv.py]
1 """
2 $description A privately owned Bulgarian live TV channel.
3 $url btvplus.bg
4 $type live
5 $region Bulgaria
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.hls import HLSStream
14
15
16 log = logging.getLogger(__name__)
17
18
19 @pluginmatcher(re.compile(
20 r"https?://(?:www\.)?btvplus\.bg/live/?",
21 ))
22 class BTV(Plugin):
23 URL_API = "https://btvplus.bg/lbin/v3/btvplus/player_config.php"
24
25 def _get_streams(self):
26 media_id = self.session.http.get(self.url, schema=validate.Schema(
27 re.compile(r"media_id=(\d+)"),
28 validate.any(None, validate.get(1)),
29 ))
30 if media_id is None:
31 return
32
33 stream_url = self.session.http.get(
34 self.URL_API,
35 params={
36 "media_id": media_id,
37 },
38 schema=validate.Schema(
39 validate.any(
40 validate.all(
41 validate.regex(re.compile(r"geo_blocked_stream")),
42 validate.get(0),
43 ),
44 validate.all(
45 validate.parse_json(),
46 {
47 "status": "ok",
48 "info": {
49 "file": validate.url(path=validate.endswith(".m3u8")),
50 },
51 },
52 validate.get(("info", "file")),
53 ),
54 ),
55 ),
56 )
57 if not stream_url:
58 return
59
60 if stream_url == "geo_blocked_stream":
61 log.error("The content is not available in your region")
62 return
63
64 return {"live": HLSStream(self.session, stream_url)}
65
66
67 __plugin__ = BTV
68
[end of src/streamlink/plugins/btv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py
--- a/src/streamlink/plugins/btv.py
+++ b/src/streamlink/plugins/btv.py
@@ -61,7 +61,7 @@
log.error("The content is not available in your region")
return
- return {"live": HLSStream(self.session, stream_url)}
+ return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = BTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py\n--- a/src/streamlink/plugins/btv.py\n+++ b/src/streamlink/plugins/btv.py\n@@ -61,7 +61,7 @@\n log.error(\"The content is not available in your region\")\n return\n \n- return {\"live\": HLSStream(self.session, stream_url)}\n+ return HLSStream.parse_variant_playlist(self.session, stream_url)\n \n \n __plugin__ = BTV\n", "issue": "plugins.btv: No playable streams found\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nYour Streamlink version (6.4.2+1.g7e722ec1) is up to date!\n\n### Description\n\nThe plug-in does not display video. It displays errors shown in the logs below.\r\n\n\n### Debug log\n\n```text\nstreamlink --loglevel=debug \"https://btvplus.bg/live/\" best\r\n[cli][debug] OS: Linux-6.2.0-35-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.12\r\n[cli][debug] OpenSSL: OpenSSL 3.0.2 15 Mar 2022\r\n[cli][debug] Streamlink: 6.4.2+1.g7e722ec1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.8.0\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.10.3\r\n[cli][debug] typing-extensions: 4.7.1\r\n[cli][debug] urllib3: 1.26.16\r\n[cli][debug] websocket-client: 1.2.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://btvplus.bg/live/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/\r\n[cli][info] Available streams: live (worst, best)\r\n[cli][info] Opening stream: live (hls)\r\n[cli][info] Starting player: /usr/bin/vlc\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][error] Attempted to play a variant playlist, use 'hls://https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8' instead\r\n[stream.segmented][debug] Closing worker thread\r\n[stream.segmented][debug] Closing writer thread\r\n[cli][error] Try 1/1: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']> (No data returned from stream)\r\nerror: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']>, tried 1 times, exiting\r\n[cli][info] Closing currently open stream...\n```\n\n", "before_files": [{"content": "\"\"\"\n$description A privately owned Bulgarian live TV channel.\n$url btvplus.bg\n$type live\n$region Bulgaria\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?btvplus\\.bg/live/?\",\n))\nclass BTV(Plugin):\n URL_API = 
\"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n\n def _get_streams(self):\n media_id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"media_id=(\\d+)\"),\n validate.any(None, validate.get(1)),\n ))\n if media_id is None:\n return\n\n stream_url = self.session.http.get(\n self.URL_API,\n params={\n \"media_id\": media_id,\n },\n schema=validate.Schema(\n validate.any(\n validate.all(\n validate.regex(re.compile(r\"geo_blocked_stream\")),\n validate.get(0),\n ),\n validate.all(\n validate.parse_json(),\n {\n \"status\": \"ok\",\n \"info\": {\n \"file\": validate.url(path=validate.endswith(\".m3u8\")),\n },\n },\n validate.get((\"info\", \"file\")),\n ),\n ),\n ),\n )\n if not stream_url:\n return\n\n if stream_url == \"geo_blocked_stream\":\n log.error(\"The content is not available in your region\")\n return\n\n return {\"live\": HLSStream(self.session, stream_url)}\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}]} | 1,974 | 105 |
gh_patches_debug_7467 | rasdani/github-patches | git_diff | sublimelsp__LSP-660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
when cancelling the symbols panel, the last symbol is selected
https://github.com/tomv564/LSP/blob/be904c56fddf35f724486de405a168786ed4ffeb/plugin/symbols.py#L82-L92
```diff
def on_symbol_selected(self, symbol_index):
+ if symbol_index == -1:
+ return
selected_symbol = self.symbols[symbol_index]
range = selected_symbol.get('location', selected_symbol.get('range'))
range = range.get('range', range)
```
</issue>
<code>
[start of plugin/symbols.py]
1 from .core.logging import debug
2 from .core.protocol import Request, Range
3 from .core.protocol import SymbolKind
4 from .core.registry import client_for_view, LspTextCommand
5 from .core.url import filename_to_uri
6 from .core.views import range_to_region
7
8 try:
9 from typing import List, Optional, Any
10 assert List and Optional and Any
11 except ImportError:
12 pass
13
14 symbol_kind_names = {
15 SymbolKind.File: "file",
16 SymbolKind.Module: "module",
17 SymbolKind.Namespace: "namespace",
18 SymbolKind.Package: "package",
19 SymbolKind.Class: "class",
20 SymbolKind.Method: "method",
21 SymbolKind.Property: "property",
22 SymbolKind.Field: "field",
23 SymbolKind.Constructor: "constructor",
24 SymbolKind.Enum: "enum",
25 SymbolKind.Interface: "interface",
26 SymbolKind.Function: "function",
27 SymbolKind.Variable: "variable",
28 SymbolKind.Constant: "constant",
29 SymbolKind.String: "string",
30 SymbolKind.Number: "number",
31 SymbolKind.Boolean: "boolean",
32 SymbolKind.Array: "array",
33 SymbolKind.Object: "object",
34 SymbolKind.Key: "key",
35 SymbolKind.Null: "null",
36 SymbolKind.EnumMember: "enum member",
37 SymbolKind.Struct: "struct",
38 SymbolKind.Event: "event",
39 SymbolKind.Operator: "operator",
40 SymbolKind.TypeParameter: "type parameter"
41 }
42
43
44 def format_symbol_kind(kind):
45 return symbol_kind_names.get(kind, str(kind))
46
47
48 def format_symbol(item):
49 """
50 items may be a list of strings, or a list of string lists.
51 In the latter case, each entry in the quick panel will show multiple rows
52 """
53 prefix = item.get("containerName", "")
54 label = prefix + "." + item.get("name") if prefix else item.get("name")
55 return [label, format_symbol_kind(item.get("kind"))]
56
57
58 class LspDocumentSymbolsCommand(LspTextCommand):
59 def __init__(self, view):
60 super().__init__(view)
61
62 def is_enabled(self, event=None):
63 return self.has_client_with_capability('documentSymbolProvider')
64
65 def run(self, edit) -> None:
66 client = client_for_view(self.view)
67 if client:
68 params = {
69 "textDocument": {
70 "uri": filename_to_uri(self.view.file_name())
71 }
72 }
73 request = Request.documentSymbols(params)
74 client.send_request(request, self.handle_response)
75
76 def handle_response(self, response: 'Optional[List]') -> None:
77 response_list = response or []
78 symbols = list(format_symbol(item) for item in response_list)
79 self.symbols = response_list
80 self.view.window().show_quick_panel(symbols, self.on_symbol_selected)
81
82 def on_symbol_selected(self, symbol_index):
83 selected_symbol = self.symbols[symbol_index]
84 range = selected_symbol.get('location', selected_symbol.get('range'))
85 range = range.get('range', range)
86 if not range:
87 debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')
88 return
89 region = range_to_region(Range.from_lsp(range), self.view)
90 self.view.show_at_center(region)
91 self.view.sel().clear()
92 self.view.sel().add(region)
93
[end of plugin/symbols.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/symbols.py b/plugin/symbols.py
--- a/plugin/symbols.py
+++ b/plugin/symbols.py
@@ -80,6 +80,8 @@
self.view.window().show_quick_panel(symbols, self.on_symbol_selected)
def on_symbol_selected(self, symbol_index):
+ if symbol_index == -1:
+ return
selected_symbol = self.symbols[symbol_index]
range = selected_symbol.get('location', selected_symbol.get('range'))
range = range.get('range', range)
| {"golden_diff": "diff --git a/plugin/symbols.py b/plugin/symbols.py\n--- a/plugin/symbols.py\n+++ b/plugin/symbols.py\n@@ -80,6 +80,8 @@\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n \n def on_symbol_selected(self, symbol_index):\n+ if symbol_index == -1:\n+ return\n selected_symbol = self.symbols[symbol_index]\n range = selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n", "issue": "when cancelling the symbols panel, the last symbol is selected\nhttps://github.com/tomv564/LSP/blob/be904c56fddf35f724486de405a168786ed4ffeb/plugin/symbols.py#L82-L92\r\n```diff\r\n def on_symbol_selected(self, symbol_index):\r\n+ if symbol_index == -1:\r\n+ return\r\n selected_symbol = self.symbols[symbol_index]\r\n range = selected_symbol.get('location', selected_symbol.get('range'))\r\n range = range.get('range', range)\r\n```\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import Request, Range\nfrom .core.protocol import SymbolKind\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.url import filename_to_uri\nfrom .core.views import range_to_region\n\ntry:\n from typing import List, Optional, Any\n assert List and Optional and Any\nexcept ImportError:\n pass\n\nsymbol_kind_names = {\n SymbolKind.File: \"file\",\n SymbolKind.Module: \"module\",\n SymbolKind.Namespace: \"namespace\",\n SymbolKind.Package: \"package\",\n SymbolKind.Class: \"class\",\n SymbolKind.Method: \"method\",\n SymbolKind.Property: \"property\",\n SymbolKind.Field: \"field\",\n SymbolKind.Constructor: \"constructor\",\n SymbolKind.Enum: \"enum\",\n SymbolKind.Interface: \"interface\",\n SymbolKind.Function: \"function\",\n SymbolKind.Variable: \"variable\",\n SymbolKind.Constant: \"constant\",\n SymbolKind.String: \"string\",\n SymbolKind.Number: \"number\",\n SymbolKind.Boolean: \"boolean\",\n SymbolKind.Array: \"array\",\n SymbolKind.Object: \"object\",\n SymbolKind.Key: \"key\",\n SymbolKind.Null: \"null\",\n SymbolKind.EnumMember: \"enum member\",\n SymbolKind.Struct: \"struct\",\n SymbolKind.Event: \"event\",\n SymbolKind.Operator: \"operator\",\n SymbolKind.TypeParameter: \"type parameter\"\n}\n\n\ndef format_symbol_kind(kind):\n return symbol_kind_names.get(kind, str(kind))\n\n\ndef format_symbol(item):\n \"\"\"\n items may be a list of strings, or a list of string lists.\n In the latter case, each entry in the quick panel will show multiple rows\n \"\"\"\n prefix = item.get(\"containerName\", \"\")\n label = prefix + \".\" + item.get(\"name\") if prefix else item.get(\"name\")\n return [label, format_symbol_kind(item.get(\"kind\"))]\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n return self.has_client_with_capability('documentSymbolProvider')\n\n def run(self, edit) -> None:\n client = client_for_view(self.view)\n if client:\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n }\n }\n request = Request.documentSymbols(params)\n client.send_request(request, self.handle_response)\n\n def handle_response(self, response: 'Optional[List]') -> None:\n response_list = response or []\n symbols = list(format_symbol(item) for item in response_list)\n self.symbols = response_list\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n\n def on_symbol_selected(self, symbol_index):\n selected_symbol = self.symbols[symbol_index]\n range = 
selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n if not range:\n debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')\n return\n region = range_to_region(Range.from_lsp(range), self.view)\n self.view.show_at_center(region)\n self.view.sel().clear()\n self.view.sel().add(region)\n", "path": "plugin/symbols.py"}]} | 1,546 | 117 |
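
The pitfall behind the fix above is that Sublime Text's quick panel calls its callback with `-1` when the user cancels, and Python treats `-1` as "last element". A tiny sketch of the guard, with no Sublime API involved (the symbol list is invented):

```python
symbols = [{"name": "foo"}, {"name": "bar"}]


def on_symbol_selected(symbol_index: int) -> None:
    if symbol_index == -1:
        return  # panel cancelled; without this, symbols[-1] would be used
    print("selected", symbols[symbol_index]["name"])


on_symbol_selected(-1)  # prints nothing: cancellation is ignored
on_symbol_selected(1)   # -> selected bar
```
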
gh_patches_debug_8479 | rasdani/github-patches | git_diff | spacetelescope__jwql-92 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update environment.yml to update Django version
When running the Django web server (on the `laurenmarietta/web-app-dev` branch) from the `jwql` environment on the VM, I had to update Django from 1.11.8 to the latest version (2.0.5) to get rid of a Django error.

The version of Django in `environment.yml` should therefore be specified as >=2.0.5 in the future.
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.4.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
8 AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
11
12 setup(
13 name='jwql',
14 version=VERSION,
15 description='The JWST Quicklook Project',
16 url='https://github.com/spacetelescope/jwql.git',
17 author=AUTHORS,
18 author_email='[email protected]',
19 license='BSD',
20 keywords=['astronomy', 'python'],
21 classifiers=['Programming Language :: Python'],
22 packages=find_packages(),
23 install_requires=REQUIRES,
24 include_package_data=True,
25 include_dirs=[np.get_include()],
26 )
27
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
-REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
setup(
name='jwql',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\n AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n \n-REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n \n setup(\n name='jwql',\n", "issue": "Update environment.yml to update Django version\nWhen running the Django web server (on the `laurenmarietta/web-app-dev branch`) from the `jwql` environment on the VM, and I had to update Django from 1.11.8 to the latest version (2.0.5) to get rid of an error with Django.\r\n\r\nThe version of Django in `environment.yml` should be specified to >=2.0.5 in the environment file in the future.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n )\n", "path": "setup.py"}]} | 913 | 202 |
gh_patches_debug_26629 | rasdani/github-patches | git_diff | GoogleCloudPlatform__PerfKitBenchmarker-73 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The cluster boot benchmark should call the num_cpus function in parallel
The cluster boot benchmark has the following code:
> for vm in vms:
> metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
> 'machine_instance': vm_number}
> value = vm.TimeToBoot()
This looks great until you realize that vm.num_cpus is a method on the virtual machine which in turn calls RemoteCommand, leading to an SSH. When a large number of VMs boot, the result is a long sequence of serially run SSH calls, one per VM. This could be done a lot faster by moving the code into a method and then using RunThreaded.
</issue>
<code>
[start of perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py]
1 # Copyright 2014 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Runs a cluster boot benchmark."""
16
17 import logging
18
19 from perfkitbenchmarker import flags
20
21 FLAGS = flags.FLAGS
22 BENCHMARK_INFO = {'name': 'cluster boot',
23 'description': 'Create a cluster, record all times to boot',
24 'scratch_disk': False,
25 'num_machines': None} # Set in GetInfo()
26
27
28 def GetInfo():
29 BENCHMARK_INFO['num_machines'] = FLAGS.num_vms
30 return BENCHMARK_INFO
31
32
33 def Prepare(unused_benchmark_spec):
34 pass
35
36
37 def Run(benchmark_spec):
38 """Measure the boot time for all VMs.
39
40 Args:
41 benchmark_spec: The benchmark specification. Contains all data that is
42 required to run the benchmark.
43
44 Returns:
45 A list of samples in the form of 3 or 4 tuples. The tuples contain
46 the sample metric (string), value (float), and unit (string).
47 If a 4th element is included, it is a dictionary of sample
48 metadata.
49 """
50
51 samples = []
52 vm_number = 0
53 logging.info('Boot Results:')
54 vms = benchmark_spec.vms
55 for vm in vms:
56 metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
57 'machine_instance': vm_number}
58 value = vm.TimeToBoot()
59 assert value is not None
60 samples.append(('Boot Time', value, 'seconds', metadata))
61 vm_number += 1
62 logging.info(samples)
63 assert vm_number == benchmark_spec.num_vms
64 return samples
65
66
67 def Cleanup(unused_benchmark_spec):
68 pass
69
[end of perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
--- a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
+++ b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
@@ -17,6 +17,7 @@
import logging
from perfkitbenchmarker import flags
+from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'cluster boot',
@@ -34,6 +35,14 @@
pass
+def _GetTimeToBoot(vm, vm_index, result_list):
+ metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
+ 'machine_instance': vm_index}
+ value = vm.TimeToBoot()
+ assert value is not None
+ result_list.append(('Boot Time', value, 'seconds', metadata))
+
+
def Run(benchmark_spec):
"""Measure the boot time for all VMs.
@@ -49,18 +58,12 @@
"""
samples = []
- vm_number = 0
logging.info('Boot Results:')
vms = benchmark_spec.vms
- for vm in vms:
- metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
- 'machine_instance': vm_number}
- value = vm.TimeToBoot()
- assert value is not None
- samples.append(('Boot Time', value, 'seconds', metadata))
- vm_number += 1
+ params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]
+ vm_util.RunThreaded(_GetTimeToBoot, params)
logging.info(samples)
- assert vm_number == benchmark_spec.num_vms
+ assert len(samples) == benchmark_spec.num_vms
return samples
| {"golden_diff": "diff --git a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n--- a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n+++ b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n@@ -17,6 +17,7 @@\n import logging\n \n from perfkitbenchmarker import flags\n+from perfkitbenchmarker import vm_util\n \n FLAGS = flags.FLAGS\n BENCHMARK_INFO = {'name': 'cluster boot',\n@@ -34,6 +35,14 @@\n pass\n \n \n+def _GetTimeToBoot(vm, vm_index, result_list):\n+ metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n+ 'machine_instance': vm_index}\n+ value = vm.TimeToBoot()\n+ assert value is not None\n+ result_list.append(('Boot Time', value, 'seconds', metadata))\n+\n+\n def Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n \n@@ -49,18 +58,12 @@\n \"\"\"\n \n samples = []\n- vm_number = 0\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n- for vm in vms:\n- metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n- 'machine_instance': vm_number}\n- value = vm.TimeToBoot()\n- assert value is not None\n- samples.append(('Boot Time', value, 'seconds', metadata))\n- vm_number += 1\n+ params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]\n+ vm_util.RunThreaded(_GetTimeToBoot, params)\n logging.info(samples)\n- assert vm_number == benchmark_spec.num_vms\n+ assert len(samples) == benchmark_spec.num_vms\n return samples\n", "issue": "The cluster boot benchmark should the num_cpus function in parallel\nThe cluster boot benchmark has the following code:\n\n> for vm in vms:\n> metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n> 'machine_instance': vm_number}\n> value = vm.TimeToBoot()\n\nThis looks great until you realize vm.num_cpus is a method on the virtual machine which in turn calls RemoteCommand leading to an ssh. When large number of VM's boot the result is a long set of serially run ssh's to each VM. This could be done a lot faster by moving the code into a method and then using RunThreaded.\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs a cluster boot benchmark.\"\"\"\n\nimport logging\n\nfrom perfkitbenchmarker import flags\n\nFLAGS = flags.FLAGS\nBENCHMARK_INFO = {'name': 'cluster boot',\n 'description': 'Create a cluster, record all times to boot',\n 'scratch_disk': False,\n 'num_machines': None} # Set in GetInfo()\n\n\ndef GetInfo():\n BENCHMARK_INFO['num_machines'] = FLAGS.num_vms\n return BENCHMARK_INFO\n\n\ndef Prepare(unused_benchmark_spec):\n pass\n\n\ndef Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n\n Returns:\n A list of samples in the form of 3 or 4 tuples. 
The tuples contain\n the sample metric (string), value (float), and unit (string).\n If a 4th element is included, it is a dictionary of sample\n metadata.\n \"\"\"\n\n samples = []\n vm_number = 0\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n for vm in vms:\n metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n 'machine_instance': vm_number}\n value = vm.TimeToBoot()\n assert value is not None\n samples.append(('Boot Time', value, 'seconds', metadata))\n vm_number += 1\n logging.info(samples)\n assert vm_number == benchmark_spec.num_vms\n return samples\n\n\ndef Cleanup(unused_benchmark_spec):\n pass\n", "path": "perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py"}]} | 1,308 | 426 |
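
The shape of the fix above (move the per-VM measurement into a helper and fan the helpers out across threads) can be sketched with only the standard library. This is not PerfKit's `vm_util.RunThreaded`; the `FakeVM` class stands in for real VM objects whose `TimeToBoot()` would otherwise SSH to the machine:

```python
from concurrent.futures import ThreadPoolExecutor


def get_time_to_boot(vm, vm_index, results):
    value = vm.TimeToBoot()  # would normally involve a remote command
    results.append(("Boot Time", value, "seconds", {"machine_instance": vm_index}))


def collect_boot_times(vms):
    results = []
    with ThreadPoolExecutor(max_workers=len(vms) or 1) as pool:
        futures = [pool.submit(get_time_to_boot, vm, i, results)
                   for i, vm in enumerate(vms)]
        for future in futures:
            future.result()  # re-raise any worker exception
    return results


class FakeVM:
    def __init__(self, seconds):
        self._seconds = seconds

    def TimeToBoot(self):
        return self._seconds


print(collect_boot_times([FakeVM(30.2), FakeVM(28.7)]))
```
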
gh_patches_debug_2021 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ikea group support bind method doesn't return status as expected
https://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25
https://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56
reported by @Adminiuga
we should return the status in [ ] so the bind helper in HA is happy.
</issue>
<code>
[start of zhaquirks/ikea/__init__.py]
1 """Ikea module."""
2 import logging
3 from zigpy.zcl.clusters.lightlink import LightLink
4 from zigpy.quirks import CustomCluster
5
6 _LOGGER = logging.getLogger(__name__)
7
8
9 class LightLinkCluster(CustomCluster, LightLink):
10 """Ikea LightLink cluster."""
11
12 async def bind(self):
13 """Bind LightLink cluster to coordinator."""
14 application = self._endpoint.device.application
15 try:
16 coordinator = application.get_device(application.ieee)
17 except KeyError:
18 _LOGGER.warning(
19 "Aborting - unable to locate required coordinator device."
20 )
21 return
22 group_list = await self.get_group_identifiers(0)
23 group_record = group_list[2]
24 group_id = group_record[0].group_id
25 await coordinator.add_to_group(group_id)
26
[end of zhaquirks/ikea/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py
--- a/zhaquirks/ikea/__init__.py
+++ b/zhaquirks/ikea/__init__.py
@@ -22,4 +22,5 @@
group_list = await self.get_group_identifiers(0)
group_record = group_list[2]
group_id = group_record[0].group_id
- await coordinator.add_to_group(group_id)
+ status = await coordinator.add_to_group(group_id)
+ return [status]
| {"golden_diff": "diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py\n--- a/zhaquirks/ikea/__init__.py\n+++ b/zhaquirks/ikea/__init__.py\n@@ -22,4 +22,5 @@\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n- await coordinator.add_to_group(group_id)\n+ status = await coordinator.add_to_group(group_id)\n+ return [status]\n", "issue": "Ikea group support bind method doesn't return status as expected\nhttps://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25\r\n\r\nhttps://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56\r\n\r\nreported by @Adminiuga \r\n\r\nwe should return the status in [ ] so the bind helper in HA is happy.\n", "before_files": [{"content": "\"\"\"Ikea module.\"\"\"\nimport logging\nfrom zigpy.zcl.clusters.lightlink import LightLink\nfrom zigpy.quirks import CustomCluster\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LightLinkCluster(CustomCluster, LightLink):\n \"\"\"Ikea LightLink cluster.\"\"\"\n\n async def bind(self):\n \"\"\"Bind LightLink cluster to coordinator.\"\"\"\n application = self._endpoint.device.application\n try:\n coordinator = application.get_device(application.ieee)\n except KeyError:\n _LOGGER.warning(\n \"Aborting - unable to locate required coordinator device.\"\n )\n return\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n await coordinator.add_to_group(group_id)\n", "path": "zhaquirks/ikea/__init__.py"}]} | 926 | 131 |
gh_patches_debug_22275 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-39 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor how version is defined
Currently we just have a legacy `version.py` file with the version inside it.
It duplicates the version information from `pyproject.toml`.
That's how it should be: https://github.com/sdispater/poetry/issues/273#issuecomment-401983643
</issue>
<code>
[start of wemake_python_styleguide/version.py]
1 # -*- coding: utf-8 -*-
2
3 __version__ = '0.0.2' # noqa
4 # TODO: resolve after https://github.com/sdispater/poetry/issues/273
5
[end of wemake_python_styleguide/version.py]
[start of wemake_python_styleguide/checker.py]
1 # -*- coding: utf-8 -*-
2
3 from ast import Module
4 from typing import Generator, Tuple
5
6 from wemake_python_styleguide.version import __version__
7 from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor
8 from wemake_python_styleguide.visitors.wrong_function_call import (
9 WrongFunctionCallVisitor,
10 )
11 from wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor
12 from wemake_python_styleguide.visitors.wrong_keyword import (
13 WrongKeywordVisitor,
14 WrongRaiseVisitor,
15 )
16 from wemake_python_styleguide.visitors.wrong_name import (
17 WrongModuleMetadataVisitor,
18 WrongNameVisitor,
19 )
20 from wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor
21
22 CheckResult = Tuple[int, int, str, type]
23
24
25 class Checker(object):
26 """
27 Main checker class.
28
29 Runs all possible checks.
30 """
31
32 name = 'wemake-python-styleguide'
33 version = __version__
34
35 def __init__(self, tree: Module, filename: str = '-') -> None:
36 """Creates new checker instance."""
37 self.tree = tree
38 self.filename = filename
39
40 self._visitors = (
41 WrongRaiseVisitor,
42 WrongFunctionCallVisitor,
43 WrongImportVisitor,
44 WrongKeywordVisitor,
45 WrongNestedVisitor,
46 ComplexityVisitor,
47 WrongNameVisitor,
48 WrongModuleMetadataVisitor,
49 )
50
51 def run(self) -> Generator[CheckResult, None, None]:
52 """
53 Runs the checker.
54
55 This method is used by `flake8` API.
56 """
57 for visitor_class in self._visitors:
58 visiter = visitor_class()
59 visiter.visit(self.tree)
60
61 for error in visiter.errors:
62 lineno, col_offset, message = error.node_items()
63 yield lineno, col_offset, message, type(self)
64
[end of wemake_python_styleguide/checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py
--- a/wemake_python_styleguide/checker.py
+++ b/wemake_python_styleguide/checker.py
@@ -3,7 +3,7 @@
from ast import Module
from typing import Generator, Tuple
-from wemake_python_styleguide.version import __version__
+from wemake_python_styleguide.version import version
from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor
from wemake_python_styleguide.visitors.wrong_function_call import (
WrongFunctionCallVisitor,
@@ -30,7 +30,7 @@
"""
name = 'wemake-python-styleguide'
- version = __version__
+ version = version
def __init__(self, tree: Module, filename: str = '-') -> None:
"""Creates new checker instance."""
diff --git a/wemake_python_styleguide/version.py b/wemake_python_styleguide/version.py
--- a/wemake_python_styleguide/version.py
+++ b/wemake_python_styleguide/version.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
-__version__ = '0.0.2' # noqa
-# TODO: resolve after https://github.com/sdispater/poetry/issues/273
+import pkg_resources
+
+version = pkg_resources.get_distribution('wemake-python-styleguide').version
| {"golden_diff": "diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py\n--- a/wemake_python_styleguide/checker.py\n+++ b/wemake_python_styleguide/checker.py\n@@ -3,7 +3,7 @@\n from ast import Module\n from typing import Generator, Tuple\n \n-from wemake_python_styleguide.version import __version__\n+from wemake_python_styleguide.version import version\n from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\n from wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n@@ -30,7 +30,7 @@\n \"\"\"\n \n name = 'wemake-python-styleguide'\n- version = __version__\n+ version = version\n \n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\ndiff --git a/wemake_python_styleguide/version.py b/wemake_python_styleguide/version.py\n--- a/wemake_python_styleguide/version.py\n+++ b/wemake_python_styleguide/version.py\n@@ -1,4 +1,5 @@\n # -*- coding: utf-8 -*-\n \n-__version__ = '0.0.2' # noqa\n-# TODO: resolve after https://github.com/sdispater/poetry/issues/273\n+import pkg_resources\n+\n+version = pkg_resources.get_distribution('wemake-python-styleguide').version\n", "issue": "Refactor how version is defined\nCurrently we just have a legacy `version.py` file with version inside it.\r\nIt duplicates the version information from `pyproject.toml`.\r\n\r\nThat's how it should be: https://github.com/sdispater/poetry/issues/273#issuecomment-401983643\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n__version__ = '0.0.2' # noqa\n# TODO: resolve after https://github.com/sdispater/poetry/issues/273\n", "path": "wemake_python_styleguide/version.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator, Tuple\n\nfrom wemake_python_styleguide.version import __version__\nfrom wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\nfrom wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor\nfrom wemake_python_styleguide.visitors.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor\n\nCheckResult = Tuple[int, int, str, type]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all possible checks.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = __version__\n\n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = tree\n self.filename = filename\n\n self._visitors = (\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNestedVisitor,\n ComplexityVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n )\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n \"\"\"\n for visitor_class in self._visitors:\n visiter = visitor_class()\n visiter.visit(self.tree)\n\n for error in visiter.errors:\n lineno, col_offset, message = error.node_items()\n yield lineno, col_offset, message, type(self)\n", "path": "wemake_python_styleguide/checker.py"}]} | 1,203 | 321 |