Dataset schema (one row per patch task):

| column | dtype | observed range |
|---|---|---|
| problem_id | string | lengths 18 to 22 |
| source | string | 1 class |
| task_type | string | 1 class |
| in_source_id | string | lengths 13 to 58 |
| prompt | string | lengths 1.71k to 9.01k |
| golden_diff | string | lengths 151 to 4.94k |
| verification_info | string | lengths 465 to 11.3k |
| num_tokens_prompt | int64 | 557 to 2.05k |
| num_tokens_diff | int64 | 48 to 1.02k |
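The rows below can also be pulled programmatically. A hedged sketch follows; it assumes the `source` column doubles as the Hub dataset id and that a `train` split exists, neither of which this dump confirms.

```python
# Sketch: load the dataset this dump appears to come from.
# The id "rasdani/github-patches" is read off the source column;
# the split name is an assumption.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens_diff"])
```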
gh_patches_debug_709 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-4021

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Thumbnail problem with SVGs
Bug detected starting with version 2.101.4 of Geotrek Admin.
It is triggered by adding an SVG as the pictogram on an information desk ("lieu de renseignement").
Explanation: the latest version of easy_thumbnail refuses to generate a thumbnail from an SVG, so API v2 crashes.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/python3
2 import os
3 import distutils.command.build
4 from pathlib import Path
5 from setuptools import setup, find_packages
6 from shutil import copy
7
8 here = os.path.abspath(os.path.dirname(__file__))
9
10
11 class BuildCommand(distutils.command.build.build):
12 def run(self):
13 distutils.command.build.build.run(self)
14 from django.core.management import call_command
15 curdir = os.getcwd()
16 for subdir in ('geotrek', ):
17 os.chdir(subdir)
18 call_command('compilemessages')
19 for path in Path('.').rglob('*.mo'):
20 copy(path, os.path.join(curdir, self.build_lib, subdir, path))
21 os.chdir(curdir)
22
23
24 setup(
25 name='geotrek',
26 version=open(os.path.join(here, 'VERSION')).read().strip(),
27 author='Makina Corpus',
28 author_email='[email protected]',
29 url='https://makina-corpus.com',
30 description="Geotrek",
31 scripts=['manage.py'],
32 install_requires=[
33 'Django==3.2.*',
34 'mapentity',
35 'chardet',
36 'cairosvg',
37 'cairocffi',
38 'env_file',
39 # pinned by requirements.txt
40 'pymemcache',
41 'coreschema',
42 'coreapi',
43 'psycopg2',
44 'pdfimpose',
45 'docutils',
46 'Pillow',
47 'simplekml',
48 'pygal',
49 'paperclip',
50 'django-extended-choices',
51 'django-modelcluster',
52 'django-mptt',
53 'geojson',
54 'tif2geojson',
55 'drf-dynamic-fields',
56 'drf-yasg',
57 'xlrd',
58 'landez',
59 'large-image-source-vips',
60 'django-large-image',
61 'celery',
62 'redis',
63 'django-celery-results',
64 'drf-extensions',
65 'django-colorfield',
66 'Fiona',
67 'markdown',
68 "weasyprint==52.5", # newer version required libpango (not available in bionic)
69 'django-weasyprint<2.0.0', # 2.10 require weasyprint > 53
70 "django-clearcache",
71 "pyopenair",
72 # prod,
73 'gunicorn',
74 'sentry-sdk',
75 ],
76 cmdclass={"build": BuildCommand},
77 include_package_data=True,
78 license='BSD, see LICENSE file.',
79 packages=find_packages(),
80 classifiers=['Natural Language :: English',
81 'Environment :: Web Environment',
82 'Framework :: Django',
83 'Development Status :: 5 - Production/Stable',
84 'Programming Language :: Python :: 3'],
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
    return points
</patch>

diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,6 +72,7 @@
# prod,
'gunicorn',
'sentry-sdk',
+ 'easy-thumbnails[svg]',
],
cmdclass={"build": BuildCommand},
include_package_data=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,6 +72,7 @@\n # prod,\n 'gunicorn',\n 'sentry-sdk',\n+ 'easy-thumbnails[svg]',\n ],\n cmdclass={\"build\": BuildCommand},\n include_package_data=True,\n", "issue": "Probl\u00e8me de thumbnail avec les SVG\nBug d\u00e9tect\u00e9 \u00e0 partir de la version 2.101.4 de Geotrek Admin. \r\n\r\nCelui est d\u00e9clench\u00e9 par l'ajout d'un SVG comme pictogramme sur un lieu de renseignement. \r\n\r\nExplication : la derni\u00e8re version de easy_thumbnail n'accepte pas de faire le thumbnail d'un SVG. -> l'api V2 plante\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\nimport os\nimport distutils.command.build\nfrom pathlib import Path\nfrom setuptools import setup, find_packages\nfrom shutil import copy\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass BuildCommand(distutils.command.build.build):\n def run(self):\n distutils.command.build.build.run(self)\n from django.core.management import call_command\n curdir = os.getcwd()\n for subdir in ('geotrek', ):\n os.chdir(subdir)\n call_command('compilemessages')\n for path in Path('.').rglob('*.mo'):\n copy(path, os.path.join(curdir, self.build_lib, subdir, path))\n os.chdir(curdir)\n\n\nsetup(\n name='geotrek',\n version=open(os.path.join(here, 'VERSION')).read().strip(),\n author='Makina Corpus',\n author_email='[email protected]',\n url='https://makina-corpus.com',\n description=\"Geotrek\",\n scripts=['manage.py'],\n install_requires=[\n 'Django==3.2.*',\n 'mapentity',\n 'chardet',\n 'cairosvg',\n 'cairocffi',\n 'env_file',\n # pinned by requirements.txt\n 'pymemcache',\n 'coreschema',\n 'coreapi',\n 'psycopg2',\n 'pdfimpose',\n 'docutils',\n 'Pillow',\n 'simplekml',\n 'pygal',\n 'paperclip',\n 'django-extended-choices',\n 'django-modelcluster',\n 'django-mptt',\n 'geojson',\n 'tif2geojson',\n 'drf-dynamic-fields',\n 'drf-yasg',\n 'xlrd',\n 'landez',\n 'large-image-source-vips',\n 'django-large-image',\n 'celery',\n 'redis',\n 'django-celery-results',\n 'drf-extensions',\n 'django-colorfield',\n 'Fiona',\n 'markdown',\n \"weasyprint==52.5\", # newer version required libpango (not available in bionic)\n 'django-weasyprint<2.0.0', # 2.10 require weasyprint > 53\n \"django-clearcache\",\n \"pyopenair\",\n # prod,\n 'gunicorn',\n 'sentry-sdk',\n ],\n cmdclass={\"build\": BuildCommand},\n include_package_data=True,\n license='BSD, see LICENSE file.',\n packages=find_packages(),\n classifiers=['Natural Language :: English',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 3'],\n)\n", "path": "setup.py"}]} | 1,378 | 73 |
gh_patches_debug_9365 | rasdani/github-patches | git_diff | systemd__mkosi-1906

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is it possible to create system extensions based on Arch Linux
I have followed the documentation to the best of my abilities, however it seems not to be possible. When using `BaseTrees=…` and `Overlay=true`, pacman fails because it cannot lock its database. This is the reported error message:
```
‣ Building extension image
Create subvolume '/home/x/mkosi-test/.mkosi-tmp_eq6cbps/root'
‣ Mounting base trees…
‣ Installing extra packages for Arch
:: Synchronizing package databases...
error: failed to synchronize all databases (unable to lock database)
‣ "bwrap --dev-bind / / --chdir /home/x/mkosi-test --tmpfs /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/run --tmpfs /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/tmp --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/var/tmp --proc /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/proc --dev /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/dev --ro-bind /sys /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/sys --unsetenv TMPDIR --ro-bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/machine-id /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/machine-id --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/passwd /etc/passwd --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/group /etc/group --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/shadow /etc/shadow --bind /x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/gshadow /etc/gshadow sh -c 'chmod 1777 /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/tmp /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/var/tmp /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/dev/shm && mkdir /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/run/host && echo mkosi >/home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/run/host/container-manager && exec $0 "$@"' pacman --config /home/x/mkosi-test/.mkosi-tmp_eq6cbps/pkgmngr/etc/pacman.conf --root /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root --logfile=/dev/null --cachedir '/home/x/mkosi-test/mkosi.cache/arch~rolling' --gpgdir /etc/pacman.d/gnupg --hookdir /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/pacman.d/hooks --arch x86_64 --color auto --noconfirm --needed -Sy base-devel helix" returned non-zero exit code 1.
```
Is there any known solution or workaround?
### Basic Info
Host OS: Arch Linux (up to date)
mkosi version: 16
### Small Example
Create a `mkosi` directory with two presets, `base` and `extension`. Create a `mkosi.conf` file in both, with the following contents.
`mkosi.presets/base/mkosi.conf`:
```
[Distribution]
Distribution=arch
[Output]
Format=directory
ImageId=base
[Content]
Packages=
base
linux
Bootable=false
```
`mkosi.presets/extension/mkosi.conf`:
```
[Preset]
Dependencies=base
[Distribution]
Distribution=arch
[Output]
Format=directory
ImageId=extension
Overlay=true
[Content]
BaseTrees=../../mkosi.output/base/
Packages=
base-devel
helix
Bootable=false
```
</issue>
<code>
[start of mkosi/state.py]
1 # SPDX-License-Identifier: LGPL-2.1+
2
3 from pathlib import Path
4
5 from mkosi.config import MkosiArgs, MkosiConfig
6 from mkosi.tree import make_tree
7 from mkosi.util import umask
8
9
10 class MkosiState:
11 """State related properties."""
12
13 def __init__(self, args: MkosiArgs, config: MkosiConfig, workspace: Path) -> None:
14 self.args = args
15 self.config = config
16 self.workspace = workspace
17
18 with umask(~0o755):
19 make_tree(self.config, self.root)
20 self.staging.mkdir()
21 self.pkgmngr.mkdir()
22 self.install_dir.mkdir(exist_ok=True)
23 self.cache_dir.mkdir(parents=True, exist_ok=True)
24
25 @property
26 def root(self) -> Path:
27 return self.workspace / "root"
28
29 @property
30 def staging(self) -> Path:
31 return self.workspace / "staging"
32
33 @property
34 def pkgmngr(self) -> Path:
35 return self.workspace / "pkgmngr"
36
37 @property
38 def cache_dir(self) -> Path:
39 return self.config.cache_dir or self.workspace / f"cache/{self.config.distribution}~{self.config.release}"
40
41 @property
42 def install_dir(self) -> Path:
43 return self.workspace / "dest"
44
[end of mkosi/state.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/mkosi/state.py b/mkosi/state.py
--- a/mkosi/state.py
+++ b/mkosi/state.py
@@ -16,7 +16,13 @@
self.workspace = workspace
with umask(~0o755):
- make_tree(self.config, self.root)
+ # Using a btrfs subvolume as the upperdir in an overlayfs results in EXDEV so make sure we create
+ # the root directory as a regular directory if the Overlay= option is enabled.
+ if config.overlay:
+ self.root.mkdir()
+ else:
+ make_tree(self.config, self.root)
+
self.staging.mkdir()
self.pkgmngr.mkdir()
self.install_dir.mkdir(exist_ok=True)
| {"golden_diff": "diff --git a/mkosi/state.py b/mkosi/state.py\n--- a/mkosi/state.py\n+++ b/mkosi/state.py\n@@ -16,7 +16,13 @@\n self.workspace = workspace\n \n with umask(~0o755):\n- make_tree(self.config, self.root)\n+ # Using a btrfs subvolume as the upperdir in an overlayfs results in EXDEV so make sure we create\n+ # the root directory as a regular directory if the Overlay= option is enabled.\n+ if config.overlay:\n+ self.root.mkdir()\n+ else:\n+ make_tree(self.config, self.root)\n+\n self.staging.mkdir()\n self.pkgmngr.mkdir()\n self.install_dir.mkdir(exist_ok=True)\n", "issue": "Is it possible to create system extensions based on Arch Linux\nI have followed the documentation to the best of my abilities, however it seems not to be possible. When using `BaseTrees=\u2026` and `Overlay=true`, pacman fails because it cannot lock its database. This is the reported error message:\r\n\r\n```\r\n\u2023 Building extension image\r\nCreate subvolume '/home/x/mkosi-test/.mkosi-tmp_eq6cbps/root'\r\n\u2023 Mounting base trees\u2026\r\n\u2023 Installing extra packages for Arch\r\n:: Synchronizing package databases...\r\nerror: failed to synchronize all databases (unable to lock database)\r\n\u2023 \"bwrap --dev-bind / / --chdir /home/x/mkosi-test --tmpfs /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/run --tmpfs /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/tmp --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/var/tmp --proc /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/proc --dev /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/dev --ro-bind /sys /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/sys --unsetenv TMPDIR --ro-bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/machine-id /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/machine-id --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/passwd /etc/passwd --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/group /etc/group --bind /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/shadow /etc/shadow --bind /x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/gshadow /etc/gshadow sh -c 'chmod 1777 /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/tmp /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/var/tmp /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/dev/shm && mkdir /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/run/host && echo mkosi >/home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/run/host/container-manager && exec $0 \"$@\"' pacman --config /home/x/mkosi-test/.mkosi-tmp_eq6cbps/pkgmngr/etc/pacman.conf --root /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root --logfile=/dev/null --cachedir '/home/x/mkosi-test/mkosi.cache/arch~rolling' --gpgdir /etc/pacman.d/gnupg --hookdir /home/x/mkosi-test/.mkosi-tmp_eq6cbps/root/etc/pacman.d/hooks --arch x86_64 --color auto --noconfirm --needed -Sy base-devel helix\" returned non-zero exit code 1.\r\n```\r\n\r\nIs there any known solution or workaround?\r\n\r\n### Basic Info\r\nHost OS: Arch Linux (up to date)\r\nmkosi version: 16\r\n\r\n### Small Example\r\n\r\nCreate a `mkosi` directory with two presets, `base` and `extension`. 
Create a `mkosi.conf` file in both, with the following contents.\r\n\r\n`mkosi.presets/base/mkosi.conf`:\r\n```\r\n[Distribution]\r\nDistribution=arch\r\n\r\n[Output]\r\nFormat=directory\r\nImageId=base\r\n\r\n[Content]\r\nPackages=\r\n base\r\n linux\r\nBootable=false\r\n```\r\n\r\n`mkosi.presets/extension/mkosi.conf`:\r\n```\r\n[Preset]\r\nDependencies=base\r\n\r\n[Distribution]\r\nDistribution=arch\r\n\r\n[Output]\r\nFormat=directory\r\nImageId=extension\r\nOverlay=true\r\n\r\n[Content]\r\nBaseTrees=../../mkosi.output/base/\r\nPackages=\r\n base-devel\r\n helix\r\nBootable=false\r\n```\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n\nfrom pathlib import Path\n\nfrom mkosi.config import MkosiArgs, MkosiConfig\nfrom mkosi.tree import make_tree\nfrom mkosi.util import umask\n\n\nclass MkosiState:\n \"\"\"State related properties.\"\"\"\n\n def __init__(self, args: MkosiArgs, config: MkosiConfig, workspace: Path) -> None:\n self.args = args\n self.config = config\n self.workspace = workspace\n\n with umask(~0o755):\n make_tree(self.config, self.root)\n self.staging.mkdir()\n self.pkgmngr.mkdir()\n self.install_dir.mkdir(exist_ok=True)\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n\n @property\n def root(self) -> Path:\n return self.workspace / \"root\"\n\n @property\n def staging(self) -> Path:\n return self.workspace / \"staging\"\n\n @property\n def pkgmngr(self) -> Path:\n return self.workspace / \"pkgmngr\"\n\n @property\n def cache_dir(self) -> Path:\n return self.config.cache_dir or self.workspace / f\"cache/{self.config.distribution}~{self.config.release}\"\n\n @property\n def install_dir(self) -> Path:\n return self.workspace / \"dest\"\n", "path": "mkosi/state.py"}]} | 1,794 | 168 |
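A short aside on the EXDEV error named in the patch comment; the errno itself is standard, while blaming btrfs-subvolume upperdirs for it is taken from the patch, not verified independently.

```python
# EXDEV ("Invalid cross-device link") is what overlayfs surfaces when,
# per the patch comment, the upperdir is a btrfs subvolume; the fix
# sidesteps it by creating a plain directory whenever Overlay= is set.
import errno
import os

print(errno.EXDEV, os.strerror(errno.EXDEV))  # 18 "Invalid cross-device link" on Linux
```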
gh_patches_debug_27335 | rasdani/github-patches | git_diff | geopandas__geopandas-1105

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lat Long order is wrong in WKT example
In regards to documentation of converting WKT format to geopandas GeoDataFrame, the order of LAT LONG is wrong.
See #690
In the WKT POINT, you put the Lat first, amd then Long. It should be the opposite.
When plotting the WKT example, and comparing it to the simple Long Lat example, you can clearly see that the WKT points are not placed where they should be.
From what I understand, WKT should follow the WGS84 standard.
See for example: https://www.drupal.org/project/geo/issues/511370
</issue>
<code>
[start of examples/create_geopandas_from_pandas.py]
1 """
2 Creating a GeoDataFrame from a DataFrame with coordinates
3 ---------------------------------------------------------
4
5 This example shows how to create a ``GeoDataFrame`` when starting from
6 a *regular* ``DataFrame`` that has coordinates either WKT
7 (`well-known text <https://en.wikipedia.org/wiki/Well-known_text>`_)
8 format, or in
9 two columns.
10
11 """
12 import pandas as pd
13 import geopandas
14 import matplotlib.pyplot as plt
15
16 ###############################################################################
17 # From longitudes and latitudes
18 # =============================
19 #
20 # First, let's consider a ``DataFrame`` containing cities and their respective
21 # longitudes and latitudes.
22
23 df = pd.DataFrame(
24 {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
25 'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
26 'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
27 'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]})
28
29 ###############################################################################
30 # A ``GeoDataFrame`` needs a ``shapely`` object. We use geopandas
31 # ``points_from_xy()`` to transform **Longitude** and **Latitude** into a list
32 # of ``shapely.Point`` objects and set it as a ``geometry`` while creating the
33 # ``GeoDataFrame``. (note that ``points_from_xy()`` is an enhanced wrapper for
34 # ``[Point(x, y) for x, y in zip(df.Longitude, df.Latitude)]``)
35
36 gdf = geopandas.GeoDataFrame(
37 df, geometry=geopandas.points_from_xy(df.Longitude, df.Latitude))
38
39
40 ###############################################################################
41 # ``gdf`` looks like this :
42
43 print(gdf.head())
44
45 ###############################################################################
46 # Finally, we plot the coordinates over a country-level map.
47
48 world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
49
50 # We restrict to South America.
51 ax = world[world.continent == 'South America'].plot(
52 color='white', edgecolor='black')
53
54 # We can now plot our GeoDataFrame.
55 gdf.plot(ax=ax, color='red')
56
57 plt.show()
58
59 ###############################################################################
60 # From WKT format
61 # ===============
62 # Here, we consider a ``DataFrame`` having coordinates in WKT format.
63
64 df = pd.DataFrame(
65 {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
66 'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
67 'Coordinates': ['POINT(-34.58 -58.66)', 'POINT(-15.78 -47.91)',
68 'POINT(-33.45 -70.66)', 'POINT(4.60 -74.08)',
69 'POINT(10.48 -66.86)']})
70
71 ###############################################################################
72 # We use ``shapely.wkt`` sub-module to parse wkt format:
73 from shapely import wkt
74
75 df['Coordinates'] = df['Coordinates'].apply(wkt.loads)
76
77 ###############################################################################
78 # The ``GeoDataFrame`` is constructed as follows :
79
80 gdf = geopandas.GeoDataFrame(df, geometry='Coordinates')
81
82 print(gdf.head())
83
[end of examples/create_geopandas_from_pandas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/examples/create_geopandas_from_pandas.py b/examples/create_geopandas_from_pandas.py
--- a/examples/create_geopandas_from_pandas.py
+++ b/examples/create_geopandas_from_pandas.py
@@ -51,7 +51,7 @@
ax = world[world.continent == 'South America'].plot(
color='white', edgecolor='black')
-# We can now plot our GeoDataFrame.
+# We can now plot our ``GeoDataFrame``.
gdf.plot(ax=ax, color='red')
plt.show()
@@ -64,9 +64,9 @@
df = pd.DataFrame(
{'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
- 'Coordinates': ['POINT(-34.58 -58.66)', 'POINT(-15.78 -47.91)',
- 'POINT(-33.45 -70.66)', 'POINT(4.60 -74.08)',
- 'POINT(10.48 -66.86)']})
+ 'Coordinates': ['POINT(-58.66 -34.58)', 'POINT(-47.91 -15.78)',
+ 'POINT(-70.66 -33.45)', 'POINT(-74.08 4.60)',
+ 'POINT(-66.86 10.48)']})
###############################################################################
# We use ``shapely.wkt`` sub-module to parse wkt format:
@@ -80,3 +80,9 @@
gdf = geopandas.GeoDataFrame(df, geometry='Coordinates')
print(gdf.head())
+
+#################################################################################
+# Again, we can plot our ``GeoDataFrame``.
+gdf.plot(ax=ax, color='red')
+
+plt.show()
| {"golden_diff": "diff --git a/examples/create_geopandas_from_pandas.py b/examples/create_geopandas_from_pandas.py\n--- a/examples/create_geopandas_from_pandas.py\n+++ b/examples/create_geopandas_from_pandas.py\n@@ -51,7 +51,7 @@\n ax = world[world.continent == 'South America'].plot(\n color='white', edgecolor='black')\n \n-# We can now plot our GeoDataFrame.\n+# We can now plot our ``GeoDataFrame``.\n gdf.plot(ax=ax, color='red')\n \n plt.show()\n@@ -64,9 +64,9 @@\n df = pd.DataFrame(\n {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],\n 'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],\n- 'Coordinates': ['POINT(-34.58 -58.66)', 'POINT(-15.78 -47.91)',\n- 'POINT(-33.45 -70.66)', 'POINT(4.60 -74.08)',\n- 'POINT(10.48 -66.86)']})\n+ 'Coordinates': ['POINT(-58.66 -34.58)', 'POINT(-47.91 -15.78)',\n+ 'POINT(-70.66 -33.45)', 'POINT(-74.08 4.60)',\n+ 'POINT(-66.86 10.48)']})\n \n ###############################################################################\n # We use ``shapely.wkt`` sub-module to parse wkt format:\n@@ -80,3 +80,9 @@\n gdf = geopandas.GeoDataFrame(df, geometry='Coordinates')\n \n print(gdf.head())\n+\n+#################################################################################\n+# Again, we can plot our ``GeoDataFrame``.\n+gdf.plot(ax=ax, color='red')\n+\n+plt.show()\n", "issue": "Lat Long order is wrong in WKT example\nIn regards to documentation of converting WKT format to geopandas GeoDataFrame, the order of LAT LONG is wrong.\r\nSee #690 \r\nIn the WKT POINT, you put the Lat first, amd then Long. It should be the opposite. \r\nWhen plotting the WKT example, and comparing it to the simple Long Lat example, you can clearly see that the WKT points are not placed where they should be.\r\n\r\nFrom what I understand, WKT should follow the WGS84 standard.\r\nSee for example: https://www.drupal.org/project/geo/issues/511370\n", "before_files": [{"content": "\"\"\"\nCreating a GeoDataFrame from a DataFrame with coordinates\n---------------------------------------------------------\n\nThis example shows how to create a ``GeoDataFrame`` when starting from\na *regular* ``DataFrame`` that has coordinates either WKT\n(`well-known text <https://en.wikipedia.org/wiki/Well-known_text>`_)\nformat, or in\ntwo columns.\n\n\"\"\"\nimport pandas as pd\nimport geopandas\nimport matplotlib.pyplot as plt\n\n###############################################################################\n# From longitudes and latitudes\n# =============================\n#\n# First, let's consider a ``DataFrame`` containing cities and their respective\n# longitudes and latitudes.\n\ndf = pd.DataFrame(\n {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],\n 'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],\n 'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],\n 'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]})\n\n###############################################################################\n# A ``GeoDataFrame`` needs a ``shapely`` object. We use geopandas\n# ``points_from_xy()`` to transform **Longitude** and **Latitude** into a list\n# of ``shapely.Point`` objects and set it as a ``geometry`` while creating the\n# ``GeoDataFrame``. 
(note that ``points_from_xy()`` is an enhanced wrapper for\n# ``[Point(x, y) for x, y in zip(df.Longitude, df.Latitude)]``)\n\ngdf = geopandas.GeoDataFrame(\n df, geometry=geopandas.points_from_xy(df.Longitude, df.Latitude))\n\n\n###############################################################################\n# ``gdf`` looks like this :\n\nprint(gdf.head())\n\n###############################################################################\n# Finally, we plot the coordinates over a country-level map.\n\nworld = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))\n\n# We restrict to South America.\nax = world[world.continent == 'South America'].plot(\n color='white', edgecolor='black')\n\n# We can now plot our GeoDataFrame.\ngdf.plot(ax=ax, color='red')\n\nplt.show()\n\n###############################################################################\n# From WKT format\n# ===============\n# Here, we consider a ``DataFrame`` having coordinates in WKT format.\n\ndf = pd.DataFrame(\n {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],\n 'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],\n 'Coordinates': ['POINT(-34.58 -58.66)', 'POINT(-15.78 -47.91)',\n 'POINT(-33.45 -70.66)', 'POINT(4.60 -74.08)',\n 'POINT(10.48 -66.86)']})\n\n###############################################################################\n# We use ``shapely.wkt`` sub-module to parse wkt format:\nfrom shapely import wkt\n\ndf['Coordinates'] = df['Coordinates'].apply(wkt.loads)\n\n###############################################################################\n# The ``GeoDataFrame`` is constructed as follows :\n\ngdf = geopandas.GeoDataFrame(df, geometry='Coordinates')\n\nprint(gdf.head())\n", "path": "examples/create_geopandas_from_pandas.py"}]} | 1,598 | 446 |
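To make the axis-order point concrete, a small self-contained check; the Buenos Aires coordinates come from the corrected example above, the rest is illustrative.

```python
# Shapely, which backs the docs example, reads WKT POINT as POINT(x y),
# longitude first: the corrected "POINT(-58.66 -34.58)" puts Buenos Aires
# at lon -58.66, lat -34.58.
from shapely import wkt

p = wkt.loads("POINT(-58.66 -34.58)")
print(p.x, p.y)  # -58.66 -34.58 -> x is longitude, y is latitude
```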
gh_patches_debug_39786 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3457

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider statefarm is broken
During the global build at 2021-09-22-14-42-27, spider **statefarm** failed with **0 features** and **6575 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/logs/statefarm.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/statefarm.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/statefarm.geojson))
</issue>
<code>
[start of locations/spiders/statefarm.py]
1 import json
2 import re
3 import scrapy
4
5 from locations.items import GeojsonPointItem
6
7
8 class StateFarmSpider(scrapy.Spider):
9 name = "statefarm"
10 item_attributes = { 'brand': "State Farm" }
11 allowed_domains = ["statefarm.com"]
12 download_delay = 0.2
13
14 start_urls = [
15 'https://www.statefarm.com/agent/us',
16 ]
17
18 def parse_location(self, response):
19
20 name = response.xpath('//*[@id="AgentNameLabelId"]//span[@itemprop="name"]/text()').extract_first()
21 if name:
22 name += ' - State Farm Insurance Agent'
23
24 lat = response.xpath('//*[@id="agentOfficePrimaryLocLat"]/@value').extract_first()
25 lon = response.xpath('//*[@id="agentOfficePrimaryLocLong"]/@value').extract_first()
26
27 properties = {
28 'ref': "_".join(response.url.split('/')[-3:]),
29 'name': name,
30 'addr_full': response.xpath('normalize-space(//div[@itemtype="http://schema.org/PostalAddress"]//span[@id="locStreetContent_mainLocContent"]/text())').extract_first(),
31 'city': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]/div[2]/span/span[1]/text()').extract_first().strip(', '),
32 'state': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]/div[2]/span/span[2]/text()').extract_first(),
33 'postcode': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]/div[2]/span/span[3]/text()').extract_first(),
34 'phone': response.xpath('normalize-space(//span[@id="offNumber_mainLocContent"]/span/text())').extract_first(),
35 'lat': float(lat) if lat else None,
36 'lon': float(lon) if lon else None,
37 'website': response.url,
38 }
39
40 yield GeojsonPointItem(**properties)
41
42 def parse(self, response):
43 agents = response.xpath('//div[contains(@id, "agent-details")]')
44 # agent_sites = response.xpath('//a[contains(text(), "Visit agent site")]/@href').extract()
45
46 if agents:
47 for agent in agents:
48 agent_site = agent.xpath('.//a[contains(text(), "Visit agent site")]/@href').extract_first()
49 if not agent_site:
50 raise Exception('no agent site found')
51 yield scrapy.Request(response.urljoin(agent_site), callback=self.parse_location)
52
53 else:
54 urls = response.xpath('//li/div/a/@href').extract()
55
56 for url in urls:
57 yield scrapy.Request(response.urljoin(url))
58
59
60
[end of locations/spiders/statefarm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/locations/spiders/statefarm.py b/locations/spiders/statefarm.py
--- a/locations/spiders/statefarm.py
+++ b/locations/spiders/statefarm.py
@@ -9,7 +9,7 @@
name = "statefarm"
item_attributes = { 'brand': "State Farm" }
allowed_domains = ["statefarm.com"]
- download_delay = 0.2
+ download_delay = 0.1
start_urls = [
'https://www.statefarm.com/agent/us',
@@ -17,23 +17,20 @@
def parse_location(self, response):
- name = response.xpath('//*[@id="AgentNameLabelId"]//span[@itemprop="name"]/text()').extract_first()
+ name = response.xpath('//span[@itemprop="name"]/text()').extract_first()
if name:
name += ' - State Farm Insurance Agent'
- lat = response.xpath('//*[@id="agentOfficePrimaryLocLat"]/@value').extract_first()
- lon = response.xpath('//*[@id="agentOfficePrimaryLocLong"]/@value').extract_first()
-
properties = {
'ref': "_".join(response.url.split('/')[-3:]),
'name': name,
- 'addr_full': response.xpath('normalize-space(//div[@itemtype="http://schema.org/PostalAddress"]//span[@id="locStreetContent_mainLocContent"]/text())').extract_first(),
- 'city': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]/div[2]/span/span[1]/text()').extract_first().strip(', '),
- 'state': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]/div[2]/span/span[2]/text()').extract_first(),
- 'postcode': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]/div[2]/span/span[3]/text()').extract_first(),
- 'phone': response.xpath('normalize-space(//span[@id="offNumber_mainLocContent"]/span/text())').extract_first(),
- 'lat': float(lat) if lat else None,
- 'lon': float(lon) if lon else None,
+ 'addr_full': response.xpath('normalize-space(//div[@itemtype="http://schema.org/PostalAddress"]//*[@itemprop="streetAddress"]/text())').extract_first(),
+ 'city': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]//*[@itemprop="addressLocality"]/text()').extract_first(),
+ 'state': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]//*[@itemprop="addressRegion"]/text()').extract_first(),
+ 'postcode': response.xpath('//div[@itemtype="http://schema.org/PostalAddress"]//*[@itemprop="postalCode"]/text()').extract_first(),
+ 'phone': response.xpath('//*[@itemprop="telephone"]/a/text()').extract_first(),
+ 'lat': float(response.xpath('//@data-latitude').extract_first()),
+ 'lon': float(response.xpath('//@data-longitude').extract_first()),
'website': response.url,
}
@@ -45,7 +42,7 @@
if agents:
for agent in agents:
- agent_site = agent.xpath('.//a[contains(text(), "Visit agent site")]/@href').extract_first()
+ agent_site = agent.xpath('.//a[contains(text(), "Agent Website")]/@href').extract_first()
if not agent_site:
raise Exception('no agent site found')
yield scrapy.Request(response.urljoin(agent_site), callback=self.parse_location)
| {"golden_diff": "diff --git a/locations/spiders/statefarm.py b/locations/spiders/statefarm.py\n--- a/locations/spiders/statefarm.py\n+++ b/locations/spiders/statefarm.py\n@@ -9,7 +9,7 @@\n name = \"statefarm\"\n item_attributes = { 'brand': \"State Farm\" }\n allowed_domains = [\"statefarm.com\"]\n- download_delay = 0.2\n+ download_delay = 0.1\n \n start_urls = [\n 'https://www.statefarm.com/agent/us',\n@@ -17,23 +17,20 @@\n \n def parse_location(self, response):\n \n- name = response.xpath('//*[@id=\"AgentNameLabelId\"]//span[@itemprop=\"name\"]/text()').extract_first()\n+ name = response.xpath('//span[@itemprop=\"name\"]/text()').extract_first()\n if name:\n name += ' - State Farm Insurance Agent'\n \n- lat = response.xpath('//*[@id=\"agentOfficePrimaryLocLat\"]/@value').extract_first()\n- lon = response.xpath('//*[@id=\"agentOfficePrimaryLocLong\"]/@value').extract_first()\n-\n properties = {\n 'ref': \"_\".join(response.url.split('/')[-3:]),\n 'name': name,\n- 'addr_full': response.xpath('normalize-space(//div[@itemtype=\"http://schema.org/PostalAddress\"]//span[@id=\"locStreetContent_mainLocContent\"]/text())').extract_first(),\n- 'city': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]/div[2]/span/span[1]/text()').extract_first().strip(', '),\n- 'state': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]/div[2]/span/span[2]/text()').extract_first(),\n- 'postcode': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]/div[2]/span/span[3]/text()').extract_first(),\n- 'phone': response.xpath('normalize-space(//span[@id=\"offNumber_mainLocContent\"]/span/text())').extract_first(),\n- 'lat': float(lat) if lat else None,\n- 'lon': float(lon) if lon else None,\n+ 'addr_full': response.xpath('normalize-space(//div[@itemtype=\"http://schema.org/PostalAddress\"]//*[@itemprop=\"streetAddress\"]/text())').extract_first(),\n+ 'city': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]//*[@itemprop=\"addressLocality\"]/text()').extract_first(),\n+ 'state': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]//*[@itemprop=\"addressRegion\"]/text()').extract_first(),\n+ 'postcode': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]//*[@itemprop=\"postalCode\"]/text()').extract_first(),\n+ 'phone': response.xpath('//*[@itemprop=\"telephone\"]/a/text()').extract_first(),\n+ 'lat': float(response.xpath('//@data-latitude').extract_first()),\n+ 'lon': float(response.xpath('//@data-longitude').extract_first()),\n 'website': response.url,\n }\n \n@@ -45,7 +42,7 @@\n \n if agents:\n for agent in agents:\n- agent_site = agent.xpath('.//a[contains(text(), \"Visit agent site\")]/@href').extract_first()\n+ agent_site = agent.xpath('.//a[contains(text(), \"Agent Website\")]/@href').extract_first()\n if not agent_site:\n raise Exception('no agent site found')\n yield scrapy.Request(response.urljoin(agent_site), callback=self.parse_location)\n", "issue": "Spider statefarm is broken\nDuring the global build at 2021-09-22-14-42-27, spider **statefarm** failed with **0 features** and **6575 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/logs/statefarm.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/statefarm.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/statefarm.geojson))\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\n\nfrom 
locations.items import GeojsonPointItem\n\n\nclass StateFarmSpider(scrapy.Spider):\n name = \"statefarm\"\n item_attributes = { 'brand': \"State Farm\" }\n allowed_domains = [\"statefarm.com\"]\n download_delay = 0.2\n\n start_urls = [\n 'https://www.statefarm.com/agent/us',\n ]\n\n def parse_location(self, response):\n\n name = response.xpath('//*[@id=\"AgentNameLabelId\"]//span[@itemprop=\"name\"]/text()').extract_first()\n if name:\n name += ' - State Farm Insurance Agent'\n\n lat = response.xpath('//*[@id=\"agentOfficePrimaryLocLat\"]/@value').extract_first()\n lon = response.xpath('//*[@id=\"agentOfficePrimaryLocLong\"]/@value').extract_first()\n\n properties = {\n 'ref': \"_\".join(response.url.split('/')[-3:]),\n 'name': name,\n 'addr_full': response.xpath('normalize-space(//div[@itemtype=\"http://schema.org/PostalAddress\"]//span[@id=\"locStreetContent_mainLocContent\"]/text())').extract_first(),\n 'city': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]/div[2]/span/span[1]/text()').extract_first().strip(', '),\n 'state': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]/div[2]/span/span[2]/text()').extract_first(),\n 'postcode': response.xpath('//div[@itemtype=\"http://schema.org/PostalAddress\"]/div[2]/span/span[3]/text()').extract_first(),\n 'phone': response.xpath('normalize-space(//span[@id=\"offNumber_mainLocContent\"]/span/text())').extract_first(),\n 'lat': float(lat) if lat else None,\n 'lon': float(lon) if lon else None,\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n agents = response.xpath('//div[contains(@id, \"agent-details\")]')\n # agent_sites = response.xpath('//a[contains(text(), \"Visit agent site\")]/@href').extract()\n\n if agents:\n for agent in agents:\n agent_site = agent.xpath('.//a[contains(text(), \"Visit agent site\")]/@href').extract_first()\n if not agent_site:\n raise Exception('no agent site found')\n yield scrapy.Request(response.urljoin(agent_site), callback=self.parse_location)\n\n else:\n urls = response.xpath('//li/div/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url))\n\n\n", "path": "locations/spiders/statefarm.py"}]} | 1,412 | 794 |
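For orientation, the rewritten XPaths in the diff target schema.org microdata attributes. The fragment below is a hypothetical stand-in for the agent page markup, not HTML verified against statefarm.com.

```python
# Hedged sketch of what the new itemprop-based XPaths would match.
from scrapy import Selector

html = '''
<div itemtype="http://schema.org/PostalAddress">
  <span itemprop="streetAddress">1 Main St</span>
  <span itemprop="addressLocality">Bloomington</span>
  <span itemprop="addressRegion">IL</span>
  <span itemprop="postalCode">61701</span>
</div>
'''
sel = Selector(text=html)
print(sel.xpath('//*[@itemprop="addressLocality"]/text()').get())  # Bloomington
```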
gh_patches_debug_17114 | rasdani/github-patches | git_diff | safe-global__safe-config-service-15

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve admin page
- [x] Add custom Admin models with `list_display`, `display_filter` (for example, by network), `ordering` and `search_fields`: https://docs.djangoproject.com/en/3.2/ref/contrib/admin/
- [x] Use admin decorator https://docs.djangoproject.com/en/3.2/ref/contrib/admin/#the-register-decorator
</issue>
<code>
[start of src/safe_apps/admin.py]
1 from django.contrib import admin
2
3 from .models import SafeApp, Provider
4
5 models = [SafeApp, Provider]
6 admin.site.register(models)
7
[end of src/safe_apps/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py
--- a/src/safe_apps/admin.py
+++ b/src/safe_apps/admin.py
@@ -2,5 +2,34 @@
from .models import SafeApp, Provider
-models = [SafeApp, Provider]
-admin.site.register(models)
+
+class NetworksFilter(admin.SimpleListFilter):
+ title = 'Networks'
+ parameter_name = 'networks'
+
+ def lookups(self, request, model_admin):
+ values = SafeApp.objects.values_list('networks', flat=True)
+ # lookups requires a tuple to be returned – (value, verbose value)
+ networks = [(network, network) for networks in values for network in networks]
+ networks = sorted(set(networks))
+ return networks
+
+ def queryset(self, request, queryset):
+ if value := self.value():
+ queryset = queryset.filter(networks__contains=[value])
+ return queryset
+
+
[email protected](SafeApp)
+class SafeAppAdmin(admin.ModelAdmin):
+ list_display = ('name', 'url', 'networks')
+ list_filter = (NetworksFilter,)
+ search_fields = ('name', 'url')
+ ordering = ('name',)
+
+
[email protected](Provider)
+class ProviderAdmin(admin.ModelAdmin):
+ list_display = ('name', 'url')
+ search_fields = ('name',)
+ ordering = ('name',)
| {"golden_diff": "diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py\n--- a/src/safe_apps/admin.py\n+++ b/src/safe_apps/admin.py\n@@ -2,5 +2,34 @@\n \n from .models import SafeApp, Provider\n \n-models = [SafeApp, Provider]\n-admin.site.register(models)\n+\n+class NetworksFilter(admin.SimpleListFilter):\n+ title = 'Networks'\n+ parameter_name = 'networks'\n+\n+ def lookups(self, request, model_admin):\n+ values = SafeApp.objects.values_list('networks', flat=True)\n+ # lookups requires a tuple to be returned \u2013 (value, verbose value)\n+ networks = [(network, network) for networks in values for network in networks]\n+ networks = sorted(set(networks))\n+ return networks\n+\n+ def queryset(self, request, queryset):\n+ if value := self.value():\n+ queryset = queryset.filter(networks__contains=[value])\n+ return queryset\n+\n+\[email protected](SafeApp)\n+class SafeAppAdmin(admin.ModelAdmin):\n+ list_display = ('name', 'url', 'networks')\n+ list_filter = (NetworksFilter,)\n+ search_fields = ('name', 'url')\n+ ordering = ('name',)\n+\n+\[email protected](Provider)\n+class ProviderAdmin(admin.ModelAdmin):\n+ list_display = ('name', 'url')\n+ search_fields = ('name',)\n+ ordering = ('name',)\n", "issue": "Improve admin page\n- [x] Add custom Admin models with `list_display`, `display_filter` (for example, by network), `ordering` and `search_fields`: https://docs.djangoproject.com/en/3.2/ref/contrib/admin/\r\n- [x] Use admin decorator https://docs.djangoproject.com/en/3.2/ref/contrib/admin/#the-register-decorator\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import SafeApp, Provider\n\nmodels = [SafeApp, Provider]\nadmin.site.register(models)\n", "path": "src/safe_apps/admin.py"}]} | 655 | 318 |
gh_patches_debug_8670 | rasdani/github-patches | git_diff | xonsh__xonsh-1566

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'BaseShell' object has no attribute 'cmdloop'
My installed xonsh (from [AUR](https://aur.archlinux.org/packages/xonsh/)) suddenly broke?
``` console
$ xonsh
The most exciting phrase to hear in science, the one that heralds new
discoveries, is not "Eureka!" (I found it!) but "That's funny ..."
-- Isaac Asimov
Traceback (most recent call last):
File "/bin/xonsh", line 3, in <module>
main()
File "/usr/lib/python3.5/site-packages/xonsh/__amalgam__.py", line 16531, in main
shell.shell.cmdloop()
AttributeError: 'BaseShell' object has no attribute 'cmdloop'
$ xonsh --version
('xonsh/0.4.4',)
$ XONSH_DEBUG=1 xonsh
Today is the first day of the rest of your lossage.
Traceback (most recent call last):
File "/bin/xonsh", line 3, in <module>
main()
File "/usr/lib/python3.5/site-packages/xonsh/main.py", line 222, in main
shell.shell.cmdloop()
AttributeError: 'BaseShell' object has no attribute 'cmdloop'
```
(Ignore the output of `fortune`.)
</issue>
<code>
[start of xonsh/shell.py]
1 # -*- coding: utf-8 -*-
2 """The xonsh shell"""
3 import os
4 import random
5 import builtins
6 import warnings
7
8 from xonsh.xontribs import update_context
9 from xonsh.environ import xonshrc_context
10 from xonsh.execer import Execer
11 from xonsh.platform import (best_shell_type, has_prompt_toolkit,
12 ptk_version_is_supported)
13 from xonsh.tools import XonshError, to_bool_or_int
14
15
16 class Shell(object):
17 """Main xonsh shell.
18
19 Initializes execution environment and decides if prompt_toolkit or
20 readline version of shell should be used.
21 """
22
23 def __init__(self, ctx=None, shell_type=None, config=None, rc=None,
24 **kwargs):
25 """
26 Parameters
27 ----------
28 ctx : Mapping, optional
29 The execution context for the shell (e.g. the globals namespace).
30 If none, this is computed by loading the rc files. If not None,
31 this no additional context is computed and this is used
32 directly.
33 shell_type : str, optional
34 The shell type to start, such as 'readline', 'prompt_toolkit',
35 or 'random'.
36 config : str, optional
37 Path to configuration file.
38 rc : list of str, optional
39 Sequence of paths to run control files.
40 """
41 self.login = kwargs.get('login', True)
42 self.stype = shell_type
43 self._init_environ(ctx, config, rc,
44 kwargs.get('scriptcache', True),
45 kwargs.get('cacheall', False))
46 env = builtins.__xonsh_env__
47 # pick a valid shell -- if no shell is specified by the user,
48 # shell type is pulled from env
49 if shell_type is None:
50 shell_type = env.get('SHELL_TYPE')
51 if shell_type == 'best' or shell_type is None:
52 shell_type = best_shell_type()
53 elif shell_type == 'random':
54 shell_type = random.choice(('readline', 'prompt_toolkit'))
55 if shell_type == 'prompt_toolkit':
56 if not has_prompt_toolkit():
57 warnings.warn('prompt_toolkit is not available, using '
58 'readline instead.')
59 shell_type = 'readline'
60 elif not ptk_version_is_supported():
61 warnings.warn('prompt-toolkit version < v1.0.0 is not '
62 'supported. Please update prompt-toolkit. Using '
63 'readline instead.')
64 shell_type = 'readline'
65 env['SHELL_TYPE'] = shell_type
66 # actually make the shell
67 if shell_type == 'none':
68 from xonsh.base_shell import BaseShell as shell_class
69 elif shell_type == 'prompt_toolkit':
70 from xonsh.ptk.shell import PromptToolkitShell as shell_class
71 elif shell_type == 'readline':
72 from xonsh.readline_shell import ReadlineShell as shell_class
73 else:
74 raise XonshError('{} is not recognized as a shell type'.format(
75 shell_type))
76 self.shell = shell_class(execer=self.execer,
77 ctx=self.ctx, **kwargs)
78 # allows history garbace colector to start running
79 builtins.__xonsh_history__.gc.wait_for_shell = False
80
81 def __getattr__(self, attr):
82 """Delegates calls to appropriate shell instance."""
83 return getattr(self.shell, attr)
84
85 def _init_environ(self, ctx, config, rc, scriptcache, cacheall):
86 self.ctx = {} if ctx is None else ctx
87 debug = to_bool_or_int(os.getenv('XONSH_DEBUG', '0'))
88 self.execer = Execer(config=config, login=self.login, xonsh_ctx=self.ctx,
89 debug_level=debug)
90 self.execer.scriptcache = scriptcache
91 self.execer.cacheall = cacheall
92 if self.stype != 'none' or self.login:
93 # load xontribs from config file
94 names = builtins.__xonsh_config__.get('xontribs', ())
95 for name in names:
96 update_context(name, ctx=self.ctx)
97 # load run control files
98 env = builtins.__xonsh_env__
99 rc = env.get('XONSHRC') if rc is None else rc
100 self.ctx.update(xonshrc_context(rcfiles=rc, execer=self.execer, initial=self.ctx))
101 self.ctx['__name__'] = '__main__'
102
[end of xonsh/shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/xonsh/shell.py b/xonsh/shell.py
--- a/xonsh/shell.py
+++ b/xonsh/shell.py
@@ -48,6 +48,10 @@
# shell type is pulled from env
if shell_type is None:
shell_type = env.get('SHELL_TYPE')
+ if shell_type == 'none':
+ # This bricks interactive xonsh
+ # Can happen from the use of .xinitrc, .xsession, etc
+ shell_type = 'best'
if shell_type == 'best' or shell_type is None:
shell_type = best_shell_type()
elif shell_type == 'random':
| {"golden_diff": "diff --git a/xonsh/shell.py b/xonsh/shell.py\n--- a/xonsh/shell.py\n+++ b/xonsh/shell.py\n@@ -48,6 +48,10 @@\n # shell type is pulled from env\n if shell_type is None:\n shell_type = env.get('SHELL_TYPE')\n+ if shell_type == 'none':\n+ # This bricks interactive xonsh\n+ # Can happen from the use of .xinitrc, .xsession, etc\n+ shell_type = 'best'\n if shell_type == 'best' or shell_type is None:\n shell_type = best_shell_type()\n elif shell_type == 'random':\n", "issue": "AttributeError: 'BaseShell' object has no attribute 'cmdloop'\nMy installed xonsh (from [AUR](https://aur.archlinux.org/packages/xonsh/)) suddenly broke?\n\n``` console\n$ xonsh\nThe most exciting phrase to hear in science, the one that heralds new\ndiscoveries, is not \"Eureka!\" (I found it!) but \"That's funny ...\"\n -- Isaac Asimov\nTraceback (most recent call last):\n File \"/bin/xonsh\", line 3, in <module>\n main()\n File \"/usr/lib/python3.5/site-packages/xonsh/__amalgam__.py\", line 16531, in main\n shell.shell.cmdloop()\nAttributeError: 'BaseShell' object has no attribute 'cmdloop'\n$ xonsh --version\n('xonsh/0.4.4',)\n$ XONSH_DEBUG=1 xonsh\nToday is the first day of the rest of your lossage.\nTraceback (most recent call last):\n File \"/bin/xonsh\", line 3, in <module>\n main()\n File \"/usr/lib/python3.5/site-packages/xonsh/main.py\", line 222, in main\n shell.shell.cmdloop()\nAttributeError: 'BaseShell' object has no attribute 'cmdloop'\n```\n\n(Ignore the output of `fortune`.)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"The xonsh shell\"\"\"\nimport os\nimport random\nimport builtins\nimport warnings\n\nfrom xonsh.xontribs import update_context\nfrom xonsh.environ import xonshrc_context\nfrom xonsh.execer import Execer\nfrom xonsh.platform import (best_shell_type, has_prompt_toolkit,\n ptk_version_is_supported)\nfrom xonsh.tools import XonshError, to_bool_or_int\n\n\nclass Shell(object):\n \"\"\"Main xonsh shell.\n\n Initializes execution environment and decides if prompt_toolkit or\n readline version of shell should be used.\n \"\"\"\n\n def __init__(self, ctx=None, shell_type=None, config=None, rc=None,\n **kwargs):\n \"\"\"\n Parameters\n ----------\n ctx : Mapping, optional\n The execution context for the shell (e.g. the globals namespace).\n If none, this is computed by loading the rc files. If not None,\n this no additional context is computed and this is used\n directly.\n shell_type : str, optional\n The shell type to start, such as 'readline', 'prompt_toolkit',\n or 'random'.\n config : str, optional\n Path to configuration file.\n rc : list of str, optional\n Sequence of paths to run control files.\n \"\"\"\n self.login = kwargs.get('login', True)\n self.stype = shell_type\n self._init_environ(ctx, config, rc,\n kwargs.get('scriptcache', True),\n kwargs.get('cacheall', False))\n env = builtins.__xonsh_env__\n # pick a valid shell -- if no shell is specified by the user,\n # shell type is pulled from env\n if shell_type is None:\n shell_type = env.get('SHELL_TYPE')\n if shell_type == 'best' or shell_type is None:\n shell_type = best_shell_type()\n elif shell_type == 'random':\n shell_type = random.choice(('readline', 'prompt_toolkit'))\n if shell_type == 'prompt_toolkit':\n if not has_prompt_toolkit():\n warnings.warn('prompt_toolkit is not available, using '\n 'readline instead.')\n shell_type = 'readline'\n elif not ptk_version_is_supported():\n warnings.warn('prompt-toolkit version < v1.0.0 is not '\n 'supported. Please update prompt-toolkit. 
Using '\n 'readline instead.')\n shell_type = 'readline'\n env['SHELL_TYPE'] = shell_type\n # actually make the shell\n if shell_type == 'none':\n from xonsh.base_shell import BaseShell as shell_class\n elif shell_type == 'prompt_toolkit':\n from xonsh.ptk.shell import PromptToolkitShell as shell_class\n elif shell_type == 'readline':\n from xonsh.readline_shell import ReadlineShell as shell_class\n else:\n raise XonshError('{} is not recognized as a shell type'.format(\n shell_type))\n self.shell = shell_class(execer=self.execer,\n ctx=self.ctx, **kwargs)\n # allows history garbace colector to start running\n builtins.__xonsh_history__.gc.wait_for_shell = False\n\n def __getattr__(self, attr):\n \"\"\"Delegates calls to appropriate shell instance.\"\"\"\n return getattr(self.shell, attr)\n\n def _init_environ(self, ctx, config, rc, scriptcache, cacheall):\n self.ctx = {} if ctx is None else ctx\n debug = to_bool_or_int(os.getenv('XONSH_DEBUG', '0'))\n self.execer = Execer(config=config, login=self.login, xonsh_ctx=self.ctx,\n debug_level=debug)\n self.execer.scriptcache = scriptcache\n self.execer.cacheall = cacheall\n if self.stype != 'none' or self.login:\n # load xontribs from config file\n names = builtins.__xonsh_config__.get('xontribs', ())\n for name in names:\n update_context(name, ctx=self.ctx)\n # load run control files\n env = builtins.__xonsh_env__\n rc = env.get('XONSHRC') if rc is None else rc\n self.ctx.update(xonshrc_context(rcfiles=rc, execer=self.execer, initial=self.ctx))\n self.ctx['__name__'] = '__main__'\n", "path": "xonsh/shell.py"}]} | 1,984 | 154 |
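To spell out the crash the remap prevents, a tiny check grounded in the traceback above; it assumes a xonsh 0.4.x install is importable.

```python
# BaseShell, selected when SHELL_TYPE is 'none', defines no cmdloop(),
# unlike the readline and prompt_toolkit shells, so main()'s
# shell.shell.cmdloop() raised AttributeError; the patch remaps 'none'
# to 'best' for interactive startup.
from xonsh.base_shell import BaseShell

print(hasattr(BaseShell, "cmdloop"))  # False -> the AttributeError at startup
```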
gh_patches_debug_31353 | rasdani/github-patches | git_diff | CTFd__CTFd-1832 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't require teams for viewing challenges if challenges are public
Looks like if challenges are set to be public but teams are required then challenges can't be seen. This requirement needs to be relaxed a bit.
</issue>
<code>
[start of CTFd/challenges.py]
1 from flask import Blueprint, render_template
2
3 from CTFd.utils import config
4 from CTFd.utils.dates import ctf_ended, ctf_paused, ctf_started
5 from CTFd.utils.decorators import (
6 during_ctf_time_only,
7 require_team,
8 require_verified_emails,
9 )
10 from CTFd.utils.decorators.visibility import check_challenge_visibility
11 from CTFd.utils.helpers import get_errors, get_infos
12
13 challenges = Blueprint("challenges", __name__)
14
15
16 @challenges.route("/challenges", methods=["GET"])
17 @during_ctf_time_only
18 @require_verified_emails
19 @check_challenge_visibility
20 @require_team
21 def listing():
22 infos = get_infos()
23 errors = get_errors()
24
25 if ctf_started() is False:
26 errors.append(f"{config.ctf_name()} has not started yet")
27
28 if ctf_paused() is True:
29 infos.append(f"{config.ctf_name()} is paused")
30
31 if ctf_ended() is True:
32 infos.append(f"{config.ctf_name()} has ended")
33
34 return render_template("challenges.html", infos=infos, errors=errors)
35
[end of CTFd/challenges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/challenges.py b/CTFd/challenges.py
--- a/CTFd/challenges.py
+++ b/CTFd/challenges.py
@@ -1,14 +1,12 @@
-from flask import Blueprint, render_template
+from flask import Blueprint, redirect, render_template, request, url_for
-from CTFd.utils import config
+from CTFd.constants.config import ChallengeVisibilityTypes, Configs
+from CTFd.utils.config import is_teams_mode
from CTFd.utils.dates import ctf_ended, ctf_paused, ctf_started
-from CTFd.utils.decorators import (
- during_ctf_time_only,
- require_team,
- require_verified_emails,
-)
+from CTFd.utils.decorators import during_ctf_time_only, require_verified_emails
from CTFd.utils.decorators.visibility import check_challenge_visibility
from CTFd.utils.helpers import get_errors, get_infos
+from CTFd.utils.user import authed, get_current_team
challenges = Blueprint("challenges", __name__)
@@ -17,18 +15,26 @@
@during_ctf_time_only
@require_verified_emails
@check_challenge_visibility
-@require_team
def listing():
+ if (
+ Configs.challenge_visibility == ChallengeVisibilityTypes.PUBLIC
+ and authed() is False
+ ):
+ pass
+ else:
+ if is_teams_mode() and get_current_team() is None:
+ return redirect(url_for("teams.private", next=request.full_path))
+
infos = get_infos()
errors = get_errors()
if ctf_started() is False:
- errors.append(f"{config.ctf_name()} has not started yet")
+ errors.append(f"{Configs.ctf_name} has not started yet")
if ctf_paused() is True:
- infos.append(f"{config.ctf_name()} is paused")
+ infos.append(f"{Configs.ctf_name} is paused")
if ctf_ended() is True:
- infos.append(f"{config.ctf_name()} has ended")
+ infos.append(f"{Configs.ctf_name} has ended")
return render_template("challenges.html", infos=infos, errors=errors)
| {"golden_diff": "diff --git a/CTFd/challenges.py b/CTFd/challenges.py\n--- a/CTFd/challenges.py\n+++ b/CTFd/challenges.py\n@@ -1,14 +1,12 @@\n-from flask import Blueprint, render_template\n+from flask import Blueprint, redirect, render_template, request, url_for\n \n-from CTFd.utils import config\n+from CTFd.constants.config import ChallengeVisibilityTypes, Configs\n+from CTFd.utils.config import is_teams_mode\n from CTFd.utils.dates import ctf_ended, ctf_paused, ctf_started\n-from CTFd.utils.decorators import (\n- during_ctf_time_only,\n- require_team,\n- require_verified_emails,\n-)\n+from CTFd.utils.decorators import during_ctf_time_only, require_verified_emails\n from CTFd.utils.decorators.visibility import check_challenge_visibility\n from CTFd.utils.helpers import get_errors, get_infos\n+from CTFd.utils.user import authed, get_current_team\n \n challenges = Blueprint(\"challenges\", __name__)\n \n@@ -17,18 +15,26 @@\n @during_ctf_time_only\n @require_verified_emails\n @check_challenge_visibility\n-@require_team\n def listing():\n+ if (\n+ Configs.challenge_visibility == ChallengeVisibilityTypes.PUBLIC\n+ and authed() is False\n+ ):\n+ pass\n+ else:\n+ if is_teams_mode() and get_current_team() is None:\n+ return redirect(url_for(\"teams.private\", next=request.full_path))\n+\n infos = get_infos()\n errors = get_errors()\n \n if ctf_started() is False:\n- errors.append(f\"{config.ctf_name()} has not started yet\")\n+ errors.append(f\"{Configs.ctf_name} has not started yet\")\n \n if ctf_paused() is True:\n- infos.append(f\"{config.ctf_name()} is paused\")\n+ infos.append(f\"{Configs.ctf_name} is paused\")\n \n if ctf_ended() is True:\n- infos.append(f\"{config.ctf_name()} has ended\")\n+ infos.append(f\"{Configs.ctf_name} has ended\")\n \n return render_template(\"challenges.html\", infos=infos, errors=errors)\n", "issue": "Don't require teams for viewing challenges if challenges are public\nLooks like if challenges are set to be public but teams are required then challenges can't be seen. This requirement needs to be relaxed a bit. \n", "before_files": [{"content": "from flask import Blueprint, render_template\n\nfrom CTFd.utils import config\nfrom CTFd.utils.dates import ctf_ended, ctf_paused, ctf_started\nfrom CTFd.utils.decorators import (\n during_ctf_time_only,\n require_team,\n require_verified_emails,\n)\nfrom CTFd.utils.decorators.visibility import check_challenge_visibility\nfrom CTFd.utils.helpers import get_errors, get_infos\n\nchallenges = Blueprint(\"challenges\", __name__)\n\n\[email protected](\"/challenges\", methods=[\"GET\"])\n@during_ctf_time_only\n@require_verified_emails\n@check_challenge_visibility\n@require_team\ndef listing():\n infos = get_infos()\n errors = get_errors()\n\n if ctf_started() is False:\n errors.append(f\"{config.ctf_name()} has not started yet\")\n\n if ctf_paused() is True:\n infos.append(f\"{config.ctf_name()} is paused\")\n\n if ctf_ended() is True:\n infos.append(f\"{config.ctf_name()} has ended\")\n\n return render_template(\"challenges.html\", infos=infos, errors=errors)\n", "path": "CTFd/challenges.py"}]} | 882 | 486 |
gh_patches_debug_369 | rasdani/github-patches | git_diff | kserve__kserve-2478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update ModelMesh version to `v0.10.0`
#### Proposed Changes
Update to the latest ModelMesh version (`v0.10.0`)
> If your changes should also be in the most recent release, add the corresponding "cherrypick-0.X"
label to the original PR; for example, "cherrypick-0.10".
#### TODOs:
> - [ ] Best practice is to open a PR for the cherry-pick yourself after your original PR has been merged
into the main branch.
> - [ ] After the cherry-pick PR has merged, remove the cherry-pick label from the original PR.
**Type of changes**
- [x] This change requires a documentation update --> https://github.com/kserve/website/pull/214
**Special notes for your reviewer**:
1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.
YES
**Checklist**:
- [x] Have you made corresponding changes to the documentation?
**Release note**:
<!-- Write your release note:
1. Enter your extended release note in the below block. If the PR requires additional action from users switching to the new release, include the string "action required".
3. If no release note is required, just write "NONE".
-->
```release-note
???
```
Not sure, maybe,
- "Updgrade to ModelMesh v0.10.0"
or
- point to ModelMesh release notes here
https://github.com/kserve/modelmesh-serving/releases/tag/v0.10.0
---
/cc @yuzisun @rachitchauhan43 @njhill
</issue>
<code>
[start of python/kserve/setup.py]
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import pathlib
15
16 import setuptools
17
18 TESTS_REQUIRES = [
19 'pytest',
20 'pytest-xdist',
21 'pytest-cov',
22 'pytest-asyncio',
23 'pytest-tornasync',
24 'mypy'
25 ]
26
27 with open('requirements.txt') as f:
28 REQUIRES = f.readlines()
29
30 with open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:
31 version = version_file.read().strip()
32
33 setuptools.setup(
34 name='kserve',
35 version=version,
36 author="The KServe Authors",
37 author_email='[email protected], [email protected], [email protected]',
38 license="Apache License Version 2.0",
39 url="https://github.com/kserve/kserve/tree/master/python/kserve",
40 description="KServe Python SDK",
41 long_description="Python SDK for KServe Server and Client.",
42 python_requires='>=3.7',
43 packages=[
44 'kserve',
45 'kserve.api',
46 'kserve.constants',
47 'kserve.models',
48 'kserve.handlers',
49 'kserve.utils',
50 ],
51 package_data={'': ['requirements.txt']},
52 include_package_data=True,
53 zip_safe=False,
54 classifiers=[
55 'Intended Audience :: Developers',
56 'Intended Audience :: Education',
57 'Intended Audience :: Science/Research',
58 'Programming Language :: Python :: 3',
59 'Programming Language :: Python :: 3.7',
60 'Programming Language :: Python :: 3.8',
61 'Programming Language :: Python :: 3.9',
62 "License :: OSI Approved :: Apache Software License",
63 "Operating System :: OS Independent",
64 'Topic :: Scientific/Engineering',
65 'Topic :: Scientific/Engineering :: Artificial Intelligence',
66 'Topic :: Software Development',
67 'Topic :: Software Development :: Libraries',
68 'Topic :: Software Development :: Libraries :: Python Modules',
69 ],
70 install_requires=REQUIRES,
71 tests_require=TESTS_REQUIRES,
72 extras_require={'test': TESTS_REQUIRES}
73 )
74
[end of python/kserve/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kserve/setup.py b/python/kserve/setup.py
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -21,7 +21,8 @@
'pytest-cov',
'pytest-asyncio',
'pytest-tornasync',
- 'mypy'
+ 'mypy',
+ 'portforward',
]
with open('requirements.txt') as f:
| {"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -21,7 +21,8 @@\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n- 'mypy'\n+ 'mypy',\n+ 'portforward',\n ]\n \n with open('requirements.txt') as f:\n", "issue": "Update ModelMesh version to `v0.10.0`\n#### Proposed Changes\r\n\r\nUpdate to the latest ModelMesh version (`v0.10.0`)\r\n\r\n> If your changes should also be in the most recent release, add the corresponding \"cherrypick-0.X\"\r\nlabel to the original PR; for example, \"cherrypick-0.10\".\r\n\r\n#### TODOs:\r\n\r\n> - [ ] Best practice is to open a PR for the cherry-pick yourself after your original PR has been merged\r\ninto the main branch.\r\n> - [ ] After the cherry-pick PR has merged, remove the cherry-pick label from the original PR.\r\n\r\n\r\n**Type of changes**\r\n\r\n- [x] This change requires a documentation update --> https://github.com/kserve/website/pull/214\r\n\r\n**Special notes for your reviewer**:\r\n\r\n1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.\r\n\r\nYES\r\n\r\n**Checklist**:\r\n\r\n- [x] Have you made corresponding changes to the documentation?\r\n\r\n**Release note**:\r\n<!-- Write your release note:\r\n1. Enter your extended release note in the below block. If the PR requires additional action from users switching to the new release, include the string \"action required\".\r\n3. If no release note is required, just write \"NONE\".\r\n-->\r\n```release-note\r\n???\r\n```\r\nNot sure, maybe, \r\n- \"Updgrade to ModelMesh v0.10.0\" \r\nor\r\n- point to ModelMesh release notes here\r\nhttps://github.com/kserve/modelmesh-serving/releases/tag/v0.10.0\r\n\r\n\r\n---\r\n\r\n/cc @yuzisun @rachitchauhan43 @njhill \r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nwith open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:\n version = version_file.read().strip()\n\nsetuptools.setup(\n name='kserve',\n version=version,\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: 
Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]} | 1,612 | 94 |
gh_patches_debug_21948 | rasdani/github-patches | git_diff | urllib3__urllib3-817 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Honor PYTHONWARNINGS for SNIMissingWarning
.local/lib/python2.7/site-packages/requests/packages/urllib3/util/ssl_.py:315: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#snimissingwarning.
SNIMissingWarning
.local/lib/python2.7/site-packages/requests/packages/urllib3/util/ssl_.py:120: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.
InsecurePlatformWarning
I'm not here to discuss the validity of those warnings, but when I tried to get rid of them with the PYTHONWARNINGS environment variable, it only works for the InsecurePlatformWarning one. The other is always displayed whatever I put in PYTHONWARNINGS.
Please advise what PYTHONWARNINGS value to use to get rid of this one, this is a documented python feature:
https://docs.python.org/2/library/warnings.html
https://docs.python.org/2/using/cmdline.html?highlight=pythonwarnings#envvar-PYTHONWARNINGS
Also seen the same problem here: in https://github.com/shazow/urllib3/issues/497
Thanks
</issue>
<code>
[start of urllib3/__init__.py]
1 """
2 urllib3 - Thread-safe connection pooling and re-using.
3 """
4 from __future__ import absolute_import
5 import warnings
6
7 from .connectionpool import (
8 HTTPConnectionPool,
9 HTTPSConnectionPool,
10 connection_from_url
11 )
12
13 from . import exceptions
14 from .filepost import encode_multipart_formdata
15 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
16 from .response import HTTPResponse
17 from .util.request import make_headers
18 from .util.url import get_host
19 from .util.timeout import Timeout
20 from .util.retry import Retry
21
22
23 # Set default logging handler to avoid "No handler found" warnings.
24 import logging
25 try: # Python 2.7+
26 from logging import NullHandler
27 except ImportError:
28 class NullHandler(logging.Handler):
29 def emit(self, record):
30 pass
31
32 __author__ = 'Andrey Petrov ([email protected])'
33 __license__ = 'MIT'
34 __version__ = 'dev'
35
36 __all__ = (
37 'HTTPConnectionPool',
38 'HTTPSConnectionPool',
39 'PoolManager',
40 'ProxyManager',
41 'HTTPResponse',
42 'Retry',
43 'Timeout',
44 'add_stderr_logger',
45 'connection_from_url',
46 'disable_warnings',
47 'encode_multipart_formdata',
48 'get_host',
49 'make_headers',
50 'proxy_from_url',
51 )
52
53 logging.getLogger(__name__).addHandler(NullHandler())
54
55
56 def add_stderr_logger(level=logging.DEBUG):
57 """
58 Helper for quickly adding a StreamHandler to the logger. Useful for
59 debugging.
60
61 Returns the handler after adding it.
62 """
63 # This method needs to be in this __init__.py to get the __name__ correct
64 # even if urllib3 is vendored within another package.
65 logger = logging.getLogger(__name__)
66 handler = logging.StreamHandler()
67 handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
68 logger.addHandler(handler)
69 logger.setLevel(level)
70 logger.debug('Added a stderr logging handler to logger: %s', __name__)
71 return handler
72
73 # ... Clean up.
74 del NullHandler
75
76
77 # SecurityWarning's always go off by default.
78 warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
79 # SubjectAltNameWarning's should go off once per host
80 warnings.simplefilter('default', exceptions.SubjectAltNameWarning)
81 # InsecurePlatformWarning's don't vary between requests, so we keep it default.
82 warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
83 append=True)
84 # SNIMissingWarnings should go off only once.
85 warnings.simplefilter('default', exceptions.SNIMissingWarning)
86
87
88 def disable_warnings(category=exceptions.HTTPWarning):
89 """
90 Helper for quickly disabling all urllib3 warnings.
91 """
92 warnings.simplefilter('ignore', category)
93
[end of urllib3/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/urllib3/__init__.py b/urllib3/__init__.py
--- a/urllib3/__init__.py
+++ b/urllib3/__init__.py
@@ -74,15 +74,18 @@
del NullHandler
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
-warnings.simplefilter('default', exceptions.SubjectAltNameWarning)
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
-warnings.simplefilter('default', exceptions.SNIMissingWarning)
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
| {"golden_diff": "diff --git a/urllib3/__init__.py b/urllib3/__init__.py\n--- a/urllib3/__init__.py\n+++ b/urllib3/__init__.py\n@@ -74,15 +74,18 @@\n del NullHandler\n \n \n+# All warning filters *must* be appended unless you're really certain that they\n+# shouldn't be: otherwise, it's very hard for users to use most Python\n+# mechanisms to silence them.\n # SecurityWarning's always go off by default.\n warnings.simplefilter('always', exceptions.SecurityWarning, append=True)\n # SubjectAltNameWarning's should go off once per host\n-warnings.simplefilter('default', exceptions.SubjectAltNameWarning)\n+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)\n # InsecurePlatformWarning's don't vary between requests, so we keep it default.\n warnings.simplefilter('default', exceptions.InsecurePlatformWarning,\n append=True)\n # SNIMissingWarnings should go off only once.\n-warnings.simplefilter('default', exceptions.SNIMissingWarning)\n+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)\n \n \n def disable_warnings(category=exceptions.HTTPWarning):\n", "issue": "Honor PYTHONWARNINGS for SNIMissingWarning\n.local/lib/python2.7/site-packages/requests/packages/urllib3/util/ssl_.py:315: SNIMissingWarning: An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform. This may cause the server to present an incorrect TLS certificate, which can cause validation failures. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#snimissingwarning.\n SNIMissingWarning\n.local/lib/python2.7/site-packages/requests/packages/urllib3/util/ssl_.py:120: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.\n InsecurePlatformWarning\n\nI'm not here to discuss the validity of those warnings, but when I tried to get rid of them with the PYTHONWARNINGS environment variable, it only works for the InsecurePlatformWarning one. The other is always displayed whatever I put in PYTHONWARNINGS.\n\nPlease advise what PYTHONWARNINGS value to use to get rid of this one, this is a documented python feature: \nhttps://docs.python.org/2/library/warnings.html\nhttps://docs.python.org/2/using/cmdline.html?highlight=pythonwarnings#envvar-PYTHONWARNINGS\n\nAlso seen the same problem here: in https://github.com/shazow/urllib3/issues/497\n\nThanks\n\n", "before_files": [{"content": "\"\"\"\nurllib3 - Thread-safe connection pooling and re-using.\n\"\"\"\nfrom __future__ import absolute_import\nimport warnings\n\nfrom .connectionpool import (\n HTTPConnectionPool,\n HTTPSConnectionPool,\n connection_from_url\n)\n\nfrom . 
import exceptions\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.url import get_host\nfrom .util.timeout import Timeout\nfrom .util.retry import Retry\n\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\ntry: # Python 2.7+\n from logging import NullHandler\nexcept ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\n__author__ = 'Andrey Petrov ([email protected])'\n__license__ = 'MIT'\n__version__ = 'dev'\n\n__all__ = (\n 'HTTPConnectionPool',\n 'HTTPSConnectionPool',\n 'PoolManager',\n 'ProxyManager',\n 'HTTPResponse',\n 'Retry',\n 'Timeout',\n 'add_stderr_logger',\n 'connection_from_url',\n 'disable_warnings',\n 'encode_multipart_formdata',\n 'get_host',\n 'make_headers',\n 'proxy_from_url',\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug('Added a stderr logging handler to logger: %s', __name__)\n return handler\n\n# ... Clean up.\ndel NullHandler\n\n\n# SecurityWarning's always go off by default.\nwarnings.simplefilter('always', exceptions.SecurityWarning, append=True)\n# SubjectAltNameWarning's should go off once per host\nwarnings.simplefilter('default', exceptions.SubjectAltNameWarning)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter('default', exceptions.InsecurePlatformWarning,\n append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter('default', exceptions.SNIMissingWarning)\n\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter('ignore', category)\n", "path": "urllib3/__init__.py"}]} | 1,653 | 256 |
gh_patches_debug_277 | rasdani/github-patches | git_diff | sanic-org__sanic-1045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.6.1 release to PyPi
Hey folks,
There's been a bunch of substantive changes in the past few months; I think it warrants a release of 0.6.1 (or 0.7, considering there may be large changes in PRs like #939). Any chance we could get a new candidate uploaded to PyPi?
If there's a better place to ask this, I'm happy to head there.
</issue>
<code>
[start of sanic/__init__.py]
1 from sanic.app import Sanic
2 from sanic.blueprints import Blueprint
3
4 __version__ = '0.6.0'
5
6 __all__ = ['Sanic', 'Blueprint']
7
[end of sanic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -1,6 +1,6 @@
from sanic.app import Sanic
from sanic.blueprints import Blueprint
-__version__ = '0.6.0'
+__version__ = '0.7.0'
__all__ = ['Sanic', 'Blueprint']
| {"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -1,6 +1,6 @@\n from sanic.app import Sanic\n from sanic.blueprints import Blueprint\n \n-__version__ = '0.6.0'\n+__version__ = '0.7.0'\n \n __all__ = ['Sanic', 'Blueprint']\n", "issue": "0.6.1 release to PyPi\nHey folks,\r\n\r\nThere's been a bunch of substantive changes in the past few months; I think it warrants a release of 0.6.1 (or 0.7, considering there may be large changes in PRs like #939). Any chance we could get a new candidate uploaded to PyPi? \r\n\r\nIf there's a better place to ask this, I'm happy to head there.\n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.6.0'\n\n__all__ = ['Sanic', 'Blueprint']\n", "path": "sanic/__init__.py"}]} | 678 | 99 |
gh_patches_debug_18265 | rasdani/github-patches | git_diff | python-poetry__poetry-4110 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exported requirements.txt contains spurious error message, making it unparseable
- [X] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [X] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [X] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: macOS 10.15.7
- **Poetry version**: 1.1.6
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: Too much internal stuff to share, it's not really related, though
## Issue
When I run `poetry export > requirements.txt && pip install -r requirements.txt`, I see this:
```
Invalid requirement: 'Warning: The lock file is not up to date with the latest changes in pyproject.toml. You may be getting outdated dependencies. Run update to update them.' (from line 1 of requirements.txt)
```
It appears that my Poetry lockfile is out of sync. No biggie, that's correctable and logged in #3092 when it's not necessary to resync.
However, this error message should not make it into the output of the `poetry export` command. That error log line should go to stderr, not stdout.
## Proposed Fix
I think that the `self.line` here https://github.com/python-poetry/poetry/blob/b753aaf4c3c08ef0e54941a6616fe318fdf4f6e4/poetry/console/commands/export.py#L56-L63
should be `self.line_error` which, according to the [docstring for that method](https://github.com/sdispater/cleo/blob/9d289bf709ab2119b58e95da58a655ab70661798/cleo/commands/command.py#L262-L276) outputs to stderr instead of stdout.
</issue>
<code>
[start of poetry/console/commands/export.py]
1 from cleo.helpers import option
2
3 from poetry.utils.exporter import Exporter
4
5 from .command import Command
6
7
8 class ExportCommand(Command):
9
10 name = "export"
11 description = "Exports the lock file to alternative formats."
12
13 options = [
14 option(
15 "format",
16 "f",
17 "Format to export to. Currently, only requirements.txt is supported.",
18 flag=False,
19 default=Exporter.FORMAT_REQUIREMENTS_TXT,
20 ),
21 option("output", "o", "The name of the output file.", flag=False),
22 option("without-hashes", None, "Exclude hashes from the exported file."),
23 option("dev", None, "Include development dependencies."),
24 option(
25 "extras",
26 "E",
27 "Extra sets of dependencies to include.",
28 flag=False,
29 multiple=True,
30 ),
31 option("with-credentials", None, "Include credentials for extra indices."),
32 ]
33
34 def handle(self) -> None:
35 fmt = self.option("format")
36
37 if fmt not in Exporter.ACCEPTED_FORMATS:
38 raise ValueError("Invalid export format: {}".format(fmt))
39
40 output = self.option("output")
41
42 locker = self.poetry.locker
43 if not locker.is_locked():
44 self.line("<comment>The lock file does not exist. Locking.</comment>")
45 options = []
46 if self.io.is_debug():
47 options.append(("-vvv", None))
48 elif self.io.is_very_verbose():
49 options.append(("-vv", None))
50 elif self.io.is_verbose():
51 options.append(("-v", None))
52
53 self.call("lock", " ".join(options))
54
55 if not locker.is_fresh():
56 self.line(
57 "<warning>"
58 "Warning: The lock file is not up to date with "
59 "the latest changes in pyproject.toml. "
60 "You may be getting outdated dependencies. "
61 "Run update to update them."
62 "</warning>"
63 )
64
65 exporter = Exporter(self.poetry)
66 exporter.export(
67 fmt,
68 self.poetry.file.parent,
69 output or self.io,
70 with_hashes=not self.option("without-hashes"),
71 dev=self.option("dev"),
72 extras=self.option("extras"),
73 with_credentials=self.option("with-credentials"),
74 )
75
[end of poetry/console/commands/export.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/console/commands/export.py b/poetry/console/commands/export.py
--- a/poetry/console/commands/export.py
+++ b/poetry/console/commands/export.py
@@ -41,7 +41,7 @@
locker = self.poetry.locker
if not locker.is_locked():
- self.line("<comment>The lock file does not exist. Locking.</comment>")
+ self.line_error("<comment>The lock file does not exist. Locking.</comment>")
options = []
if self.io.is_debug():
options.append(("-vvv", None))
@@ -53,7 +53,7 @@
self.call("lock", " ".join(options))
if not locker.is_fresh():
- self.line(
+ self.line_error(
"<warning>"
"Warning: The lock file is not up to date with "
"the latest changes in pyproject.toml. "
| {"golden_diff": "diff --git a/poetry/console/commands/export.py b/poetry/console/commands/export.py\n--- a/poetry/console/commands/export.py\n+++ b/poetry/console/commands/export.py\n@@ -41,7 +41,7 @@\n \n locker = self.poetry.locker\n if not locker.is_locked():\n- self.line(\"<comment>The lock file does not exist. Locking.</comment>\")\n+ self.line_error(\"<comment>The lock file does not exist. Locking.</comment>\")\n options = []\n if self.io.is_debug():\n options.append((\"-vvv\", None))\n@@ -53,7 +53,7 @@\n self.call(\"lock\", \" \".join(options))\n \n if not locker.is_fresh():\n- self.line(\n+ self.line_error(\n \"<warning>\"\n \"Warning: The lock file is not up to date with \"\n \"the latest changes in pyproject.toml. \"\n", "issue": "Exported requirements.txt contains spurious error message, making it unparseable\n- [X] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [X] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [X] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: macOS 10.15.7\r\n- **Poetry version**: 1.1.6\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: Too much internal stuff to share, it's not really related, though\r\n\r\n## Issue\r\n\r\nWhen I run `poetry export > requirements.txt && pip install -r requirements.txt`, I see this:\r\n\r\n```\r\n Invalid requirement: 'Warning: The lock file is not up to date with the latest changes in pyproject.toml. You may be getting outdated dependencies. Run update to update them.' (from line 1 of requirements.txt)\r\n```\r\n\r\nIt appears that my Poetry lockfile is out of sync. No biggie, that's correctable and logged in #3092 when it's not necessary to resync.\r\n\r\nHowever, this error message should not make it into the output of the `poetry export` command. That error log line should go to stderr, not stdout.\r\n\r\n## Proposed Fix\r\n\r\nI think that the `self.line` here https://github.com/python-poetry/poetry/blob/b753aaf4c3c08ef0e54941a6616fe318fdf4f6e4/poetry/console/commands/export.py#L56-L63\r\nshould be `self.line_error` which, according to the [docstring for that method](https://github.com/sdispater/cleo/blob/9d289bf709ab2119b58e95da58a655ab70661798/cleo/commands/command.py#L262-L276) outputs to stderr instead of stdout.\n", "before_files": [{"content": "from cleo.helpers import option\n\nfrom poetry.utils.exporter import Exporter\n\nfrom .command import Command\n\n\nclass ExportCommand(Command):\n\n name = \"export\"\n description = \"Exports the lock file to alternative formats.\"\n\n options = [\n option(\n \"format\",\n \"f\",\n \"Format to export to. 
Currently, only requirements.txt is supported.\",\n flag=False,\n default=Exporter.FORMAT_REQUIREMENTS_TXT,\n ),\n option(\"output\", \"o\", \"The name of the output file.\", flag=False),\n option(\"without-hashes\", None, \"Exclude hashes from the exported file.\"),\n option(\"dev\", None, \"Include development dependencies.\"),\n option(\n \"extras\",\n \"E\",\n \"Extra sets of dependencies to include.\",\n flag=False,\n multiple=True,\n ),\n option(\"with-credentials\", None, \"Include credentials for extra indices.\"),\n ]\n\n def handle(self) -> None:\n fmt = self.option(\"format\")\n\n if fmt not in Exporter.ACCEPTED_FORMATS:\n raise ValueError(\"Invalid export format: {}\".format(fmt))\n\n output = self.option(\"output\")\n\n locker = self.poetry.locker\n if not locker.is_locked():\n self.line(\"<comment>The lock file does not exist. Locking.</comment>\")\n options = []\n if self.io.is_debug():\n options.append((\"-vvv\", None))\n elif self.io.is_very_verbose():\n options.append((\"-vv\", None))\n elif self.io.is_verbose():\n options.append((\"-v\", None))\n\n self.call(\"lock\", \" \".join(options))\n\n if not locker.is_fresh():\n self.line(\n \"<warning>\"\n \"Warning: The lock file is not up to date with \"\n \"the latest changes in pyproject.toml. \"\n \"You may be getting outdated dependencies. \"\n \"Run update to update them.\"\n \"</warning>\"\n )\n\n exporter = Exporter(self.poetry)\n exporter.export(\n fmt,\n self.poetry.file.parent,\n output or self.io,\n with_hashes=not self.option(\"without-hashes\"),\n dev=self.option(\"dev\"),\n extras=self.option(\"extras\"),\n with_credentials=self.option(\"with-credentials\"),\n )\n", "path": "poetry/console/commands/export.py"}]} | 1,646 | 205 |
gh_patches_debug_24595 | rasdani/github-patches | git_diff | conan-io__conan-552 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scope variables do not accept a value containing a ':'
Hi,
I modified the Boost package recipe so that I can specify an alternate url for downloading the source zip file. I used a scope variable for this purpose. However it seems that conan does not allow scope variables to contain a ':'.
When issuing this command:
conan install Boost/1.61.0@epr-eliaskousk/stable --build -s os=Windows -s arch=x86_64 -s compiler="Visual Studio" -s compiler.version=14 -s compiler.runtime=MDd -s build_type=Debug -sc ALL:source_url=http://sourceforge.net/projects/boost/files/boost
Here is the error message I get:
ERROR: Bad scope ALL:source_url=http://sourceforge.net/projects/boost/files/boost
I suppose there is an issue because the parsing treats the ':' as a special separator?...
Would it be possible to fix this or is it supposed to be like that?
(I can workaround the issue by replacing the character ':' in my string value by yet-another special character)
thanks
</issue>
<code>
[start of conans/model/scope.py]
1 from collections import defaultdict
2 from conans.errors import ConanException
3
4
5 class Scope(dict):
6 """ the set of possible scopes than a package can have, by name(string):
7 "dev", "test", "myscope"...
8 it is just a set, but with syntax to be queried as:
9 if self.scope.dev:
10 """
11
12 def __getattr__(self, field):
13 return self.get(field)
14
15 def __setattr__(self, field, value):
16 self[field] = value
17
18 def __repr__(self):
19 return ", ".join("%s=%s" % (k, v) for k, v in sorted(self.items()))
20
21
22 # This is necessary, as None cannot be ordered in Py3
23 _root = "0CONAN_ROOT*"
24 _all = "ALL"
25
26
27 class Scopes(defaultdict):
28 """ all the scopes of a dependency graph, as a dict{package name(str): Scope
29 the root package of the graph might not have name, then its key is None.
30 It is loaded and saved to text as:
31 Package1:dev
32 Package1:test
33 Package2:dev
34 dev # for the root package, without name
35 other # any name allowed
36 This will be stored in memory as {Package1: Scopes(set[dev, test]),
37 Package2: Scopes(...),
38 None: Scopes(set[dev, other])
39 """
40 def __init__(self):
41 super(Scopes, self).__init__(Scope)
42 self[_root].dev = True
43
44 def package_scope(self, name=None):
45 """ return the scopes for the given package which are the scopes set
46 for ALL, updated (high priority) with the specific package scopes
47 if the package name is None, then it is the ROOT package/consumer
48 """
49 scope = Scope(self.get(_all, {}))
50 scope.update(self[name or _root])
51 return scope
52
53 @staticmethod
54 def from_list(items):
55 result = Scopes()
56 for item in items:
57 chunks = item.split(":")
58 if len(chunks) == 2:
59 root = chunks[0]
60 scope = chunks[1]
61 elif len(chunks) == 1:
62 root = _root
63 scope = chunks[0]
64 else:
65 raise ConanException("Bad scope %s" % item)
66 try:
67 key, value = scope.split("=")
68 except:
69 raise ConanException("Bad scope %s" % item)
70 v = value.upper()
71 if v == "TRUE":
72 value = True
73 elif v == "FALSE":
74 value = False
75 elif v == "NONE":
76 value = None
77 result[root][key] = value
78 return result
79
80 def update_scope(self, other):
81 for name, scopes in other.items():
82 self[name].update(scopes)
83
84 @staticmethod
85 def loads(text):
86 return Scopes.from_list([s.strip() for s in text.splitlines()])
87
88 def dumps(self):
89 result = []
90 for name, scopes in sorted(self.items()):
91 if name != _root:
92 result.extend("%s:%s=%s" % (name, k, v) for (k, v) in sorted(scopes.items()))
93 else:
94 result.extend("%s=%s" % (k, v) for (k, v) in sorted(scopes.items()))
95 return "\n".join(result)
96
[end of conans/model/scope.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/model/scope.py b/conans/model/scope.py
--- a/conans/model/scope.py
+++ b/conans/model/scope.py
@@ -54,17 +54,8 @@
def from_list(items):
result = Scopes()
for item in items:
- chunks = item.split(":")
- if len(chunks) == 2:
- root = chunks[0]
- scope = chunks[1]
- elif len(chunks) == 1:
- root = _root
- scope = chunks[0]
- else:
- raise ConanException("Bad scope %s" % item)
try:
- key, value = scope.split("=")
+ key, value = item.split("=")
except:
raise ConanException("Bad scope %s" % item)
v = value.upper()
@@ -74,7 +65,18 @@
value = False
elif v == "NONE":
value = None
- result[root][key] = value
+
+ chunks = key.split(":")
+ if len(chunks) == 2:
+ root = chunks[0]
+ scope = chunks[1]
+ elif len(chunks) == 1:
+ root = _root
+ scope = chunks[0]
+ else:
+ raise ConanException("Bad scope %s" % item)
+
+ result[root][scope] = value
return result
def update_scope(self, other):
| {"golden_diff": "diff --git a/conans/model/scope.py b/conans/model/scope.py\n--- a/conans/model/scope.py\n+++ b/conans/model/scope.py\n@@ -54,17 +54,8 @@\n def from_list(items):\n result = Scopes()\n for item in items:\n- chunks = item.split(\":\")\n- if len(chunks) == 2:\n- root = chunks[0]\n- scope = chunks[1]\n- elif len(chunks) == 1:\n- root = _root\n- scope = chunks[0]\n- else:\n- raise ConanException(\"Bad scope %s\" % item)\n try:\n- key, value = scope.split(\"=\")\n+ key, value = item.split(\"=\")\n except:\n raise ConanException(\"Bad scope %s\" % item)\n v = value.upper()\n@@ -74,7 +65,18 @@\n value = False\n elif v == \"NONE\":\n value = None\n- result[root][key] = value\n+\n+ chunks = key.split(\":\")\n+ if len(chunks) == 2:\n+ root = chunks[0]\n+ scope = chunks[1]\n+ elif len(chunks) == 1:\n+ root = _root\n+ scope = chunks[0]\n+ else:\n+ raise ConanException(\"Bad scope %s\" % item)\n+\n+ result[root][scope] = value\n return result\n \n def update_scope(self, other):\n", "issue": "Scope variables do not accept a value containing a ':'\nHi,\nI modified the Boost package recipe so that I can specify an alternate url for downloading the source zip file. I used a scope variable for this purpose. However it seems that conan does not allow scope variables to contain a ':'.\n\nWhen issuing this command:\nconan install Boost/1.61.0@epr-eliaskousk/stable --build -s os=Windows -s arch=x86_64 -s compiler=\"Visual Studio\" -s compiler.version=14 -s compiler.runtime=MDd -s build_type=Debug -sc ALL:source_url=http://sourceforge.net/projects/boost/files/boost\n\nHere is the error message I get:\nERROR: Bad scope ALL:source_url=http://sourceforge.net/projects/boost/files/boost\n\nI suppose there is an issue because the parsing treats the ':' as a special separator?...\n\nWould it be possible to fix this or is it supposed to be like that?\n(I can workaround the issue by replacing the character ':' in my string value by yet-another special character)\n\nthanks\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom conans.errors import ConanException\n\n\nclass Scope(dict):\n \"\"\" the set of possible scopes than a package can have, by name(string):\n \"dev\", \"test\", \"myscope\"...\n it is just a set, but with syntax to be queried as:\n if self.scope.dev:\n \"\"\"\n\n def __getattr__(self, field):\n return self.get(field)\n\n def __setattr__(self, field, value):\n self[field] = value\n\n def __repr__(self):\n return \", \".join(\"%s=%s\" % (k, v) for k, v in sorted(self.items()))\n\n\n# This is necessary, as None cannot be ordered in Py3\n_root = \"0CONAN_ROOT*\"\n_all = \"ALL\"\n\n\nclass Scopes(defaultdict):\n \"\"\" all the scopes of a dependency graph, as a dict{package name(str): Scope\n the root package of the graph might not have name, then its key is None.\n It is loaded and saved to text as:\n Package1:dev\n Package1:test\n Package2:dev\n dev # for the root package, without name\n other # any name allowed\n This will be stored in memory as {Package1: Scopes(set[dev, test]),\n Package2: Scopes(...),\n None: Scopes(set[dev, other])\n \"\"\"\n def __init__(self):\n super(Scopes, self).__init__(Scope)\n self[_root].dev = True\n\n def package_scope(self, name=None):\n \"\"\" return the scopes for the given package which are the scopes set\n for ALL, updated (high priority) with the specific package scopes\n if the package name is None, then it is the ROOT package/consumer\n \"\"\"\n scope = Scope(self.get(_all, {}))\n scope.update(self[name or _root])\n return 
scope\n\n @staticmethod\n def from_list(items):\n result = Scopes()\n for item in items:\n chunks = item.split(\":\")\n if len(chunks) == 2:\n root = chunks[0]\n scope = chunks[1]\n elif len(chunks) == 1:\n root = _root\n scope = chunks[0]\n else:\n raise ConanException(\"Bad scope %s\" % item)\n try:\n key, value = scope.split(\"=\")\n except:\n raise ConanException(\"Bad scope %s\" % item)\n v = value.upper()\n if v == \"TRUE\":\n value = True\n elif v == \"FALSE\":\n value = False\n elif v == \"NONE\":\n value = None\n result[root][key] = value\n return result\n\n def update_scope(self, other):\n for name, scopes in other.items():\n self[name].update(scopes)\n\n @staticmethod\n def loads(text):\n return Scopes.from_list([s.strip() for s in text.splitlines()])\n\n def dumps(self):\n result = []\n for name, scopes in sorted(self.items()):\n if name != _root:\n result.extend(\"%s:%s=%s\" % (name, k, v) for (k, v) in sorted(scopes.items()))\n else:\n result.extend(\"%s=%s\" % (k, v) for (k, v) in sorted(scopes.items()))\n return \"\\n\".join(result)\n", "path": "conans/model/scope.py"}]} | 1,690 | 335 |
gh_patches_debug_1371 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1902 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot view inventory
## What kind of an issue is this?
- [x] Bug report
## What is the expected behaviour?
To be able to view the inventory
## What is the current behaviour?
A 500 error, with the message `TypeError: '>=' not supported between instances of 'datetime.date' and 'NoneType'`.
## How do you reproduce this problem?
Make sure the inventory is not empty, and try to visit it.
</issue>
<code>
[start of apps/inventory/models.py]
1 # -*- coding: utf-8 -*-
2
3 from django.conf import settings
4 from django.core.mail import EmailMessage
5 from django.db import models
6 from django.utils import timezone
7 from django.utils.translation import ugettext as _
8
9 from apps.gallery.models import ResponsiveImage
10
11
12 class ItemCategory(models.Model):
13 name = models.CharField(_("Kategori"), max_length=50)
14
15 def __str__(self):
16 return self.name
17
18
19 class Item(models.Model):
20
21 name = models.CharField(_("Varetype"), max_length=50)
22 description = models.CharField(_("Beskrivelse"), max_length=50, null=True, blank=True)
23 price = models.IntegerField(_("Pris"), null=True, blank=True)
24 available = models.BooleanField(_("Til salgs"), default=False)
25 category = models.ForeignKey(ItemCategory, verbose_name=_("Kategori"),
26 related_name="category", null=True, blank=True)
27 image = models.ForeignKey(ResponsiveImage, null=True, blank=True, default=None)
28
29 @property
30 def oldest_expiration_date(self):
31 batches = self.batches.all().order_by("expiration_date")
32 if batches:
33 return batches[0].expiration_date
34 else:
35 return None
36
37 @property
38 def last_added(self):
39 batches = self.batches.all().order_by("-date_added")
40 if batches:
41 return batches[0].date_added
42 else:
43 return None
44
45 def oldest_batch(self):
46 batches = self.batches.filter(amount__gt=0).order_by("date_added")
47 if batches:
48 return batches[0]
49 else:
50 return None
51
52 @property
53 def total_amount(self):
54 return sum([batch.amount for batch in self.batches.all()])
55
56 @property
57 def has_expired_batch(self):
58 if timezone.now().date() >= self.oldest_expiration_date:
59 return True
60 return False
61
62 def reduce_stock(self, amount):
63 """
64 Makes an assumption that the oldest batches are sold first and reduce them first.
65 """
66
67 oldest_batch = self.oldest_batch()
68
69 if oldest_batch:
70 if oldest_batch.amount > amount:
71 oldest_batch.amount = oldest_batch.amount - amount
72 oldest_batch.save()
73 else:
74 diff = amount - oldest_batch.amount
75 oldest_batch.amount = 0
76 oldest_batch.save()
77 self.reduce_stock(diff)
78
79 self.handle_notifications(amount)
80
81 def handle_notifications(self, amount):
82
83 # Send one notification when the stock goes to or below 10
84 if self.total_amount <= 10 and self.total_amount + amount > 10:
85 message = "Det er kun " + str(self.total_amount) + " igjen av " + str(self.name) + \
86 " på kontoret.\n\n" \
87 "Dette er en automatisk generert melding og antallet kan være noe feil."
88
89 EmailMessage(
90 "[Nibble] Lav stock på " + self.name,
91 str(message),
92 "[email protected]",
93 [],
94 [settings.EMAIL_TRIKOM]
95 ).send()
96
97 def __str__(self):
98 return self.name
99
100 class Meta(object):
101 verbose_name = _("Vare")
102 verbose_name_plural = _("Varer")
103 permissions = (
104 ("view_item", "View Inventory Item"),
105 )
106
107
108 class Batch(models.Model):
109
110 item = models.ForeignKey(Item, verbose_name=_("Vare"), related_name="batches")
111 amount = models.IntegerField(_("Antall"), default=0)
112 date_added = models.DateField(_("Dato lagt til"), editable=False, auto_now_add=True)
113 expiration_date = models.DateField(_("Utløpsdato"), null=True, blank=True, editable=True)
114
115 class Meta(object):
116 verbose_name = _("Batch")
117 verbose_name_plural = _("Batches")
118
[end of apps/inventory/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/inventory/models.py b/apps/inventory/models.py
--- a/apps/inventory/models.py
+++ b/apps/inventory/models.py
@@ -55,7 +55,7 @@
@property
def has_expired_batch(self):
- if timezone.now().date() >= self.oldest_expiration_date:
+ if self.oldest_expiration_date and timezone.now().date() >= self.oldest_expiration_date:
return True
         return False
| {"golden_diff": "diff --git a/apps/inventory/models.py b/apps/inventory/models.py\n--- a/apps/inventory/models.py\n+++ b/apps/inventory/models.py\n@@ -55,7 +55,7 @@\n \n @property\n def has_expired_batch(self):\n- if timezone.now().date() >= self.oldest_expiration_date:\n+ if self.oldest_expiration_date and timezone.now().date() >= self.oldest_expiration_date:\n return True\n return False\n", "issue": "Cannot view inventory\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n\r\n\r\n## What is the expected behaviour?\r\n\r\nTo be able to view the inventory\r\n\r\n## What is the current behaviour?\r\n\r\nA 500 error, with the message `TypeError: '>=' not supported between instances of 'datetime.date' and 'NoneType'`. \r\n\r\n\r\n## How do you reproduce this problem? \r\n\r\nMake sure the inventory is not empty, and try to visit it.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom apps.gallery.models import ResponsiveImage\n\n\nclass ItemCategory(models.Model):\n name = models.CharField(_(\"Kategori\"), max_length=50)\n\n def __str__(self):\n return self.name\n\n\nclass Item(models.Model):\n\n name = models.CharField(_(\"Varetype\"), max_length=50)\n description = models.CharField(_(\"Beskrivelse\"), max_length=50, null=True, blank=True)\n price = models.IntegerField(_(\"Pris\"), null=True, blank=True)\n available = models.BooleanField(_(\"Til salgs\"), default=False)\n category = models.ForeignKey(ItemCategory, verbose_name=_(\"Kategori\"),\n related_name=\"category\", null=True, blank=True)\n image = models.ForeignKey(ResponsiveImage, null=True, blank=True, default=None)\n\n @property\n def oldest_expiration_date(self):\n batches = self.batches.all().order_by(\"expiration_date\")\n if batches:\n return batches[0].expiration_date\n else:\n return None\n\n @property\n def last_added(self):\n batches = self.batches.all().order_by(\"-date_added\")\n if batches:\n return batches[0].date_added\n else:\n return None\n\n def oldest_batch(self):\n batches = self.batches.filter(amount__gt=0).order_by(\"date_added\")\n if batches:\n return batches[0]\n else:\n return None\n\n @property\n def total_amount(self):\n return sum([batch.amount for batch in self.batches.all()])\n\n @property\n def has_expired_batch(self):\n if timezone.now().date() >= self.oldest_expiration_date:\n return True\n return False\n\n def reduce_stock(self, amount):\n \"\"\"\n Makes an assumption that the oldest batches are sold first and reduce them first.\n \"\"\"\n\n oldest_batch = self.oldest_batch()\n\n if oldest_batch:\n if oldest_batch.amount > amount:\n oldest_batch.amount = oldest_batch.amount - amount\n oldest_batch.save()\n else:\n diff = amount - oldest_batch.amount\n oldest_batch.amount = 0\n oldest_batch.save()\n self.reduce_stock(diff)\n\n self.handle_notifications(amount)\n\n def handle_notifications(self, amount):\n\n # Send one notification when the stock goes to or below 10\n if self.total_amount <= 10 and self.total_amount + amount > 10:\n message = \"Det er kun \" + str(self.total_amount) + \" igjen av \" + str(self.name) + \\\n \" p\u00e5 kontoret.\\n\\n\" \\\n \"Dette er en automatisk generert melding og antallet kan v\u00e6re noe feil.\"\n\n EmailMessage(\n \"[Nibble] Lav stock p\u00e5 \" + self.name,\n str(message),\n \"[email protected]\",\n [],\n [settings.EMAIL_TRIKOM]\n 
).send()\n\n def __str__(self):\n return self.name\n\n class Meta(object):\n verbose_name = _(\"Vare\")\n verbose_name_plural = _(\"Varer\")\n permissions = (\n (\"view_item\", \"View Inventory Item\"),\n )\n\n\nclass Batch(models.Model):\n\n item = models.ForeignKey(Item, verbose_name=_(\"Vare\"), related_name=\"batches\")\n amount = models.IntegerField(_(\"Antall\"), default=0)\n date_added = models.DateField(_(\"Dato lagt til\"), editable=False, auto_now_add=True)\n expiration_date = models.DateField(_(\"Utl\u00f8psdato\"), null=True, blank=True, editable=True)\n\n class Meta(object):\n verbose_name = _(\"Batch\")\n verbose_name_plural = _(\"Batches\")\n", "path": "apps/inventory/models.py"}]} | 1,699 | 104 |
gh_patches_debug_4626 | rasdani/github-patches | git_diff | sopel-irc__sopel-1677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
etymology: Empty argument not handled
Calling `.ety` with no argument, or with an empty one, doesn't behave properly.
On Python 2 (my old personal instance), it causes `TypeError: None object cannot be quoted (file "/usr/lib/python2.7/urllib.py", line 1255, in quote)`
On Python 3 ("official" Sopel instance on freenode), the command looks up the etymology of "None" (and gets no results, because of the capital letter).
This is a good, easy issue for first-time contributors to fix.
</issue>
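A minimal sketch of the kind of guard this calls for, written standalone rather than as the project's actual patch; the function name and the error message are illustrative, and the real module quotes the word with `sopel.tools.web.quote` before building the URL.

```python
def lookup_url(word):
    """Return the etymonline URL for ``word``, rejecting empty input.

    ``word`` is ``None`` when the command is invoked with no argument,
    so the guard must run before any URL quoting is attempted.
    """
    if not word or not word.strip():
        raise ValueError("No word to look for.")
    if len(word) > 25:
        raise ValueError("Word too long: %s[…]" % word[:10])
    return "https://www.etymonline.com/word/%s" % word.strip()
```

With such a guard in place, the command handler can report the `ValueError` text to the user instead of crashing on Python 2 or querying the literal string `"None"` on Python 3.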
<code>
[start of sopel/modules/etymology.py]
1 # coding=utf-8
2 """
3 etymology.py - Sopel Etymology Module
4 Copyright 2007-9, Sean B. Palmer, inamidst.com
5 Copyright 2018-9, Sopel contributors
6 Licensed under the Eiffel Forum License 2.
7
8 https://sopel.chat
9 """
10 from __future__ import unicode_literals, absolute_import, print_function, division
11
12 from re import sub
13
14 from requests import get
15
16 from sopel.module import commands, example, NOLIMIT
17 from sopel.tools import web
18
19 try:
20 # Python 2.7
21 from HTMLParser import HTMLParser
22 h = HTMLParser()
23 unescape = h.unescape
24 except ImportError:
25 try:
26 # Python 3.4+
27 from html import unescape # https://stackoverflow.com/a/2087433
28 except ImportError:
29 # Python 3.3... sigh
30 from html.parser import HTMLParser
31 h = HTMLParser()
32 unescape = h.unescape
33
34
35 ETYURI = 'https://www.etymonline.com/word/%s'
36 ETYSEARCH = 'https://www.etymonline.com/search?q=%s'
37
38
39 def etymology(word):
40 # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc
41 # entries? - http://swhack.com/logs/2006-07-19#T15-05-29
42
43 if len(word) > 25:
44 raise ValueError("Word too long: %s[…]" % word[:10])
45
46 ety = get(ETYURI % web.quote(word))
47 if ety.status_code != 200:
48 return None
49
50 # Let's find it
51 start = ety.text.find("word__defination")
52 start = ety.text.find("<p>", start)
53 stop = ety.text.find("</p>", start)
54 sentence = ety.text[start + 3:stop]
55 # Clean up
56 sentence = unescape(sentence)
57 sentence = sub('<[^<]+?>', '', sentence)
58
59 maxlength = 275
60 if len(sentence) > maxlength:
61 sentence = sentence[:maxlength]
62 words = sentence[:-5].split(' ')
63 words.pop()
64 sentence = ' '.join(words) + ' […]'
65
66 sentence = '"' + sentence.replace('"', "'") + '"'
67 return sentence + ' - ' + (ETYURI % web.quote(word))
68
69
70 @commands('ety')
71 @example('.ety word')
72 def f_etymology(bot, trigger):
73 """Look up the etymology of a word"""
74 word = trigger.group(2)
75
76 try:
77 result = etymology(word)
78 except IOError:
79 msg = "Can't connect to etymonline.com (%s)" % (ETYURI % web.quote(word))
80 bot.say(msg, trigger.sender)
81 return NOLIMIT
82 except (AttributeError, TypeError):
83 result = None
84 except ValueError as ve:
85 result = str(ve)
86
87 if result is not None:
88 bot.say(result, trigger.sender)
89 else:
90 uri = ETYSEARCH % web.quote(word)
91 msg = 'Can\'t find the etymology for "%s". Try %s' % (word, uri)
92 bot.say(msg, trigger.sender)
93 return NOLIMIT
94
[end of sopel/modules/etymology.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/etymology.py b/sopel/modules/etymology.py
--- a/sopel/modules/etymology.py
+++ b/sopel/modules/etymology.py
@@ -40,6 +40,9 @@
# @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc
# entries? - http://swhack.com/logs/2006-07-19#T15-05-29
+ if not word:
+ raise ValueError("No word to look for.")
+
if len(word) > 25:
raise ValueError("Word too long: %s[…]" % word[:10])
| {"golden_diff": "diff --git a/sopel/modules/etymology.py b/sopel/modules/etymology.py\n--- a/sopel/modules/etymology.py\n+++ b/sopel/modules/etymology.py\n@@ -40,6 +40,9 @@\n # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc\n # entries? - http://swhack.com/logs/2006-07-19#T15-05-29\n \n+ if not word:\n+ raise ValueError(\"No word to look for.\")\n+\n if len(word) > 25:\n raise ValueError(\"Word too long: %s[\u2026]\" % word[:10])\n", "issue": "etymology: Empty argument not handled\nDoing `.ety` with no or an empty argument doesn't behave properly.\r\n\r\nOn Python 2 (my old personal instance), it causes `TypeError: None object cannot be quoted (file \"/usr/lib/python2.7/urllib.py\", line 1255, in quote)`\r\n\r\nOn Python 3 (\"official\" Sopel instance on freenode), the command looks up the etymology of \"None\" (and gets no results, because of the capital letter).\r\n\r\nThis is a good, easy issue for first-time contributors to fix.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\netymology.py - Sopel Etymology Module\nCopyright 2007-9, Sean B. Palmer, inamidst.com\nCopyright 2018-9, Sopel contributors\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom re import sub\n\nfrom requests import get\n\nfrom sopel.module import commands, example, NOLIMIT\nfrom sopel.tools import web\n\ntry:\n # Python 2.7\n from HTMLParser import HTMLParser\n h = HTMLParser()\n unescape = h.unescape\nexcept ImportError:\n try:\n # Python 3.4+\n from html import unescape # https://stackoverflow.com/a/2087433\n except ImportError:\n # Python 3.3... sigh\n from html.parser import HTMLParser\n h = HTMLParser()\n unescape = h.unescape\n\n\nETYURI = 'https://www.etymonline.com/word/%s'\nETYSEARCH = 'https://www.etymonline.com/search?q=%s'\n\n\ndef etymology(word):\n # @@ <nsh> sbp, would it be possible to have a flag for .ety to get 2nd/etc\n # entries? - http://swhack.com/logs/2006-07-19#T15-05-29\n\n if len(word) > 25:\n raise ValueError(\"Word too long: %s[\u2026]\" % word[:10])\n\n ety = get(ETYURI % web.quote(word))\n if ety.status_code != 200:\n return None\n\n # Let's find it\n start = ety.text.find(\"word__defination\")\n start = ety.text.find(\"<p>\", start)\n stop = ety.text.find(\"</p>\", start)\n sentence = ety.text[start + 3:stop]\n # Clean up\n sentence = unescape(sentence)\n sentence = sub('<[^<]+?>', '', sentence)\n\n maxlength = 275\n if len(sentence) > maxlength:\n sentence = sentence[:maxlength]\n words = sentence[:-5].split(' ')\n words.pop()\n sentence = ' '.join(words) + ' [\u2026]'\n\n sentence = '\"' + sentence.replace('\"', \"'\") + '\"'\n return sentence + ' - ' + (ETYURI % web.quote(word))\n\n\n@commands('ety')\n@example('.ety word')\ndef f_etymology(bot, trigger):\n \"\"\"Look up the etymology of a word\"\"\"\n word = trigger.group(2)\n\n try:\n result = etymology(word)\n except IOError:\n msg = \"Can't connect to etymonline.com (%s)\" % (ETYURI % web.quote(word))\n bot.say(msg, trigger.sender)\n return NOLIMIT\n except (AttributeError, TypeError):\n result = None\n except ValueError as ve:\n result = str(ve)\n\n if result is not None:\n bot.say(result, trigger.sender)\n else:\n uri = ETYSEARCH % web.quote(word)\n msg = 'Can\\'t find the etymology for \"%s\". Try %s' % (word, uri)\n bot.say(msg, trigger.sender)\n return NOLIMIT\n", "path": "sopel/modules/etymology.py"}]} | 1,583 | 165 |
gh_patches_debug_16778 | rasdani/github-patches | git_diff | sanic-org__sanic-2373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Websocket logger uses sanic.log.error_logger
Hey there,
Why do we see `sanic.error - INFO - connection open` on stderr whenever a new websocket connection is opened? Shouldn't this go to stdout?
Also, is it possible to add "middleware" so we can properly log websocket connections and disconnects? Is it possible to get a callback on websocket disconnects?
Thanks!
</issue>
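Until the message is routed through a non-error logger inside Sanic itself, connect/disconnect logging can be approximated at the application level. The sketch below assumes a recent Sanic release; the websocket handler signature and the `recv()` semantics have shifted between versions, so treat it as illustrative rather than canonical.

```python
import logging

from sanic import Sanic

app = Sanic("ws_demo")
log = logging.getLogger("ws_demo")  # configure this logger to write to stdout


@app.websocket("/feed")
async def feed(request, ws):
    log.info("websocket opened from %s", request.ip)
    try:
        while True:
            data = await ws.recv()  # returns None / raises once the peer is gone
            if data is None:
                break
    finally:
        # Reached on clean closes and on errors alike, which makes it a
        # de facto disconnect callback.
        log.info("websocket closed from %s", request.ip)
```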
<code>
[start of sanic/server/protocols/websocket_protocol.py]
1 from typing import TYPE_CHECKING, Optional, Sequence, cast
2
3 from websockets.connection import CLOSED, CLOSING, OPEN
4 from websockets.server import ServerConnection
5 from websockets.typing import Subprotocol
6
7 from sanic.exceptions import ServerError
8 from sanic.log import error_logger
9 from sanic.server import HttpProtocol
10
11 from ..websockets.impl import WebsocketImplProtocol
12
13
14 if TYPE_CHECKING: # no cov
15 from websockets import http11
16
17
18 class WebSocketProtocol(HttpProtocol):
19 __slots__ = (
20 "websocket",
21 "websocket_timeout",
22 "websocket_max_size",
23 "websocket_ping_interval",
24 "websocket_ping_timeout",
25 )
26
27 def __init__(
28 self,
29 *args,
30 websocket_timeout: float = 10.0,
31 websocket_max_size: Optional[int] = None,
32 websocket_ping_interval: Optional[float] = 20.0,
33 websocket_ping_timeout: Optional[float] = 20.0,
34 **kwargs,
35 ):
36 super().__init__(*args, **kwargs)
37 self.websocket: Optional[WebsocketImplProtocol] = None
38 self.websocket_timeout = websocket_timeout
39 self.websocket_max_size = websocket_max_size
40 self.websocket_ping_interval = websocket_ping_interval
41 self.websocket_ping_timeout = websocket_ping_timeout
42
43 def connection_lost(self, exc):
44 if self.websocket is not None:
45 self.websocket.connection_lost(exc)
46 super().connection_lost(exc)
47
48 def data_received(self, data):
49 if self.websocket is not None:
50 self.websocket.data_received(data)
51 else:
52 # Pass it to HttpProtocol handler first
53 # That will (hopefully) upgrade it to a websocket.
54 super().data_received(data)
55
56 def eof_received(self) -> Optional[bool]:
57 if self.websocket is not None:
58 return self.websocket.eof_received()
59 else:
60 return False
61
62 def close(self, timeout: Optional[float] = None):
63 # Called by HttpProtocol at the end of connection_task
64 # If we've upgraded to websocket, we do our own closing
65 if self.websocket is not None:
66 # Note, we don't want to use websocket.close()
67 # That is used for user's application code to send a
68 # websocket close packet. This is different.
69 self.websocket.end_connection(1001)
70 else:
71 super().close()
72
73 def close_if_idle(self):
74 # Called by Sanic Server when shutting down
75 # If we've upgraded to websocket, shut it down
76 if self.websocket is not None:
77 if self.websocket.connection.state in (CLOSING, CLOSED):
78 return True
79 elif self.websocket.loop is not None:
80 self.websocket.loop.create_task(self.websocket.close(1001))
81 else:
82 self.websocket.end_connection(1001)
83 else:
84 return super().close_if_idle()
85
86 async def websocket_handshake(
87 self, request, subprotocols: Optional[Sequence[str]] = None
88 ):
89 # let the websockets package do the handshake with the client
90 try:
91 if subprotocols is not None:
92 # subprotocols can be a set or frozenset,
93 # but ServerConnection needs a list
94 subprotocols = cast(
95 Optional[Sequence[Subprotocol]],
96 list(
97 [
98 Subprotocol(subprotocol)
99 for subprotocol in subprotocols
100 ]
101 ),
102 )
103 ws_conn = ServerConnection(
104 max_size=self.websocket_max_size,
105 subprotocols=subprotocols,
106 state=OPEN,
107 logger=error_logger,
108 )
109 resp: "http11.Response" = ws_conn.accept(request)
110 except Exception:
111 msg = (
112 "Failed to open a WebSocket connection.\n"
113 "See server log for more information.\n"
114 )
115 raise ServerError(msg, status_code=500)
116 if 100 <= resp.status_code <= 299:
117 first_line = (
118 f"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\r\n"
119 ).encode()
120 rbody = bytearray(first_line)
121 rbody += (
122 "".join([f"{k}: {v}\r\n" for k, v in resp.headers.items()])
123 ).encode()
124 rbody += b"\r\n"
125 if resp.body is not None:
126 rbody += resp.body
127 rbody += b"\r\n\r\n"
128 await super().send(rbody)
129 else:
130 raise ServerError(resp.body, resp.status_code)
131 self.websocket = WebsocketImplProtocol(
132 ws_conn,
133 ping_interval=self.websocket_ping_interval,
134 ping_timeout=self.websocket_ping_timeout,
135 close_timeout=self.websocket_timeout,
136 )
137 loop = (
138 request.transport.loop
139 if hasattr(request, "transport")
140 and hasattr(request.transport, "loop")
141 else None
142 )
143 await self.websocket.connection_made(self, loop=loop)
144 return self.websocket
145
[end of sanic/server/protocols/websocket_protocol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/server/protocols/websocket_protocol.py b/sanic/server/protocols/websocket_protocol.py
--- a/sanic/server/protocols/websocket_protocol.py
+++ b/sanic/server/protocols/websocket_protocol.py
@@ -5,7 +5,7 @@
from websockets.typing import Subprotocol
from sanic.exceptions import ServerError
-from sanic.log import error_logger
+from sanic.log import logger
from sanic.server import HttpProtocol
from ..websockets.impl import WebsocketImplProtocol
@@ -104,7 +104,7 @@
max_size=self.websocket_max_size,
subprotocols=subprotocols,
state=OPEN,
- logger=error_logger,
+ logger=logger,
)
resp: "http11.Response" = ws_conn.accept(request)
except Exception:
| {"golden_diff": "diff --git a/sanic/server/protocols/websocket_protocol.py b/sanic/server/protocols/websocket_protocol.py\n--- a/sanic/server/protocols/websocket_protocol.py\n+++ b/sanic/server/protocols/websocket_protocol.py\n@@ -5,7 +5,7 @@\n from websockets.typing import Subprotocol\n \n from sanic.exceptions import ServerError\n-from sanic.log import error_logger\n+from sanic.log import logger\n from sanic.server import HttpProtocol\n \n from ..websockets.impl import WebsocketImplProtocol\n@@ -104,7 +104,7 @@\n max_size=self.websocket_max_size,\n subprotocols=subprotocols,\n state=OPEN,\n- logger=error_logger,\n+ logger=logger,\n )\n resp: \"http11.Response\" = ws_conn.accept(request)\n except Exception:\n", "issue": "Websocket logger uses sanic.log.error_logger\nHey there,\r\n\r\nWhy do we see:\r\n\r\n sanic.error - INFO - connection open\r\n\r\nvia stderr when getting new websocket connections. Shouldn't this go to stdout?\r\n\r\nAlso, is it possible to add \"middleware\" so we can properly log websocket connections and disconnects? Is it possible to get a callback on websocket disconnects? \r\n\r\nThanks!\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Optional, Sequence, cast\n\nfrom websockets.connection import CLOSED, CLOSING, OPEN\nfrom websockets.server import ServerConnection\nfrom websockets.typing import Subprotocol\n\nfrom sanic.exceptions import ServerError\nfrom sanic.log import error_logger\nfrom sanic.server import HttpProtocol\n\nfrom ..websockets.impl import WebsocketImplProtocol\n\n\nif TYPE_CHECKING: # no cov\n from websockets import http11\n\n\nclass WebSocketProtocol(HttpProtocol):\n __slots__ = (\n \"websocket\",\n \"websocket_timeout\",\n \"websocket_max_size\",\n \"websocket_ping_interval\",\n \"websocket_ping_timeout\",\n )\n\n def __init__(\n self,\n *args,\n websocket_timeout: float = 10.0,\n websocket_max_size: Optional[int] = None,\n websocket_ping_interval: Optional[float] = 20.0,\n websocket_ping_timeout: Optional[float] = 20.0,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.websocket: Optional[WebsocketImplProtocol] = None\n self.websocket_timeout = websocket_timeout\n self.websocket_max_size = websocket_max_size\n self.websocket_ping_interval = websocket_ping_interval\n self.websocket_ping_timeout = websocket_ping_timeout\n\n def connection_lost(self, exc):\n if self.websocket is not None:\n self.websocket.connection_lost(exc)\n super().connection_lost(exc)\n\n def data_received(self, data):\n if self.websocket is not None:\n self.websocket.data_received(data)\n else:\n # Pass it to HttpProtocol handler first\n # That will (hopefully) upgrade it to a websocket.\n super().data_received(data)\n\n def eof_received(self) -> Optional[bool]:\n if self.websocket is not None:\n return self.websocket.eof_received()\n else:\n return False\n\n def close(self, timeout: Optional[float] = None):\n # Called by HttpProtocol at the end of connection_task\n # If we've upgraded to websocket, we do our own closing\n if self.websocket is not None:\n # Note, we don't want to use websocket.close()\n # That is used for user's application code to send a\n # websocket close packet. 
This is different.\n self.websocket.end_connection(1001)\n else:\n super().close()\n\n def close_if_idle(self):\n # Called by Sanic Server when shutting down\n # If we've upgraded to websocket, shut it down\n if self.websocket is not None:\n if self.websocket.connection.state in (CLOSING, CLOSED):\n return True\n elif self.websocket.loop is not None:\n self.websocket.loop.create_task(self.websocket.close(1001))\n else:\n self.websocket.end_connection(1001)\n else:\n return super().close_if_idle()\n\n async def websocket_handshake(\n self, request, subprotocols: Optional[Sequence[str]] = None\n ):\n # let the websockets package do the handshake with the client\n try:\n if subprotocols is not None:\n # subprotocols can be a set or frozenset,\n # but ServerConnection needs a list\n subprotocols = cast(\n Optional[Sequence[Subprotocol]],\n list(\n [\n Subprotocol(subprotocol)\n for subprotocol in subprotocols\n ]\n ),\n )\n ws_conn = ServerConnection(\n max_size=self.websocket_max_size,\n subprotocols=subprotocols,\n state=OPEN,\n logger=error_logger,\n )\n resp: \"http11.Response\" = ws_conn.accept(request)\n except Exception:\n msg = (\n \"Failed to open a WebSocket connection.\\n\"\n \"See server log for more information.\\n\"\n )\n raise ServerError(msg, status_code=500)\n if 100 <= resp.status_code <= 299:\n first_line = (\n f\"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\\r\\n\"\n ).encode()\n rbody = bytearray(first_line)\n rbody += (\n \"\".join([f\"{k}: {v}\\r\\n\" for k, v in resp.headers.items()])\n ).encode()\n rbody += b\"\\r\\n\"\n if resp.body is not None:\n rbody += resp.body\n rbody += b\"\\r\\n\\r\\n\"\n await super().send(rbody)\n else:\n raise ServerError(resp.body, resp.status_code)\n self.websocket = WebsocketImplProtocol(\n ws_conn,\n ping_interval=self.websocket_ping_interval,\n ping_timeout=self.websocket_ping_timeout,\n close_timeout=self.websocket_timeout,\n )\n loop = (\n request.transport.loop\n if hasattr(request, \"transport\")\n and hasattr(request.transport, \"loop\")\n else None\n )\n await self.websocket.connection_made(self, loop=loop)\n return self.websocket\n", "path": "sanic/server/protocols/websocket_protocol.py"}]} | 2,004 | 178 |
gh_patches_debug_25651 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve annotation admin pages
The graders have asked me to delete all annotations made by one user on a specific image. There is currently no easy way to do this in the admin pages. I will need to find a way to make this possible, probably by adding certain `search_fields` or `list_filters`.
Also, I have noticed that some admin pages take extremely long to load, for example the change page for a polygon annotation set. I think this happens because the page has to load a lot of data, which it does not do efficiently, and because it makes all inline model fields editable by default. It will most likely never happen that we need to change this data manually, so those fields can all be set to read-only. It might also be better to just have some statistics there about the number of inline models and not show the actual inline models at all.
Example loading time for an admin page:

</issue>
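A sketch of the direction the issue suggests, reusing the model names from the listing below; the specific choices (`image__name` in `search_fields`, which fields go read-only) are assumptions rather than the project's final change.

```python
from django.contrib import admin

from grandchallenge.annotations.models import (
    PolygonAnnotationSet,
    SinglePolygonAnnotation,
)


class SinglePolygonAnnotationInline(admin.StackedInline):
    model = SinglePolygonAnnotation
    extra = 0
    readonly_fields = ("value",)  # display-only: this data is never edited by hand


class PolygonAnnotationSetAdmin(admin.ModelAdmin):
    # image__name makes "all annotations by user X on image Y" findable,
    # and read-only inlines keep the change page from rendering editable forms.
    search_fields = ("grader__username", "image__name")
    list_filter = ("created", "grader__username")
    inlines = [SinglePolygonAnnotationInline]


admin.site.register(PolygonAnnotationSet, PolygonAnnotationSetAdmin)
```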
<code>
[start of app/grandchallenge/annotations/admin.py]
1 from django.contrib import admin
2
3 from grandchallenge.annotations.models import (
4 BooleanClassificationAnnotation,
5 ETDRSGridAnnotation,
6 LandmarkAnnotationSet,
7 MeasurementAnnotation,
8 PolygonAnnotationSet,
9 SingleLandmarkAnnotation,
10 SinglePolygonAnnotation,
11 )
12
13
14 class BooleanClassificationAnnotationAdmin(admin.ModelAdmin):
15 search_fields = ("grader__username", "name", "created")
16 list_filter = ("created", "value", "name")
17
18
19 class SinglePolygonAnnotationInline(admin.StackedInline):
20 model = SinglePolygonAnnotation
21 extra = 0
22
23
24 class PolygonAnnotationSetAdmin(admin.ModelAdmin):
25 search_fields = ("grader__username", "created", "name")
26 list_filter = ("created", "grader__username", "name")
27 inlines = [SinglePolygonAnnotationInline]
28
29
30 class SingleLandmarkAnnotationInline(admin.StackedInline):
31 model = SingleLandmarkAnnotation
32 extra = 0
33
34
35 class LandmarkAnnotationSetAdmin(admin.ModelAdmin):
36 search_fields = ("grader__username", "created")
37 list_filter = ("created", "grader__username")
38 inlines = [SingleLandmarkAnnotationInline]
39
40
41 admin.site.register(ETDRSGridAnnotation)
42 admin.site.register(MeasurementAnnotation)
43 admin.site.register(
44 BooleanClassificationAnnotation, BooleanClassificationAnnotationAdmin
45 )
46 admin.site.register(PolygonAnnotationSet, PolygonAnnotationSetAdmin)
47 admin.site.register(SinglePolygonAnnotation)
48 admin.site.register(LandmarkAnnotationSet, LandmarkAnnotationSetAdmin)
49 admin.site.register(SingleLandmarkAnnotation)
50
[end of app/grandchallenge/annotations/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/annotations/admin.py b/app/grandchallenge/annotations/admin.py
--- a/app/grandchallenge/annotations/admin.py
+++ b/app/grandchallenge/annotations/admin.py
@@ -19,23 +19,33 @@
class SinglePolygonAnnotationInline(admin.StackedInline):
model = SinglePolygonAnnotation
extra = 0
+ readonly_fields = (
+ "annotation_set",
+ "value",
+ "x_axis_orientation",
+ "y_axis_orientation",
+ "z",
+ )
class PolygonAnnotationSetAdmin(admin.ModelAdmin):
- search_fields = ("grader__username", "created", "name")
+ search_fields = ("grader__username", "created", "name", "image__name")
list_filter = ("created", "grader__username", "name")
inlines = [SinglePolygonAnnotationInline]
+ readonly_fields = ("grader", "image", "name", "created")
class SingleLandmarkAnnotationInline(admin.StackedInline):
model = SingleLandmarkAnnotation
extra = 0
+ readonly_fields = ("image", "landmarks")
class LandmarkAnnotationSetAdmin(admin.ModelAdmin):
search_fields = ("grader__username", "created")
list_filter = ("created", "grader__username")
inlines = [SingleLandmarkAnnotationInline]
+ readonly_fields = ("grader", "created")
admin.site.register(ETDRSGridAnnotation)
| {"golden_diff": "diff --git a/app/grandchallenge/annotations/admin.py b/app/grandchallenge/annotations/admin.py\n--- a/app/grandchallenge/annotations/admin.py\n+++ b/app/grandchallenge/annotations/admin.py\n@@ -19,23 +19,33 @@\n class SinglePolygonAnnotationInline(admin.StackedInline):\n model = SinglePolygonAnnotation\n extra = 0\n+ readonly_fields = (\n+ \"annotation_set\",\n+ \"value\",\n+ \"x_axis_orientation\",\n+ \"y_axis_orientation\",\n+ \"z\",\n+ )\n \n \n class PolygonAnnotationSetAdmin(admin.ModelAdmin):\n- search_fields = (\"grader__username\", \"created\", \"name\")\n+ search_fields = (\"grader__username\", \"created\", \"name\", \"image__name\")\n list_filter = (\"created\", \"grader__username\", \"name\")\n inlines = [SinglePolygonAnnotationInline]\n+ readonly_fields = (\"grader\", \"image\", \"name\", \"created\")\n \n \n class SingleLandmarkAnnotationInline(admin.StackedInline):\n model = SingleLandmarkAnnotation\n extra = 0\n+ readonly_fields = (\"image\", \"landmarks\")\n \n \n class LandmarkAnnotationSetAdmin(admin.ModelAdmin):\n search_fields = (\"grader__username\", \"created\")\n list_filter = (\"created\", \"grader__username\")\n inlines = [SingleLandmarkAnnotationInline]\n+ readonly_fields = (\"grader\", \"created\")\n \n \n admin.site.register(ETDRSGridAnnotation)\n", "issue": "Improve annotation admin pages\nThe graders have asked me to delete all annotations for one user for a specific image. There is currently no easy way to do this in the admin pages. I will need to find a way to make this possible. Probably by adding certain search_fields or list_filters.\r\n\r\nAlso, I have noticed that some admin pages take extremely long to load. For example the change page for a polygon annotation set. I think this happens because it has to load a lot of data which it does not do efficiently and because it makes all inline model fields are editable by default. It will most likely never happen that we need to change this data manually so those fields can all be set to read_only. 
It might also be better to just have some statistics there about the number of inline models and not show the actual inline models at all.\r\nExample loading time for an admin page:\r\n\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom grandchallenge.annotations.models import (\n BooleanClassificationAnnotation,\n ETDRSGridAnnotation,\n LandmarkAnnotationSet,\n MeasurementAnnotation,\n PolygonAnnotationSet,\n SingleLandmarkAnnotation,\n SinglePolygonAnnotation,\n)\n\n\nclass BooleanClassificationAnnotationAdmin(admin.ModelAdmin):\n search_fields = (\"grader__username\", \"name\", \"created\")\n list_filter = (\"created\", \"value\", \"name\")\n\n\nclass SinglePolygonAnnotationInline(admin.StackedInline):\n model = SinglePolygonAnnotation\n extra = 0\n\n\nclass PolygonAnnotationSetAdmin(admin.ModelAdmin):\n search_fields = (\"grader__username\", \"created\", \"name\")\n list_filter = (\"created\", \"grader__username\", \"name\")\n inlines = [SinglePolygonAnnotationInline]\n\n\nclass SingleLandmarkAnnotationInline(admin.StackedInline):\n model = SingleLandmarkAnnotation\n extra = 0\n\n\nclass LandmarkAnnotationSetAdmin(admin.ModelAdmin):\n search_fields = (\"grader__username\", \"created\")\n list_filter = (\"created\", \"grader__username\")\n inlines = [SingleLandmarkAnnotationInline]\n\n\nadmin.site.register(ETDRSGridAnnotation)\nadmin.site.register(MeasurementAnnotation)\nadmin.site.register(\n BooleanClassificationAnnotation, BooleanClassificationAnnotationAdmin\n)\nadmin.site.register(PolygonAnnotationSet, PolygonAnnotationSetAdmin)\nadmin.site.register(SinglePolygonAnnotation)\nadmin.site.register(LandmarkAnnotationSet, LandmarkAnnotationSetAdmin)\nadmin.site.register(SingleLandmarkAnnotation)\n", "path": "app/grandchallenge/annotations/admin.py"}]} | 1,198 | 321 |
gh_patches_debug_14177 | rasdani/github-patches | git_diff | spacetelescope__jwql-280 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update install_requires in setup.py
After some research on StackOverflow and discussion with @SaOgaz and @laurenmarietta on the subject of `setup.py` dependencies and `requirements.txt`/`environment.yml` dependencies (#261, #94), we should probably update the list of dependencies in `install_requires` in `setup.py` to better reflect what is actually needed to install and run our application.
In short: `setup.py` should _list the range of dependencies that allow a user to run any code in our repository_, while `environment.yml` should _list a set of specific dependencies that allow the application to operate on a specific architecture_.
</issue>
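To make the split concrete, here is a sketch of the `setup.py` side; the lower bounds are placeholders, not vetted minima.

```python
# setup.py: the loosest constraints under which the repository is known to run.
REQUIRES = [
    "astropy>=3.0",
    "bokeh>=1.0",
    "django>=2.0",
    "matplotlib",
    "numpy",
]
```

The `environment.yml` counterpart would instead pin exact versions (for example `bokeh=1.0.1` and `django=2.1.2`, matching the pins currently hard-coded in `setup.py`) so that a given deployment stays reproducible.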
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.17.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '
8 AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = ['astroquery', 'bokeh==1.0.1', 'django==2.1.2', 'matplotlib', 'numpy',
11 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
12
13 setup(
14 name='jwql',
15 version=VERSION,
16 description='The JWST Quicklook Project',
17 url='https://github.com/spacetelescope/jwql.git',
18 author=AUTHORS,
19 author_email='[email protected]',
20 license='BSD',
21 keywords=['astronomy', 'python'],
22 classifiers=['Programming Language :: Python'],
23 packages=find_packages(),
24 install_requires=REQUIRES,
25 include_package_data=True,
26 include_dirs=[np.get_include()],
27 )
28
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,8 +7,22 @@
AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '
AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
-REQUIRES = ['astroquery', 'bokeh==1.0.1', 'django==2.1.2', 'matplotlib', 'numpy',
- 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
+REQUIRES = ['astropy',
+ 'astroquery>=0.3.9',
+ 'authlib',
+ 'bokeh>=1.0',
+ 'django>=2.0',
+ 'jinja2',
+ 'jwst',
+ 'matplotlib',
+ 'numpy',
+ 'numpydoc',
+ 'pandas',
+ 'psycopg2',
+ 'pytest',
+ 'sphinx',
+ 'sqlalchemy',
+ 'stsci_rtd_theme']
setup(
name='jwql',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,8 +7,22 @@\n AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '\n AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n \n-REQUIRES = ['astroquery', 'bokeh==1.0.1', 'django==2.1.2', 'matplotlib', 'numpy',\n- 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n+REQUIRES = ['astropy',\n+ 'astroquery>=0.3.9',\n+ 'authlib',\n+ 'bokeh>=1.0',\n+ 'django>=2.0',\n+ 'jinja2',\n+ 'jwst',\n+ 'matplotlib',\n+ 'numpy',\n+ 'numpydoc',\n+ 'pandas',\n+ 'psycopg2',\n+ 'pytest',\n+ 'sphinx',\n+ 'sqlalchemy',\n+ 'stsci_rtd_theme']\n \n setup(\n name='jwql',\n", "issue": "Update install_requires in setup.py\nAfter some research on StackOverflow and discussion with @SaOgaz and @laurenmarietta on the subject of `setup.py` dependencies and `requirements.txt`/`environment.yml` dependencies (#261, #94), we should probably update the list of dependencies in `install_requires` in `setup.py` to better reflect what is actually needed to install and run our application.\r\n\r\nIn short: `setup.py` should _list the range of dependencies that allow a user to run any code in our repository_, while `environment.yml` should _list a set of specific dependencies that allow the application to operate on a specific architecture_ \n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.17.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '\nAUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astroquery', 'bokeh==1.0.1', 'django==2.1.2', 'matplotlib', 'numpy',\n 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 956 | 261 |
gh_patches_debug_38504 | rasdani/github-patches | git_diff | wright-group__WrightTools-789 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
from_Cary: handle duplicate-named data
Currently it fails to read in data in which two or more columns share the same header.
</issue>
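One way to make repeated headers importable, sketched independently of whatever fix lands in `from_Cary`; the `_{:03d}` suffix scheme is an assumption chosen for illustration.

```python
def dedupe_headers(headers):
    """Suffix repeated column headers so every name is unique (sketch)."""
    seen = {}
    out = []
    for header in headers:
        count = seen.get(header, 0)
        seen[header] = count + 1
        # The first occurrence keeps its name; later ones get an index suffix.
        out.append(header if count == 0 else "{}_{:03d}".format(header, count))
    return out


# e.g. dedupe_headers(["baseline", "baseline"]) -> ["baseline", "baseline_001"]
```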
<code>
[start of WrightTools/collection/_cary.py]
1 """Cary."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import os
8 import re
9
10 import numpy as np
11
12 from .. import exceptions as wt_exceptions
13 from ._collection import Collection
14
15
16 # --- define --------------------------------------------------------------------------------------
17
18
19 __all__ = ["from_Cary"]
20
21
22 # --- from function -------------------------------------------------------------------------------
23
24
25 def from_Cary(filepath, name=None, parent=None, verbose=True):
26 """Create a collection object from a Cary UV VIS absorbance file.
27
28 We hope to support as many Cary instruments and datasets as possible.
29 This function has been tested with data collected on a Cary50 UV/VIS spectrometer.
30 If any alternate instruments are found not to work as expected, please
31 submit a bug report on our `issue tracker`__.
32
33 __ github.com/wright-group/WrightTools/issues
34
35 .. plot::
36
37 >>> import WrightTools as wt
38 >>> from WrightTools import datasets
39 >>> p = datasets.Cary.CuPCtS_H2O_vis
40 >>> data = wt.collection.from_Cary(p)[0]
41 >>> wt.artists.quick1D(data)
42
43 Parameters
44 ----------
45 filepath : string
46 Path to Cary output file (.csv).
47 parent : WrightTools.Collection
48 A collection object in which to place a collection of Data objects.
49 verbose : boolean (optional)
50 Toggle talkback. Default is True.
51
52 Returns
53 -------
54 data
55 New data object.
56 """
57 # check filepath
58 filesuffix = os.path.basename(filepath).split(".")[-1]
59 if filesuffix != "csv":
60 wt_exceptions.WrongFileTypeWarning.warn(filepath, "csv")
61 if name is None:
62 name = "cary"
63 # import array
64 lines = []
65 with open(filepath, "r", encoding="iso-8859-1") as f:
66 header = f.readline()
67 columns = f.readline()
68 while True:
69 line = f.readline()
70 if line == "\n" or line == "":
71 break
72 else:
73 # Note, it is necessary to call this twice, as a single call will
74 # result in something like ',,,,' > ',nan,,nan,'.
75 line = line.replace(",,", ",nan,")
76 line = line.replace(",,", ",nan,")
77 # Ensure that the first column has nan, if necessary
78 if line[0] == ",":
79 line = "nan" + line
80 clean = line[:-2] # lines end with ',/n'
81 lines.append(np.fromstring(clean, sep=","))
82 lines = [line for line in lines if len(line) > 0]
83 header = header.split(",")
84 columns = columns.split(",")
85 arr = np.array(lines).T
86 # chew through all scans
87 datas = Collection(name=name, parent=parent, edit_local=parent is not None)
88 units_dict = {"°c": "deg_C", "°f": "deg_F"}
89 for i in range(0, len(header) - 1, 2):
90 r = re.compile(r"[ \t\(\)]+")
91 spl = r.split(columns[i])
92 ax = spl[0].lower() if len(spl) > 0 else None
93 units = spl[1].lower() if len(spl) > 1 else None
94 units = units_dict.get(units, units)
95 dat = datas.create_data(header[i], kind="Cary", source=filepath)
96 dat.create_variable(ax, arr[i][~np.isnan(arr[i])], units=units)
97 dat.create_channel(
98 columns[i + 1].lower(), arr[i + 1][~np.isnan(arr[i + 1])], label=columns[i + 1].lower()
99 )
100 dat.transform(ax)
101 # finish
102 if verbose:
103 print("{0} data objects successfully created from Cary file:".format(len(datas)))
104 for i, data in enumerate(datas):
105 print(" {0}: {1}".format(i, data))
106 return datas
107
[end of WrightTools/collection/_cary.py]
[start of examples/filters.py]
1 #! /usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 """
4 Optical Filters
5 ===============
6
7 A set of optical filters transmission spectra.
8 """
9
10 import WrightTools as wt
11 from WrightTools import datasets
12 from matplotlib import pyplot as plt
13
14 p = datasets.Cary.filters
15 col = wt.collection.from_Cary(p)
16
17 fig, gs = wt.artists.create_figure(width="double", default_aspect=.5)
18 ax = plt.subplot(gs[0])
19
20 for data in col.values():
21 if data.natural_name in ("600LP", "550LP2"):
22 continue
23 data.convert("wn", verbose=False)
24 ax.plot(data, label=data.natural_name)
25
26
27 ax.set_ylabel("%T")
28 ax.set_xlabel("Frequency (cm$^{-1}$)")
29 ax.legend()
30
[end of examples/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/collection/_cary.py b/WrightTools/collection/_cary.py
--- a/WrightTools/collection/_cary.py
+++ b/WrightTools/collection/_cary.py
@@ -4,7 +4,7 @@
# --- import --------------------------------------------------------------------------------------
-import os
+import pathlib
import re
import numpy as np
@@ -55,14 +55,14 @@
New data object.
"""
# check filepath
- filesuffix = os.path.basename(filepath).split(".")[-1]
- if filesuffix != "csv":
+ filesuffix = pathlib.Path(filepath).suffix
+ if filesuffix != ".csv":
wt_exceptions.WrongFileTypeWarning.warn(filepath, "csv")
if name is None:
name = "cary"
# import array
lines = []
- with open(filepath, "r", encoding="iso-8859-1") as f:
+ with open(str(filepath), "r", encoding="iso-8859-1") as f:
header = f.readline()
columns = f.readline()
while True:
@@ -83,6 +83,7 @@
header = header.split(",")
columns = columns.split(",")
arr = np.array(lines).T
+ duplicate = len(header) // 2 == len(set(header) - {""})
# chew through all scans
datas = Collection(name=name, parent=parent, edit_local=parent is not None)
units_dict = {"°c": "deg_C", "°f": "deg_F"}
@@ -92,7 +93,11 @@
ax = spl[0].lower() if len(spl) > 0 else None
units = spl[1].lower() if len(spl) > 1 else None
units = units_dict.get(units, units)
- dat = datas.create_data(header[i], kind="Cary", source=filepath)
+ if duplicate:
+ name = "{}_{:03d}".format(header[i], i // 2)
+ else:
+ name = header[i]
+ dat = datas.create_data(name, kind="Cary", source=filepath)
dat.create_variable(ax, arr[i][~np.isnan(arr[i])], units=units)
dat.create_channel(
columns[i + 1].lower(), arr[i + 1][~np.isnan(arr[i + 1])], label=columns[i + 1].lower()
diff --git a/examples/filters.py b/examples/filters.py
--- a/examples/filters.py
+++ b/examples/filters.py
@@ -1,10 +1,14 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-Optical Filters
-===============
+Plotting Multiple Lines
+=======================
-A set of optical filters transmission spectra.
+A quick demonstration of how to plot multiple lines on the same
+set of axes, using :meth:`create_figure` to have a set of axes
+which can plot data objects directly.
+
+The dataset is a set of optical filters transmission spectra.
"""
import WrightTools as wt
| {"golden_diff": "diff --git a/WrightTools/collection/_cary.py b/WrightTools/collection/_cary.py\n--- a/WrightTools/collection/_cary.py\n+++ b/WrightTools/collection/_cary.py\n@@ -4,7 +4,7 @@\n # --- import --------------------------------------------------------------------------------------\n \n \n-import os\n+import pathlib\n import re\n \n import numpy as np\n@@ -55,14 +55,14 @@\n New data object.\n \"\"\"\n # check filepath\n- filesuffix = os.path.basename(filepath).split(\".\")[-1]\n- if filesuffix != \"csv\":\n+ filesuffix = pathlib.Path(filepath).suffix\n+ if filesuffix != \".csv\":\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \"csv\")\n if name is None:\n name = \"cary\"\n # import array\n lines = []\n- with open(filepath, \"r\", encoding=\"iso-8859-1\") as f:\n+ with open(str(filepath), \"r\", encoding=\"iso-8859-1\") as f:\n header = f.readline()\n columns = f.readline()\n while True:\n@@ -83,6 +83,7 @@\n header = header.split(\",\")\n columns = columns.split(\",\")\n arr = np.array(lines).T\n+ duplicate = len(header) // 2 == len(set(header) - {\"\"})\n # chew through all scans\n datas = Collection(name=name, parent=parent, edit_local=parent is not None)\n units_dict = {\"\u00b0c\": \"deg_C\", \"\u00b0f\": \"deg_F\"}\n@@ -92,7 +93,11 @@\n ax = spl[0].lower() if len(spl) > 0 else None\n units = spl[1].lower() if len(spl) > 1 else None\n units = units_dict.get(units, units)\n- dat = datas.create_data(header[i], kind=\"Cary\", source=filepath)\n+ if duplicate:\n+ name = \"{}_{:03d}\".format(header[i], i // 2)\n+ else:\n+ name = header[i]\n+ dat = datas.create_data(name, kind=\"Cary\", source=filepath)\n dat.create_variable(ax, arr[i][~np.isnan(arr[i])], units=units)\n dat.create_channel(\n columns[i + 1].lower(), arr[i + 1][~np.isnan(arr[i + 1])], label=columns[i + 1].lower()\ndiff --git a/examples/filters.py b/examples/filters.py\n--- a/examples/filters.py\n+++ b/examples/filters.py\n@@ -1,10 +1,14 @@\n #! /usr/bin/env python3\n # -*- coding: utf-8 -*-\n \"\"\"\n-Optical Filters\n-===============\n+Plotting Multiple Lines\n+=======================\n \n-A set of optical filters transmission spectra.\n+A quick demonstration of how to plot multiple lines on the same\n+set of axes, using :meth:`create_figure` to have a set of axes\n+which can plot data objects directly.\n+\n+The dataset is a set of optical filters transmission spectra.\n \"\"\"\n \n import WrightTools as wt\n", "issue": "from_Cary handle duplicate named data\nCurrently it will fail to read in data that has the same column header\n", "before_files": [{"content": "\"\"\"Cary.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport re\n\nimport numpy as np\n\nfrom .. 
import exceptions as wt_exceptions\nfrom ._collection import Collection\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_Cary\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_Cary(filepath, name=None, parent=None, verbose=True):\n \"\"\"Create a collection object from a Cary UV VIS absorbance file.\n\n We hope to support as many Cary instruments and datasets as possible.\n This function has been tested with data collected on a Cary50 UV/VIS spectrometer.\n If any alternate instruments are found not to work as expected, please\n submit a bug report on our `issue tracker`__.\n\n __ github.com/wright-group/WrightTools/issues\n\n .. plot::\n\n >>> import WrightTools as wt\n >>> from WrightTools import datasets\n >>> p = datasets.Cary.CuPCtS_H2O_vis\n >>> data = wt.collection.from_Cary(p)[0]\n >>> wt.artists.quick1D(data)\n\n Parameters\n ----------\n filepath : string\n Path to Cary output file (.csv).\n parent : WrightTools.Collection\n A collection object in which to place a collection of Data objects.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n\n Returns\n -------\n data\n New data object.\n \"\"\"\n # check filepath\n filesuffix = os.path.basename(filepath).split(\".\")[-1]\n if filesuffix != \"csv\":\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \"csv\")\n if name is None:\n name = \"cary\"\n # import array\n lines = []\n with open(filepath, \"r\", encoding=\"iso-8859-1\") as f:\n header = f.readline()\n columns = f.readline()\n while True:\n line = f.readline()\n if line == \"\\n\" or line == \"\":\n break\n else:\n # Note, it is necessary to call this twice, as a single call will\n # result in something like ',,,,' > ',nan,,nan,'.\n line = line.replace(\",,\", \",nan,\")\n line = line.replace(\",,\", \",nan,\")\n # Ensure that the first column has nan, if necessary\n if line[0] == \",\":\n line = \"nan\" + line\n clean = line[:-2] # lines end with ',/n'\n lines.append(np.fromstring(clean, sep=\",\"))\n lines = [line for line in lines if len(line) > 0]\n header = header.split(\",\")\n columns = columns.split(\",\")\n arr = np.array(lines).T\n # chew through all scans\n datas = Collection(name=name, parent=parent, edit_local=parent is not None)\n units_dict = {\"\u00b0c\": \"deg_C\", \"\u00b0f\": \"deg_F\"}\n for i in range(0, len(header) - 1, 2):\n r = re.compile(r\"[ \\t\\(\\)]+\")\n spl = r.split(columns[i])\n ax = spl[0].lower() if len(spl) > 0 else None\n units = spl[1].lower() if len(spl) > 1 else None\n units = units_dict.get(units, units)\n dat = datas.create_data(header[i], kind=\"Cary\", source=filepath)\n dat.create_variable(ax, arr[i][~np.isnan(arr[i])], units=units)\n dat.create_channel(\n columns[i + 1].lower(), arr[i + 1][~np.isnan(arr[i + 1])], label=columns[i + 1].lower()\n )\n dat.transform(ax)\n # finish\n if verbose:\n print(\"{0} data objects successfully created from Cary file:\".format(len(datas)))\n for i, data in enumerate(datas):\n print(\" {0}: {1}\".format(i, data))\n return datas\n", "path": "WrightTools/collection/_cary.py"}, {"content": "#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nOptical Filters\n===============\n\nA set of optical filters transmission spectra.\n\"\"\"\n\nimport WrightTools as wt\nfrom WrightTools import datasets\nfrom matplotlib import pyplot as plt\n\np = datasets.Cary.filters\ncol = wt.collection.from_Cary(p)\n\nfig, gs = wt.artists.create_figure(width=\"double\", default_aspect=.5)\nax = plt.subplot(gs[0])\n\nfor data in col.values():\n if data.natural_name in (\"600LP\", \"550LP2\"):\n continue\n data.convert(\"wn\", verbose=False)\n ax.plot(data, label=data.natural_name)\n\n\nax.set_ylabel(\"%T\")\nax.set_xlabel(\"Frequency (cm$^{-1}$)\")\nax.legend()\n", "path": "examples/filters.py"}]} | 1,874 | 686 |
gh_patches_debug_35632 | rasdani/github-patches | git_diff | chainer__chainer-1343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support basic indexing in Variable class
Currently `Variable` only partially supports `__getitem__`; I need full support for basic indexing.
- [x] int index #1343
- [x] slices #1154
- [x] new axis #1257
- [x] ellipsis #1343
see also: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#basic-slicing-and-indexing
</issue>
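Basic indexing admits exactly four key kinds, so the validation itself is small. A sketch of such a validator (the error messages are illustrative):

```python
import numpy


def check_basic_indexing(slices):
    """Accept only int, slice, None (newaxis) and at most one Ellipsis."""
    n_ellipsis = 0
    for s in slices:
        if numpy.isscalar(s) or s is None or isinstance(s, slice):
            continue
        if s is Ellipsis:
            n_ellipsis += 1
        else:
            raise ValueError("Only basic indexing is supported")
    if n_ellipsis > 1:
        raise ValueError("Only one Ellipsis is allowed")
```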
<code>
[start of chainer/functions/array/get_item.py]
1 import collections
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6 from chainer import variable
7
8
9 class GetItem(function.Function):
10
11 """Function that slices array and extract elements."""
12
13 def __init__(self, slices):
14 if not isinstance(slices, collections.Iterable):
15 slices = tuple([slices])
16 self.slices = slices
17
18 def check_type_forward(self, in_types):
19 type_check.expect(in_types.size() == 1)
20 valid_slice = len(self.slices) - self.slices.count(None)
21 type_check.expect(in_types[0].ndim == valid_slice)
22
23 def forward(self, xs):
24 ary = xs[0]
25 return ary[tuple(self.slices)],
26
27 def backward(self, xs, gys):
28 xp = cuda.get_array_module(*xs)
29 gy = gys[0]
30 gx = xp.zeros_like(xs[0])
31 gx[tuple(self.slices)] = gy
32 return gx,
33
34
35 def get_item(x, slices):
36 """Extract elements from array with specified shape, axes and offsets.
37
38 Args:
39 x (tuple of Variables): Variable to be sliced.
40 slices (slice or tuple of slices): Slice objects to slice variable.
41
42 Returns:
43 Variable: :class:`~chainer.Variable` object
44 which contains sliced array of ``x``.
45
46 """
47 return GetItem(slices)(x)
48
49
50 def install_variable_get_item():
51 variable.Variable.__getitem__ = get_item
52
[end of chainer/functions/array/get_item.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/array/get_item.py b/chainer/functions/array/get_item.py
--- a/chainer/functions/array/get_item.py
+++ b/chainer/functions/array/get_item.py
@@ -1,7 +1,11 @@
import collections
+import numpy
+
+import chainer
from chainer import cuda
from chainer import function
+from chainer import utils
from chainer.utils import type_check
from chainer import variable
@@ -13,16 +17,29 @@
def __init__(self, slices):
if not isinstance(slices, collections.Iterable):
slices = tuple([slices])
+
+ if chainer.is_debug():
+ n_ellipses = 0
+ for s in slices:
+ if numpy.isscalar(s) or s is None or isinstance(s, slice):
+ pass
+ elif s is Ellipsis:
+ n_ellipses += 1
+ else:
+ raise ValueError('Only basic indexing is supported')
+ if n_ellipses > 1:
+ raise ValueError('Only one Ellipsis is allowed')
+
self.slices = slices
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
valid_slice = len(self.slices) - self.slices.count(None)
- type_check.expect(in_types[0].ndim == valid_slice)
+ type_check.expect(in_types[0].ndim >= valid_slice)
def forward(self, xs):
ary = xs[0]
- return ary[tuple(self.slices)],
+ return utils.force_array(ary[tuple(self.slices)]),
def backward(self, xs, gys):
xp = cuda.get_array_module(*xs)
@@ -37,12 +54,19 @@
Args:
x (tuple of Variables): Variable to be sliced.
- slices (slice or tuple of slices): Slice objects to slice variable.
+ slices (int, slice, None or Ellipsis or tuple of them): Basic slicing
+ to slice a variable. It supports ``int``, ``slice``, ``newaxis``
+ (equivalent to ``None``) and ``Ellipsis``.
Returns:
Variable: :class:`~chainer.Variable` object
which contains sliced array of ``x``.
+ .. note::
+
+ See NumPy document for details of `indexing
+ <http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
+
"""
return GetItem(slices)(x)
| {"golden_diff": "diff --git a/chainer/functions/array/get_item.py b/chainer/functions/array/get_item.py\n--- a/chainer/functions/array/get_item.py\n+++ b/chainer/functions/array/get_item.py\n@@ -1,7 +1,11 @@\n import collections\n \n+import numpy\n+\n+import chainer\n from chainer import cuda\n from chainer import function\n+from chainer import utils\n from chainer.utils import type_check\n from chainer import variable\n \n@@ -13,16 +17,29 @@\n def __init__(self, slices):\n if not isinstance(slices, collections.Iterable):\n slices = tuple([slices])\n+\n+ if chainer.is_debug():\n+ n_ellipses = 0\n+ for s in slices:\n+ if numpy.isscalar(s) or s is None or isinstance(s, slice):\n+ pass\n+ elif s is Ellipsis:\n+ n_ellipses += 1\n+ else:\n+ raise ValueError('Only basic indexing is supported')\n+ if n_ellipses > 1:\n+ raise ValueError('Only one Ellipsis is allowed')\n+\n self.slices = slices\n \n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n valid_slice = len(self.slices) - self.slices.count(None)\n- type_check.expect(in_types[0].ndim == valid_slice)\n+ type_check.expect(in_types[0].ndim >= valid_slice)\n \n def forward(self, xs):\n ary = xs[0]\n- return ary[tuple(self.slices)],\n+ return utils.force_array(ary[tuple(self.slices)]),\n \n def backward(self, xs, gys):\n xp = cuda.get_array_module(*xs)\n@@ -37,12 +54,19 @@\n \n Args:\n x (tuple of Variables): Variable to be sliced.\n- slices (slice or tuple of slices): Slice objects to slice variable.\n+ slices (int, slice, None or Ellipsis or tuple of them): Basic slicing\n+ to slice a variable. It supports ``int``, ``slice``, ``newaxis``\n+ (equivalent to ``None``) and ``Ellipsis``.\n \n Returns:\n Variable: :class:`~chainer.Variable` object\n which contains sliced array of ``x``.\n \n+ .. note::\n+\n+ See NumPy document for details of `indexing\n+ <http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.\n+\n \"\"\"\n return GetItem(slices)(x)\n", "issue": "Support basic indexing in Variable class\nCurrently `Variable` partially supports `__getitem__`. 
I need full support of basic indexing.\n- [x] int index #1343 \n- [x] slices #1154\n- [x] new axis #1257 \n- [x] elipsis #1343 \n\nsee also: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#basic-slicing-and-indexing\n\n", "before_files": [{"content": "import collections\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\nfrom chainer import variable\n\n\nclass GetItem(function.Function):\n\n \"\"\"Function that slices array and extract elements.\"\"\"\n\n def __init__(self, slices):\n if not isinstance(slices, collections.Iterable):\n slices = tuple([slices])\n self.slices = slices\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n valid_slice = len(self.slices) - self.slices.count(None)\n type_check.expect(in_types[0].ndim == valid_slice)\n\n def forward(self, xs):\n ary = xs[0]\n return ary[tuple(self.slices)],\n\n def backward(self, xs, gys):\n xp = cuda.get_array_module(*xs)\n gy = gys[0]\n gx = xp.zeros_like(xs[0])\n gx[tuple(self.slices)] = gy\n return gx,\n\n\ndef get_item(x, slices):\n \"\"\"Extract elements from array with specified shape, axes and offsets.\n\n Args:\n x (tuple of Variables): Variable to be sliced.\n slices (slice or tuple of slices): Slice objects to slice variable.\n\n Returns:\n Variable: :class:`~chainer.Variable` object\n which contains sliced array of ``x``.\n\n \"\"\"\n return GetItem(slices)(x)\n\n\ndef install_variable_get_item():\n variable.Variable.__getitem__ = get_item\n", "path": "chainer/functions/array/get_item.py"}]} | 1,061 | 567 |
gh_patches_debug_30158 | rasdani/github-patches | git_diff | saleor__saleor-1480 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom 404 page
Right now in the demo store we display a generic 404 page
https://demo.getsaleor.com/i-do-not-exist/
We could implement something more interesting, with a nice design.
What should be included:
- [ ] Drawing
- [ ] Link to the homepage
- [ ] Search bar (?)
- [ ] ...
</issue>
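The standard wiring for this in Django is the `handler404` hook, sketched minimally below. A `404.html` template is assumed, and the dotted path mirrors the app layout shown in the code that follows; on newer Django versions the view also receives an `exception` argument:
```
# urls.py: module-level name Django looks up for unmatched URLs
handler404 = 'saleor.core.views.handle_404'

# views.py
from django.template.response import TemplateResponse

def handle_404(request):
    return TemplateResponse(request, '404.html', status=404)
```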
<code>
[start of saleor/urls.py]
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.conf.urls import url, include
5 from django.conf.urls.static import static
6 from django.contrib.sitemaps.views import sitemap
7 from django.contrib.staticfiles.views import serve
8 from django.views.i18n import JavaScriptCatalog
9 from graphene_django.views import GraphQLView
10
11 from .cart.urls import urlpatterns as cart_urls
12 from .checkout.urls import urlpatterns as checkout_urls
13 from .core.sitemaps import sitemaps
14 from .core.urls import urlpatterns as core_urls
15 from .dashboard.urls import urlpatterns as dashboard_urls
16 from .data_feeds.urls import urlpatterns as feed_urls
17 from .order.urls import urlpatterns as order_urls
18 from .product.urls import urlpatterns as product_urls
19 from .registration.urls import urlpatterns as registration_urls
20 from .search.urls import urlpatterns as search_urls
21 from .userprofile.urls import urlpatterns as userprofile_urls
22
23 urlpatterns = [
24 url(r'^', include(core_urls)),
25 url(r'^account/', include(registration_urls)),
26 url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),
27 url(r'^checkout/',
28 include((checkout_urls, 'checkout'), namespace='checkout')),
29 url(r'^dashboard/',
30 include((dashboard_urls, 'dashboard'), namespace='dashboard')),
31 url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),
32 url(r'^impersonate/', include('impersonate.urls')),
33 url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
34 url(r'^order/', include((order_urls, 'order'), namespace='order')),
35 url(r'^products/',
36 include((product_urls, 'product'), namespace='product')),
37 url(r'^profile/',
38 include((userprofile_urls, 'profile'), namespace='profile')),
39 url(r'^feeds/',
40 include((feed_urls, 'data_feeds'), namespace='data_feeds')),
41 url(r'^search/', include((search_urls, 'search'), namespace='search')),
42 url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
43 name='django.contrib.sitemaps.views.sitemap'),
44 url(r'', include('payments.urls')),
45 url('', include('social_django.urls', namespace='social')),
46 ]
47
48 if settings.DEBUG:
49 # static files (images, css, javascript, etc.)
50 urlpatterns += [
51 url(r'^static/(?P<path>.*)$', serve)
52 ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
53
[end of saleor/urls.py]
[start of saleor/core/urls.py]
1 from __future__ import unicode_literals
2
3 from django.conf.urls import url
4
5 from . import views
6
7
8 urlpatterns = [
9 url(r'^$', views.home, name='home'),
10 url(r'^style-guide/', views.styleguide, name='styleguide'),
11 url(r'^impersonate/(?P<uid>\d+)/', views.impersonate,
12 name='impersonate-start'),
13 ]
14
[end of saleor/core/urls.py]
[start of saleor/core/views.py]
1 from __future__ import unicode_literals
2
3 from django.template.response import TemplateResponse
4 from django.contrib import messages
5 from django.conf import settings
6 from django.utils.translation import pgettext_lazy
7 from impersonate.views import impersonate as orig_impersonate
8
9 from ..dashboard.views import staff_member_required
10 from ..product.utils import products_with_availability, products_for_homepage
11 from ..userprofile.models import User
12
13
14 def home(request):
15 products = products_for_homepage()[:8]
16 products = products_with_availability(
17 products, discounts=request.discounts, local_currency=request.currency)
18 return TemplateResponse(
19 request, 'home.html',
20 {'products': products, 'parent': None})
21
22
23 @staff_member_required
24 def styleguide(request):
25 return TemplateResponse(request, 'styleguide.html')
26
27
28 def impersonate(request, uid):
29 response = orig_impersonate(request, uid)
30 if request.session.modified:
31 msg = pgettext_lazy(
32 'Impersonation message',
33 'You are now logged as {}'.format(User.objects.get(pk=uid)))
34 messages.success(request, msg)
35 return response
36
[end of saleor/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/urls.py b/saleor/core/urls.py
--- a/saleor/core/urls.py
+++ b/saleor/core/urls.py
@@ -8,6 +8,7 @@
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^style-guide/', views.styleguide, name='styleguide'),
+ url(r'^404', views.handle_404, name='404'),
url(r'^impersonate/(?P<uid>\d+)/', views.impersonate,
name='impersonate-start'),
]
diff --git a/saleor/core/views.py b/saleor/core/views.py
--- a/saleor/core/views.py
+++ b/saleor/core/views.py
@@ -5,6 +5,7 @@
from django.conf import settings
from django.utils.translation import pgettext_lazy
from impersonate.views import impersonate as orig_impersonate
+from random import randint
from ..dashboard.views import staff_member_required
from ..product.utils import products_with_availability, products_for_homepage
@@ -33,3 +34,8 @@
'You are now logged as {}'.format(User.objects.get(pk=uid)))
messages.success(request, msg)
return response
+
+
+def handle_404(request):
+ ctx = {'variant': randint(0, 2)}
+ return TemplateResponse(request, '404.html', ctx, status=404)
diff --git a/saleor/urls.py b/saleor/urls.py
--- a/saleor/urls.py
+++ b/saleor/urls.py
@@ -20,6 +20,9 @@
from .search.urls import urlpatterns as search_urls
from .userprofile.urls import urlpatterns as userprofile_urls
+
+handler404 = 'saleor.core.views.handle_404'
+
urlpatterns = [
url(r'^', include(core_urls)),
url(r'^account/', include(registration_urls)),
| {"golden_diff": "diff --git a/saleor/core/urls.py b/saleor/core/urls.py\n--- a/saleor/core/urls.py\n+++ b/saleor/core/urls.py\n@@ -8,6 +8,7 @@\n urlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^style-guide/', views.styleguide, name='styleguide'),\n+ url(r'^404', views.handle_404, name='404'),\n url(r'^impersonate/(?P<uid>\\d+)/', views.impersonate,\n name='impersonate-start'),\n ]\ndiff --git a/saleor/core/views.py b/saleor/core/views.py\n--- a/saleor/core/views.py\n+++ b/saleor/core/views.py\n@@ -5,6 +5,7 @@\n from django.conf import settings\n from django.utils.translation import pgettext_lazy\n from impersonate.views import impersonate as orig_impersonate\n+from random import randint\n \n from ..dashboard.views import staff_member_required\n from ..product.utils import products_with_availability, products_for_homepage\n@@ -33,3 +34,8 @@\n 'You are now logged as {}'.format(User.objects.get(pk=uid)))\n messages.success(request, msg)\n return response\n+\n+\n+def handle_404(request):\n+ ctx = {'variant': randint(0, 2)}\n+ return TemplateResponse(request, '404.html', ctx, status=404)\ndiff --git a/saleor/urls.py b/saleor/urls.py\n--- a/saleor/urls.py\n+++ b/saleor/urls.py\n@@ -20,6 +20,9 @@\n from .search.urls import urlpatterns as search_urls\n from .userprofile.urls import urlpatterns as userprofile_urls\n \n+\n+handler404 = 'saleor.core.views.handle_404'\n+\n urlpatterns = [\n url(r'^', include(core_urls)),\n url(r'^account/', include(registration_urls)),\n", "issue": "Custom 404 page\nRight now in demo store we display generic 404 page\r\nhttps://demo.getsaleor.com/i-do-not-exist/\r\n\r\nCould implement something more interesting, with a nice design.\r\n\r\nWhat should be included:\r\n- [ ] Drawing\r\n- [ ] Link to the homepage\r\n- [ ] Search bar (?)\r\n- [ ] ...\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.views import serve\nfrom django.views.i18n import JavaScriptCatalog\nfrom graphene_django.views import GraphQLView\n\nfrom .cart.urls import urlpatterns as cart_urls\nfrom .checkout.urls import urlpatterns as checkout_urls\nfrom .core.sitemaps import sitemaps\nfrom .core.urls import urlpatterns as core_urls\nfrom .dashboard.urls import urlpatterns as dashboard_urls\nfrom .data_feeds.urls import urlpatterns as feed_urls\nfrom .order.urls import urlpatterns as order_urls\nfrom .product.urls import urlpatterns as product_urls\nfrom .registration.urls import urlpatterns as registration_urls\nfrom .search.urls import urlpatterns as search_urls\nfrom .userprofile.urls import urlpatterns as userprofile_urls\n\nurlpatterns = [\n url(r'^', include(core_urls)),\n url(r'^account/', include(registration_urls)),\n url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),\n url(r'^checkout/',\n include((checkout_urls, 'checkout'), namespace='checkout')),\n url(r'^dashboard/',\n include((dashboard_urls, 'dashboard'), namespace='dashboard')),\n url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),\n url(r'^impersonate/', include('impersonate.urls')),\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n url(r'^order/', include((order_urls, 'order'), namespace='order')),\n url(r'^products/',\n include((product_urls, 'product'), namespace='product')),\n url(r'^profile/',\n include((userprofile_urls, 'profile'), 
namespace='profile')),\n url(r'^feeds/',\n include((feed_urls, 'data_feeds'), namespace='data_feeds')),\n url(r'^search/', include((search_urls, 'search'), namespace='search')),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'', include('payments.urls')),\n url('', include('social_django.urls', namespace='social')),\n]\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += [\n url(r'^static/(?P<path>.*)$', serve)\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "saleor/urls.py"}, {"content": "from __future__ import unicode_literals\n\nfrom django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^style-guide/', views.styleguide, name='styleguide'),\n url(r'^impersonate/(?P<uid>\\d+)/', views.impersonate,\n name='impersonate-start'),\n]\n", "path": "saleor/core/urls.py"}, {"content": "from __future__ import unicode_literals\n\nfrom django.template.response import TemplateResponse\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.utils.translation import pgettext_lazy\nfrom impersonate.views import impersonate as orig_impersonate\n\nfrom ..dashboard.views import staff_member_required\nfrom ..product.utils import products_with_availability, products_for_homepage\nfrom ..userprofile.models import User\n\n\ndef home(request):\n products = products_for_homepage()[:8]\n products = products_with_availability(\n products, discounts=request.discounts, local_currency=request.currency)\n return TemplateResponse(\n request, 'home.html',\n {'products': products, 'parent': None})\n\n\n@staff_member_required\ndef styleguide(request):\n return TemplateResponse(request, 'styleguide.html')\n\n\ndef impersonate(request, uid):\n response = orig_impersonate(request, uid)\n if request.session.modified:\n msg = pgettext_lazy(\n 'Impersonation message',\n 'You are now logged as {}'.format(User.objects.get(pk=uid)))\n messages.success(request, msg)\n return response\n", "path": "saleor/core/views.py"}]} | 1,666 | 433 |
gh_patches_debug_26413 | rasdani/github-patches | git_diff | svthalia__concrexit-2105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for multiple local partners
### Is your feature request related to a problem? Please describe.
We really want to be able to have multiple local partners at Thalia, and the website currently does not support this.
### Describe the solution you'd like
We would like the ability to add multiple local partners.
### Motivation
We think it's useful to have multiple local partners so we can show our students what is available in Nijmegen.
### Describe alternatives you've considered
Only having one local partner, which does not make sense in the current partner climate.
### Additional context
The local partner is also shown in the newsletter.
</issue>
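At its core the request is a one-line change in query shape, sketched here against the `Partner` model imported in the code below; the template loop consuming the queryset is implied, not part of the excerpt:
```
from partners.models import Partner

# Single-partner lookup: at most one object ever reaches the template.
local_partner = Partner.objects.filter(is_local_partner=True).first()

# Multi-partner lookup: pass the whole queryset and iterate over it.
local_partners = Partner.objects.filter(is_local_partner=True)
for partner in local_partners:
    print(partner)
```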
<code>
[start of website/newsletters/views.py]
1 """Views provided by the newsletters package."""
2 import os
3
4 from django.conf import settings
5 from django.contrib.admin.views.decorators import staff_member_required
6 from django.contrib.auth.decorators import permission_required
7 from django.shortcuts import get_object_or_404, redirect, render
8 from django.utils.translation import activate, get_language_info
9 from django_sendfile import sendfile
10
11 from newsletters import services
12 from newsletters.models import Newsletter
13 from partners.models import Partner
14
15
16 def preview(request, pk, lang=None):
17 """View that renders the newsletter as HTML.
18
19 :param request: the request object
20 :param pk: the newsletter's primary key
21 :param lang: the language of the render
22 :return: HttpResponse 200 containing the newsletter HTML
23 """
24 lang_code = request.LANGUAGE_CODE
25
26 if lang is not None:
27 try:
28 get_language_info(lang)
29 activate(lang)
30 lang_code = lang
31 except KeyError:
32 # Language code not recognised by get_language_info
33 pass
34
35 # Send cached file, if it exists
36 file_path = os.path.join(
37 settings.MEDIA_ROOT, "newsletters", f"{pk}_{lang_code}.html"
38 )
39 if os.path.isfile(file_path):
40 return sendfile(request, file_path)
41
42 newsletter = get_object_or_404(Newsletter, pk=pk)
43 events = services.get_agenda(newsletter.date) if newsletter.date else None
44
45 return render(
46 request,
47 "newsletters/email.html",
48 {
49 "newsletter": newsletter,
50 "agenda_events": events,
51 "main_partner": Partner.objects.filter(is_main_partner=True).first(),
52 "local_partner": Partner.objects.filter(is_local_partner=True).first(),
53 "lang_code": lang_code,
54 },
55 )
56
57
58 @staff_member_required
59 @permission_required("newsletters.send_newsletter")
60 def admin_send(request, pk):
61 """If this is a GET request this view will render a confirmation page for the administrator.
62
63 If it is a POST request the newsletter will be sent to all recipients.
64
65 :param request: the request object
66 :param pk: the newsletter's primary key
67 :return: 302 RedirectResponse if POST else 200 with the
68 confirmation page HTML
69 """
70 newsletter = get_object_or_404(Newsletter, pk=pk)
71
72 if newsletter.sent:
73 return redirect(newsletter)
74
75 if request.POST:
76 services.send_newsletter(newsletter)
77
78 return redirect("admin:newsletters_newsletter_changelist")
79
80 return render(
81 request, "newsletters/admin/send_confirm.html", {"newsletter": newsletter}
82 )
83
[end of website/newsletters/views.py]
[start of website/newsletters/emails.py]
1 """The emails defined by the newsletters package."""
2 import logging
3 from smtplib import SMTPException
4
5 from django.conf import settings
6 from django.core import mail
7 from django.core.mail import EmailMultiAlternatives
8 from django.template.loader import get_template
9 from django.utils import translation, timezone
10 from django.utils.timezone import make_aware
11
12 from newsletters import services
13 from partners.models import Partner
14
15 logger = logging.getLogger(__name__)
16
17
18 def send_newsletter(newsletter):
19 """Send the newsletter as HTML and plaintext email.
20
21 :param newsletter: the newsletter to be send
22 """
23 events = None
24 if newsletter.date:
25 datetime = (
26 make_aware(
27 timezone.datetime(
28 year=newsletter.date.year,
29 month=newsletter.date.month,
30 day=newsletter.date.day,
31 )
32 )
33 if newsletter.date
34 else None
35 )
36 events = services.get_agenda(datetime)
37
38 from_email = settings.NEWSLETTER_FROM_ADDRESS
39 html_template = get_template("newsletters/email.html")
40 text_template = get_template("newsletters/email.txt")
41
42 main_partner = Partner.objects.filter(is_main_partner=True).first()
43 local_partner = Partner.objects.filter(is_local_partner=True).first()
44
45 with mail.get_connection() as connection:
46 language = ("en", "English")
47 translation.activate(language[0])
48
49 subject = "[THALIA] " + newsletter.title
50
51 context = {
52 "newsletter": newsletter,
53 "agenda_events": events,
54 "main_partner": main_partner,
55 "local_partner": local_partner,
56 "lang_code": language[0],
57 }
58
59 html_message = html_template.render(context)
60 text_message = text_template.render(context)
61
62 msg = EmailMultiAlternatives(
63 subject=subject,
64 body=text_message,
65 to=[f"newsletter@{settings.GSUITE_DOMAIN}"],
66 from_email=from_email,
67 connection=connection,
68 )
69 msg.attach_alternative(html_message, "text/html")
70
71 try:
72 msg.send()
73 logger.info("Sent %s newsletter", language[1])
74 except SMTPException:
75 logger.exception("Failed to send the %s newsletter", language[1])
76
77 translation.deactivate()
78
[end of website/newsletters/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/newsletters/emails.py b/website/newsletters/emails.py
--- a/website/newsletters/emails.py
+++ b/website/newsletters/emails.py
@@ -40,7 +40,7 @@
text_template = get_template("newsletters/email.txt")
main_partner = Partner.objects.filter(is_main_partner=True).first()
- local_partner = Partner.objects.filter(is_local_partner=True).first()
+ local_partners = Partner.objects.filter(is_local_partner=True)
with mail.get_connection() as connection:
language = ("en", "English")
@@ -52,7 +52,7 @@
"newsletter": newsletter,
"agenda_events": events,
"main_partner": main_partner,
- "local_partner": local_partner,
+ "local_partners": local_partners,
"lang_code": language[0],
}
diff --git a/website/newsletters/views.py b/website/newsletters/views.py
--- a/website/newsletters/views.py
+++ b/website/newsletters/views.py
@@ -49,7 +49,7 @@
"newsletter": newsletter,
"agenda_events": events,
"main_partner": Partner.objects.filter(is_main_partner=True).first(),
- "local_partner": Partner.objects.filter(is_local_partner=True).first(),
+ "local_partners": Partner.objects.filter(is_local_partner=True),
"lang_code": lang_code,
},
)
| {"golden_diff": "diff --git a/website/newsletters/emails.py b/website/newsletters/emails.py\n--- a/website/newsletters/emails.py\n+++ b/website/newsletters/emails.py\n@@ -40,7 +40,7 @@\n text_template = get_template(\"newsletters/email.txt\")\n \n main_partner = Partner.objects.filter(is_main_partner=True).first()\n- local_partner = Partner.objects.filter(is_local_partner=True).first()\n+ local_partners = Partner.objects.filter(is_local_partner=True)\n \n with mail.get_connection() as connection:\n language = (\"en\", \"English\")\n@@ -52,7 +52,7 @@\n \"newsletter\": newsletter,\n \"agenda_events\": events,\n \"main_partner\": main_partner,\n- \"local_partner\": local_partner,\n+ \"local_partners\": local_partners,\n \"lang_code\": language[0],\n }\n \ndiff --git a/website/newsletters/views.py b/website/newsletters/views.py\n--- a/website/newsletters/views.py\n+++ b/website/newsletters/views.py\n@@ -49,7 +49,7 @@\n \"newsletter\": newsletter,\n \"agenda_events\": events,\n \"main_partner\": Partner.objects.filter(is_main_partner=True).first(),\n- \"local_partner\": Partner.objects.filter(is_local_partner=True).first(),\n+ \"local_partners\": Partner.objects.filter(is_local_partner=True),\n \"lang_code\": lang_code,\n },\n )\n", "issue": "Add support for multiple local partners\n### Is your feature request related to a problem? Please describe.\r\nWe really want to be able to have multiple local partners at Thalia. And the website current does not support this.\r\n\r\n### Describe the solution you'd like\r\nWe would like the ability to add multiple local partners.\r\n\r\n### Motivation\r\nWe think it's useful to have multiple local partners so we can show our students what is available in Nijmegen.\r\n\r\n### Describe alternatives you've considered\r\nOnly having one local partner, which does not make sense in the current partner climate.\r\n\r\n### Additional context\r\nThe local partner is also shown in the newsletter.\n", "before_files": [{"content": "\"\"\"Views provided by the newsletters package.\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import activate, get_language_info\nfrom django_sendfile import sendfile\n\nfrom newsletters import services\nfrom newsletters.models import Newsletter\nfrom partners.models import Partner\n\n\ndef preview(request, pk, lang=None):\n \"\"\"View that renders the newsletter as HTML.\n\n :param request: the request object\n :param pk: the newsletter's primary key\n :param lang: the language of the render\n :return: HttpResponse 200 containing the newsletter HTML\n \"\"\"\n lang_code = request.LANGUAGE_CODE\n\n if lang is not None:\n try:\n get_language_info(lang)\n activate(lang)\n lang_code = lang\n except KeyError:\n # Language code not recognised by get_language_info\n pass\n\n # Send cached file, if it exists\n file_path = os.path.join(\n settings.MEDIA_ROOT, \"newsletters\", f\"{pk}_{lang_code}.html\"\n )\n if os.path.isfile(file_path):\n return sendfile(request, file_path)\n\n newsletter = get_object_or_404(Newsletter, pk=pk)\n events = services.get_agenda(newsletter.date) if newsletter.date else None\n\n return render(\n request,\n \"newsletters/email.html\",\n {\n \"newsletter\": newsletter,\n \"agenda_events\": events,\n \"main_partner\": Partner.objects.filter(is_main_partner=True).first(),\n 
\"local_partner\": Partner.objects.filter(is_local_partner=True).first(),\n \"lang_code\": lang_code,\n },\n )\n\n\n@staff_member_required\n@permission_required(\"newsletters.send_newsletter\")\ndef admin_send(request, pk):\n \"\"\"If this is a GET request this view will render a confirmation page for the administrator.\n\n If it is a POST request the newsletter will be sent to all recipients.\n\n :param request: the request object\n :param pk: the newsletter's primary key\n :return: 302 RedirectResponse if POST else 200 with the\n confirmation page HTML\n \"\"\"\n newsletter = get_object_or_404(Newsletter, pk=pk)\n\n if newsletter.sent:\n return redirect(newsletter)\n\n if request.POST:\n services.send_newsletter(newsletter)\n\n return redirect(\"admin:newsletters_newsletter_changelist\")\n\n return render(\n request, \"newsletters/admin/send_confirm.html\", {\"newsletter\": newsletter}\n )\n", "path": "website/newsletters/views.py"}, {"content": "\"\"\"The emails defined by the newsletters package.\"\"\"\nimport logging\nfrom smtplib import SMTPException\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\nfrom django.utils import translation, timezone\nfrom django.utils.timezone import make_aware\n\nfrom newsletters import services\nfrom partners.models import Partner\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_newsletter(newsletter):\n \"\"\"Send the newsletter as HTML and plaintext email.\n\n :param newsletter: the newsletter to be send\n \"\"\"\n events = None\n if newsletter.date:\n datetime = (\n make_aware(\n timezone.datetime(\n year=newsletter.date.year,\n month=newsletter.date.month,\n day=newsletter.date.day,\n )\n )\n if newsletter.date\n else None\n )\n events = services.get_agenda(datetime)\n\n from_email = settings.NEWSLETTER_FROM_ADDRESS\n html_template = get_template(\"newsletters/email.html\")\n text_template = get_template(\"newsletters/email.txt\")\n\n main_partner = Partner.objects.filter(is_main_partner=True).first()\n local_partner = Partner.objects.filter(is_local_partner=True).first()\n\n with mail.get_connection() as connection:\n language = (\"en\", \"English\")\n translation.activate(language[0])\n\n subject = \"[THALIA] \" + newsletter.title\n\n context = {\n \"newsletter\": newsletter,\n \"agenda_events\": events,\n \"main_partner\": main_partner,\n \"local_partner\": local_partner,\n \"lang_code\": language[0],\n }\n\n html_message = html_template.render(context)\n text_message = text_template.render(context)\n\n msg = EmailMultiAlternatives(\n subject=subject,\n body=text_message,\n to=[f\"newsletter@{settings.GSUITE_DOMAIN}\"],\n from_email=from_email,\n connection=connection,\n )\n msg.attach_alternative(html_message, \"text/html\")\n\n try:\n msg.send()\n logger.info(\"Sent %s newsletter\", language[1])\n except SMTPException:\n logger.exception(\"Failed to send the %s newsletter\", language[1])\n\n translation.deactivate()\n", "path": "website/newsletters/emails.py"}]} | 2,024 | 312 |
gh_patches_debug_32206 | rasdani/github-patches | git_diff | learningequality__kolibri-5236 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reminder: remove debug logging code before final release
### Observed behavior
#5223 introduced some verbose logging code
### Expected behavior
remove before release
### User-facing consequences
logs will get swamped
### Errors and logs
<!--
Relevant logs from:
* the command line
* ~/.kolibri/kolibri.log
* the browser console
Please wrap errors in triple backticks for clean formatting like this:
```
01:10 info: something happened
01:12 error: something bad happened
```
-->
…
### Steps to reproduce
<!--
Precise steps that someone else can follow in order to see this behavior
-->
…
### Context
<!--
Tell us about your environment, including:
* Kolibri version
* Operating system
* Browser
-->
…
</issue>
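The verbose lines in question follow the `'>>>>>> ...'` pattern visible in the module below. The two conventional cleanups are shown here with a generic logger; the actual call sites are in the excerpt that follows:
```
import logging

log = logging.getLogger(__name__)

# Debug-only tracing of the kind flagged above: delete before release.
log.warning('>>>>>> AsyncNotificationQueue.run try')

# If a trace is still worth keeping, demote it so the default INFO-level
# configuration filters it out of production logs.
log.debug('AsyncNotificationQueue.run: executing queued callbacks')
```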
<code>
[start of kolibri/core/notifications/tasks.py]
1 import logging as logger
2 import threading
3 import time
4
5 from django.db import connection
6 from django.db import transaction
7
8 logging = logger.getLogger(__name__)
9
10
11 class AsyncNotificationQueue():
12
13 def __init__(self):
14
15 # Value in seconds to determine the sleep time between log saving batches
16 self.log_saving_interval = 5
17
18 # Where new log saving functions are appended
19 self.queue = []
20
21 # Where the to be executed log saving functions are stored
22 # once a batch save has been invoked
23 self.running = []
24
25 def append(self, fn):
26 """
27 Convenience method to append log saving function to the current queue
28 """
29 self.queue.append(fn)
30
31 def toggle_queue(self):
32 """
33 Method to swap the queue and running, to allow new log saving functions
34 to be added to the queue while previously added functions are being executed
35 and cleared without fear of race conditions dropping saves.
36 """
37 old_queue = self.queue
38 new_queue = self.running
39 self.queue = new_queue
40 self.running = old_queue
41
42 def clear_running(self):
43 """
44 Reset the running list to drop references to already executed log saving functions
45 """
46 self.running = []
47
48 def run(self):
49 """
50 Execute any log saving functions in the self.running list
51 """
52 if self.running:
53 # Do this conditionally to avoid opening an unnecessary transaction
54 with transaction.atomic():
55 for fn in self.running:
56 try:
57 logging.warn('>>>>>> AsyncNotificationQueue.run try')
58 fn()
59 except Exception as e:
60 # Catch all exceptions and log, otherwise the background process will end
61 # and no more logs will be saved!
62 logging.warn('>>>>>> AsyncNotificationQueue.run except {}'.format(e))
63 logging.debug("Exception raised during background notification calculation: ", e)
64 connection.close()
65
66 def start(self):
67 while True:
68 logging.warn('>>>>>> AsyncNotificationQueue.start: {}'.format(threading.currentThread().ident))
69 logging.warn('\t\t len(self.running): {}'.format(self.running))
70 logging.warn('\t\t len(self.queue): {}'.format(self.queue))
71 self.toggle_queue()
72 self.run()
73 self.clear_running()
74 time.sleep(self.log_saving_interval)
75
76
77 log_queue = AsyncNotificationQueue()
78
79
80 def add_to_save_queue(fn):
81 log_queue.append(fn)
82
83
84 def wrap_to_save_queue(fn, *args):
85 def wrapper():
86 fn(*args)
87 log_queue.append(wrapper)
88
89
90 class AsyncNotificationsThread(threading.Thread):
91
92 @classmethod
93 def start_command(cls):
94 logging.warn('>>>>>> AsyncNotificationsThread.start_command: {}'.format(threading.currentThread().ident))
95 thread = cls()
96 thread.daemon = True
97 thread.start()
98
99 def run(self):
100 logging.info("Initializing background log saving process")
101 logging.warn('>>>>>> AsyncNotificationsThread.run: {}'.format(threading.currentThread().ident))
102 log_queue.start()
103
[end of kolibri/core/notifications/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/notifications/tasks.py b/kolibri/core/notifications/tasks.py
--- a/kolibri/core/notifications/tasks.py
+++ b/kolibri/core/notifications/tasks.py
@@ -54,20 +54,15 @@
with transaction.atomic():
for fn in self.running:
try:
- logging.warn('>>>>>> AsyncNotificationQueue.run try')
fn()
except Exception as e:
# Catch all exceptions and log, otherwise the background process will end
# and no more logs will be saved!
- logging.warn('>>>>>> AsyncNotificationQueue.run except {}'.format(e))
- logging.debug("Exception raised during background notification calculation: ", e)
+ logging.warn("Exception raised during background notification calculation: ", e)
connection.close()
def start(self):
while True:
- logging.warn('>>>>>> AsyncNotificationQueue.start: {}'.format(threading.currentThread().ident))
- logging.warn('\t\t len(self.running): {}'.format(self.running))
- logging.warn('\t\t len(self.queue): {}'.format(self.queue))
self.toggle_queue()
self.run()
self.clear_running()
@@ -91,12 +86,10 @@
@classmethod
def start_command(cls):
- logging.warn('>>>>>> AsyncNotificationsThread.start_command: {}'.format(threading.currentThread().ident))
thread = cls()
thread.daemon = True
thread.start()
def run(self):
logging.info("Initializing background log saving process")
- logging.warn('>>>>>> AsyncNotificationsThread.run: {}'.format(threading.currentThread().ident))
log_queue.start()
| {"golden_diff": "diff --git a/kolibri/core/notifications/tasks.py b/kolibri/core/notifications/tasks.py\n--- a/kolibri/core/notifications/tasks.py\n+++ b/kolibri/core/notifications/tasks.py\n@@ -54,20 +54,15 @@\n with transaction.atomic():\n for fn in self.running:\n try:\n- logging.warn('>>>>>> AsyncNotificationQueue.run try')\n fn()\n except Exception as e:\n # Catch all exceptions and log, otherwise the background process will end\n # and no more logs will be saved!\n- logging.warn('>>>>>> AsyncNotificationQueue.run except {}'.format(e))\n- logging.debug(\"Exception raised during background notification calculation: \", e)\n+ logging.warn(\"Exception raised during background notification calculation: \", e)\n connection.close()\n \n def start(self):\n while True:\n- logging.warn('>>>>>> AsyncNotificationQueue.start: {}'.format(threading.currentThread().ident))\n- logging.warn('\\t\\t len(self.running): {}'.format(self.running))\n- logging.warn('\\t\\t len(self.queue): {}'.format(self.queue))\n self.toggle_queue()\n self.run()\n self.clear_running()\n@@ -91,12 +86,10 @@\n \n @classmethod\n def start_command(cls):\n- logging.warn('>>>>>> AsyncNotificationsThread.start_command: {}'.format(threading.currentThread().ident))\n thread = cls()\n thread.daemon = True\n thread.start()\n \n def run(self):\n logging.info(\"Initializing background log saving process\")\n- logging.warn('>>>>>> AsyncNotificationsThread.run: {}'.format(threading.currentThread().ident))\n log_queue.start()\n", "issue": "reminder: remove debug logging code before final release\n\r\n### Observed behavior\r\n\r\n#5223 introduced some verbose logging code\r\n\r\n### Expected behavior\r\n\r\nremove before release\r\n\r\n### User-facing consequences\r\n\r\nlogs will get swamped\r\n\r\n### Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/kolibri.log\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n\u2026\r\n\r\n### Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\n\u2026\r\n\r\n### Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\n\u2026\r\n\n", "before_files": [{"content": "import logging as logger\nimport threading\nimport time\n\nfrom django.db import connection\nfrom django.db import transaction\n\nlogging = logger.getLogger(__name__)\n\n\nclass AsyncNotificationQueue():\n\n def __init__(self):\n\n # Value in seconds to determine the sleep time between log saving batches\n self.log_saving_interval = 5\n\n # Where new log saving functions are appended\n self.queue = []\n\n # Where the to be executed log saving functions are stored\n # once a batch save has been invoked\n self.running = []\n\n def append(self, fn):\n \"\"\"\n Convenience method to append log saving function to the current queue\n \"\"\"\n self.queue.append(fn)\n\n def toggle_queue(self):\n \"\"\"\n Method to swap the queue and running, to allow new log saving functions\n to be added to the queue while previously added functions are being executed\n and cleared without fear of race conditions dropping saves.\n \"\"\"\n old_queue = self.queue\n new_queue = self.running\n self.queue = new_queue\n self.running = old_queue\n\n def clear_running(self):\n \"\"\"\n Reset the running list to drop references to already 
executed log saving functions\n \"\"\"\n self.running = []\n\n def run(self):\n \"\"\"\n Execute any log saving functions in the self.running list\n \"\"\"\n if self.running:\n # Do this conditionally to avoid opening an unnecessary transaction\n with transaction.atomic():\n for fn in self.running:\n try:\n logging.warn('>>>>>> AsyncNotificationQueue.run try')\n fn()\n except Exception as e:\n # Catch all exceptions and log, otherwise the background process will end\n # and no more logs will be saved!\n logging.warn('>>>>>> AsyncNotificationQueue.run except {}'.format(e))\n logging.debug(\"Exception raised during background notification calculation: \", e)\n connection.close()\n\n def start(self):\n while True:\n logging.warn('>>>>>> AsyncNotificationQueue.start: {}'.format(threading.currentThread().ident))\n logging.warn('\\t\\t len(self.running): {}'.format(self.running))\n logging.warn('\\t\\t len(self.queue): {}'.format(self.queue))\n self.toggle_queue()\n self.run()\n self.clear_running()\n time.sleep(self.log_saving_interval)\n\n\nlog_queue = AsyncNotificationQueue()\n\n\ndef add_to_save_queue(fn):\n log_queue.append(fn)\n\n\ndef wrap_to_save_queue(fn, *args):\n def wrapper():\n fn(*args)\n log_queue.append(wrapper)\n\n\nclass AsyncNotificationsThread(threading.Thread):\n\n @classmethod\n def start_command(cls):\n logging.warn('>>>>>> AsyncNotificationsThread.start_command: {}'.format(threading.currentThread().ident))\n thread = cls()\n thread.daemon = True\n thread.start()\n\n def run(self):\n logging.info(\"Initializing background log saving process\")\n logging.warn('>>>>>> AsyncNotificationsThread.run: {}'.format(threading.currentThread().ident))\n log_queue.start()\n", "path": "kolibri/core/notifications/tasks.py"}]} | 1,541 | 350 |
gh_patches_debug_3181 | rasdani/github-patches | git_diff | translate__pootle-6456 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Terminology is not updated when term units are updated
The terminology stemmer listens to `submission.post_save`, but submissions are always created via `bulk_create`, so it doesn't seem to get triggered.
</issue>
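The mismatch described above is general Django behaviour: `bulk_create` inserts rows without calling `save()`, so `post_save` receivers never fire for those rows. Schematically (the receiver body is illustrative, and this only runs inside a configured Django project):
```
from django.db.models.signals import post_save
from django.dispatch import receiver

from pootle_statistics.models import Submission  # as imported in the module below

@receiver(post_save, sender=Submission)
def on_submission_saved(sender, instance, **kwargs):
    print("post_save fired for", instance)

# sub.save() / Submission.objects.create(...)  -> receiver runs
# Submission.objects.bulk_create([...])        -> rows are inserted but no
#                                                 post_save is sent, which is
#                                                 why the stemmer never fires
```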
<code>
[start of pootle/apps/pootle_terminology/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models.signals import post_save
10 from django.dispatch import receiver
11
12 from pootle.core.delegate import terminology
13 from pootle_statistics.models import Submission, SubmissionFields
14 from pootle_store.constants import TRANSLATED
15 from pootle_store.models import Unit
16
17
18 @receiver(post_save, sender=Unit)
19 def handle_unit_save(**kwargs):
20 unit = kwargs["instance"]
21 if not kwargs.get("created"):
22 return
23 if unit.state != TRANSLATED:
24 return
25 is_terminology = (
26 unit.store.name.startswith("pootle-terminology")
27 or (unit.store.translation_project.project.code
28 == "terminology"))
29 if not is_terminology:
30 return
31 terminology.get(Unit)(unit).stem()
32
33
34 @receiver(post_save, sender=Submission)
35 def handle_submission_save(**kwargs):
36 sub = kwargs["instance"]
37 if sub.type != SubmissionFields.TARGET:
38 return
39 unit = sub.unit
40 if unit.state != TRANSLATED:
41 return
42 is_terminology = (
43 unit.store.name.startswith("pootle-terminology")
44 or (unit.store.translation_project.project.code
45 == "terminology"))
46 if not is_terminology:
47 return
48 terminology.get(Unit)(unit).stem()
49
[end of pootle/apps/pootle_terminology/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_terminology/receivers.py b/pootle/apps/pootle_terminology/receivers.py
--- a/pootle/apps/pootle_terminology/receivers.py
+++ b/pootle/apps/pootle_terminology/receivers.py
@@ -34,7 +34,7 @@
@receiver(post_save, sender=Submission)
def handle_submission_save(**kwargs):
sub = kwargs["instance"]
- if sub.type != SubmissionFields.TARGET:
+ if sub.field != SubmissionFields.TARGET:
return
unit = sub.unit
if unit.state != TRANSLATED:
| {"golden_diff": "diff --git a/pootle/apps/pootle_terminology/receivers.py b/pootle/apps/pootle_terminology/receivers.py\n--- a/pootle/apps/pootle_terminology/receivers.py\n+++ b/pootle/apps/pootle_terminology/receivers.py\n@@ -34,7 +34,7 @@\n @receiver(post_save, sender=Submission)\n def handle_submission_save(**kwargs):\n sub = kwargs[\"instance\"]\n- if sub.type != SubmissionFields.TARGET:\n+ if sub.field != SubmissionFields.TARGET:\n return\n unit = sub.unit\n if unit.state != TRANSLATED:\n", "issue": "Terminology is not updated when term units are updated\nthe terminology stemmer listens to `submission.post_save` - but submissions are always `bulk_created` so it doesn't seem to get triggered\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom pootle.core.delegate import terminology\nfrom pootle_statistics.models import Submission, SubmissionFields\nfrom pootle_store.constants import TRANSLATED\nfrom pootle_store.models import Unit\n\n\n@receiver(post_save, sender=Unit)\ndef handle_unit_save(**kwargs):\n unit = kwargs[\"instance\"]\n if not kwargs.get(\"created\"):\n return\n if unit.state != TRANSLATED:\n return\n is_terminology = (\n unit.store.name.startswith(\"pootle-terminology\")\n or (unit.store.translation_project.project.code\n == \"terminology\"))\n if not is_terminology:\n return\n terminology.get(Unit)(unit).stem()\n\n\n@receiver(post_save, sender=Submission)\ndef handle_submission_save(**kwargs):\n sub = kwargs[\"instance\"]\n if sub.type != SubmissionFields.TARGET:\n return\n unit = sub.unit\n if unit.state != TRANSLATED:\n return\n is_terminology = (\n unit.store.name.startswith(\"pootle-terminology\")\n or (unit.store.translation_project.project.code\n == \"terminology\"))\n if not is_terminology:\n return\n terminology.get(Unit)(unit).stem()\n", "path": "pootle/apps/pootle_terminology/receivers.py"}]} | 1,027 | 144 |
gh_patches_debug_9819 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-1520 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Maven sign and upload should be done inside a docker container
### Describe the bug
We should be using a docker container to run the Jenkins job.
https://github.com/opensearch-project/opensearch-infra/blob/main/jenkins/jobs/OpenSearch_CI/release_ci/opensearch_maven_release/Jenkinsfile#L3-L5
Also, should this file be a part of `opensearch-build`?
### To reproduce
Docker container is not used -
https://github.com/opensearch-project/opensearch-infra/blob/main/jenkins/jobs/OpenSearch_CI/release_ci/opensearch_maven_release/Jenkinsfile#L3-L5
### Expected behavior
_No response_
### Screenshots
If applicable, add screenshots to help explain your problem.
### Host / Environment
_No response_
### Additional context
_No response_
### Relevant log output
_No response_
</issue>
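Wherever the Jenkinsfile ends up living, the requested isolation amounts to running the release steps inside a pinned container instead of on the bare agent. A rough Python sketch of that idea follows; the image name and mount paths are assumptions for illustration, not taken from the issue:
```
import subprocess

subprocess.run(
    [
        "docker", "run", "--rm",
        "-v", "/workspace:/workspace",    # share the checked-out build tree
        "example/ci-build-image:latest",  # hypothetical pinned CI image
        "python", "src/run_sign.py", "/workspace/manifest.yml",
    ],
    check=True,
)
```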
<code>
[start of src/run_sign.py]
1 #!/usr/bin/env python
2
3 # SPDX-License-Identifier: Apache-2.0
4 #
5 # The OpenSearch Contributors require contributions made to
6 # this file be licensed under the Apache-2.0 license or a
7 # compatible open source license.
8
9 import argparse
10 import logging
11 import sys
12 from pathlib import Path
13
14 from sign_workflow.sign_artifacts import SignArtifacts
15 from sign_workflow.signer import Signer
16 from system import console
17
18 ACCEPTED_SIGNATURE_FILE_TYPES = [".sig"]
19
20
21 def main():
22 parser = argparse.ArgumentParser(description="Sign artifacts")
23 parser.add_argument("target", type=Path, help="Path to local manifest file or artifact directory.")
24 parser.add_argument("--component", nargs="?", help="Component name")
25 parser.add_argument("--type", nargs="?", help="Artifact type")
26 parser.add_argument("--sigtype", choices=ACCEPTED_SIGNATURE_FILE_TYPES, help="Type of Signature file", default=".asc")
27 parser.add_argument(
28 "-v",
29 "--verbose",
30 help="Show more verbose output.",
31 action="store_const",
32 default=logging.INFO,
33 const=logging.DEBUG,
34 dest="logging_level",
35 )
36 args = parser.parse_args()
37
38 console.configure(level=args.logging_level)
39
40 sign = SignArtifacts.from_path(path=args.target,
41 component=args.component,
42 artifact_type=args.type,
43 signature_type=args.sigtype,
44 signer=Signer())
45
46 sign.sign()
47
48
49 if __name__ == "__main__":
50 sys.exit(main())
51
[end of src/run_sign.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/run_sign.py b/src/run_sign.py
--- a/src/run_sign.py
+++ b/src/run_sign.py
@@ -24,6 +24,7 @@
parser.add_argument("--component", nargs="?", help="Component name")
parser.add_argument("--type", nargs="?", help="Artifact type")
parser.add_argument("--sigtype", choices=ACCEPTED_SIGNATURE_FILE_TYPES, help="Type of Signature file", default=".asc")
+ parser.add_argument("--platform", nargs="?", help="The distribution platform", default="linux")
parser.add_argument(
"-v",
"--verbose",
| {"golden_diff": "diff --git a/src/run_sign.py b/src/run_sign.py\n--- a/src/run_sign.py\n+++ b/src/run_sign.py\n@@ -24,6 +24,7 @@\n parser.add_argument(\"--component\", nargs=\"?\", help=\"Component name\")\n parser.add_argument(\"--type\", nargs=\"?\", help=\"Artifact type\")\n parser.add_argument(\"--sigtype\", choices=ACCEPTED_SIGNATURE_FILE_TYPES, help=\"Type of Signature file\", default=\".asc\")\n+ parser.add_argument(\"--platform\", nargs=\"?\", help=\"The distribution platform\", default=\"linux\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n", "issue": "[Bug]: Maven sign and upload should be done inside a docker container\n### Describe the bug\n\nWe should be using a docker container to run jenkinsjob. \r\n\r\nhttps://github.com/opensearch-project/opensearch-infra/blob/main/jenkins/jobs/OpenSearch_CI/release_ci/opensearch_maven_release/Jenkinsfile#L3-L5\r\n\r\nAlso, should this file be a part of `opensearch-build`?\n\n### To reproduce\n\nDocker container is not used - \r\n\r\nhttps://github.com/opensearch-project/opensearch-infra/blob/main/jenkins/jobs/OpenSearch_CI/release_ci/opensearch_maven_release/Jenkinsfile#L3-L5\n\n### Expected behavior\n\n_No response_\n\n### Screenshots\n\nIf applicable, add screenshots to help explain your problem.\n\n### Host / Environment\n\n_No response_\n\n### Additional context\n\n_No response_\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport argparse\nimport logging\nimport sys\nfrom pathlib import Path\n\nfrom sign_workflow.sign_artifacts import SignArtifacts\nfrom sign_workflow.signer import Signer\nfrom system import console\n\nACCEPTED_SIGNATURE_FILE_TYPES = [\".sig\"]\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Sign artifacts\")\n parser.add_argument(\"target\", type=Path, help=\"Path to local manifest file or artifact directory.\")\n parser.add_argument(\"--component\", nargs=\"?\", help=\"Component name\")\n parser.add_argument(\"--type\", nargs=\"?\", help=\"Artifact type\")\n parser.add_argument(\"--sigtype\", choices=ACCEPTED_SIGNATURE_FILE_TYPES, help=\"Type of Signature file\", default=\".asc\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Show more verbose output.\",\n action=\"store_const\",\n default=logging.INFO,\n const=logging.DEBUG,\n dest=\"logging_level\",\n )\n args = parser.parse_args()\n\n console.configure(level=args.logging_level)\n\n sign = SignArtifacts.from_path(path=args.target,\n component=args.component,\n artifact_type=args.type,\n signature_type=args.sigtype,\n signer=Signer())\n\n sign.sign()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "src/run_sign.py"}]} | 1,128 | 132 |
gh_patches_debug_7424 | rasdani/github-patches | git_diff | facebookresearch__habitat-lab-132 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Recursive directory lookup can take long
This [line](https://github.com/facebookresearch/habitat-api/blob/master/setup.py#L38) looks for `requirements.txt` under every directory, which can be especially costly if you have large data directories.
One way around this is to look only at a specific set of directories and to ignore the data directories.
</issue>
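To see why the recursive lookup hurts, here is a rough timing sketch. It is a minimal illustration assuming the fix proposed above (scope the scan to `habitat_baselines/`); the actual numbers depend entirely on how large the data directories are:

```python
import glob
import time

# Unbounded recursive scan: walks every directory, including any data/ trees.
start = time.perf_counter()
unscoped = glob.glob("**/requirements.txt", recursive=True)
t_unscoped = time.perf_counter() - start

# Scoped scan: only the top-level file plus the package tree.
start = time.perf_counter()
scoped = ["requirements.txt"] + glob.glob(
    "habitat_baselines/**/requirements.txt", recursive=True
)
t_scoped = time.perf_counter() - start

print(f"unscoped: {len(unscoped)} files in {t_unscoped:.3f}s")
print(f"scoped:   {len(scoped)} files in {t_scoped:.3f}s")
```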
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 import glob
8 import os.path
9 import sys
10
11 import setuptools
12 from setuptools.command.develop import develop as DefaultDevelopCommand
13 from setuptools.command.install import install as DefaultInstallCommand
14
15 sys.path.insert(0, os.path.join(os.path.dirname(__file__), "habitat"))
16 from version import VERSION # isort:skip noqa
17
18
19 with open("README.md", encoding="utf8") as f:
20 readme = f.read()
21
22 with open("LICENSE") as f:
23 license = f.read()
24
25 with open("requirements.txt") as f:
26 reqs = f.read()
27
28 DISTNAME = "habitat"
29 DESCRIPTION = "habitat: a suite for embodied agent tasks and benchmarks"
30 LONG_DESCRIPTION = readme
31 AUTHOR = "Facebook AI Research"
32 LICENSE = license
33 REQUIREMENTS = reqs.strip().split("\n")
34 BASELINE_PATH = ["habitat_baselines", "habitat_baselines.*"]
35 DEFAULT_EXCLUSION = ["test", "examples"]
36 FULL_REQUIREMENTS = set()
37 # collect requirements.txt file in all subdirectories
38 for file_name in glob.glob("**/requirements.txt", recursive=True):
39 with open(file_name) as f:
40 reqs = f.read()
41 FULL_REQUIREMENTS.update(reqs.strip().split("\n"))
42
43
44 class OptionedCommand:
45 r"""Generic Command class that takes extra user options and modifies
46 arguments in setuptools.setup() accordingly.
47 Though OptionedCommand inherits directly from object, it assumes
48 inheritance from DefaultDevelopCommand or DefaultInstallCommand, as it
49 overrides methods from those two classes.
50 """
51
52 user_options = [("all", None, "include habitat_baselines in installation")]
53
54 def initialize_options(self):
55 super().initialize_options()
56 self.all = None
57
58 def run(self):
59 if not self.all: # install core only
60 DEFAULT_EXCLUSION.extend(BASELINE_PATH)
61 self.distribution.packages = setuptools.find_packages(
62 exclude=DEFAULT_EXCLUSION
63 )
64 # self.distribution accesses arguments of setup() in main()
65 else: # install all except test and examples
66 self.distribution.install_requires = FULL_REQUIREMENTS
67 super().run()
68
69
70 class InstallCommand(OptionedCommand, DefaultInstallCommand):
71 user_options = (
72 getattr(DefaultInstallCommand, "user_options", [])
73 + OptionedCommand.user_options
74 )
75
76
77 class DevelopCommand(OptionedCommand, DefaultDevelopCommand):
78 user_options = (
79 getattr(DefaultDevelopCommand, "user_options", [])
80 + OptionedCommand.user_options
81 )
82
83
84 if __name__ == "__main__":
85 setuptools.setup(
86 name=DISTNAME,
87 install_requires=REQUIREMENTS,
88 packages=setuptools.find_packages(exclude=DEFAULT_EXCLUSION),
89 version=VERSION,
90 description=DESCRIPTION,
91 long_description=LONG_DESCRIPTION,
92 author=AUTHOR,
93 license=LICENSE,
94 setup_requires=["pytest-runner"],
95 tests_require=["pytest"],
96 include_package_data=True,
97 cmdclass={"install": InstallCommand, "develop": DevelopCommand},
98 )
99
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,9 @@
DEFAULT_EXCLUSION = ["test", "examples"]
FULL_REQUIREMENTS = set()
# collect requirements.txt file in all subdirectories
-for file_name in glob.glob("**/requirements.txt", recursive=True):
+for file_name in ["requirements.txt"] + glob.glob(
+ "habitat_baselines/**/requirements.txt", recursive=True
+):
with open(file_name) as f:
reqs = f.read()
FULL_REQUIREMENTS.update(reqs.strip().split("\n"))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,7 +35,9 @@\n DEFAULT_EXCLUSION = [\"test\", \"examples\"]\n FULL_REQUIREMENTS = set()\n # collect requirements.txt file in all subdirectories\n-for file_name in glob.glob(\"**/requirements.txt\", recursive=True):\n+for file_name in [\"requirements.txt\"] + glob.glob(\n+ \"habitat_baselines/**/requirements.txt\", recursive=True\n+):\n with open(file_name) as f:\n reqs = f.read()\n FULL_REQUIREMENTS.update(reqs.strip().split(\"\\n\"))\n", "issue": "Recursive directory lookup can take long\nThis [line](https://github.com/facebookresearch/habitat-api/blob/master/setup.py#L38) looks for requirements.txt under all directories, this can be especially costly if you have data directories. \r\n\r\nOne way to get around this is to only look at specific set of directories and ignore data directories.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport glob\nimport os.path\nimport sys\n\nimport setuptools\nfrom setuptools.command.develop import develop as DefaultDevelopCommand\nfrom setuptools.command.install import install as DefaultInstallCommand\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"habitat\"))\nfrom version import VERSION # isort:skip noqa\n\n\nwith open(\"README.md\", encoding=\"utf8\") as f:\n readme = f.read()\n\nwith open(\"LICENSE\") as f:\n license = f.read()\n\nwith open(\"requirements.txt\") as f:\n reqs = f.read()\n\nDISTNAME = \"habitat\"\nDESCRIPTION = \"habitat: a suite for embodied agent tasks and benchmarks\"\nLONG_DESCRIPTION = readme\nAUTHOR = \"Facebook AI Research\"\nLICENSE = license\nREQUIREMENTS = reqs.strip().split(\"\\n\")\nBASELINE_PATH = [\"habitat_baselines\", \"habitat_baselines.*\"]\nDEFAULT_EXCLUSION = [\"test\", \"examples\"]\nFULL_REQUIREMENTS = set()\n# collect requirements.txt file in all subdirectories\nfor file_name in glob.glob(\"**/requirements.txt\", recursive=True):\n with open(file_name) as f:\n reqs = f.read()\n FULL_REQUIREMENTS.update(reqs.strip().split(\"\\n\"))\n\n\nclass OptionedCommand:\n r\"\"\"Generic Command class that takes extra user options and modifies\n arguments in setuptools.setup() accordingly.\n Though OptionedCommand inherits directly from object, it assumes\n inheritance from DefaultDevelopCommand or DefaultInstallCommand, as it\n overrides methods from those two classes.\n \"\"\"\n\n user_options = [(\"all\", None, \"include habitat_baselines in installation\")]\n\n def initialize_options(self):\n super().initialize_options()\n self.all = None\n\n def run(self):\n if not self.all: # install core only\n DEFAULT_EXCLUSION.extend(BASELINE_PATH)\n self.distribution.packages = setuptools.find_packages(\n exclude=DEFAULT_EXCLUSION\n )\n # self.distribution accesses arguments of setup() in main()\n else: # install all except test and examples\n self.distribution.install_requires = FULL_REQUIREMENTS\n super().run()\n\n\nclass InstallCommand(OptionedCommand, DefaultInstallCommand):\n user_options = (\n getattr(DefaultInstallCommand, \"user_options\", [])\n + OptionedCommand.user_options\n )\n\n\nclass DevelopCommand(OptionedCommand, DefaultDevelopCommand):\n user_options = (\n getattr(DefaultDevelopCommand, \"user_options\", [])\n + OptionedCommand.user_options\n )\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=DISTNAME,\n 
install_requires=REQUIREMENTS,\n packages=setuptools.find_packages(exclude=DEFAULT_EXCLUSION),\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author=AUTHOR,\n license=LICENSE,\n setup_requires=[\"pytest-runner\"],\n tests_require=[\"pytest\"],\n include_package_data=True,\n cmdclass={\"install\": InstallCommand, \"develop\": DevelopCommand},\n )\n", "path": "setup.py"}]} | 1,476 | 133 |
gh_patches_debug_40883 | rasdani/github-patches | git_diff | fossasia__open-event-server-5135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add completed orders to order-statistics endpoint
**Is your feature request related to a problem? Please describe.**
Currently the order-statistics endpoint does not return completed orders.
**Describe the solution you'd like**
It should return completed orders and fix sales accordingly: sales should be calculated from the values of completed orders only.
**Additional context**
Needed in the frontend (FE).
</issue>
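A minimal sketch of the requested behaviour, with plain dicts standing in for the SQLAlchemy queries the real endpoint uses: each aggregate gains a `completed` bucket alongside the existing statuses.

```python
# Hypothetical simplification: count orders per status in one pass.
STATUSES = ["draft", "cancelled", "pending", "expired", "placed", "completed"]

def order_stats(orders):
    result = {status: 0 for status in STATUSES}
    result["total"] = len(orders)
    for order in orders:
        if order["status"] in result:
            result[order["status"]] += 1
    return result

print(order_stats([{"status": "completed"}, {"status": "placed"}]))
```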
<code>
[start of app/api/order_statistics/events.py]
1 from flask_rest_jsonapi import ResourceDetail
2 from marshmallow_jsonapi import fields
3 from marshmallow_jsonapi.flask import Schema
4 from sqlalchemy import func
5
6 from app.api.bootstrap import api
7 from app.api.helpers.db import get_count
8 from app.api.helpers.db import safe_query
9 from app.api.helpers.utilities import dasherize
10 from app.models import db
11 from app.models.event import Event
12 from app.models.order import Order, OrderTicket
13
14
15 class OrderStatisticsEventSchema(Schema):
16 """
17 Api schema for general statistics of event
18 """
19
20 class Meta:
21 """
22 Meta class
23 """
24 type_ = 'order-statistics-event'
25 self_view = 'v1.order_statistics_event_detail'
26 self_view_kwargs = {'id': '<id>'}
27 inflect = dasherize
28
29 id = fields.Str()
30 identifier = fields.Str()
31 tickets = fields.Method("tickets_count")
32 orders = fields.Method("orders_count")
33 sales = fields.Method("sales_count")
34
35 def tickets_count(self, obj):
36 obj_id = obj.id
37 total = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
38 Order.event_id == obj_id).scalar()
39 draft = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
40 Order.event_id == obj_id, Order.status == 'draft').scalar()
41 cancelled = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
42 Order.event_id == obj_id, Order.status == 'cancelled').scalar()
43 pending = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
44 Order.event_id == obj_id, Order.status == 'pending').scalar()
45 expired = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
46 Order.event_id == obj_id, Order.status == 'expired').scalar()
47 placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
48 Order.event_id == obj_id, Order.status == 'placed').scalar()
49 result = {
50 'total': total or 0,
51 'draft': draft or 0,
52 'cancelled': cancelled or 0,
53 'pending': pending or 0,
54 'expired': expired or 0,
55 'placed': placed or 0
56 }
57 return result
58
59 def orders_count(self, obj):
60 obj_id = obj.id
61 total = get_count(db.session.query(Order).filter(Order.event_id == obj_id))
62 draft = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'draft'))
63 cancelled = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'cancelled'))
64 pending = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'pending'))
65 expired = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'expired'))
66 placed = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'placed'))
67 result = {
68 'total': total or 0,
69 'draft': draft or 0,
70 'cancelled': cancelled or 0,
71 'pending': pending or 0,
72 'expired': expired or 0,
73 'placed': placed or 0
74 }
75 return result
76
77 def sales_count(self, obj):
78 obj_id = obj.id
79 total = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id).scalar()
80 draft = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
81 Order.status == 'draft').scalar()
82 cancelled = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
83 Order.status == 'cancelled').scalar()
84 pending = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
85 Order.status == 'pending').scalar()
86 expired = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
87 Order.status == 'expired').scalar()
88 placed = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
89 Order.status == 'placed').scalar()
90 result = {
91 'total': total or 0,
92 'draft': draft or 0,
93 'cancelled': cancelled or 0,
94 'pending': pending or 0,
95 'expired': expired or 0,
96 'placed': placed or 0
97 }
98 return result
99
100
101 class OrderStatisticsEventDetail(ResourceDetail):
102 """
103 Event statistics detail by id
104 """
105
106 def before_get_object(self, view_kwargs):
107 if view_kwargs.get('identifier'):
108 event = safe_query(self, Event, 'identifier', view_kwargs['identifier'], 'identifier')
109 view_kwargs['id'] = event.id
110
111 methods = ['GET']
112 decorators = (api.has_permission('is_coorganizer', fetch="id", fetch_as="event_id", model=Event),)
113 schema = OrderStatisticsEventSchema
114 data_layer = {'session': db.session,
115 'model': Event,
116 'methods': {
117 'before_get_object': before_get_object
118 }}
119
[end of app/api/order_statistics/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/order_statistics/events.py b/app/api/order_statistics/events.py
--- a/app/api/order_statistics/events.py
+++ b/app/api/order_statistics/events.py
@@ -46,13 +46,16 @@
Order.event_id == obj_id, Order.status == 'expired').scalar()
placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
Order.event_id == obj_id, Order.status == 'placed').scalar()
+ completed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
+ Order.event_id == obj_id, Order.status == 'completed').scalar()
result = {
'total': total or 0,
'draft': draft or 0,
'cancelled': cancelled or 0,
'pending': pending or 0,
'expired': expired or 0,
- 'placed': placed or 0
+ 'placed': placed or 0,
+ 'completed': completed or 0
}
return result
@@ -64,13 +67,15 @@
pending = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'pending'))
expired = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'expired'))
placed = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'placed'))
+ completed = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'completed'))
result = {
'total': total or 0,
'draft': draft or 0,
'cancelled': cancelled or 0,
'pending': pending or 0,
'expired': expired or 0,
- 'placed': placed or 0
+ 'placed': placed or 0,
+ 'completed': completed or 0
}
return result
@@ -87,13 +92,16 @@
Order.status == 'expired').scalar()
placed = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
Order.status == 'placed').scalar()
+ completed = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,
+ Order.status == 'completed').scalar()
result = {
'total': total or 0,
'draft': draft or 0,
'cancelled': cancelled or 0,
'pending': pending or 0,
'expired': expired or 0,
- 'placed': placed or 0
+ 'placed': placed or 0,
+ 'completed': completed or 0
}
return result
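Design note on the diff above: it keeps the existing one-query-per-status shape and adds a `completed` query to each method. A hedged alternative (not the project's code; it assumes a configured SQLAlchemy session and an `Order`-like model) would collapse each method into a single GROUP BY:

```python
from sqlalchemy import func

def sales_by_status(session, order_model, event_id):
    """Hypothetical helper: one GROUP BY instead of one query per status."""
    rows = (
        session.query(order_model.status, func.sum(order_model.amount))
        .filter(order_model.event_id == event_id)
        .group_by(order_model.status)
        .all()
    )
    totals = {status: total or 0 for status, total in rows}
    totals["total"] = sum(totals.values())
    return totals
```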
| {"golden_diff": "diff --git a/app/api/order_statistics/events.py b/app/api/order_statistics/events.py\n--- a/app/api/order_statistics/events.py\n+++ b/app/api/order_statistics/events.py\n@@ -46,13 +46,16 @@\n Order.event_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id, Order.status == 'placed').scalar()\n+ completed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n+ Order.event_id == obj_id, Order.status == 'completed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n- 'placed': placed or 0\n+ 'placed': placed or 0,\n+ 'completed': completed or 0\n }\n return result\n \n@@ -64,13 +67,15 @@\n pending = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'pending'))\n expired = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'expired'))\n placed = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'placed'))\n+ completed = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'completed'))\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n- 'placed': placed or 0\n+ 'placed': placed or 0,\n+ 'completed': completed or 0\n }\n return result\n \n@@ -87,13 +92,16 @@\n Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,\n Order.status == 'placed').scalar()\n+ completed = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,\n+ Order.status == 'completed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n- 'placed': placed or 0\n+ 'placed': placed or 0,\n+ 'completed': completed or 0\n }\n return result\n", "issue": "Add completed orders to order-statistics endpoint\n**Is your feature request related to a problem? Please describe.**\r\nCurrently order statistics endpoint does not return completed orders.\r\n\r\n**Describe the solution you'd like**\r\nIt should return completed orders and also fix sales accordingly. 
sales should return order values calculated from completed orders only.\r\n\r\n**Additional context**\r\nNeeded in FE.\r\n\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Schema\nfrom sqlalchemy import func\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import get_count\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.order import Order, OrderTicket\n\n\nclass OrderStatisticsEventSchema(Schema):\n \"\"\"\n Api schema for general statistics of event\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'order-statistics-event'\n self_view = 'v1.order_statistics_event_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str()\n identifier = fields.Str()\n tickets = fields.Method(\"tickets_count\")\n orders = fields.Method(\"orders_count\")\n sales = fields.Method(\"sales_count\")\n\n def tickets_count(self, obj):\n obj_id = obj.id\n total = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id).scalar()\n draft = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id, Order.status == 'draft').scalar()\n cancelled = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id, Order.status == 'cancelled').scalar()\n pending = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id, Order.status == 'pending').scalar()\n expired = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n Order.event_id == obj_id, Order.status == 'placed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0\n }\n return result\n\n def orders_count(self, obj):\n obj_id = obj.id\n total = get_count(db.session.query(Order).filter(Order.event_id == obj_id))\n draft = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'draft'))\n cancelled = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'cancelled'))\n pending = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'pending'))\n expired = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'expired'))\n placed = get_count(db.session.query(Order).filter(Order.event_id == obj_id, Order.status == 'placed'))\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0\n }\n return result\n\n def sales_count(self, obj):\n obj_id = obj.id\n total = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id).scalar()\n draft = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,\n Order.status == 'draft').scalar()\n cancelled = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == 
obj_id,\n Order.status == 'cancelled').scalar()\n pending = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,\n Order.status == 'pending').scalar()\n expired = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,\n Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(Order.amount.label('sum'))).filter(Order.event_id == obj_id,\n Order.status == 'placed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0\n }\n return result\n\n\nclass OrderStatisticsEventDetail(ResourceDetail):\n \"\"\"\n Event statistics detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['identifier'], 'identifier')\n view_kwargs['id'] = event.id\n\n methods = ['GET']\n decorators = (api.has_permission('is_coorganizer', fetch=\"id\", fetch_as=\"event_id\", model=Event),)\n schema = OrderStatisticsEventSchema\n data_layer = {'session': db.session,\n 'model': Event,\n 'methods': {\n 'before_get_object': before_get_object\n }}\n", "path": "app/api/order_statistics/events.py"}]} | 2,006 | 602 |
gh_patches_debug_5684 | rasdani/github-patches | git_diff | pulp__pulpcore-4011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REST API documentation fix for the Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) actually used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
</issue>
<code>
[start of pulpcore/app/viewsets/replica.py]
1 """
2 ViewSet for replicating repositories and distributions from an upstream Pulp
3 """
4 from django.conf import settings
5 from drf_spectacular.utils import extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8
9 from pulpcore.app.models import TaskGroup, UpstreamPulp
10 from pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer
11 from pulpcore.app.viewsets import NamedModelViewSet
12 from pulpcore.app.response import TaskGroupOperationResponse
13 from pulpcore.app.tasks import replicate_distributions
14 from pulpcore.tasking.tasks import dispatch
15
16
17 class UpstreamPulpViewSet(
18 NamedModelViewSet,
19 mixins.CreateModelMixin,
20 mixins.RetrieveModelMixin,
21 mixins.ListModelMixin,
22 mixins.DestroyModelMixin,
23 mixins.UpdateModelMixin,
24 ):
25 """API for configuring an upstream Pulp to replicate. This API is provided as a tech preview."""
26
27 queryset = UpstreamPulp.objects.all()
28 endpoint_name = "upstream-pulps"
29 serializer_class = UpstreamPulpSerializer
30 ordering = "-pulp_created"
31
32 @extend_schema(
33 summary="Replicate",
34 description="Trigger an asynchronous repository replication task group. This API is "
35 "provided as a tech preview.",
36 responses={202: AsyncOperationResponseSerializer},
37 )
38 @action(detail=True, methods=["post"])
39 def replicate(self, request, pk):
40 """
41 Triggers an asynchronous repository replication operation.
42 """
43 server = UpstreamPulp.objects.get(pk=pk)
44 task_group = TaskGroup.objects.create(description=f"Replication of {server.name}")
45
46 uri = "/api/v3/servers/"
47 if settings.DOMAIN_ENABLED:
48 uri = f"/{request.domain.name}{uri}"
49
50 dispatch(
51 replicate_distributions,
52 exclusive_resources=[uri],
53 kwargs={"server_pk": pk},
54 task_group=task_group,
55 )
56
57 return TaskGroupOperationResponse(task_group, request)
58
[end of pulpcore/app/viewsets/replica.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
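The one-line fix works because drf-spectacular treats `request=None` as "this action takes no request body", so the create/update serializer fields stop leaking into the replicate docs. An illustrative stub (assumes drf-spectacular is installed; the `responses` value is simplified here):

```python
from drf_spectacular.utils import extend_schema

# request=None removes the request body from the generated schema for this action.
@extend_schema(summary="Replicate", request=None, responses={202: None})
def replicate(self, request, pk):
    ...
```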
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py\n--- a/pulpcore/app/viewsets/replica.py\n+++ b/pulpcore/app/viewsets/replica.py\n@@ -33,6 +33,7 @@\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n+ request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n", "issue": "RESTAPI document fix for Upstream Pulp Replication API\n**Version**\r\nPulp installed through the Python modules.\r\n\"core:3.28.0\"\r\n\"certguard:3.28.0\"\r\n\"file:3.28.0\"\r\n\"python:3.28.0\"\r\n\"rpm:3.28.0\"\r\n\r\n**Describe the bug**\r\nWhy the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate\" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request \"https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/\"?\r\n\r\n**To Reproduce**\r\nNone.\r\n\r\n**Expected behavior**\r\nA fix is required in the REST API document.\r\n\r\n**Additional context**\r\nCreate Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create\r\nUpstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}]} | 1,329 | 123 |
gh_patches_debug_121 | rasdani/github-patches | git_diff | rotki__rotki-4490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extract SQLCipher and pysqlcipher building to different repo
## Problem Definition
We have pinned versions of SQLCipher and pysqlcipher that we use.
SQLCipher is currently rebuilt on every build: Docker, Windows, macOS, Linux, arm64.
Since we use pinned versions, we should create a new repo that builds SQLCipher for all the supported OSes/architectures and maybe publishes the wheels/packages to PyPI.
We only need to build these dependencies when the version changes; building them every single time increases build times everywhere and complicates Windows development.
Ideally, it would also be nice to include SQLCipher in the Python package to make things easier.
### Task
- Create a separate repo to handle the building and publishing
</issue>
<code>
[start of tools/pyinstaller_hooks/hook-pysqlcipher3.py]
1 from PyInstaller.utils.hooks import copy_metadata
2
3 datas = copy_metadata("pysqlcipher3")
4
[end of tools/pyinstaller_hooks/hook-pysqlcipher3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/pyinstaller_hooks/hook-pysqlcipher3.py b/tools/pyinstaller_hooks/hook-pysqlcipher3.py
--- a/tools/pyinstaller_hooks/hook-pysqlcipher3.py
+++ b/tools/pyinstaller_hooks/hook-pysqlcipher3.py
@@ -1,3 +1,3 @@
from PyInstaller.utils.hooks import copy_metadata
-datas = copy_metadata("pysqlcipher3")
+datas = copy_metadata("rotki-pysqlcipher3")
| {"golden_diff": "diff --git a/tools/pyinstaller_hooks/hook-pysqlcipher3.py b/tools/pyinstaller_hooks/hook-pysqlcipher3.py\n--- a/tools/pyinstaller_hooks/hook-pysqlcipher3.py\n+++ b/tools/pyinstaller_hooks/hook-pysqlcipher3.py\n@@ -1,3 +1,3 @@\n from PyInstaller.utils.hooks import copy_metadata\n \n-datas = copy_metadata(\"pysqlcipher3\")\n+datas = copy_metadata(\"rotki-pysqlcipher3\")\n", "issue": "Extract SQLCipher and pysqlcipher building to different repo\n## Problem Definition\r\n\r\nWe have pinned versions of SQLCipher, and pysqlcipher that we use.\r\n\r\nThe build of SQLCipher happens on every build, docker, windows, macos, linux, arm64.\r\nSince we use pinned versions we should create a new repo that builds sqlcipher for all the supported OSes/architectures and maybe publishes the wheels/packages to PyPI\r\n\r\nWe only need to build these dependencies when there is a change in version, otherwise there is no need to build them every single time since this increases the build times everywhere and complicates the windows development part.\r\n\r\nIdeally, it would be nice to include SQLcipher in the python package to make things easier\r\n\r\n### Task\r\n- Create a separate repo to handle the building and publishing\r\n\r\n\r\n\n", "before_files": [{"content": "from PyInstaller.utils.hooks import copy_metadata\n\ndatas = copy_metadata(\"pysqlcipher3\")\n", "path": "tools/pyinstaller_hooks/hook-pysqlcipher3.py"}]} | 740 | 101 |
gh_patches_debug_36020 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-711 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyMongo v4 removed collection methods
[PyMongo v4 removed a number of collection methods](https://pymongo.readthedocs.io/en/stable/migrate-to-pymongo4.html). While the agent still functions properly, it logs failed-instrumentation warnings, which break the build.
</issue>
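One way to stop warning on removed methods, shown here as a hedged alternative rather than the project's actual fix, is to probe for each attribute before patching:

```python
from pymongo.collection import Collection

# Hypothetical guard: only patch methods the installed PyMongo still exposes,
# silently skipping names removed in v4 (count, ensure_index, insert, ...).
def patchable(names):
    return [name for name in names if hasattr(Collection, name)]

# Output depends on the installed PyMongo major version.
print(patchable(["find", "insert_one", "count", "map_reduce"]))
```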
<code>
[start of src/scout_apm/instruments/pymongo.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5
6 import wrapt
7
8 from scout_apm.core.tracked_request import TrackedRequest
9
10 try:
11 from pymongo.collection import Collection
12 except ImportError:
13 Collection = None
14
15 logger = logging.getLogger(__name__)
16
17 have_patched_collection = False
18
19
20 def ensure_installed():
21 global have_patched_collection
22
23 logger.debug("Instrumenting pymongo.")
24
25 if Collection is None:
26 logger.debug("Couldn't import pymongo.Collection - probably not installed.")
27 elif not have_patched_collection:
28 for name in COLLECTION_METHODS:
29 try:
30 setattr(
31 Collection, name, wrap_collection_method(getattr(Collection, name))
32 )
33 except Exception as exc:
34 logger.warning(
35 "Failed to instrument pymongo.Collection.%s: %r",
36 name,
37 exc,
38 exc_info=exc,
39 )
40 have_patched_collection = True
41
42
43 COLLECTION_METHODS = [
44 "aggregate",
45 "aggregate_raw_batches",
46 "bulk_write",
47 "count",
48 "count_documents",
49 "create_index",
50 "create_indexes",
51 "delete_many",
52 "delete_one",
53 "distinct",
54 "drop",
55 "drop_index",
56 "drop_indexes",
57 "ensure_index",
58 "estimated_document_count",
59 "find",
60 "find_and_modify",
61 "find_one",
62 "find_one_and_delete",
63 "find_one_and_replace",
64 "find_one_and_update",
65 "find_raw_batches",
66 "group",
67 "index_information",
68 "inline_map_reduce",
69 "insert",
70 "insert_many",
71 "insert_one",
72 "list_indexes",
73 "map_reduce",
74 "parallel_scan",
75 "reindex",
76 "remove",
77 "rename",
78 "replace_one",
79 "save",
80 "update",
81 "update_many",
82 "update_one",
83 ]
84
85
86 @wrapt.decorator
87 def wrap_collection_method(wrapped, instance, args, kwargs):
88 tracked_request = TrackedRequest.instance()
89 camel_name = "".join(c.title() for c in wrapped.__name__.split("_"))
90 operation = "MongoDB/{}.{}".format(instance.name, camel_name)
91 with tracked_request.span(operation=operation, ignore_children=True) as span:
92 span.tag("name", instance.name)
93 return wrapped(*args, **kwargs)
94
[end of src/scout_apm/instruments/pymongo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/instruments/pymongo.py b/src/scout_apm/instruments/pymongo.py
--- a/src/scout_apm/instruments/pymongo.py
+++ b/src/scout_apm/instruments/pymongo.py
@@ -8,8 +8,10 @@
from scout_apm.core.tracked_request import TrackedRequest
try:
+ import pymongo
from pymongo.collection import Collection
except ImportError:
+ pymongo = None
Collection = None
logger = logging.getLogger(__name__)
@@ -25,7 +27,10 @@
if Collection is None:
logger.debug("Couldn't import pymongo.Collection - probably not installed.")
elif not have_patched_collection:
- for name in COLLECTION_METHODS:
+ methods = COLLECTION_METHODS
+ if pymongo.version_tuple < (4, 0):
+ methods = COLLECTION_METHODS_V3
+ for name in methods:
try:
setattr(
Collection, name, wrap_collection_method(getattr(Collection, name))
@@ -44,7 +49,6 @@
"aggregate",
"aggregate_raw_batches",
"bulk_write",
- "count",
"count_documents",
"create_index",
"create_indexes",
@@ -54,32 +58,36 @@
"drop",
"drop_index",
"drop_indexes",
- "ensure_index",
"estimated_document_count",
"find",
- "find_and_modify",
"find_one",
"find_one_and_delete",
"find_one_and_replace",
"find_one_and_update",
"find_raw_batches",
- "group",
"index_information",
- "inline_map_reduce",
- "insert",
"insert_many",
"insert_one",
"list_indexes",
+ "rename",
+ "replace_one",
+ "update_many",
+ "update_one",
+]
+
+COLLECTION_METHODS_V3 = COLLECTION_METHODS + [
+ "count",
+ "ensure_index",
+ "find_and_modify",
+ "group",
+ "inline_map_reduce",
+ "insert",
"map_reduce",
"parallel_scan",
"reindex",
"remove",
- "rename",
- "replace_one",
"save",
"update",
- "update_many",
- "update_one",
]
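Design note on the diff above: names shared by PyMongo 3 and 4 stay in `COLLECTION_METHODS`, and the 3.x-only names are appended to form `COLLECTION_METHODS_V3`, so the common subset has a single source of truth. In miniature (abridged lists for illustration):

```python
COLLECTION_METHODS = ["aggregate", "find", "insert_one"]          # shared subset
COLLECTION_METHODS_V3 = COLLECTION_METHODS + ["count", "insert"]  # 3.x extras
print(COLLECTION_METHODS_V3)
```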
| {"golden_diff": "diff --git a/src/scout_apm/instruments/pymongo.py b/src/scout_apm/instruments/pymongo.py\n--- a/src/scout_apm/instruments/pymongo.py\n+++ b/src/scout_apm/instruments/pymongo.py\n@@ -8,8 +8,10 @@\n from scout_apm.core.tracked_request import TrackedRequest\n \n try:\n+ import pymongo\n from pymongo.collection import Collection\n except ImportError:\n+ pymongo = None\n Collection = None\n \n logger = logging.getLogger(__name__)\n@@ -25,7 +27,10 @@\n if Collection is None:\n logger.debug(\"Couldn't import pymongo.Collection - probably not installed.\")\n elif not have_patched_collection:\n- for name in COLLECTION_METHODS:\n+ methods = COLLECTION_METHODS\n+ if pymongo.version_tuple < (4, 0):\n+ methods = COLLECTION_METHODS_V3\n+ for name in methods:\n try:\n setattr(\n Collection, name, wrap_collection_method(getattr(Collection, name))\n@@ -44,7 +49,6 @@\n \"aggregate\",\n \"aggregate_raw_batches\",\n \"bulk_write\",\n- \"count\",\n \"count_documents\",\n \"create_index\",\n \"create_indexes\",\n@@ -54,32 +58,36 @@\n \"drop\",\n \"drop_index\",\n \"drop_indexes\",\n- \"ensure_index\",\n \"estimated_document_count\",\n \"find\",\n- \"find_and_modify\",\n \"find_one\",\n \"find_one_and_delete\",\n \"find_one_and_replace\",\n \"find_one_and_update\",\n \"find_raw_batches\",\n- \"group\",\n \"index_information\",\n- \"inline_map_reduce\",\n- \"insert\",\n \"insert_many\",\n \"insert_one\",\n \"list_indexes\",\n+ \"rename\",\n+ \"replace_one\",\n+ \"update_many\",\n+ \"update_one\",\n+]\n+\n+COLLECTION_METHODS_V3 = COLLECTION_METHODS + [\n+ \"count\",\n+ \"ensure_index\",\n+ \"find_and_modify\",\n+ \"group\",\n+ \"inline_map_reduce\",\n+ \"insert\",\n \"map_reduce\",\n \"parallel_scan\",\n \"reindex\",\n \"remove\",\n- \"rename\",\n- \"replace_one\",\n \"save\",\n \"update\",\n- \"update_many\",\n- \"update_one\",\n ]\n", "issue": "PyMongo v4 removed collection methods\n[PyMongo v4 removed a number of collection methods](https://pymongo.readthedocs.io/en/stable/migrate-to-pymongo4.html). While the agent still functions properly, it's logging failed instrumentation warnings and is breaking the build. 
\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from pymongo.collection import Collection\nexcept ImportError:\n Collection = None\n\nlogger = logging.getLogger(__name__)\n\nhave_patched_collection = False\n\n\ndef ensure_installed():\n global have_patched_collection\n\n logger.debug(\"Instrumenting pymongo.\")\n\n if Collection is None:\n logger.debug(\"Couldn't import pymongo.Collection - probably not installed.\")\n elif not have_patched_collection:\n for name in COLLECTION_METHODS:\n try:\n setattr(\n Collection, name, wrap_collection_method(getattr(Collection, name))\n )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument pymongo.Collection.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n have_patched_collection = True\n\n\nCOLLECTION_METHODS = [\n \"aggregate\",\n \"aggregate_raw_batches\",\n \"bulk_write\",\n \"count\",\n \"count_documents\",\n \"create_index\",\n \"create_indexes\",\n \"delete_many\",\n \"delete_one\",\n \"distinct\",\n \"drop\",\n \"drop_index\",\n \"drop_indexes\",\n \"ensure_index\",\n \"estimated_document_count\",\n \"find\",\n \"find_and_modify\",\n \"find_one\",\n \"find_one_and_delete\",\n \"find_one_and_replace\",\n \"find_one_and_update\",\n \"find_raw_batches\",\n \"group\",\n \"index_information\",\n \"inline_map_reduce\",\n \"insert\",\n \"insert_many\",\n \"insert_one\",\n \"list_indexes\",\n \"map_reduce\",\n \"parallel_scan\",\n \"reindex\",\n \"remove\",\n \"rename\",\n \"replace_one\",\n \"save\",\n \"update\",\n \"update_many\",\n \"update_one\",\n]\n\n\[email protected]\ndef wrap_collection_method(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"MongoDB/{}.{}\".format(instance.name, camel_name)\n with tracked_request.span(operation=operation, ignore_children=True) as span:\n span.tag(\"name\", instance.name)\n return wrapped(*args, **kwargs)\n", "path": "src/scout_apm/instruments/pymongo.py"}]} | 1,295 | 516 |
gh_patches_debug_16145 | rasdani/github-patches | git_diff | dmlc__dgl-4219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Example][Bug] Running error on the example case: example/pytorch/dimenet
## 🐛 Bug
The DimeNet example crashes.
## To Reproduce
`python main.py --model-cnf config/dimenet.yaml`
```
Traceback (most recent call last):
File "main.py", line 254, in <module>
main()
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "main.py", line 165, in main
model = DimeNet(emb_size=model_params['emb_size'],
File "/workspace/examples/dimenet/modules/dimenet.py", line 64, in __init__
self.rbf_layer = BesselBasisLayer(num_radial=num_radial,
File "/workspace/examples/dimenet/modules/bessel_basis_layer.py", line 17, in __init__
self.reset_params()
File "/workspace/examples/dimenet/modules/bessel_basis_layer.py", line 20, in reset_params
torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)
RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.
```
## Expected behavior
The example should run to completion.
## Environment
- DGL Version (e.g., 1.0): 0.9
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): 1.12
- OS (e.g., Linux): ubuntu
- How you installed DGL (`conda`, `pip`, source): source
- Build command you used (if compiling from source):
- Python version: 3.8
- CUDA/cuDNN version (if applicable): 11.7
- GPU models and configuration (e.g. V100): A100
- Any other relevant information:
</issue>
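The error is easy to reproduce in isolation: autograd rejects in-place writes to a leaf tensor that requires grad, and wrapping the initialization in `torch.no_grad()` (which is what the eventual fix below does) sidesteps it. A minimal sketch:

```python
import numpy as np
import torch
import torch.nn as nn

freq = nn.Parameter(torch.Tensor(6))
try:
    torch.arange(1, freq.numel() + 1, out=freq).mul_(np.pi)
except RuntimeError as exc:
    print(exc)  # a leaf Variable that requires grad is being used in an in-place operation

with torch.no_grad():  # initialize without recording autograd history
    torch.arange(1, freq.numel() + 1, out=freq).mul_(np.pi)
freq.requires_grad_()  # re-assert grad tracking, as the repo's fix does
print(freq)
```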
<code>
[start of examples/pytorch/dimenet/modules/bessel_basis_layer.py]
1 import numpy as np
2 import torch
3 import torch.nn as nn
4
5 from modules.envelope import Envelope
6
7 class BesselBasisLayer(nn.Module):
8 def __init__(self,
9 num_radial,
10 cutoff,
11 envelope_exponent=5):
12 super(BesselBasisLayer, self).__init__()
13
14 self.cutoff = cutoff
15 self.envelope = Envelope(envelope_exponent)
16 self.frequencies = nn.Parameter(torch.Tensor(num_radial))
17 self.reset_params()
18
19 def reset_params(self):
20 torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)
21
22 def forward(self, g):
23 d_scaled = g.edata['d'] / self.cutoff
24 # Necessary for proper broadcasting behaviour
25 d_scaled = torch.unsqueeze(d_scaled, -1)
26 d_cutoff = self.envelope(d_scaled)
27 g.edata['rbf'] = d_cutoff * torch.sin(self.frequencies * d_scaled)
28 return g
[end of examples/pytorch/dimenet/modules/bessel_basis_layer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/pytorch/dimenet/modules/bessel_basis_layer.py b/examples/pytorch/dimenet/modules/bessel_basis_layer.py
--- a/examples/pytorch/dimenet/modules/bessel_basis_layer.py
+++ b/examples/pytorch/dimenet/modules/bessel_basis_layer.py
@@ -17,7 +17,9 @@
self.reset_params()
def reset_params(self):
- torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)
+ with torch.no_grad():
+ torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)
+ self.frequencies.requires_grad_()
def forward(self, g):
d_scaled = g.edata['d'] / self.cutoff
@@ -25,4 +27,4 @@
d_scaled = torch.unsqueeze(d_scaled, -1)
d_cutoff = self.envelope(d_scaled)
g.edata['rbf'] = d_cutoff * torch.sin(self.frequencies * d_scaled)
- return g
\ No newline at end of file
+ return g
| {"golden_diff": "diff --git a/examples/pytorch/dimenet/modules/bessel_basis_layer.py b/examples/pytorch/dimenet/modules/bessel_basis_layer.py\n--- a/examples/pytorch/dimenet/modules/bessel_basis_layer.py\n+++ b/examples/pytorch/dimenet/modules/bessel_basis_layer.py\n@@ -17,7 +17,9 @@\n self.reset_params()\n \n def reset_params(self):\n- torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)\n+ with torch.no_grad():\n+ torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)\n+ self.frequencies.requires_grad_()\n \n def forward(self, g):\n d_scaled = g.edata['d'] / self.cutoff\n@@ -25,4 +27,4 @@\n d_scaled = torch.unsqueeze(d_scaled, -1)\n d_cutoff = self.envelope(d_scaled)\n g.edata['rbf'] = d_cutoff * torch.sin(self.frequencies * d_scaled)\n- return g\n\\ No newline at end of file\n+ return g\n", "issue": "[Example][Bug] Running error on the example case: example/pytorch/dimenet\n## \ud83d\udc1b Bug\r\n\r\nDimenet example is crashed. \r\n\r\n## To Reproduce\r\n\r\n`python main.py --model-cnf config/dimenet.yaml`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"main.py\", line 254, in <module>\r\n main()\r\n File \"/opt/conda/lib/python3.8/site-packages/click/core.py\", line 1128, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/opt/conda/lib/python3.8/site-packages/click/core.py\", line 1053, in main\r\n rv = self.invoke(ctx)\r\n File \"/opt/conda/lib/python3.8/site-packages/click/core.py\", line 1395, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/opt/conda/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"main.py\", line 165, in main\r\n model = DimeNet(emb_size=model_params['emb_size'],\r\n File \"/workspace/examples/dimenet/modules/dimenet.py\", line 64, in __init__\r\n self.rbf_layer = BesselBasisLayer(num_radial=num_radial,\r\n File \"/workspace/examples/dimenet/modules/bessel_basis_layer.py\", line 17, in __init__\r\n self.reset_params()\r\n File \"/workspace/examples/dimenet/modules/bessel_basis_layer.py\", line 20, in reset_params\r\n torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)\r\nRuntimeError: a leaf Variable that requires grad is being used in an in-place operation.\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe case should run through\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.9\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): 1.12\r\n - OS (e.g., Linux): ubuntu\r\n - How you installed DGL (`conda`, `pip`, source): source\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.8\r\n - CUDA/cuDNN version (if applicable): 11.7\r\n - GPU models and configuration (e.g. 
V100): A100\r\n - Any other relevant information:\r\n\r\n\n", "before_files": [{"content": "import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom modules.envelope import Envelope\n\nclass BesselBasisLayer(nn.Module):\n def __init__(self,\n num_radial,\n cutoff,\n envelope_exponent=5):\n super(BesselBasisLayer, self).__init__()\n \n self.cutoff = cutoff\n self.envelope = Envelope(envelope_exponent)\n self.frequencies = nn.Parameter(torch.Tensor(num_radial))\n self.reset_params()\n\n def reset_params(self):\n torch.arange(1, self.frequencies.numel() + 1, out=self.frequencies).mul_(np.pi)\n\n def forward(self, g):\n d_scaled = g.edata['d'] / self.cutoff\n # Necessary for proper broadcasting behaviour\n d_scaled = torch.unsqueeze(d_scaled, -1)\n d_cutoff = self.envelope(d_scaled)\n g.edata['rbf'] = d_cutoff * torch.sin(self.frequencies * d_scaled)\n return g", "path": "examples/pytorch/dimenet/modules/bessel_basis_layer.py"}]} | 1,368 | 250 |
gh_patches_debug_22613 | rasdani/github-patches | git_diff | facebookresearch__fairscale-237 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ShardedGradScaler] Handle optimizers not being OSS instances
## 🚀 Feature
Some frameworks (Classy, for instance) modify the optimizers to enable features that are not PyTorch-compliant. ShardedGradScaler currently asserts when it encounters such an optimizer.
## Motivation
Enable ShardedDDP + AMP on Classy-like frameworks
## Pitch
Remove the assert and replace it with a one-time warning.
## Alternatives
Not doing anything
## Additional context
@mannatsingh
</issue>
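The "warn once" behaviour the pitch asks for can be a simple boolean latch on the instance. A rough sketch, independent of the actual fairscale code (the expected-type check is illustrative):

```python
import logging

class WarnOnceScaler:
    """Hypothetical stand-in for the assert -> one-time-warning swap."""

    def __init__(self, expected_type=object):
        self._expected_type = expected_type
        self._warned = False

    def unscale_(self, optimizer):
        if not self._warned and not isinstance(optimizer, self._expected_type):
            logging.warning("expected a sharded optimizer; proceeding anyway")
            self._warned = True  # later calls stay silent
        # ... the normal unscale logic would follow here ...

scaler = WarnOnceScaler(expected_type=int)
scaler.unscale_("not-an-int")  # warns once
scaler.unscale_("not-an-int")  # silent
```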
<code>
[start of fairscale/optim/grad_scaler.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from typing import Dict
7
8 import torch
9 from torch.cuda.amp import GradScaler as TorchGradScaler
10 import torch.distributed as dist
11 from torch.optim import Optimizer
12
13 from .oss import OSS
14
15
16 class GradScaler(TorchGradScaler):
17 def _unscale_grads_(
18 self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool
19 ) -> Dict[torch.device, torch.Tensor]:
20 return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)
21
22
23 class ShardedGradScaler(TorchGradScaler):
24 """
25 A shard-aware :class:`GradScaler<torch.cuda.amp.GradScaler>`, to be used in conjunction with
26 :class:`OSS` and :class:`ShardedOptimizer`.
27
28 Interface and usecases are not changed, more explanations can be found in the corresponding pytorch
29 documentation https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
30 """
31
32 def __init__(self) -> None:
33 super().__init__()
34
35 def unscale_(self, optimizer: Optimizer) -> None:
36 assert isinstance(optimizer, OSS), "ShardedGradScaler is to be used in combination with a sharded optimizer"
37
38 # Call the upstream unscale_ method which will only act on this rank's gradients
39 super().unscale_(optimizer)
40
41 # Synchronize the detected inf across the ranks
42 optimizer_state = self._per_optimizer_states[id(optimizer)]
43 handles = [dist.all_reduce(v, async_op=True) for v in optimizer_state["found_inf_per_device"].values()]
44
45 # Make sure that the calls are done before moving out
46 _ = list(map(lambda x: x.wait(), handles))
47
[end of fairscale/optim/grad_scaler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fairscale/optim/grad_scaler.py b/fairscale/optim/grad_scaler.py
--- a/fairscale/optim/grad_scaler.py
+++ b/fairscale/optim/grad_scaler.py
@@ -3,6 +3,7 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+import logging
from typing import Dict
import torch
@@ -31,9 +32,16 @@
def __init__(self) -> None:
super().__init__()
+ self.display_warning = True
def unscale_(self, optimizer: Optimizer) -> None:
- assert isinstance(optimizer, OSS), "ShardedGradScaler is to be used in combination with a sharded optimizer"
+ # Could be a mistake, this scaler is supposed to work with ZeroRedundancyOptimizer only
+ if self.display_warning and not isinstance(optimizer, OSS):
+ logging.warning(
+ "ShardedGradScaler is to be used in combination with a sharded optimizer, this could not be checked"
+ )
+
+ self.display_warning = False # Only warn once
# Call the upstream unscale_ method which will only act on this rank's gradients
super().unscale_(optimizer)
| {"golden_diff": "diff --git a/fairscale/optim/grad_scaler.py b/fairscale/optim/grad_scaler.py\n--- a/fairscale/optim/grad_scaler.py\n+++ b/fairscale/optim/grad_scaler.py\n@@ -3,6 +3,7 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n \n+import logging\n from typing import Dict\n \n import torch\n@@ -31,9 +32,16 @@\n \n def __init__(self) -> None:\n super().__init__()\n+ self.display_warning = True\n \n def unscale_(self, optimizer: Optimizer) -> None:\n- assert isinstance(optimizer, OSS), \"ShardedGradScaler is to be used in combination with a sharded optimizer\"\n+ # Could be a mistake, this scaler is supposed to work with ZeroRedundancyOptimizer only\n+ if self.display_warning and not isinstance(optimizer, OSS):\n+ logging.warning(\n+ \"ShardedGradScaler is to be used in combination with a sharded optimizer, this could not be checked\"\n+ )\n+\n+ self.display_warning = False # Only warn once\n \n # Call the upstream unscale_ method which will only act on this rank's gradients\n super().unscale_(optimizer)\n", "issue": "[ShardedGradScaler] Handle optimizers not being OSS instances\n## \ud83d\ude80 Feature\r\nSome frameworks (Classy for instance) change the optimizers to enable different non-pytorch-compliant features. ShardedGradScaler asserts on that\r\n\r\n## Motivation\r\nEnable ShardedDDP + AMP on Classy-like frameworks\r\n\r\n## Pitch\r\nRemove the assert, replace by a one time warning\r\n\r\n## Alternatives\r\nNot doing anything\r\n\r\n## Additional context\r\n@mannatsingh \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Dict\n\nimport torch\nfrom torch.cuda.amp import GradScaler as TorchGradScaler\nimport torch.distributed as dist\nfrom torch.optim import Optimizer\n\nfrom .oss import OSS\n\n\nclass GradScaler(TorchGradScaler):\n def _unscale_grads_(\n self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool\n ) -> Dict[torch.device, torch.Tensor]:\n return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)\n\n\nclass ShardedGradScaler(TorchGradScaler):\n \"\"\"\n A shard-aware :class:`GradScaler<torch.cuda.amp.GradScaler>`, to be used in conjunction with\n :class:`OSS` and :class:`ShardedOptimizer`.\n\n Interface and usecases are not changed, more explanations can be found in the corresponding pytorch\n documentation https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def unscale_(self, optimizer: Optimizer) -> None:\n assert isinstance(optimizer, OSS), \"ShardedGradScaler is to be used in combination with a sharded optimizer\"\n\n # Call the upstream unscale_ method which will only act on this rank's gradients\n super().unscale_(optimizer)\n\n # Synchronize the detected inf across the ranks\n optimizer_state = self._per_optimizer_states[id(optimizer)]\n handles = [dist.all_reduce(v, async_op=True) for v in optimizer_state[\"found_inf_per_device\"].values()]\n\n # Make sure that the calls are done before moving out\n _ = list(map(lambda x: x.wait(), handles))\n", "path": "fairscale/optim/grad_scaler.py"}]} | 1,152 | 288 |
gh_patches_debug_29280 | rasdani/github-patches | git_diff | lightly-ai__lightly-993 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NNMemoryBank not working with DataParallel
I have been using the NNMemoryBank as a component in my module and noticed that at each forward pass `NNMemoryBank.bank` is equal to `None`. This only occurs when my module is wrapped in `DataParallel`. As a result, throughout training my NN pairs are always random noise (surprisingly, this only hurt the contrastive learning performance by a few percentage points on linear probing??).
Here is a simple test case that highlights the issue:
```
import torch
from lightly.models.modules import NNMemoryBankModule
memory_bank = NNMemoryBankModule(size=1000)
print(memory_bank.bank)
memory_bank(torch.randn((100, 512)))
print(memory_bank.bank)
memory_bank = NNMemoryBankModule(size=1000)
memory_bank = torch.nn.DataParallel(memory_bank, device_ids=[0,1])
print(memory_bank.module.bank)
memory_bank(torch.randn((100, 512)))
print(memory_bank.module.bank)
```
The output of the first snippet is `None` and then a `torch.Tensor`, as expected. The output of the second is `None` both times.
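As a hedged note on the mechanism: `DataParallel` only broadcasts registered parameters and buffers to its replicas, so state kept in plain attributes that are (re)assigned lazily — like `self.bank` — is invisible to them. A minimal sketch of a buffer-backed alternative (illustrative only, not the library's current implementation):

```
import torch


class BufferBackedBank(torch.nn.Module):
    """Sketch: keep the memory bank state in registered buffers."""

    def __init__(self, size: int = 2 ** 16):
        super().__init__()
        self.size = size
        # Buffers, unlike plain attributes, are handled by DataParallel;
        # persistent=False keeps them out of checkpoints.
        self.register_buffer("bank", torch.empty(0), persistent=False)
        self.register_buffer("bank_ptr", torch.empty(0, dtype=torch.long), persistent=False)

    def forward(self, output: torch.Tensor) -> torch.Tensor:
        # Lazy init now checks for an empty buffer instead of `is None`.
        if self.bank.nelement() == 0:
            self.bank = torch.randn(output.shape[1], self.size).type_as(self.bank)
        return output
```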
</issue>
<code>
[start of lightly/loss/memory_bank.py]
1 """ Memory Bank Wrapper """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import torch
7 import functools
8
9 class MemoryBankModule(torch.nn.Module):
10 """Memory bank implementation
11
12 This is a parent class to all loss functions implemented by the lightly
13 Python package. This way, any loss can be used with a memory bank if
14 desired.
15
16 Attributes:
17 size:
18 Number of keys the memory bank can store. If set to 0,
19 memory bank is not used.
20
21 Examples:
22 >>> class MyLossFunction(MemoryBankModule):
23 >>>
24 >>> def __init__(self, memory_bank_size: int = 2 ** 16):
25 >>> super(MyLossFunction, self).__init__(memory_bank_size)
26 >>>
27 >>> def forward(self, output: torch.Tensor,
28 >>> labels: torch.Tensor = None):
29 >>>
30 >>> output, negatives = super(
31 >>> MyLossFunction, self).forward(output)
32 >>>
33 >>> if negatives is not None:
34 >>> # evaluate loss with negative samples
35 >>> else:
36 >>> # evaluate loss without negative samples
37
38 """
39
40 def __init__(self, size: int = 2 ** 16):
41
42 super(MemoryBankModule, self).__init__()
43
44 if size < 0:
45 msg = f'Illegal memory bank size {size}, must be non-negative.'
46 raise ValueError(msg)
47
48 self.size = size
49
50 self.bank = None
51 self.bank_ptr = None
52
53 @torch.no_grad()
54 def _init_memory_bank(self, dim: int):
55 """Initialize the memory bank if it's empty
56
57 Args:
58 dim:
59 The dimension of the which are stored in the bank.
60
61 """
62 # create memory bank
63 # we could use register buffers like in the moco repo
64 # https://github.com/facebookresearch/moco but we don't
65 # want to pollute our checkpoints
66 self.bank = torch.randn(dim, self.size)
67 self.bank = torch.nn.functional.normalize(self.bank, dim=0)
68 self.bank_ptr = torch.LongTensor([0])
69
70 @torch.no_grad()
71 def _dequeue_and_enqueue(self, batch: torch.Tensor):
72 """Dequeue the oldest batch and add the latest one
73
74 Args:
75 batch:
76 The latest batch of keys to add to the memory bank.
77
78 """
79 batch_size = batch.shape[0]
80 ptr = int(self.bank_ptr)
81
82 if ptr + batch_size >= self.size:
83 self.bank[:, ptr:] = batch[:self.size - ptr].T.detach()
84 self.bank_ptr[0] = 0
85 else:
86 self.bank[:, ptr:ptr + batch_size] = batch.T.detach()
87 self.bank_ptr[0] = ptr + batch_size
88
89 def forward(self,
90 output: torch.Tensor,
91 labels: torch.Tensor = None,
92 update: bool = False):
93 """Query memory bank for additional negative samples
94
95 Args:
96 output:
97 The output of the model.
98 labels:
99 Should always be None, will be ignored.
100
101 Returns:
102 The output if the memory bank is of size 0, otherwise the output
103 and the entries from the memory bank.
104
105 """
106
107 # no memory bank, return the output
108 if self.size == 0:
109 return output, None
110
111 _, dim = output.shape
112
113 # initialize the memory bank if it is not already done
114 if self.bank is None:
115 self._init_memory_bank(dim)
116
117 # query and update memory bank
118 bank = self.bank.clone().detach()
119
120 # only update memory bank if we later do backward pass (gradient)
121 if update:
122 self._dequeue_and_enqueue(output)
123
124 return output, bank
125
[end of lightly/loss/memory_bank.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/loss/memory_bank.py b/lightly/loss/memory_bank.py
--- a/lightly/loss/memory_bank.py
+++ b/lightly/loss/memory_bank.py
@@ -46,10 +46,9 @@
raise ValueError(msg)
self.size = size
+ self.register_buffer("bank", tensor=torch.empty(0, dtype=torch.float), persistent=False)
+ self.register_buffer("bank_ptr", tensor=torch.empty(0, dtype=torch.long), persistent=False)
- self.bank = None
- self.bank_ptr = None
-
@torch.no_grad()
def _init_memory_bank(self, dim: int):
"""Initialize the memory bank if it's empty
@@ -63,9 +62,9 @@
# we could use register buffers like in the moco repo
# https://github.com/facebookresearch/moco but we don't
# want to pollute our checkpoints
- self.bank = torch.randn(dim, self.size)
- self.bank = torch.nn.functional.normalize(self.bank, dim=0)
- self.bank_ptr = torch.LongTensor([0])
+ self.bank = torch.randn(dim, self.size).type_as(self.bank)
+ torch.nn.functional.normalize(self.bank, dim=0)
+ self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)
@torch.no_grad()
def _dequeue_and_enqueue(self, batch: torch.Tensor):
@@ -111,7 +110,7 @@
_, dim = output.shape
# initialize the memory bank if it is not already done
- if self.bank is None:
+ if self.bank.nelement() == 0:
self._init_memory_bank(dim)
# query and update memory bank
| {"golden_diff": "diff --git a/lightly/loss/memory_bank.py b/lightly/loss/memory_bank.py\n--- a/lightly/loss/memory_bank.py\n+++ b/lightly/loss/memory_bank.py\n@@ -46,10 +46,9 @@\n raise ValueError(msg)\n \n self.size = size\n+ self.register_buffer(\"bank\", tensor=torch.empty(0, dtype=torch.float), persistent=False)\n+ self.register_buffer(\"bank_ptr\", tensor=torch.empty(0, dtype=torch.long), persistent=False)\n \n- self.bank = None\n- self.bank_ptr = None\n- \n @torch.no_grad()\n def _init_memory_bank(self, dim: int):\n \"\"\"Initialize the memory bank if it's empty\n@@ -63,9 +62,9 @@\n # we could use register buffers like in the moco repo\n # https://github.com/facebookresearch/moco but we don't\n # want to pollute our checkpoints\n- self.bank = torch.randn(dim, self.size)\n- self.bank = torch.nn.functional.normalize(self.bank, dim=0)\n- self.bank_ptr = torch.LongTensor([0])\n+ self.bank = torch.randn(dim, self.size).type_as(self.bank)\n+ torch.nn.functional.normalize(self.bank, dim=0)\n+ self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)\n \n @torch.no_grad()\n def _dequeue_and_enqueue(self, batch: torch.Tensor):\n@@ -111,7 +110,7 @@\n _, dim = output.shape\n \n # initialize the memory bank if it is not already done\n- if self.bank is None:\n+ if self.bank.nelement() == 0:\n self._init_memory_bank(dim)\n \n # query and update memory bank\n", "issue": "NNMemoryBank not working with DataParallel\nI have been using the NNMemoryBank as a component in my module and noticed that at each forward pass `NNMemoryBank.bank` is equal to `None`. This only occurs when my module is wrapped in `DataParallel`. As a result, throughout training my NN pairs are always random noise (surprisingly, this only hurt the contrastive learning performance by a few percentage point on linear probing??).\r\n\r\nHere is a simple test case that highlights the issue:\r\n```\r\nimport torch\r\nfrom lightly.models.modules import NNMemoryBankModule\r\nmemory_bank = NNMemoryBankModule(size=1000)\r\nprint(memory_bank.bank)\r\nmemory_bank(torch.randn((100, 512)))\r\nprint(memory_bank.bank)\r\n\r\nmemory_bank = NNMemoryBankModule(size=1000)\r\nmemory_bank = torch.nn.DataParallel(memory_bank, device_ids=[0,1])\r\nprint(memory_bank.module.bank)\r\nmemory_bank(torch.randn((100, 512)))\r\nprint(memory_bank.module.bank)\r\n```\r\n\r\nThe output of the first is `None` and a `torch.Tensor`, as expected. The output for the second is `None` for both.\n", "before_files": [{"content": "\"\"\" Memory Bank Wrapper \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nimport functools\n\nclass MemoryBankModule(torch.nn.Module):\n \"\"\"Memory bank implementation\n\n This is a parent class to all loss functions implemented by the lightly\n Python package. This way, any loss can be used with a memory bank if \n desired.\n\n Attributes:\n size:\n Number of keys the memory bank can store. 
If set to 0,\n memory bank is not used.\n\n Examples:\n >>> class MyLossFunction(MemoryBankModule):\n >>>\n >>> def __init__(self, memory_bank_size: int = 2 ** 16):\n >>> super(MyLossFunction, self).__init__(memory_bank_size)\n >>>\n >>> def forward(self, output: torch.Tensor,\n >>> labels: torch.Tensor = None):\n >>>\n >>> output, negatives = super(\n >>> MyLossFunction, self).forward(output)\n >>>\n >>> if negatives is not None:\n >>> # evaluate loss with negative samples\n >>> else:\n >>> # evaluate loss without negative samples\n\n \"\"\"\n\n def __init__(self, size: int = 2 ** 16):\n\n super(MemoryBankModule, self).__init__()\n\n if size < 0:\n msg = f'Illegal memory bank size {size}, must be non-negative.'\n raise ValueError(msg)\n\n self.size = size\n\n self.bank = None\n self.bank_ptr = None\n \n @torch.no_grad()\n def _init_memory_bank(self, dim: int):\n \"\"\"Initialize the memory bank if it's empty\n\n Args:\n dim:\n The dimension of the which are stored in the bank.\n\n \"\"\"\n # create memory bank\n # we could use register buffers like in the moco repo\n # https://github.com/facebookresearch/moco but we don't\n # want to pollute our checkpoints\n self.bank = torch.randn(dim, self.size)\n self.bank = torch.nn.functional.normalize(self.bank, dim=0)\n self.bank_ptr = torch.LongTensor([0])\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, batch: torch.Tensor):\n \"\"\"Dequeue the oldest batch and add the latest one\n\n Args:\n batch:\n The latest batch of keys to add to the memory bank.\n\n \"\"\"\n batch_size = batch.shape[0]\n ptr = int(self.bank_ptr)\n\n if ptr + batch_size >= self.size:\n self.bank[:, ptr:] = batch[:self.size - ptr].T.detach()\n self.bank_ptr[0] = 0\n else:\n self.bank[:, ptr:ptr + batch_size] = batch.T.detach()\n self.bank_ptr[0] = ptr + batch_size\n\n def forward(self,\n output: torch.Tensor,\n labels: torch.Tensor = None,\n update: bool = False):\n \"\"\"Query memory bank for additional negative samples\n\n Args:\n output:\n The output of the model.\n labels:\n Should always be None, will be ignored.\n\n Returns:\n The output if the memory bank is of size 0, otherwise the output\n and the entries from the memory bank.\n\n \"\"\"\n\n # no memory bank, return the output\n if self.size == 0:\n return output, None\n\n _, dim = output.shape\n\n # initialize the memory bank if it is not already done\n if self.bank is None:\n self._init_memory_bank(dim)\n\n # query and update memory bank\n bank = self.bank.clone().detach()\n\n # only update memory bank if we later do backward pass (gradient)\n if update:\n self._dequeue_and_enqueue(output)\n\n return output, bank\n", "path": "lightly/loss/memory_bank.py"}]} | 1,885 | 385 |
gh_patches_debug_9243 | rasdani/github-patches | git_diff | getnikola__nikola-971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drafts are leaked in feeds
Reported by @kayhayen on the mailing list. The proposed patch breaks tests, so this needs checking out a bit more carefully.
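As a hedged guess at the mechanism: the RSS task filters posts on `use_in_feeds`, but doit's `uptodate`/`config_changed` check never sees that status, so a post moving into or out of draft state does not invalidate an already-built feed. One possible fix is to fold the status into the task's config, e.g.:

```
# Sketch only: make config_changed notice draft-status flips (names follow rss.py).
kw['use_in_feeds_status'] = ''.join(
    'T' if post.use_in_feeds else 'F' for post in self.site.timeline
)
```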
</issue>
<code>
[start of nikola/plugins/task/rss.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import unicode_literals, print_function
28 import os
29 try:
30 from urlparse import urljoin
31 except ImportError:
32 from urllib.parse import urljoin # NOQA
33
34 from nikola import utils
35 from nikola.plugin_categories import Task
36
37
38 class GenerateRSS(Task):
39 """Generate RSS feeds."""
40
41 name = "generate_rss"
42
43 def set_site(self, site):
44 site.register_path_handler('rss', self.rss_path)
45 return super(GenerateRSS, self).set_site(site)
46
47 def gen_tasks(self):
48 """Generate RSS feeds."""
49 kw = {
50 "translations": self.site.config["TRANSLATIONS"],
51 "filters": self.site.config["FILTERS"],
52 "blog_title": self.site.config["BLOG_TITLE"],
53 "site_url": self.site.config["SITE_URL"],
54 "blog_description": self.site.config["BLOG_DESCRIPTION"],
55 "output_folder": self.site.config["OUTPUT_FOLDER"],
56 "rss_teasers": self.site.config["RSS_TEASERS"],
57 "hide_untranslated_posts": self.site.config['HIDE_UNTRANSLATED_POSTS'],
58 "feed_length": self.site.config['FEED_LENGTH'],
59 }
60 self.site.scan_posts()
61 yield self.group_task()
62 for lang in kw["translations"]:
63 output_name = os.path.join(kw['output_folder'],
64 self.site.path("rss", None, lang))
65 deps = []
66 if kw["hide_untranslated_posts"]:
67 posts = [x for x in self.site.timeline if x.use_in_feeds
68 and x.is_translation_available(lang)][:10]
69 else:
70 posts = [x for x in self.site.timeline if x.use_in_feeds][:10]
71 for post in posts:
72 deps += post.deps(lang)
73
74 feed_url = urljoin(self.site.config['BASE_URL'], self.site.link("rss", None, lang).lstrip('/'))
75 yield {
76 'basename': 'generate_rss',
77 'name': os.path.normpath(output_name),
78 'file_dep': deps,
79 'targets': [output_name],
80 'actions': [(utils.generic_rss_renderer,
81 (lang, kw["blog_title"], kw["site_url"],
82 kw["blog_description"], posts, output_name,
83 kw["rss_teasers"], kw['feed_length'], feed_url))],
84 'task_dep': ['render_posts'],
85 'clean': True,
86 'uptodate': [utils.config_changed(kw)],
87 }
88
89 def rss_path(self, name, lang):
90 return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
91 self.site.config['RSS_PATH'], 'rss.xml'] if _f]
92
[end of nikola/plugins/task/rss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/task/rss.py b/nikola/plugins/task/rss.py
--- a/nikola/plugins/task/rss.py
+++ b/nikola/plugins/task/rss.py
@@ -58,6 +58,11 @@
"feed_length": self.site.config['FEED_LENGTH'],
}
self.site.scan_posts()
+ # Check for any changes in the state of use_in_feeds for any post.
+ # Issue #934
+ kw['use_in_feeds_status'] = ''.join(
+ ['T' if x.use_in_feeds else 'F' for x in self.site.timeline]
+ )
yield self.group_task()
for lang in kw["translations"]:
output_name = os.path.join(kw['output_folder'],
| {"golden_diff": "diff --git a/nikola/plugins/task/rss.py b/nikola/plugins/task/rss.py\n--- a/nikola/plugins/task/rss.py\n+++ b/nikola/plugins/task/rss.py\n@@ -58,6 +58,11 @@\n \"feed_length\": self.site.config['FEED_LENGTH'],\n }\n self.site.scan_posts()\n+ # Check for any changes in the state of use_in_feeds for any post.\n+ # Issue #934\n+ kw['use_in_feeds_status'] = ''.join(\n+ ['T' if x.use_in_feeds else 'F' for x in self.site.timeline]\n+ )\n yield self.group_task()\n for lang in kw[\"translations\"]:\n output_name = os.path.join(kw['output_folder'],\n", "issue": "Drafts are leaked in feeds\nReported by @kayhayen in the mailing list. Proposed patch breaks tests, so checking things out a bit more carefully.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals, print_function\nimport os\ntry:\n from urlparse import urljoin\nexcept ImportError:\n from urllib.parse import urljoin # NOQA\n\nfrom nikola import utils\nfrom nikola.plugin_categories import Task\n\n\nclass GenerateRSS(Task):\n \"\"\"Generate RSS feeds.\"\"\"\n\n name = \"generate_rss\"\n\n def set_site(self, site):\n site.register_path_handler('rss', self.rss_path)\n return super(GenerateRSS, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Generate RSS feeds.\"\"\"\n kw = {\n \"translations\": self.site.config[\"TRANSLATIONS\"],\n \"filters\": self.site.config[\"FILTERS\"],\n \"blog_title\": self.site.config[\"BLOG_TITLE\"],\n \"site_url\": self.site.config[\"SITE_URL\"],\n \"blog_description\": self.site.config[\"BLOG_DESCRIPTION\"],\n \"output_folder\": self.site.config[\"OUTPUT_FOLDER\"],\n \"rss_teasers\": self.site.config[\"RSS_TEASERS\"],\n \"hide_untranslated_posts\": self.site.config['HIDE_UNTRANSLATED_POSTS'],\n \"feed_length\": self.site.config['FEED_LENGTH'],\n }\n self.site.scan_posts()\n yield self.group_task()\n for lang in kw[\"translations\"]:\n output_name = os.path.join(kw['output_folder'],\n self.site.path(\"rss\", None, lang))\n deps = []\n if kw[\"hide_untranslated_posts\"]:\n posts = [x for x in self.site.timeline if x.use_in_feeds\n and x.is_translation_available(lang)][:10]\n else:\n posts = [x for x in self.site.timeline if x.use_in_feeds][:10]\n for post in posts:\n deps += post.deps(lang)\n\n feed_url = urljoin(self.site.config['BASE_URL'], self.site.link(\"rss\", None, lang).lstrip('/'))\n yield {\n 'basename': 
'generate_rss',\n 'name': os.path.normpath(output_name),\n 'file_dep': deps,\n 'targets': [output_name],\n 'actions': [(utils.generic_rss_renderer,\n (lang, kw[\"blog_title\"], kw[\"site_url\"],\n kw[\"blog_description\"], posts, output_name,\n kw[\"rss_teasers\"], kw['feed_length'], feed_url))],\n 'task_dep': ['render_posts'],\n 'clean': True,\n 'uptodate': [utils.config_changed(kw)],\n }\n\n def rss_path(self, name, lang):\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['RSS_PATH'], 'rss.xml'] if _f]\n", "path": "nikola/plugins/task/rss.py"}]} | 1,573 | 169 |
gh_patches_debug_21516 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-3514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't allow more than 2 dimensional input for `Inline/ReplyKeyboardMarkup`
### What kind of feature are you missing? Where do you notice a shortcoming of PTB?
When a user passes their list to `Inline/ReplyKeyboardMarkup`, we currently check that the input is valid (exactly 2-dimensional input is valid), i.e.
* the input is a sequence, not just a single button
* it's not just a simple sequence (sequence of buttons)
However, it doesn't check whether the input has more than two dimensions, i.e. `[[[KeyboardButton(...)], [...], ]]` is invalid but is not rejected.
### Describe the solution you'd like
Modify `tg._utils.markup.check_keyboard_type` to return `False` if we find another sequence.
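A minimal sketch of the extended check (mirroring the current helper; the exact rejection rule for the inner level is an assumption):

```
from collections.abc import Sequence


def check_keyboard_type(keyboard: object) -> bool:
    """Checks that the keyboard is a sequence of sequences of buttons."""
    if not isinstance(keyboard, Sequence) or isinstance(keyboard, (str, bytes)):
        return False
    for row in keyboard:
        if not isinstance(row, Sequence) or isinstance(row, (str, bytes)):
            return False
        for button in row:
            # A third nesting level means the input is more than 2-dimensional.
            if isinstance(button, Sequence) and not isinstance(button, (str, bytes)):
                return False
    return True
```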
### Describe alternatives you've considered
_No response_
### Additional context
_No response_
</issue>
<code>
[start of telegram/_utils/markup.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2023
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains a helper function for Telegram's ReplyMarkups
20
21 .. versionchanged:: 20.0
22 Previously, the contents of this module were available through the (no longer existing)
23 class ``telegram.ReplyMarkup``.
24
25 Warning:
26 Contents of this module are intended to be used internally by the library and *not* by the
27 user. Changes to this module are not considered breaking changes and may not be documented in
28 the changelog.
29 """
30 from collections.abc import Sequence
31
32
33 def check_keyboard_type(keyboard: object) -> bool:
34 """Checks if the keyboard provided is of the correct type - A list of lists.
35 Implicitly tested in the init-tests of `{Inline, Reply}KeyboardMarkup`
36 """
37 # string and bytes may actually be used for ReplyKeyboardMarkup in which case each button
38 # would contain a single character. But that use case should be discouraged and we don't
39 # allow it here.
40 if not isinstance(keyboard, Sequence) or isinstance(keyboard, (str, bytes)):
41 return False
42 for row in keyboard:
43 if not isinstance(row, Sequence) or isinstance(row, (str, bytes)):
44 return False
45 return True
46
[end of telegram/_utils/markup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/_utils/markup.py b/telegram/_utils/markup.py
--- a/telegram/_utils/markup.py
+++ b/telegram/_utils/markup.py
@@ -31,7 +31,7 @@
def check_keyboard_type(keyboard: object) -> bool:
- """Checks if the keyboard provided is of the correct type - A list of lists.
+ """Checks if the keyboard provided is of the correct type - A sequence of sequences.
Implicitly tested in the init-tests of `{Inline, Reply}KeyboardMarkup`
"""
# string and bytes may actually be used for ReplyKeyboardMarkup in which case each button
@@ -39,7 +39,11 @@
# allow it here.
if not isinstance(keyboard, Sequence) or isinstance(keyboard, (str, bytes)):
return False
+
for row in keyboard:
if not isinstance(row, Sequence) or isinstance(row, (str, bytes)):
return False
+ for inner in row:
+ if isinstance(inner, Sequence) and not isinstance(inner, str):
+ return False
return True
| {"golden_diff": "diff --git a/telegram/_utils/markup.py b/telegram/_utils/markup.py\n--- a/telegram/_utils/markup.py\n+++ b/telegram/_utils/markup.py\n@@ -31,7 +31,7 @@\n \n \n def check_keyboard_type(keyboard: object) -> bool:\n- \"\"\"Checks if the keyboard provided is of the correct type - A list of lists.\n+ \"\"\"Checks if the keyboard provided is of the correct type - A sequence of sequences.\n Implicitly tested in the init-tests of `{Inline, Reply}KeyboardMarkup`\n \"\"\"\n # string and bytes may actually be used for ReplyKeyboardMarkup in which case each button\n@@ -39,7 +39,11 @@\n # allow it here.\n if not isinstance(keyboard, Sequence) or isinstance(keyboard, (str, bytes)):\n return False\n+\n for row in keyboard:\n if not isinstance(row, Sequence) or isinstance(row, (str, bytes)):\n return False\n+ for inner in row:\n+ if isinstance(inner, Sequence) and not isinstance(inner, str):\n+ return False\n return True\n", "issue": "Don't allow more than 2 dimensional input for `Inline/ReplyKeyboardMarkup`\n### What kind of feature are you missing? Where do you notice a shortcoming of PTB?\n\nWhen a user passes their list to `Inline/ReplyKeyboardMarkup`, we currently check if their input is valid (exactly 2 dimensional input is valid), i.e. \r\n\r\n* the input is a sequence, not just a single button\r\n* it's not just a simple sequence (sequence of buttons)\r\n\r\nHowever it doesn't check if it's more than 2D, i.e. `[[[KeyboardButton(...)], [...], ]]` is invalid.\n\n### Describe the solution you'd like\n\nModify `tg._utils.markup.check_keyboard_type` to return `False` if we find another sequence.\n\n### Describe alternatives you've considered\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2023\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains a helper function for Telegram's ReplyMarkups\n\n.. versionchanged:: 20.0\n Previously, the contents of this module were available through the (no longer existing)\n class ``telegram.ReplyMarkup``.\n\nWarning:\n Contents of this module are intended to be used internally by the library and *not* by the\n user. Changes to this module are not considered breaking changes and may not be documented in\n the changelog.\n\"\"\"\nfrom collections.abc import Sequence\n\n\ndef check_keyboard_type(keyboard: object) -> bool:\n \"\"\"Checks if the keyboard provided is of the correct type - A list of lists.\n Implicitly tested in the init-tests of `{Inline, Reply}KeyboardMarkup`\n \"\"\"\n # string and bytes may actually be used for ReplyKeyboardMarkup in which case each button\n # would contain a single character. 
But that use case should be discouraged and we don't\n # allow it here.\n if not isinstance(keyboard, Sequence) or isinstance(keyboard, (str, bytes)):\n return False\n for row in keyboard:\n if not isinstance(row, Sequence) or isinstance(row, (str, bytes)):\n return False\n return True\n", "path": "telegram/_utils/markup.py"}]} | 1,251 | 243 |
gh_patches_debug_6602 | rasdani/github-patches | git_diff | encode__starlette-1410 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ETag checksum fails on FIPS-enabled systems when using MD5
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
The ETag checksum fails when using MD5, because FIPS mode disables that digest. This causes Starlette (e.g. its static file responses) to not work at all under Red Hat Enterprise Linux when FIPS mode is enabled.
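For context, a minimal reproduction sketch (Python 3.9+ is assumed for the `usedforsecurity` parameter):

```
import hashlib

# Under FIPS mode this raises ValueError, exactly as in the traceback below:
#   hashlib.md5(b"etag-data")

# Python 3.9+ lets the caller flag non-security usage, which FIPS permits:
etag = hashlib.md5(b"etag-data", usedforsecurity=False).hexdigest()
print(etag)
```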
### Debugging material
Here's the exception that's thrown:
```
INFO: 10.42.1.7:34422 - "GET /app/static/foo.html HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/h11_impl.py", line 373, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 75, in __call__
return await self.app(scope, receive, send)
File "/usr/local/lib/python3.8/site-packages/fastapi/applications.py", line 208, in __call__
await super().__call__(scope, receive, send)
File "/usr/local/lib/python3.8/site-packages/starlette/applications.py", line 112, in __call__
await self.middleware_stack(scope, receive, send)
File "/usr/local/lib/python3.8/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc
File "/usr/local/lib/python3.8/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/usr/local/lib/python3.8/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc
File "/usr/local/lib/python3.8/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/usr/local/lib/python3.8/site-packages/starlette/routing.py", line 656, in __call__
await route.handle(scope, receive, send)
File "/usr/local/lib/python3.8/site-packages/starlette/routing.py", line 408, in handle
await self.app(scope, receive, send)
File "/usr/local/lib/python3.8/site-packages/starlette/staticfiles.py", line 97, in __call__
response = await self.get_response(path, scope)
File "/usr/local/lib/python3.8/site-packages/starlette/staticfiles.py", line 118, in get_response
return self.file_response(full_path, stat_result, scope)
File "/usr/local/lib/python3.8/site-packages/starlette/staticfiles.py", line 173, in file_response
response = FileResponse(
File "/usr/local/lib/python3.8/site-packages/starlette/responses.py", line 267, in __init__
self.set_stat_headers(stat_result)
File "/usr/local/lib/python3.8/site-packages/starlette/responses.py", line 273, in set_stat_headers
etag = hashlib.md5(etag_base.encode()).hexdigest()
ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS
```
### Environment
- OS: Red Hat Enterprise Linux 8 in FIPS mode
- Python version: 3.8.8
- Starlette version: 0.16.0
</issue>
<code>
[start of starlette/_compat.py]
1 import hashlib
2
3 # Compat wrapper to always include the `usedforsecurity=...` parameter,
4 # which is only added from Python 3.9 onwards.
5 # We use this flag to indicate that we use `md5` hashes only for non-security
6 # cases (our ETag checksums).
7 # If we don't indicate that we're using MD5 for non-security related reasons,
8 # then attempting to use this function will raise an error when used
9 # environments which enable a strict "FIPs mode".
10 #
11 # See issue: https://github.com/encode/starlette/issues/1365
12 try:
13
14 hashlib.md5(b"data", usedforsecurity=True) # type: ignore[call-arg]
15
16 def md5_hexdigest(
17 data: bytes, *, usedforsecurity: bool = True
18 ) -> str: # pragma: no cover
19 return hashlib.md5( # type: ignore[call-arg]
20 data, usedforsecurity=usedforsecurity
21 ).hexdigest()
22
23 except TypeError: # pragma: no cover
24
25 def md5_hexdigest(data: bytes, *, usedforsecurity: bool = True) -> str:
26 return hashlib.md5(data).hexdigest()
27
[end of starlette/_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/_compat.py b/starlette/_compat.py
--- a/starlette/_compat.py
+++ b/starlette/_compat.py
@@ -11,7 +11,10 @@
# See issue: https://github.com/encode/starlette/issues/1365
try:
- hashlib.md5(b"data", usedforsecurity=True) # type: ignore[call-arg]
+ # check if the Python version supports the parameter
+ # using usedforsecurity=False to avoid an exception on FIPS systems
+ # that reject usedforsecurity=True
+ hashlib.md5(b"data", usedforsecurity=False) # type: ignore[call-arg]
def md5_hexdigest(
data: bytes, *, usedforsecurity: bool = True
| {"golden_diff": "diff --git a/starlette/_compat.py b/starlette/_compat.py\n--- a/starlette/_compat.py\n+++ b/starlette/_compat.py\n@@ -11,7 +11,10 @@\n # See issue: https://github.com/encode/starlette/issues/1365\n try:\n \n- hashlib.md5(b\"data\", usedforsecurity=True) # type: ignore[call-arg]\n+ # check if the Python version supports the parameter\n+ # using usedforsecurity=False to avoid an exception on FIPS systems\n+ # that reject usedforsecurity=True\n+ hashlib.md5(b\"data\", usedforsecurity=False) # type: ignore[call-arg]\n \n def md5_hexdigest(\n data: bytes, *, usedforsecurity: bool = True\n", "issue": "ETag checksum fails on FIPS-enabled systems when using MD5\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nThe ETag checksum fails when using MD5. This is causing Starlette to not work at all under Red Hat Enterprise Linux when FIPS mode is enabled.\r\n\r\n### Debugging material\r\n\r\nHere's the exception that's thrown:\r\n\r\n```\r\nINFO: 10.42.1.7:34422 - \"GET /app/static/foo.html HTTP/1.1\" 500 Internal Server Error\r\nERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/h11_impl.py\", line 373, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py\", line 75, in __call__\r\n return await self.app(scope, receive, send)\r\n File \"/usr/local/lib/python3.8/site-packages/fastapi/applications.py\", line 208, in __call__\r\n await super().__call__(scope, receive, send)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/applications.py\", line 112, in __call__\r\n await self.middleware_stack(scope, receive, send)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/middleware/errors.py\", line 181, in __call__\r\n raise exc\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/middleware/errors.py\", line 159, in __call__\r\n await self.app(scope, receive, _send)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/exceptions.py\", line 82, in __call__\r\n raise exc\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/exceptions.py\", line 71, in __call__\r\n await self.app(scope, receive, sender)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/routing.py\", line 656, in __call__\r\n await route.handle(scope, receive, send)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/routing.py\", line 408, in handle\r\n await self.app(scope, receive, send)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/staticfiles.py\", line 97, in __call__\r\n response = await self.get_response(path, scope)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/staticfiles.py\", line 118, in get_response\r\n return self.file_response(full_path, stat_result, scope)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/staticfiles.py\", line 173, in file_response\r\n response = FileResponse(\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/responses.py\", line 267, in __init__\r\n self.set_stat_headers(stat_result)\r\n File \"/usr/local/lib/python3.8/site-packages/starlette/responses.py\", line 273, in set_stat_headers\r\n etag = 
hashlib.md5(etag_base.encode()).hexdigest()\r\nValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS\r\n```\r\n\r\n### Environment\r\n\r\n- OS: Red Hat Enterprise Linux 8 in FIPS mode\r\n- Python version: 3.8.8\r\n- Starlette version: 0.16.0\r\n\r\n\n", "before_files": [{"content": "import hashlib\n\n# Compat wrapper to always include the `usedforsecurity=...` parameter,\n# which is only added from Python 3.9 onwards.\n# We use this flag to indicate that we use `md5` hashes only for non-security\n# cases (our ETag checksums).\n# If we don't indicate that we're using MD5 for non-security related reasons,\n# then attempting to use this function will raise an error when used\n# environments which enable a strict \"FIPs mode\".\n#\n# See issue: https://github.com/encode/starlette/issues/1365\ntry:\n\n hashlib.md5(b\"data\", usedforsecurity=True) # type: ignore[call-arg]\n\n def md5_hexdigest(\n data: bytes, *, usedforsecurity: bool = True\n ) -> str: # pragma: no cover\n return hashlib.md5( # type: ignore[call-arg]\n data, usedforsecurity=usedforsecurity\n ).hexdigest()\n\nexcept TypeError: # pragma: no cover\n\n def md5_hexdigest(data: bytes, *, usedforsecurity: bool = True) -> str:\n return hashlib.md5(data).hexdigest()\n", "path": "starlette/_compat.py"}]} | 1,662 | 175 |
gh_patches_debug_8956 | rasdani/github-patches | git_diff | keras-team__keras-11147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sync naming convention and style in NLP datasets
Also fixes a possible bug where the `np.load()`/`f.close()` pair is not exception-safe.
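For illustration of the exception-safety point (`path` is assumed to point at a local `.npz` archive): if anything raises between `np.load()` and `f.close()`, the handle leaks, whereas a `with` block closes it either way.

```
import numpy as np

path = 'boston_housing.npz'  # assumed local archive

# Current pattern: if indexing raises, f.close() never runs.
f = np.load(path)
x = f['x']
y = f['y']
f.close()

# Exception-safe: NpzFile is a context manager and closes itself.
with np.load(path) as f:
    x = f['x']
    y = f['y']
```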
</issue>
<code>
[start of keras/datasets/boston_housing.py]
1 """Boston housing price regression dataset.
2 """
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6
7 from ..utils.data_utils import get_file
8 import numpy as np
9
10
11 def load_data(path='boston_housing.npz', test_split=0.2, seed=113):
12 """Loads the Boston Housing dataset.
13
14 # Arguments
15 path: path where to cache the dataset locally
16 (relative to ~/.keras/datasets).
17 test_split: fraction of the data to reserve as test set.
18 seed: Random seed for shuffling the data
19 before computing the test split.
20
21 # Returns
22 Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
23 """
24 assert 0 <= test_split < 1
25 path = get_file(path,
26 origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz',
27 file_hash='f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5')
28 f = np.load(path)
29 x = f['x']
30 y = f['y']
31 f.close()
32
33 np.random.seed(seed)
34 indices = np.arange(len(x))
35 np.random.shuffle(indices)
36 x = x[indices]
37 y = y[indices]
38
39 x_train = np.array(x[:int(len(x) * (1 - test_split))])
40 y_train = np.array(y[:int(len(x) * (1 - test_split))])
41 x_test = np.array(x[int(len(x) * (1 - test_split)):])
42 y_test = np.array(y[int(len(x) * (1 - test_split)):])
43 return (x_train, y_train), (x_test, y_test)
44
[end of keras/datasets/boston_housing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras/datasets/boston_housing.py b/keras/datasets/boston_housing.py
--- a/keras/datasets/boston_housing.py
+++ b/keras/datasets/boston_housing.py
@@ -25,10 +25,9 @@
path = get_file(path,
origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz',
file_hash='f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5')
- f = np.load(path)
- x = f['x']
- y = f['y']
- f.close()
+ with np.load(path) as f:
+ x = f['x']
+ y = f['y']
np.random.seed(seed)
indices = np.arange(len(x))
| {"golden_diff": "diff --git a/keras/datasets/boston_housing.py b/keras/datasets/boston_housing.py\n--- a/keras/datasets/boston_housing.py\n+++ b/keras/datasets/boston_housing.py\n@@ -25,10 +25,9 @@\n path = get_file(path,\n origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz',\n file_hash='f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5')\n- f = np.load(path)\n- x = f['x']\n- y = f['y']\n- f.close()\n+ with np.load(path) as f:\n+ x = f['x']\n+ y = f['y']\n \n np.random.seed(seed)\n indices = np.arange(len(x))\n", "issue": "Sync naming convention and style in NLP datasets\nAlso fixes a possible bug with np.load()/f.close() pair not being exception-safe.\n", "before_files": [{"content": "\"\"\"Boston housing price regression dataset.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom ..utils.data_utils import get_file\nimport numpy as np\n\n\ndef load_data(path='boston_housing.npz', test_split=0.2, seed=113):\n \"\"\"Loads the Boston Housing dataset.\n\n # Arguments\n path: path where to cache the dataset locally\n (relative to ~/.keras/datasets).\n test_split: fraction of the data to reserve as test set.\n seed: Random seed for shuffling the data\n before computing the test split.\n\n # Returns\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n \"\"\"\n assert 0 <= test_split < 1\n path = get_file(path,\n origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz',\n file_hash='f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5')\n f = np.load(path)\n x = f['x']\n y = f['y']\n f.close()\n\n np.random.seed(seed)\n indices = np.arange(len(x))\n np.random.shuffle(indices)\n x = x[indices]\n y = y[indices]\n\n x_train = np.array(x[:int(len(x) * (1 - test_split))])\n y_train = np.array(y[:int(len(x) * (1 - test_split))])\n x_test = np.array(x[int(len(x) * (1 - test_split)):])\n y_test = np.array(y[int(len(x) * (1 - test_split)):])\n return (x_train, y_train), (x_test, y_test)\n", "path": "keras/datasets/boston_housing.py"}]} | 1,076 | 229 |
gh_patches_debug_159 | rasdani/github-patches | git_diff | uccser__cs-unplugged-54 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Bootstrap 4 SCSS
</issue>
<code>
[start of csunplugged/config/settings.py]
1 """
2 Django settings for csunplugged project.
3
4 Generated by 'django-admin startproject' using Django 1.10.3.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.10/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.10/ref/settings/
11 """
12
13 import os
14 from config.settings_secret import *
15
16 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
17 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
18
19 # nasty hard coding
20 SETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))
21
22
23 # Quick-start development settings - unsuitable for production
24 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
25
26 # SECURITY WARNING: keep the secret key used in production secret!
27 SECRET_KEY = 'l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f'
28
29 # SECURITY WARNING: don't run with debug turned on in production!
30 DEBUG = True
31
32 ALLOWED_HOSTS = []
33
34
35 # Application definition
36
37 INSTALLED_APPS = [
38 'general.apps.GeneralConfig',
39 'topics.apps.TopicsConfig',
40 'resources.apps.ResourcesConfig',
41 'django.contrib.admin',
42 'django.contrib.auth',
43 'django.contrib.contenttypes',
44 'django.contrib.sessions',
45 'django.contrib.messages',
46 'django.contrib.staticfiles',
47 ]
48
49 MIDDLEWARE = [
50 'django.middleware.security.SecurityMiddleware',
51 'django.contrib.sessions.middleware.SessionMiddleware',
52 'django.middleware.locale.LocaleMiddleware',
53 'django.middleware.common.CommonMiddleware',
54 'django.middleware.csrf.CsrfViewMiddleware',
55 'django.contrib.auth.middleware.AuthenticationMiddleware',
56 'django.contrib.messages.middleware.MessageMiddleware',
57 'django.middleware.clickjacking.XFrameOptionsMiddleware',
58 ]
59
60 ROOT_URLCONF = 'config.urls'
61
62 TEMPLATES = [
63 {
64 'BACKEND': 'django.template.backends.django.DjangoTemplates',
65 'DIRS': [
66 os.path.join(SETTINGS_PATH, 'templates'),
67 os.path.join(SETTINGS_PATH, 'resources/content/')
68 ],
69 'APP_DIRS': True,
70 'OPTIONS': {
71 'context_processors': [
72 'django.template.context_processors.debug',
73 'django.template.context_processors.request',
74 'django.contrib.auth.context_processors.auth',
75 'django.contrib.messages.context_processors.messages',
76 ],
77 },
78 },
79 ]
80
81 WSGI_APPLICATION = 'config.wsgi.application'
82
83
84 # Database
85 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases
86 # Database values are stored in `settings_secret.py`
87 # A template of this file is available as `settings_secret_template.py`
88
89
90 # Password validation
91 # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
92
93 AUTH_PASSWORD_VALIDATORS = [
94 {
95 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
96 },
97 {
98 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
99 },
100 {
101 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
105 },
106 ]
107
108
109 # Internationalization
110 # https://docs.djangoproject.com/en/1.10/topics/i18n/
111
112 LANGUAGE_CODE = 'en-us'
113
114 TIME_ZONE = 'UTC'
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122 LOCALE_PATHS = ['locale']
123
124 # Static files (CSS, JavaScript, Images)
125 # https://docs.djangoproject.com/en/1.10/howto/static-files/
126
127 STATIC_URL = '/static/'
128 STATICFILES_DIRS = (
129 os.path.join(BASE_DIR, 'static'),
130 )
131
[end of csunplugged/config/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/config/settings.py b/csunplugged/config/settings.py
--- a/csunplugged/config/settings.py
+++ b/csunplugged/config/settings.py
@@ -126,5 +126,5 @@
STATIC_URL = '/static/'
STATICFILES_DIRS = (
- os.path.join(BASE_DIR, 'static'),
+ os.path.join(BASE_DIR, 'build'),
)
| {"golden_diff": "diff --git a/csunplugged/config/settings.py b/csunplugged/config/settings.py\n--- a/csunplugged/config/settings.py\n+++ b/csunplugged/config/settings.py\n@@ -126,5 +126,5 @@\n \n STATIC_URL = '/static/'\n STATICFILES_DIRS = (\n- os.path.join(BASE_DIR, 'static'),\n+ os.path.join(BASE_DIR, 'build'),\n )\n", "issue": "Add Bootstrap 4 SCSS\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for csunplugged project.\n\nGenerated by 'django-admin startproject' using Django 1.10.3.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport os\nfrom config.settings_secret import *\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# nasty hard coding\nSETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'general.apps.GeneralConfig',\n 'topics.apps.TopicsConfig',\n 'resources.apps.ResourcesConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(SETTINGS_PATH, 'templates'),\n os.path.join(SETTINGS_PATH, 'resources/content/')\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n# Database values are stored in `settings_secret.py`\n# A template of this file is available as `settings_secret_template.py`\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 
'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = ['locale']\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n )\n", "path": "csunplugged/config/settings.py"}]} | 1,659 | 91 |
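The one-line fix above repoints Django's static-file lookup from the raw sources to a compiled output directory. A minimal sketch of the resulting settings fragment, assuming a frontend pipeline (e.g. gulp or npm scripts compiling the Bootstrap 4 SCSS) writes its output into `build/` at the project root; the pipeline itself is an assumption, not shown in the record:

```python
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

STATIC_URL = '/static/'
# Point collectstatic/runserver at the compiled assets, not the SCSS sources.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'build'),
)
```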
gh_patches_debug_20201 | rasdani/github-patches | git_diff | translate__pootle-4492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test if TP uses a proper checker
This commit https://github.com/translate/pootle/commit/1d6ef1c987f2ee421b678fb9ac36e16175e4f364 fixed a very subtle bug; let's add a test for it.
</issue>
<code>
[start of pytest_pootle/fixtures/models/translation_project.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import pytest
11
12
13 def _require_tp(language, project):
14 """Helper to get/create a new translation project."""
15 from pootle_translationproject.models import create_translation_project
16
17 return create_translation_project(language, project)
18
19
20 def _require_tp_with_obsolete_dir(language, project):
21 """Helper to get/create a translation project in obsolete state."""
22 from pootle_translationproject.models import create_translation_project
23
24 tp = create_translation_project(language, project)
25 tp.directory.makeobsolete()
26
27 return tp
28
29
30 @pytest.fixture
31 def afrikaans_tutorial(afrikaans, tutorial):
32 """Require Afrikaans Tutorial."""
33 return _require_tp(afrikaans, tutorial)
34
35
36 @pytest.fixture
37 def arabic_tutorial_obsolete(arabic, tutorial):
38 """Require Arabic Tutorial in obsolete state."""
39 return _require_tp_with_obsolete_dir(arabic, tutorial)
40
41
42 @pytest.fixture
43 def english_tutorial(english, tutorial):
44 """Require English Tutorial."""
45 return _require_tp(english, tutorial)
46
47
48 @pytest.fixture
49 def french_tutorial(french, tutorial):
50 """Require French Tutorial."""
51 return _require_tp(french, tutorial)
52
53
54 @pytest.fixture
55 def spanish_tutorial(spanish, tutorial):
56 """Require Spanish Tutorial."""
57 return _require_tp(spanish, tutorial)
58
59
60 @pytest.fixture
61 def italian_tutorial(italian, tutorial):
62 """Require Italian Tutorial."""
63 return _require_tp(italian, tutorial)
64
65
66 @pytest.fixture
67 def russian_tutorial(russian, tutorial):
68 """Require Russian Tutorial."""
69 return _require_tp(russian, tutorial)
70
71
72 @pytest.fixture
73 def afrikaans_vfolder_test(afrikaans, vfolder_test):
74 """Require Afrikaans Virtual Folder Test."""
75 return _require_tp(afrikaans, vfolder_test)
76
77
78 @pytest.fixture
79 def templates_tutorial(templates, tutorial):
80 """Require Template Tutorial."""
81 return _require_tp(templates, tutorial)
82
[end of pytest_pootle/fixtures/models/translation_project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytest_pootle/fixtures/models/translation_project.py b/pytest_pootle/fixtures/models/translation_project.py
--- a/pytest_pootle/fixtures/models/translation_project.py
+++ b/pytest_pootle/fixtures/models/translation_project.py
@@ -7,6 +7,8 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+import shutil
+
import pytest
@@ -79,3 +81,25 @@
def templates_tutorial(templates, tutorial):
"""Require Template Tutorial."""
return _require_tp(templates, tutorial)
+
+
+def get_project_checkers():
+ from translate.filters import checks
+
+ return ['standard'] + list(checks.projectcheckers.keys())
+
+
[email protected](params=get_project_checkers())
+def tp_checker_tests(request, english):
+ from pytest_pootle.factories import ProjectFactory
+
+ checker_name = request.param
+ project = ProjectFactory(
+ checkstyle=checker_name,
+ source_language=english)
+
+ def _remove_project_directory():
+ shutil.rmtree(project.get_real_path())
+ request.addfinalizer(_remove_project_directory)
+
+ return (checker_name, project)
| {"golden_diff": "diff --git a/pytest_pootle/fixtures/models/translation_project.py b/pytest_pootle/fixtures/models/translation_project.py\n--- a/pytest_pootle/fixtures/models/translation_project.py\n+++ b/pytest_pootle/fixtures/models/translation_project.py\n@@ -7,6 +7,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+import shutil\n+\n import pytest\n \n \n@@ -79,3 +81,25 @@\n def templates_tutorial(templates, tutorial):\n \"\"\"Require Template Tutorial.\"\"\"\n return _require_tp(templates, tutorial)\n+\n+\n+def get_project_checkers():\n+ from translate.filters import checks\n+\n+ return ['standard'] + list(checks.projectcheckers.keys())\n+\n+\[email protected](params=get_project_checkers())\n+def tp_checker_tests(request, english):\n+ from pytest_pootle.factories import ProjectFactory\n+\n+ checker_name = request.param\n+ project = ProjectFactory(\n+ checkstyle=checker_name,\n+ source_language=english)\n+\n+ def _remove_project_directory():\n+ shutil.rmtree(project.get_real_path())\n+ request.addfinalizer(_remove_project_directory)\n+\n+ return (checker_name, project)\n", "issue": "Add test if TP uses a proper checker\nThis commit https://github.com/translate/pootle/commit/1d6ef1c987f2ee421b678fb9ac36e16175e4f364 fixed very hidden bug, let's add a test for it.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport pytest\n\n\ndef _require_tp(language, project):\n \"\"\"Helper to get/create a new translation project.\"\"\"\n from pootle_translationproject.models import create_translation_project\n\n return create_translation_project(language, project)\n\n\ndef _require_tp_with_obsolete_dir(language, project):\n \"\"\"Helper to get/create a translation project in obsolete state.\"\"\"\n from pootle_translationproject.models import create_translation_project\n\n tp = create_translation_project(language, project)\n tp.directory.makeobsolete()\n\n return tp\n\n\[email protected]\ndef afrikaans_tutorial(afrikaans, tutorial):\n \"\"\"Require Afrikaans Tutorial.\"\"\"\n return _require_tp(afrikaans, tutorial)\n\n\[email protected]\ndef arabic_tutorial_obsolete(arabic, tutorial):\n \"\"\"Require Arabic Tutorial in obsolete state.\"\"\"\n return _require_tp_with_obsolete_dir(arabic, tutorial)\n\n\[email protected]\ndef english_tutorial(english, tutorial):\n \"\"\"Require English Tutorial.\"\"\"\n return _require_tp(english, tutorial)\n\n\[email protected]\ndef french_tutorial(french, tutorial):\n \"\"\"Require French Tutorial.\"\"\"\n return _require_tp(french, tutorial)\n\n\[email protected]\ndef spanish_tutorial(spanish, tutorial):\n \"\"\"Require Spanish Tutorial.\"\"\"\n return _require_tp(spanish, tutorial)\n\n\[email protected]\ndef italian_tutorial(italian, tutorial):\n \"\"\"Require Italian Tutorial.\"\"\"\n return _require_tp(italian, tutorial)\n\n\[email protected]\ndef russian_tutorial(russian, tutorial):\n \"\"\"Require Russian Tutorial.\"\"\"\n return _require_tp(russian, tutorial)\n\n\[email protected]\ndef afrikaans_vfolder_test(afrikaans, vfolder_test):\n \"\"\"Require Afrikaans Virtual Folder Test.\"\"\"\n return _require_tp(afrikaans, vfolder_test)\n\n\[email protected]\ndef 
templates_tutorial(templates, tutorial):\n \"\"\"Require Template Tutorial.\"\"\"\n return _require_tp(templates, tutorial)\n", "path": "pytest_pootle/fixtures/models/translation_project.py"}]} | 1,273 | 280 |
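The fixture added in the golden diff is parametrized over every checker name translate-toolkit knows about, so any test that requests it runs once per checker style. A hypothetical consumer (the repository's actual tests are not shown in this record; the assertion is illustrative):

```python
def test_tp_project_checker(tp_checker_tests):
    # Runs once per checker style: 'standard' plus every key in
    # translate.filters.checks.projectcheckers.
    checker_name, project = tp_checker_tests
    assert project.checkstyle == checker_name
```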
gh_patches_debug_24147 | rasdani/github-patches | git_diff | UTNkar__moore-310 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raw HTML content type
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Description
There is currently no content type for raw HTML that can be used for the Jexpo. A special content type could be created for Jexpo as it works right now, but since the way Jexpo is initialized can change (according to Jexpo themselves), they recommend using raw HTML.
There should be a content type for raw HTML.
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
Forms are missing the form introduction.
### Description
Creating a new form in Wagtail lets you specify an introduction to the form, but that introduction is never displayed.
</issue>
<code>
[start of src/home/models/web_page.py]
1 from __future__ import absolute_import, unicode_literals
2 from django.db import models
3 from django.utils.translation import ugettext_lazy as _
4 from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, \
5 TabbedInterface, ObjectList
6 from wagtail.core.fields import StreamField
7 from wagtail.core.models import Page
8 from blocks.models import WAGTAIL_STATIC_BLOCKTYPES
9 from google.models import GoogleFormBlock, GoogleDriveBlock, \
10 GoogleCalendarBlock
11 from news.models import LatestNewsBlock
12 from utils.translation import TranslatedField
13
14
15 class WebPage(Page):
16 # ---- General Page information ------
17
18 title_sv = models.CharField(max_length=255)
19 translated_title = TranslatedField('title', 'title_sv')
20
21 body_en = StreamField(
22 WAGTAIL_STATIC_BLOCKTYPES + [
23 ('google_calendar', GoogleCalendarBlock()),
24 ('google_drive', GoogleDriveBlock()),
25 ('google_form', GoogleFormBlock()),
26 ('news', LatestNewsBlock()),
27 ],
28 blank=True,
29 )
30 body_sv = StreamField(
31 WAGTAIL_STATIC_BLOCKTYPES + [
32 ('google_calendar', GoogleCalendarBlock()),
33 ('google_drive', GoogleDriveBlock()),
34 ('google_form', GoogleFormBlock()),
35 ('news', LatestNewsBlock()),
36 ],
37 blank=True,
38 )
39 body = TranslatedField('body_en', 'body_sv')
40
41 content_panels_en = Page.content_panels + [
42 StreamFieldPanel('body_en'),
43 ]
44
45 content_panels_sv = [
46 FieldPanel('title_sv', classname="full title"),
47 StreamFieldPanel('body_sv'),
48 ]
49
50 edit_handler = TabbedInterface([
51 ObjectList(content_panels_en, heading=_('English')),
52 ObjectList(content_panels_sv, heading=_('Swedish')),
53 ObjectList(Page.promote_panels, heading=_('Promote')),
54 ObjectList(Page.settings_panels, heading=_('Settings')),
55 ])
56
[end of src/home/models/web_page.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/home/models/web_page.py b/src/home/models/web_page.py
--- a/src/home/models/web_page.py
+++ b/src/home/models/web_page.py
@@ -5,6 +5,7 @@
TabbedInterface, ObjectList
from wagtail.core.fields import StreamField
from wagtail.core.models import Page
+from wagtail.core.blocks import RawHTMLBlock
from blocks.models import WAGTAIL_STATIC_BLOCKTYPES
from google.models import GoogleFormBlock, GoogleDriveBlock, \
GoogleCalendarBlock
@@ -24,6 +25,7 @@
('google_drive', GoogleDriveBlock()),
('google_form', GoogleFormBlock()),
('news', LatestNewsBlock()),
+ ('html', RawHTMLBlock(group="Basic")),
],
blank=True,
)
@@ -33,6 +35,7 @@
('google_drive', GoogleDriveBlock()),
('google_form', GoogleFormBlock()),
('news', LatestNewsBlock()),
+ ('html', RawHTMLBlock(group="Basic")),
],
blank=True,
)
| {"golden_diff": "diff --git a/src/home/models/web_page.py b/src/home/models/web_page.py\n--- a/src/home/models/web_page.py\n+++ b/src/home/models/web_page.py\n@@ -5,6 +5,7 @@\n TabbedInterface, ObjectList\n from wagtail.core.fields import StreamField\n from wagtail.core.models import Page\n+from wagtail.core.blocks import RawHTMLBlock\n from blocks.models import WAGTAIL_STATIC_BLOCKTYPES\n from google.models import GoogleFormBlock, GoogleDriveBlock, \\\n GoogleCalendarBlock\n@@ -24,6 +25,7 @@\n ('google_drive', GoogleDriveBlock()),\n ('google_form', GoogleFormBlock()),\n ('news', LatestNewsBlock()),\n+ ('html', RawHTMLBlock(group=\"Basic\")),\n ],\n blank=True,\n )\n@@ -33,6 +35,7 @@\n ('google_drive', GoogleDriveBlock()),\n ('google_form', GoogleFormBlock()),\n ('news', LatestNewsBlock()),\n+ ('html', RawHTMLBlock(group=\"Basic\")),\n ],\n blank=True,\n )\n", "issue": "Raw HTML content type\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Description\r\nThere is currently no content type for raw HTML that can be used for the Jexpo. A special content type could be created for Jexpo as it works right now but since the way Jexpo is initialized can change (according to themselves), they recommend using a raw HTML. \r\n\r\nThere should be a content type for raw HTML.\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\nForms are missing form introduction.\n### Description\r\n\r\nCreating a new form in Wagtail lets you specify an introduction to the form, which isn't displayed.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, \\\n TabbedInterface, ObjectList\nfrom wagtail.core.fields import StreamField\nfrom wagtail.core.models import Page\nfrom blocks.models import WAGTAIL_STATIC_BLOCKTYPES\nfrom google.models import GoogleFormBlock, GoogleDriveBlock, \\\n GoogleCalendarBlock\nfrom news.models import LatestNewsBlock\nfrom utils.translation import TranslatedField\n\n\nclass WebPage(Page):\n # ---- General Page information ------\n\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n body_en = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES + [\n ('google_calendar', GoogleCalendarBlock()),\n ('google_drive', GoogleDriveBlock()),\n ('google_form', GoogleFormBlock()),\n ('news', LatestNewsBlock()),\n ],\n blank=True,\n )\n body_sv = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES + [\n ('google_calendar', GoogleCalendarBlock()),\n ('google_drive', GoogleDriveBlock()),\n ('google_form', GoogleFormBlock()),\n ('news', LatestNewsBlock()),\n ],\n blank=True,\n )\n body = TranslatedField('body_en', 'body_sv')\n\n content_panels_en = Page.content_panels + [\n StreamFieldPanel('body_en'),\n ]\n\n content_panels_sv = [\n FieldPanel('title_sv', classname=\"full title\"),\n StreamFieldPanel('body_sv'),\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(content_panels_en, heading=_('English')),\n ObjectList(content_panels_sv, heading=_('Swedish')),\n ObjectList(Page.promote_panels, heading=_('Promote')),\n ObjectList(Page.settings_panels, heading=_('Settings')),\n ])\n", "path": "src/home/models/web_page.py"}]} | 1,197 | 228 |
gh_patches_debug_17224 | rasdani/github-patches | git_diff | cobbler__cobbler-626 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'get-loader' needs syslinux updating.
I've confirmed that the version of syslinux (3.61) currently in get-loader will not let you deploy VMware 5.1 on a random machine.
It errors with "fatal error: 10 (out of resources)" when loading tools.t00.
Using the pxelinux.0 and menu.c32 binaries from the syslinux-3.86.tar.gz build on kernel.org fixes it.
</issue>
<code>
[start of cobbler/action_dlcontent.py]
1 """
2 Downloads bootloader content for all arches for when the user doesn't want to supply their own.
3
4 Copyright 2009, Red Hat, Inc and Others
5 Michael DeHaan <michael.dehaan AT gmail>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301 USA
21 """
22
23 import os
24 import urlgrabber
25 import clogger
26
27 class ContentDownloader:
28
29 def __init__(self,config,logger=None):
30 """
31 Constructor
32 """
33 self.config = config
34 self.settings = config.settings()
35 if logger is None:
36 logger = clogger.Logger()
37 self.logger = logger
38
39
40 def run(self,force=False):
41 """
42 Download bootloader content for all of the latest bootloaders, since the user
43 has chosen to not supply their own. You may ask "why not get this from yum", though
44 Fedora has no IA64 repo, for instance, and we also want this to be able to work on Debian and
45 further do not want folks to have to install a cross compiler. For those that don't like this approach
46 they can still source their cross-arch bootloader content manually.
47 """
48
49 content_server = "http://www.cobblerd.org/loaders"
50 dest = "/var/lib/cobbler/loaders"
51
52 files = (
53 ( "%s/README" % content_server, "%s/README" % dest ),
54 ( "%s/COPYING.elilo" % content_server, "%s/COPYING.elilo" % dest ),
55 ( "%s/COPYING.yaboot" % content_server, "%s/COPYING.yaboot" % dest),
56 ( "%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest),
57 ( "%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest ),
58 ( "%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest),
59 ( "%s/pxelinux.0-4.02" % content_server, "%s/pxelinux.0" % dest),
60 ( "%s/menu.c32-4.02" % content_server, "%s/menu.c32" % dest),
61 ( "%s/grub-0.97-x86.efi" % content_server, "%s/grub-x86.efi" % dest),
62 ( "%s/grub-0.97-x86_64.efi" % content_server, "%s/grub-x86_64.efi" % dest),
63 )
64
65 proxies = {}
66 if os.environ.has_key("HTTP_PROXY"):
67 proxies['http'] = os.environ["HTTP_PROXY"]
68
69 if os.environ.has_key("HTTPS_PROXY"):
70 proxies['https'] = os.environ["HTTPS_PROXY"]
71
72 if os.environ.has_key("FTP_PROXY"):
73 proxies['ftp'] = os.environ["FTP_PROXY"]
74
75 if len(proxies) == 0:
76 proxies = None
77
78 for src,dst in files:
79 if os.path.exists(dst) and not force:
80 self.logger.info("path %s already exists, not overwriting existing content, use --force if you wish to update" % dst)
81 continue
82 self.logger.info("downloading %s to %s" % (src,dst))
83 urlgrabber.grabber.urlgrab(src, filename=dst, proxies=proxies)
84
85 return True
86
87
[end of cobbler/action_dlcontent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/action_dlcontent.py b/cobbler/action_dlcontent.py
--- a/cobbler/action_dlcontent.py
+++ b/cobbler/action_dlcontent.py
@@ -56,8 +56,8 @@
( "%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest),
( "%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest ),
( "%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest),
- ( "%s/pxelinux.0-4.02" % content_server, "%s/pxelinux.0" % dest),
- ( "%s/menu.c32-4.02" % content_server, "%s/menu.c32" % dest),
+ ( "%s/pxelinux.0-3.86" % content_server, "%s/pxelinux.0" % dest),
+ ( "%s/menu.c32-3.86" % content_server, "%s/menu.c32" % dest),
( "%s/grub-0.97-x86.efi" % content_server, "%s/grub-x86.efi" % dest),
( "%s/grub-0.97-x86_64.efi" % content_server, "%s/grub-x86_64.efi" % dest),
)
| {"golden_diff": "diff --git a/cobbler/action_dlcontent.py b/cobbler/action_dlcontent.py\n--- a/cobbler/action_dlcontent.py\n+++ b/cobbler/action_dlcontent.py\n@@ -56,8 +56,8 @@\n ( \"%s/COPYING.syslinux\" % content_server, \"%s/COPYING.syslinux\" % dest),\n ( \"%s/elilo-3.8-ia64.efi\" % content_server, \"%s/elilo-ia64.efi\" % dest ),\n ( \"%s/yaboot-1.3.14-12\" % content_server, \"%s/yaboot\" % dest),\n- ( \"%s/pxelinux.0-4.02\" % content_server, \"%s/pxelinux.0\" % dest),\n- ( \"%s/menu.c32-4.02\" % content_server, \"%s/menu.c32\" % dest),\n+ ( \"%s/pxelinux.0-3.86\" % content_server, \"%s/pxelinux.0\" % dest),\n+ ( \"%s/menu.c32-3.86\" % content_server, \"%s/menu.c32\" % dest),\n ( \"%s/grub-0.97-x86.efi\" % content_server, \"%s/grub-x86.efi\" % dest),\n ( \"%s/grub-0.97-x86_64.efi\" % content_server, \"%s/grub-x86_64.efi\" % dest),\n )\n", "issue": "'get-loader' needs syslinux updating.\nI've confirmed that the version of syslinux (3.61) currently in get-loader will not let you deploy vmware 5.1 on random machine. \n\nIt errors with \"fatal error: 10 (out of resources)\" when loading tools.t00.\n\nUsing the pxelinux.0 and menu.c32 binaries from the syslinux-3.86.tar.gz build on kernel.org fixes it, and lets it work. \n\n", "before_files": [{"content": "\"\"\"\nDownloads bootloader content for all arches for when the user doesn't want to supply their own.\n\nCopyright 2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport os\nimport urlgrabber\nimport clogger\n\nclass ContentDownloader:\n\n def __init__(self,config,logger=None):\n \"\"\"\n Constructor\n \"\"\"\n self.config = config\n self.settings = config.settings()\n if logger is None:\n logger = clogger.Logger()\n self.logger = logger\n\n\n def run(self,force=False):\n \"\"\"\n Download bootloader content for all of the latest bootloaders, since the user\n has chosen to not supply their own. You may ask \"why not get this from yum\", though\n Fedora has no IA64 repo, for instance, and we also want this to be able to work on Debian and\n further do not want folks to have to install a cross compiler. 
For those that don't like this approach\n they can still source their cross-arch bootloader content manually.\n \"\"\"\n\n content_server = \"http://www.cobblerd.org/loaders\"\n dest = \"/var/lib/cobbler/loaders\"\n\n files = (\n ( \"%s/README\" % content_server, \"%s/README\" % dest ),\n ( \"%s/COPYING.elilo\" % content_server, \"%s/COPYING.elilo\" % dest ),\n ( \"%s/COPYING.yaboot\" % content_server, \"%s/COPYING.yaboot\" % dest),\n ( \"%s/COPYING.syslinux\" % content_server, \"%s/COPYING.syslinux\" % dest),\n ( \"%s/elilo-3.8-ia64.efi\" % content_server, \"%s/elilo-ia64.efi\" % dest ),\n ( \"%s/yaboot-1.3.14-12\" % content_server, \"%s/yaboot\" % dest),\n ( \"%s/pxelinux.0-4.02\" % content_server, \"%s/pxelinux.0\" % dest),\n ( \"%s/menu.c32-4.02\" % content_server, \"%s/menu.c32\" % dest),\n ( \"%s/grub-0.97-x86.efi\" % content_server, \"%s/grub-x86.efi\" % dest),\n ( \"%s/grub-0.97-x86_64.efi\" % content_server, \"%s/grub-x86_64.efi\" % dest),\n )\n\n proxies = {}\n if os.environ.has_key(\"HTTP_PROXY\"):\n proxies['http'] = os.environ[\"HTTP_PROXY\"]\n\n if os.environ.has_key(\"HTTPS_PROXY\"):\n proxies['https'] = os.environ[\"HTTPS_PROXY\"]\n\n if os.environ.has_key(\"FTP_PROXY\"):\n proxies['ftp'] = os.environ[\"FTP_PROXY\"]\n\n if len(proxies) == 0:\n proxies = None\n\n for src,dst in files:\n if os.path.exists(dst) and not force:\n self.logger.info(\"path %s already exists, not overwriting existing content, use --force if you wish to update\" % dst)\n continue\n self.logger.info(\"downloading %s to %s\" % (src,dst))\n urlgrabber.grabber.urlgrab(src, filename=dst, proxies=proxies)\n\n return True\n\n", "path": "cobbler/action_dlcontent.py"}]} | 1,723 | 344 |
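The substance of the fix is just the version pins: the two download tuples swap the previously pinned syslinux binaries for the 3.86 builds the reporter confirmed working with VMware 5.1. Isolated from the rest of the tuple for clarity:

```python
content_server = "http://www.cobblerd.org/loaders"
dest = "/var/lib/cobbler/loaders"

# syslinux 3.86 binaries, reported working for VMware 5.1 deployments.
files = (
    ("%s/pxelinux.0-3.86" % content_server, "%s/pxelinux.0" % dest),
    ("%s/menu.c32-3.86" % content_server, "%s/menu.c32" % dest),
)
```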
gh_patches_debug_11180 | rasdani/github-patches | git_diff | scrapy__scrapy-4761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Have tests generate a certificate on-the-fly
Unless we want to be doing https://github.com/scrapy/scrapy/pull/4650 every year, we should look into making tests generate a fresh certificate at run time.
</issue>
<code>
[start of conftest.py]
1 from pathlib import Path
2
3 import pytest
4
5
6 def _py_files(folder):
7 return (str(p) for p in Path(folder).rglob('*.py'))
8
9
10 collect_ignore = [
11 # not a test, but looks like a test
12 "scrapy/utils/testsite.py",
13 # contains scripts to be run by tests/test_crawler.py::CrawlerProcessSubprocess
14 *_py_files("tests/CrawlerProcess"),
15 # contains scripts to be run by tests/test_crawler.py::CrawlerRunnerSubprocess
16 *_py_files("tests/CrawlerRunner"),
17 # Py36-only parts of respective tests
18 *_py_files("tests/py36"),
19 ]
20
21 for line in open('tests/ignores.txt'):
22 file_path = line.strip()
23 if file_path and file_path[0] != '#':
24 collect_ignore.append(file_path)
25
26
27 @pytest.fixture()
28 def chdir(tmpdir):
29 """Change to pytest-provided temporary directory"""
30 tmpdir.chdir()
31
32
33 def pytest_collection_modifyitems(session, config, items):
34 # Avoid executing tests when executing `--flake8` flag (pytest-flake8)
35 try:
36 from pytest_flake8 import Flake8Item
37 if config.getoption('--flake8'):
38 items[:] = [item for item in items if isinstance(item, Flake8Item)]
39 except ImportError:
40 pass
41
42
43 @pytest.fixture(scope='class')
44 def reactor_pytest(request):
45 if not request.cls:
46 # doctests
47 return
48 request.cls.reactor_pytest = request.config.getoption("--reactor")
49 return request.cls.reactor_pytest
50
51
52 @pytest.fixture(autouse=True)
53 def only_asyncio(request, reactor_pytest):
54 if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':
55 pytest.skip('This test is only run with --reactor=asyncio')
56
[end of conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -2,6 +2,8 @@
import pytest
+from tests.keys import generate_keys
+
def _py_files(folder):
return (str(p) for p in Path(folder).rglob('*.py'))
@@ -53,3 +55,7 @@
def only_asyncio(request, reactor_pytest):
if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':
pytest.skip('This test is only run with --reactor=asyncio')
+
+
+# Generate localhost certificate files, needed by some tests
+generate_keys()
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -2,6 +2,8 @@\n \n import pytest\n \n+from tests.keys import generate_keys\n+\n \n def _py_files(folder):\n return (str(p) for p in Path(folder).rglob('*.py'))\n@@ -53,3 +55,7 @@\n def only_asyncio(request, reactor_pytest):\n if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':\n pytest.skip('This test is only run with --reactor=asyncio')\n+\n+\n+# Generate localhost certificate files, needed by some tests\n+generate_keys()\n", "issue": "Have tests generate a certificate on-the-fly\nUnless we want to be doing https://github.com/scrapy/scrapy/pull/4650 every year, we should look into making tests generate a fresh certificate at run time.\n", "before_files": [{"content": "from pathlib import Path\n\nimport pytest\n\n\ndef _py_files(folder):\n return (str(p) for p in Path(folder).rglob('*.py'))\n\n\ncollect_ignore = [\n # not a test, but looks like a test\n \"scrapy/utils/testsite.py\",\n # contains scripts to be run by tests/test_crawler.py::CrawlerProcessSubprocess\n *_py_files(\"tests/CrawlerProcess\"),\n # contains scripts to be run by tests/test_crawler.py::CrawlerRunnerSubprocess\n *_py_files(\"tests/CrawlerRunner\"),\n # Py36-only parts of respective tests\n *_py_files(\"tests/py36\"),\n]\n\nfor line in open('tests/ignores.txt'):\n file_path = line.strip()\n if file_path and file_path[0] != '#':\n collect_ignore.append(file_path)\n\n\[email protected]()\ndef chdir(tmpdir):\n \"\"\"Change to pytest-provided temporary directory\"\"\"\n tmpdir.chdir()\n\n\ndef pytest_collection_modifyitems(session, config, items):\n # Avoid executing tests when executing `--flake8` flag (pytest-flake8)\n try:\n from pytest_flake8 import Flake8Item\n if config.getoption('--flake8'):\n items[:] = [item for item in items if isinstance(item, Flake8Item)]\n except ImportError:\n pass\n\n\[email protected](scope='class')\ndef reactor_pytest(request):\n if not request.cls:\n # doctests\n return\n request.cls.reactor_pytest = request.config.getoption(\"--reactor\")\n return request.cls.reactor_pytest\n\n\[email protected](autouse=True)\ndef only_asyncio(request, reactor_pytest):\n if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':\n pytest.skip('This test is only run with --reactor=asyncio')\n", "path": "conftest.py"}]} | 1,088 | 155 |
gh_patches_debug_25712 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-829 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add field for tribe locality on Tribal Goverment Question Page
### Story
As a Registrant Applicant, I want a way to specify the locality of my tribe so that I can give analysts more specific information to complete their review of my request.

### Acceptance Criteria
- [x] Content is drafted for the question
- [x] Content is approved
- [x] Design prototype demonstrates the look and feel for approval
- [x] Implement the field in the registrar: a new field is added to the page that allows the user to specify the locality of their tribe
</issue>
<code>
[start of src/registrar/templatetags/custom_filters.py]
1 from django import template
2 import re
3
4 register = template.Library()
5
6
7 @register.filter(name="extract_value")
8 def extract_value(html_input):
9 match = re.search(r'value="([^"]*)"', html_input)
10 if match:
11 return match.group(1)
12 return ""
13
14
15 @register.filter
16 def extract_a_text(value):
17 # Use regex to extract the text within the <a> tag
18 pattern = r"<a\b[^>]*>(.*?)</a>"
19 match = re.search(pattern, value)
20 if match:
21 extracted_text = match.group(1)
22 else:
23 extracted_text = ""
24
25 return extracted_text
26
[end of src/registrar/templatetags/custom_filters.py]
[start of src/registrar/views/utility/mixins.py]
1 """Permissions-related mixin classes."""
2
3 from django.contrib.auth.mixins import PermissionRequiredMixin
4
5 from registrar.models import UserDomainRole, DomainApplication, DomainInvitation
6
7
8 class PermissionsLoginMixin(PermissionRequiredMixin):
9
10 """Mixin that redirects to login page if not logged in, otherwise 403."""
11
12 def handle_no_permission(self):
13 self.raise_exception = self.request.user.is_authenticated
14 return super().handle_no_permission()
15
16
17 class DomainPermission(PermissionsLoginMixin):
18
19 """Does the logged-in user have access to this domain?"""
20
21 def has_permission(self):
22 """Check if this user has access to this domain.
23
24 The user is in self.request.user and the domain needs to be looked
25 up from the domain's primary key in self.kwargs["pk"]
26 """
27 if not self.request.user.is_authenticated:
28 return False
29
30 # user needs to have a role on the domain
31 if not UserDomainRole.objects.filter(
32 user=self.request.user, domain__id=self.kwargs["pk"]
33 ).exists():
34 return False
35
36 # if we need to check more about the nature of role, do it here.
37 return True
38
39
40 class DomainApplicationPermission(PermissionsLoginMixin):
41
42 """Does the logged-in user have access to this domain application?"""
43
44 def has_permission(self):
45 """Check if this user has access to this domain application.
46
47 The user is in self.request.user and the domain needs to be looked
48 up from the domain's primary key in self.kwargs["pk"]
49 """
50 if not self.request.user.is_authenticated:
51 return False
52
53 # user needs to be the creator of the application
54 # this query is empty if there isn't a domain application with this
55 # id and this user as creator
56 if not DomainApplication.objects.filter(
57 creator=self.request.user, id=self.kwargs["pk"]
58 ).exists():
59 return False
60
61 return True
62
63
64 class DomainInvitationPermission(PermissionsLoginMixin):
65
66 """Does the logged-in user have access to this domain invitation?
67
68 A user has access to a domain invitation if they have a role on the
69 associated domain.
70 """
71
72 def has_permission(self):
73 """Check if this user has a role on the domain of this invitation."""
74 if not self.request.user.is_authenticated:
75 return False
76
77 if not DomainInvitation.objects.filter(
78 id=self.kwargs["pk"], domain__permissions__user=self.request.user
79 ).exists():
80 return False
81
82 return True
83
[end of src/registrar/views/utility/mixins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py
--- a/src/registrar/templatetags/custom_filters.py
+++ b/src/registrar/templatetags/custom_filters.py
@@ -23,3 +23,20 @@
extracted_text = ""
return extracted_text
+
+
[email protected]
+def find_index(haystack, needle):
+ try:
+ return haystack.index(needle)
+ except ValueError:
+ return -1
+
+
[email protected]
+def slice_after(value, substring):
+ index = value.find(substring)
+ if index != -1:
+ result = value[index + len(substring) :]
+ return result
+ return value
diff --git a/src/registrar/views/utility/mixins.py b/src/registrar/views/utility/mixins.py
--- a/src/registrar/views/utility/mixins.py
+++ b/src/registrar/views/utility/mixins.py
@@ -24,6 +24,12 @@
The user is in self.request.user and the domain needs to be looked
up from the domain's primary key in self.kwargs["pk"]
"""
+
+ # ticket 806
+ # if self.request.user is staff or admin and
+ # domain.application__status = 'approved' or 'rejected' or 'action needed'
+ # return True
+
if not self.request.user.is_authenticated:
return False
@@ -33,6 +39,10 @@
).exists():
return False
+ # ticket 796
+ # if domain.application__status != 'approved'
+ # return false
+
# if we need to check more about the nature of role, do it here.
return True
| {"golden_diff": "diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py\n--- a/src/registrar/templatetags/custom_filters.py\n+++ b/src/registrar/templatetags/custom_filters.py\n@@ -23,3 +23,20 @@\n extracted_text = \"\"\n \n return extracted_text\n+\n+\[email protected]\n+def find_index(haystack, needle):\n+ try:\n+ return haystack.index(needle)\n+ except ValueError:\n+ return -1\n+\n+\[email protected]\n+def slice_after(value, substring):\n+ index = value.find(substring)\n+ if index != -1:\n+ result = value[index + len(substring) :]\n+ return result\n+ return value\ndiff --git a/src/registrar/views/utility/mixins.py b/src/registrar/views/utility/mixins.py\n--- a/src/registrar/views/utility/mixins.py\n+++ b/src/registrar/views/utility/mixins.py\n@@ -24,6 +24,12 @@\n The user is in self.request.user and the domain needs to be looked\n up from the domain's primary key in self.kwargs[\"pk\"]\n \"\"\"\n+\n+ # ticket 806\n+ # if self.request.user is staff or admin and\n+ # domain.application__status = 'approved' or 'rejected' or 'action needed'\n+ # return True\n+\n if not self.request.user.is_authenticated:\n return False\n \n@@ -33,6 +39,10 @@\n ).exists():\n return False\n \n+ # ticket 796\n+ # if domain.application__status != 'approved'\n+ # return false\n+\n # if we need to check more about the nature of role, do it here.\n return True\n", "issue": "Add field for tribe locality on Tribal Goverment Question Page \n### Story\r\n\r\nAs a Registrant Applicant I want a way to specify the locality of my tribe so that I can give analysts more specific information to complete their review of my request\r\n\r\n\r\n\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] Content is drafted for the question\r\n- [x] Content is approved\r\n- [x] Design prototype demonstrates the look and feel for approval\r\n- [x] Implement the field in the registrar\r\n\r\nA new field is added to the page that allows the user to specify the locality of their tribe\r\n\r\n### Additional Context\r\n\r\n_No response_\r\n\r\n### Issue Links\r\n\r\n_No response_\n", "before_files": [{"content": "from django import template\nimport re\n\nregister = template.Library()\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n", "path": "src/registrar/templatetags/custom_filters.py"}, {"content": "\"\"\"Permissions-related mixin classes.\"\"\"\n\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\n\nfrom registrar.models import UserDomainRole, DomainApplication, DomainInvitation\n\n\nclass PermissionsLoginMixin(PermissionRequiredMixin):\n\n \"\"\"Mixin that redirects to login page if not logged in, otherwise 403.\"\"\"\n\n def handle_no_permission(self):\n self.raise_exception = self.request.user.is_authenticated\n return super().handle_no_permission()\n\n\nclass DomainPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have access to this domain?\"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has access to this domain.\n\n The user is in self.request.user and the domain needs to be looked\n up from the domain's primary key in 
self.kwargs[\"pk\"]\n \"\"\"\n if not self.request.user.is_authenticated:\n return False\n\n # user needs to have a role on the domain\n if not UserDomainRole.objects.filter(\n user=self.request.user, domain__id=self.kwargs[\"pk\"]\n ).exists():\n return False\n\n # if we need to check more about the nature of role, do it here.\n return True\n\n\nclass DomainApplicationPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have access to this domain application?\"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has access to this domain application.\n\n The user is in self.request.user and the domain needs to be looked\n up from the domain's primary key in self.kwargs[\"pk\"]\n \"\"\"\n if not self.request.user.is_authenticated:\n return False\n\n # user needs to be the creator of the application\n # this query is empty if there isn't a domain application with this\n # id and this user as creator\n if not DomainApplication.objects.filter(\n creator=self.request.user, id=self.kwargs[\"pk\"]\n ).exists():\n return False\n\n return True\n\n\nclass DomainInvitationPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have access to this domain invitation?\n\n A user has access to a domain invitation if they have a role on the\n associated domain.\n \"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has a role on the domain of this invitation.\"\"\"\n if not self.request.user.is_authenticated:\n return False\n\n if not DomainInvitation.objects.filter(\n id=self.kwargs[\"pk\"], domain__permissions__user=self.request.user\n ).exists():\n return False\n\n return True\n", "path": "src/registrar/views/utility/mixins.py"}]} | 1,639 | 404 |
gh_patches_debug_12955 | rasdani/github-patches | git_diff | sopel-irc__sopel-2052 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
find_updates: Unexpected error (local variable 'info' referenced before assignment)
### Description
An error is logged; errors shouldn't happen.
### Reproduction steps
Seen in the log channel; exact trigger unknown. It presumably appears after running the bot for long enough.
### Expected behavior
No error
### Logs
```
[2021-03-31 22:00:24,531] sopel.tools.jobs ERROR - Error while processing job: local variable 'info' referenced before assignment
[2021-03-31 22:00:24,538] sopel.bot ERROR - Unexpected error (local variable 'info' referenced before assignment)
Traceback (most recent call last):
File "/srv/sopelbots/devvenv/lib/python3.7/site-packages/sopel/tools/jobs.py", line 191, in _call
job.execute(self.manager)
File "/srv/sopelbots/devvenv/lib/python3.7/site-packages/sopel/tools/jobs.py", line 463, in execute
return self._handler(manager)
File "/srv/sopelbots/devvenv/lib/python3.7/site-packages/sopel/modules/find_updates.py", line 86, in check_version
latest = info['version']
UnboundLocalError: local variable 'info' referenced before assignment
```
### Environment
- Sopel `.version`: https://github.com/sopel-irc/sopel/commit/96c55aff852bf40bca56de49b2bc30378bf1c819
- Sopel installed via: pip/wheel
- Python version: 3.7.3
- Operating system: Debian 10.9
- IRCd `/version`: freenode
- Relevant plugins: find_updates
</issue>
<code>
[start of sopel/modules/find_updates.py]
1 # coding=utf-8
2 """
3 find_updates.py - Sopel Update Check Plugin
4 This is separated from version.py, so that it can be easily overridden by
5 distribution packagers, and they can check their repositories rather than the
6 Sopel website.
7 Copyright 2014, Elsie Powell, embolalia.com
8 Licensed under the Eiffel Forum License 2.
9
10 https://sopel.chat
11 """
12 from __future__ import absolute_import, division, print_function, unicode_literals
13
14 import requests
15
16 from sopel import (
17 __version__ as current_version,
18 _version_info,
19 plugin,
20 tools,
21 version_info,
22 )
23
24
25 wait_time = 24 * 60 * 60 # check once per day
26 version_url = 'https://sopel.chat/latest.json'
27 stable_message = (
28 'A new Sopel version, {}, is available; I am running {}. Please update '
29 'me. Full release notes at {}'
30 )
31 unstable_message = (
32 'A new pre-release version, {}, is available; I am running {}. Please '
33 'update me.{}'
34 )
35
36
37 @plugin.event(tools.events.RPL_LUSERCLIENT)
38 def startup_version_check(bot, trigger):
39 if not bot.memory.get('update_startup_check_run', False):
40 bot.memory['update_startup_check_run'] = True
41 check_version(bot)
42
43
44 def _check_succeeded(bot):
45 bot.memory['update_failures'] = 0
46
47
48 def _check_failed(bot):
49 bot.memory['update_failures'] = 1 + bot.memory.get('update_failures', 0)
50
51
52 @plugin.interval(wait_time)
53 def check_version(bot):
54 version = version_info
55 success = False
56
57 try:
58 r = requests.get(version_url, timeout=(5, 5))
59 except requests.exceptions.RequestException:
60 _check_failed(bot)
61 else:
62 success = True
63
64 try:
65 if success:
66 info = r.json()
67 except ValueError:
68 # TODO: use JSONDecodeError when dropping Pythons < 3.5
69 _check_failed(bot)
70
71 if not success and bot.memory.get('update_failures', 0) > 4:
72 bot.say(
73 "[update] I haven't been able to check for updates in a while. "
74 "Please verify that {} is working and I can reach it."
75 .format(version_url), bot.config.core.owner)
76 bot.say(
77 "[update] If this issue persists, please alert the Sopel dev team "
78 "in #sopel on freenode, or open a GitHub issue: "
79 "https://github.com/sopel-irc/sopel/issues",
80 bot.config.core.owner)
81 return
82
83 _check_succeeded(bot)
84
85 if version.releaselevel == 'final':
86 latest = info['version']
87 notes = info['release_notes']
88 message = stable_message
89 else:
90 latest = info['unstable']
91 notes = info.get('unstable_notes', '')
92 if notes:
93 notes = ' Full release notes at ' + notes
94 message = unstable_message
95 latest_version = _version_info(latest)
96
97 if version < latest_version:
98 msg = message.format(latest, current_version, notes)
99 bot.say('[update] ' + msg, bot.config.core.owner)
100
[end of sopel/modules/find_updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/find_updates.py b/sopel/modules/find_updates.py
--- a/sopel/modules/find_updates.py
+++ b/sopel/modules/find_updates.py
@@ -67,8 +67,14 @@
except ValueError:
# TODO: use JSONDecodeError when dropping Pythons < 3.5
_check_failed(bot)
+ success = False
- if not success and bot.memory.get('update_failures', 0) > 4:
+ if not success:
+ if bot.memory.get('update_failures', 0) <= 4:
+ # not enough failures to worry; silently ignore this one
+ return
+
+ # too many failures to ignore; notify owner
bot.say(
"[update] I haven't been able to check for updates in a while. "
"Please verify that {} is working and I can reach it."
| {"golden_diff": "diff --git a/sopel/modules/find_updates.py b/sopel/modules/find_updates.py\n--- a/sopel/modules/find_updates.py\n+++ b/sopel/modules/find_updates.py\n@@ -67,8 +67,14 @@\n except ValueError:\n # TODO: use JSONDecodeError when dropping Pythons < 3.5\n _check_failed(bot)\n+ success = False\n \n- if not success and bot.memory.get('update_failures', 0) > 4:\n+ if not success:\n+ if bot.memory.get('update_failures', 0) <= 4:\n+ # not enough failures to worry; silently ignore this one\n+ return\n+\n+ # too many failures to ignore; notify owner\n bot.say(\n \"[update] I haven't been able to check for updates in a while. \"\n \"Please verify that {} is working and I can reach it.\"\n", "issue": "find_updates: Unexpected error (local variable 'info' referenced before assignment)\n### Description\r\nAn error is logged, errors shouldn't happen.\r\n\r\n### Reproduction steps\r\nSeen in log channel, unknown, run the bot for long enough???\r\n\r\n### Expected behavior\r\nNo error\r\n\r\n### Logs\r\n```\r\n[2021-03-31 22:00:24,531] sopel.tools.jobs ERROR - Error while processing job: local variable 'info' referenced before assignment\r\n[2021-03-31 22:00:24,538] sopel.bot ERROR - Unexpected error (local variable 'info' referenced before assignment)\r\nTraceback (most recent call last):\r\n File \"/srv/sopelbots/devvenv/lib/python3.7/site-packages/sopel/tools/jobs.py\", line 191, in _call\r\n job.execute(self.manager)\r\n File \"/srv/sopelbots/devvenv/lib/python3.7/site-packages/sopel/tools/jobs.py\", line 463, in execute\r\n return self._handler(manager)\r\n File \"/srv/sopelbots/devvenv/lib/python3.7/site-packages/sopel/modules/find_updates.py\", line 86, in check_version\r\n latest = info['version']\r\nUnboundLocalError: local variable 'info' referenced before assignment\r\n\r\n```\r\n\r\n### Environment\r\n- Sopel `.version`: https://github.com/sopel-irc/sopel/commit/96c55aff852bf40bca56de49b2bc30378bf1c819\r\n- Sopel installed via: pip/wheel\r\n- Python version: 3.7.3\r\n- Operating system: Debian 10.9\r\n- IRCd `/version`: freenode\r\n- Relevant plugins: find_updates\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nfind_updates.py - Sopel Update Check Plugin\nThis is separated from version.py, so that it can be easily overridden by\ndistribution packagers, and they can check their repositories rather than the\nSopel website.\nCopyright 2014, Elsie Powell, embolalia.com\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport requests\n\nfrom sopel import (\n __version__ as current_version,\n _version_info,\n plugin,\n tools,\n version_info,\n)\n\n\nwait_time = 24 * 60 * 60 # check once per day\nversion_url = 'https://sopel.chat/latest.json'\nstable_message = (\n 'A new Sopel version, {}, is available; I am running {}. Please update '\n 'me. Full release notes at {}'\n)\nunstable_message = (\n 'A new pre-release version, {}, is available; I am running {}. 
Please '\n 'update me.{}'\n)\n\n\[email protected](tools.events.RPL_LUSERCLIENT)\ndef startup_version_check(bot, trigger):\n if not bot.memory.get('update_startup_check_run', False):\n bot.memory['update_startup_check_run'] = True\n check_version(bot)\n\n\ndef _check_succeeded(bot):\n bot.memory['update_failures'] = 0\n\n\ndef _check_failed(bot):\n bot.memory['update_failures'] = 1 + bot.memory.get('update_failures', 0)\n\n\[email protected](wait_time)\ndef check_version(bot):\n version = version_info\n success = False\n\n try:\n r = requests.get(version_url, timeout=(5, 5))\n except requests.exceptions.RequestException:\n _check_failed(bot)\n else:\n success = True\n\n try:\n if success:\n info = r.json()\n except ValueError:\n # TODO: use JSONDecodeError when dropping Pythons < 3.5\n _check_failed(bot)\n\n if not success and bot.memory.get('update_failures', 0) > 4:\n bot.say(\n \"[update] I haven't been able to check for updates in a while. \"\n \"Please verify that {} is working and I can reach it.\"\n .format(version_url), bot.config.core.owner)\n bot.say(\n \"[update] If this issue persists, please alert the Sopel dev team \"\n \"in #sopel on freenode, or open a GitHub issue: \"\n \"https://github.com/sopel-irc/sopel/issues\",\n bot.config.core.owner)\n return\n\n _check_succeeded(bot)\n\n if version.releaselevel == 'final':\n latest = info['version']\n notes = info['release_notes']\n message = stable_message\n else:\n latest = info['unstable']\n notes = info.get('unstable_notes', '')\n if notes:\n notes = ' Full release notes at ' + notes\n message = unstable_message\n latest_version = _version_info(latest)\n\n if version < latest_version:\n msg = message.format(latest, current_version, notes)\n bot.say('[update] ' + msg, bot.config.core.owner)\n", "path": "sopel/modules/find_updates.py"}]} | 1,856 | 201 |
gh_patches_debug_32345 | rasdani/github-patches | git_diff | optuna__optuna-1103 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`plot_intermediate_values` example code does not contain intermediate values.
The example code snippet embedded in the documentation for [`plot_intermediate_values`](https://github.com/optuna/optuna/blob/master/optuna/visualization/intermediate_values.py) doesn't call `Trial.report` and thus does not contain intermediate values. The example should be updated. See also https://github.com/optuna/optuna/pull/1003#issuecomment-599359742.
Current documentation with empty plot: https://optuna.readthedocs.io/en/latest/reference/visualization.html#optuna.visualization.plot_intermediate_values
</issue>
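For orientation before the code: an intermediate-values plot can only show data if the objective calls `trial.report()` during training. A minimal sketch of such an objective, mirroring the shape of the fix applied later in this row (the quadratic `f`, the `lr` search space, and the step count are illustrative choices taken from that fix, not requirements):
```
import optuna

def f(x):
    return (x - 2) ** 2

def objective(trial):
    lr = trial.suggest_loguniform("lr", 1e-5, 1e-1)
    x = 3
    for step in range(128):
        y = f(x)
        trial.report(y, step=step)        # record an intermediate value
        if trial.should_prune():          # let the pruner act on those values
            raise optuna.exceptions.TrialPruned()
        x -= (2 * x - 4) * lr             # gradient step on f
    return y

study = optuna.create_study()
study.optimize(objective, n_trials=16)
optuna.visualization.plot_intermediate_values(study)  # now has curves to draw
```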
<code>
[start of optuna/visualization/intermediate_values.py]
1 from optuna.logging import get_logger
2 from optuna.trial import TrialState
3 from optuna import type_checking
4 from optuna.visualization.utils import _check_plotly_availability
5 from optuna.visualization.utils import is_available
6
7 if type_checking.TYPE_CHECKING:
8 from optuna.study import Study # NOQA
9
10 if is_available():
11 from optuna.visualization.plotly_imports import go
12
13 logger = get_logger(__name__)
14
15
16 def plot_intermediate_values(study):
17 # type: (Study) -> go.Figure
18 """Plot intermediate values of all trials in a study.
19
20 Example:
21
22 The following code snippet shows how to plot intermediate values.
23
24 .. testcode::
25
26 import optuna
27
28 def objective(trial):
29 x = trial.suggest_uniform('x', -100, 100)
30 y = trial.suggest_categorical('y', [-1, 0, 1])
31 return x ** 2 + y
32
33 study = optuna.create_study()
34 study.optimize(objective, n_trials=10)
35
36 optuna.visualization.plot_intermediate_values(study)
37
38 .. raw:: html
39
40 <iframe src="../_static/plot_intermediate_values.html"
41 width="100%" height="500px" frameborder="0">
42 </iframe>
43
44 Args:
45 study:
46 A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate
47 values.
48
49 Returns:
50 A :class:`plotly.graph_objs.Figure` object.
51 """
52
53 _check_plotly_availability()
54 return _get_intermediate_plot(study)
55
56
57 def _get_intermediate_plot(study):
58 # type: (Study) -> go.Figure
59
60 layout = go.Layout(
61 title="Intermediate Values Plot",
62 xaxis={"title": "Step"},
63 yaxis={"title": "Intermediate Value"},
64 showlegend=False,
65 )
66
67 target_state = [TrialState.PRUNED, TrialState.COMPLETE, TrialState.RUNNING]
68 trials = [trial for trial in study.trials if trial.state in target_state]
69
70 if len(trials) == 0:
71 logger.warning("Study instance does not contain trials.")
72 return go.Figure(data=[], layout=layout)
73
74 traces = []
75 for trial in trials:
76 if trial.intermediate_values:
77 sorted_intermediate_values = sorted(trial.intermediate_values.items())
78 trace = go.Scatter(
79 x=tuple((x for x, _ in sorted_intermediate_values)),
80 y=tuple((y for _, y in sorted_intermediate_values)),
81 mode="lines+markers",
82 marker={"maxdisplayed": 10},
83 name="Trial{}".format(trial.number),
84 )
85 traces.append(trace)
86
87 if not traces:
88 logger.warning(
89 "You need to set up the pruning feature to utilize `plot_intermediate_values()`"
90 )
91 return go.Figure(data=[], layout=layout)
92
93 figure = go.Figure(data=traces, layout=layout)
94
95 return figure
96
[end of optuna/visualization/intermediate_values.py]
[start of docs/source/scripts/plot_intermediate_values.py]
1 import os
2
3 import plotly
4
5 import optuna
6
7
8 def objective(trial):
9 x = trial.suggest_uniform("x", -100, 100)
10 y = trial.suggest_categorical("y", [-1, 0, 1])
11 return x ** 2 + y
12
13
14 def main():
15 sampler = optuna.samplers.TPESampler(seed=10)
16 study = optuna.create_study(sampler=sampler)
17 study.optimize(objective, n_trials=10)
18
19 fig = optuna.visualization.plot_intermediate_values(study)
20 fig_html = plotly.offline.plot(fig, output_type="div", include_plotlyjs="cdn", auto_open=False)
21
22 fig_dir = "../plotly_figures"
23 os.makedirs(fig_dir, exist_ok=True)
24 with open(os.path.join(fig_dir, "plot_intermediate_values.html"), "w") as f:
25 f.write(fig_html)
26
27
28 if __name__ == "__main__":
29 main()
30
[end of docs/source/scripts/plot_intermediate_values.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/scripts/plot_intermediate_values.py b/docs/source/scripts/plot_intermediate_values.py
--- a/docs/source/scripts/plot_intermediate_values.py
+++ b/docs/source/scripts/plot_intermediate_values.py
@@ -5,16 +5,35 @@
import optuna
+def f(x):
+ return (x - 2) ** 2
+
+
+def df(x):
+ return 2 * x - 4
+
+
def objective(trial):
- x = trial.suggest_uniform("x", -100, 100)
- y = trial.suggest_categorical("y", [-1, 0, 1])
- return x ** 2 + y
+ lr = trial.suggest_loguniform("lr", 1e-5, 1e-1)
+
+ x = 3
+ for step in range(128):
+ y = f(x)
+
+ trial.report(y, step=step)
+ if trial.should_prune():
+ raise optuna.exceptions.TrialPruned()
+
+ gy = df(x)
+ x -= gy * lr
+
+ return y
def main():
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
- study.optimize(objective, n_trials=10)
+ study.optimize(objective, n_trials=16)
fig = optuna.visualization.plot_intermediate_values(study)
fig_html = plotly.offline.plot(fig, output_type="div", include_plotlyjs="cdn", auto_open=False)
diff --git a/optuna/visualization/intermediate_values.py b/optuna/visualization/intermediate_values.py
--- a/optuna/visualization/intermediate_values.py
+++ b/optuna/visualization/intermediate_values.py
@@ -25,13 +25,30 @@
import optuna
+ def f(x):
+ return (x - 2) ** 2
+
+ def df(x):
+ return 2 * x - 4
+
def objective(trial):
- x = trial.suggest_uniform('x', -100, 100)
- y = trial.suggest_categorical('y', [-1, 0, 1])
- return x ** 2 + y
+ lr = trial.suggest_loguniform("lr", 1e-5, 1e-1)
+
+ x = 3
+ for step in range(128):
+ y = f(x)
+
+ trial.report(y, step=step)
+ if trial.should_prune():
+ raise optuna.exceptions.TrialPruned()
+
+ gy = df(x)
+ x -= gy * lr
+
+ return y
study = optuna.create_study()
- study.optimize(objective, n_trials=10)
+ study.optimize(objective, n_trials=16)
optuna.visualization.plot_intermediate_values(study)
| {"golden_diff": "diff --git a/docs/source/scripts/plot_intermediate_values.py b/docs/source/scripts/plot_intermediate_values.py\n--- a/docs/source/scripts/plot_intermediate_values.py\n+++ b/docs/source/scripts/plot_intermediate_values.py\n@@ -5,16 +5,35 @@\n import optuna\n \n \n+def f(x):\n+ return (x - 2) ** 2\n+\n+\n+def df(x):\n+ return 2 * x - 4\n+\n+\n def objective(trial):\n- x = trial.suggest_uniform(\"x\", -100, 100)\n- y = trial.suggest_categorical(\"y\", [-1, 0, 1])\n- return x ** 2 + y\n+ lr = trial.suggest_loguniform(\"lr\", 1e-5, 1e-1)\n+\n+ x = 3\n+ for step in range(128):\n+ y = f(x)\n+\n+ trial.report(y, step=step)\n+ if trial.should_prune():\n+ raise optuna.exceptions.TrialPruned()\n+\n+ gy = df(x)\n+ x -= gy * lr\n+\n+ return y\n \n \n def main():\n sampler = optuna.samplers.TPESampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n- study.optimize(objective, n_trials=10)\n+ study.optimize(objective, n_trials=16)\n \n fig = optuna.visualization.plot_intermediate_values(study)\n fig_html = plotly.offline.plot(fig, output_type=\"div\", include_plotlyjs=\"cdn\", auto_open=False)\ndiff --git a/optuna/visualization/intermediate_values.py b/optuna/visualization/intermediate_values.py\n--- a/optuna/visualization/intermediate_values.py\n+++ b/optuna/visualization/intermediate_values.py\n@@ -25,13 +25,30 @@\n \n import optuna\n \n+ def f(x):\n+ return (x - 2) ** 2\n+\n+ def df(x):\n+ return 2 * x - 4\n+\n def objective(trial):\n- x = trial.suggest_uniform('x', -100, 100)\n- y = trial.suggest_categorical('y', [-1, 0, 1])\n- return x ** 2 + y\n+ lr = trial.suggest_loguniform(\"lr\", 1e-5, 1e-1)\n+\n+ x = 3\n+ for step in range(128):\n+ y = f(x)\n+\n+ trial.report(y, step=step)\n+ if trial.should_prune():\n+ raise optuna.exceptions.TrialPruned()\n+\n+ gy = df(x)\n+ x -= gy * lr\n+\n+ return y\n \n study = optuna.create_study()\n- study.optimize(objective, n_trials=10)\n+ study.optimize(objective, n_trials=16)\n \n optuna.visualization.plot_intermediate_values(study)\n", "issue": "`plot_intermediate_values` example code does not contain intermediate values.\nThe example code snippet embedded in the documentation for [`plot_intermediate_values`](https://github.com/optuna/optuna/blob/master/optuna/visualization/intermediate_values.py) doesn't call `Trial.report` and thus does not contain intermediate values. The example should be updated. See also https://github.com/optuna/optuna/pull/1003#issuecomment-599359742. \r\n\r\nCurrent documentation with empty plot: https://optuna.readthedocs.io/en/latest/reference/visualization.html#optuna.visualization.plot_intermediate_values\n", "before_files": [{"content": "from optuna.logging import get_logger\nfrom optuna.trial import TrialState\nfrom optuna import type_checking\nfrom optuna.visualization.utils import _check_plotly_availability\nfrom optuna.visualization.utils import is_available\n\nif type_checking.TYPE_CHECKING:\n from optuna.study import Study # NOQA\n\nif is_available():\n from optuna.visualization.plotly_imports import go\n\nlogger = get_logger(__name__)\n\n\ndef plot_intermediate_values(study):\n # type: (Study) -> go.Figure\n \"\"\"Plot intermediate values of all trials in a study.\n\n Example:\n\n The following code snippet shows how to plot intermediate values.\n\n .. 
testcode::\n\n import optuna\n\n def objective(trial):\n x = trial.suggest_uniform('x', -100, 100)\n y = trial.suggest_categorical('y', [-1, 0, 1])\n return x ** 2 + y\n\n study = optuna.create_study()\n study.optimize(objective, n_trials=10)\n\n optuna.visualization.plot_intermediate_values(study)\n\n .. raw:: html\n\n <iframe src=\"../_static/plot_intermediate_values.html\"\n width=\"100%\" height=\"500px\" frameborder=\"0\">\n </iframe>\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate\n values.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _check_plotly_availability()\n return _get_intermediate_plot(study)\n\n\ndef _get_intermediate_plot(study):\n # type: (Study) -> go.Figure\n\n layout = go.Layout(\n title=\"Intermediate Values Plot\",\n xaxis={\"title\": \"Step\"},\n yaxis={\"title\": \"Intermediate Value\"},\n showlegend=False,\n )\n\n target_state = [TrialState.PRUNED, TrialState.COMPLETE, TrialState.RUNNING]\n trials = [trial for trial in study.trials if trial.state in target_state]\n\n if len(trials) == 0:\n logger.warning(\"Study instance does not contain trials.\")\n return go.Figure(data=[], layout=layout)\n\n traces = []\n for trial in trials:\n if trial.intermediate_values:\n sorted_intermediate_values = sorted(trial.intermediate_values.items())\n trace = go.Scatter(\n x=tuple((x for x, _ in sorted_intermediate_values)),\n y=tuple((y for _, y in sorted_intermediate_values)),\n mode=\"lines+markers\",\n marker={\"maxdisplayed\": 10},\n name=\"Trial{}\".format(trial.number),\n )\n traces.append(trace)\n\n if not traces:\n logger.warning(\n \"You need to set up the pruning feature to utilize `plot_intermediate_values()`\"\n )\n return go.Figure(data=[], layout=layout)\n\n figure = go.Figure(data=traces, layout=layout)\n\n return figure\n", "path": "optuna/visualization/intermediate_values.py"}, {"content": "import os\n\nimport plotly\n\nimport optuna\n\n\ndef objective(trial):\n x = trial.suggest_uniform(\"x\", -100, 100)\n y = trial.suggest_categorical(\"y\", [-1, 0, 1])\n return x ** 2 + y\n\n\ndef main():\n sampler = optuna.samplers.TPESampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=10)\n\n fig = optuna.visualization.plot_intermediate_values(study)\n fig_html = plotly.offline.plot(fig, output_type=\"div\", include_plotlyjs=\"cdn\", auto_open=False)\n\n fig_dir = \"../plotly_figures\"\n os.makedirs(fig_dir, exist_ok=True)\n with open(os.path.join(fig_dir, \"plot_intermediate_values.html\"), \"w\") as f:\n f.write(fig_html)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "docs/source/scripts/plot_intermediate_values.py"}]} | 1,811 | 672 |
gh_patches_debug_31256 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-3529 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ModuleNotFoundError: No module named 'tools'
I would like to test the result of training, so I ran the following:
(base) zhangshen@zhangshen-X550JX:~/mmdetection$ python tools/test.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --out./result/result_100/pkl --eval bbox
but I got:
Traceback (most recent call last):
File "tools/test.py", line 9, in <module>
from tools.fuse_conv_bn import fuse_module
ModuleNotFoundError: No module named 'tools'
How can I solve this problem?
</issue>
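The error occurs because `python tools/test.py` runs the file as a top-level script: Python puts `tools/` itself on `sys.path`, so the package name `tools` is not importable. Two common workarounds are running the script as a module from the repository root (`python -m tools.test ...`) or making the root importable before the failing import executes. The snippet below sketches the second option; it assumes it sits at the top of `tools/test.py` and is not code from the repository (the row's eventual patch instead deletes `tools/fuse_conv_bn.py` entirely):
```
import os
import sys

# Make the repository root (the directory that contains `tools/`) importable,
# so `from tools.fuse_conv_bn import fuse_module` can resolve.
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, repo_root)

from tools.fuse_conv_bn import fuse_module  # noqa: E402
```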
<code>
[start of tools/fuse_conv_bn.py]
1 import argparse
2
3 import torch
4 import torch.nn as nn
5 from mmcv.runner import save_checkpoint
6
7 from mmdet.apis import init_detector
8
9
10 def fuse_conv_bn(conv, bn):
11 """During inference, the functionary of batch norm layers is turned off but
12 only the mean and var alone channels are used, which exposes the chance to
13 fuse it with the preceding conv layers to save computations and simplify
14 network structures."""
15 conv_w = conv.weight
16 conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
17 bn.running_mean)
18
19 factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
20 conv.weight = nn.Parameter(conv_w *
21 factor.reshape([conv.out_channels, 1, 1, 1]))
22 conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
23 return conv
24
25
26 def fuse_module(m):
27 last_conv = None
28 last_conv_name = None
29
30 for name, child in m.named_children():
31 if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
32 if last_conv is None: # only fuse BN that is after Conv
33 continue
34 fused_conv = fuse_conv_bn(last_conv, child)
35 m._modules[last_conv_name] = fused_conv
36 # To reduce changes, set BN as Identity instead of deleting it.
37 m._modules[name] = nn.Identity()
38 last_conv = None
39 elif isinstance(child, nn.Conv2d):
40 last_conv = child
41 last_conv_name = name
42 else:
43 fuse_module(child)
44 return m
45
46
47 def parse_args():
48 parser = argparse.ArgumentParser(
49 description='fuse Conv and BN layers in a model')
50 parser.add_argument('config', help='config file path')
51 parser.add_argument('checkpoint', help='checkpoint file path')
52 parser.add_argument('out', help='output path of the converted model')
53 args = parser.parse_args()
54 return args
55
56
57 def main():
58 args = parse_args()
59 # build the model from a config file and a checkpoint file
60 model = init_detector(args.config, args.checkpoint)
61 # fuse conv and bn layers of the model
62 fused_model = fuse_module(model)
63 save_checkpoint(fused_model, args.out)
64
65
66 if __name__ == '__main__':
67 main()
68
[end of tools/fuse_conv_bn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/fuse_conv_bn.py b/tools/fuse_conv_bn.py
deleted file mode 100644
--- a/tools/fuse_conv_bn.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import argparse
-
-import torch
-import torch.nn as nn
-from mmcv.runner import save_checkpoint
-
-from mmdet.apis import init_detector
-
-
-def fuse_conv_bn(conv, bn):
- """During inference, the functionary of batch norm layers is turned off but
- only the mean and var alone channels are used, which exposes the chance to
- fuse it with the preceding conv layers to save computations and simplify
- network structures."""
- conv_w = conv.weight
- conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
- bn.running_mean)
-
- factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
- conv.weight = nn.Parameter(conv_w *
- factor.reshape([conv.out_channels, 1, 1, 1]))
- conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
- return conv
-
-
-def fuse_module(m):
- last_conv = None
- last_conv_name = None
-
- for name, child in m.named_children():
- if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
- if last_conv is None: # only fuse BN that is after Conv
- continue
- fused_conv = fuse_conv_bn(last_conv, child)
- m._modules[last_conv_name] = fused_conv
- # To reduce changes, set BN as Identity instead of deleting it.
- m._modules[name] = nn.Identity()
- last_conv = None
- elif isinstance(child, nn.Conv2d):
- last_conv = child
- last_conv_name = name
- else:
- fuse_module(child)
- return m
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='fuse Conv and BN layers in a model')
- parser.add_argument('config', help='config file path')
- parser.add_argument('checkpoint', help='checkpoint file path')
- parser.add_argument('out', help='output path of the converted model')
- args = parser.parse_args()
- return args
-
-
-def main():
- args = parse_args()
- # build the model from a config file and a checkpoint file
- model = init_detector(args.config, args.checkpoint)
- # fuse conv and bn layers of the model
- fused_model = fuse_module(model)
- save_checkpoint(fused_model, args.out)
-
-
-if __name__ == '__main__':
- main()
| {"golden_diff": "diff --git a/tools/fuse_conv_bn.py b/tools/fuse_conv_bn.py\ndeleted file mode 100644\n--- a/tools/fuse_conv_bn.py\n+++ /dev/null\n@@ -1,67 +0,0 @@\n-import argparse\n-\n-import torch\n-import torch.nn as nn\n-from mmcv.runner import save_checkpoint\n-\n-from mmdet.apis import init_detector\n-\n-\n-def fuse_conv_bn(conv, bn):\n- \"\"\"During inference, the functionary of batch norm layers is turned off but\n- only the mean and var alone channels are used, which exposes the chance to\n- fuse it with the preceding conv layers to save computations and simplify\n- network structures.\"\"\"\n- conv_w = conv.weight\n- conv_b = conv.bias if conv.bias is not None else torch.zeros_like(\n- bn.running_mean)\n-\n- factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)\n- conv.weight = nn.Parameter(conv_w *\n- factor.reshape([conv.out_channels, 1, 1, 1]))\n- conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)\n- return conv\n-\n-\n-def fuse_module(m):\n- last_conv = None\n- last_conv_name = None\n-\n- for name, child in m.named_children():\n- if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):\n- if last_conv is None: # only fuse BN that is after Conv\n- continue\n- fused_conv = fuse_conv_bn(last_conv, child)\n- m._modules[last_conv_name] = fused_conv\n- # To reduce changes, set BN as Identity instead of deleting it.\n- m._modules[name] = nn.Identity()\n- last_conv = None\n- elif isinstance(child, nn.Conv2d):\n- last_conv = child\n- last_conv_name = name\n- else:\n- fuse_module(child)\n- return m\n-\n-\n-def parse_args():\n- parser = argparse.ArgumentParser(\n- description='fuse Conv and BN layers in a model')\n- parser.add_argument('config', help='config file path')\n- parser.add_argument('checkpoint', help='checkpoint file path')\n- parser.add_argument('out', help='output path of the converted model')\n- args = parser.parse_args()\n- return args\n-\n-\n-def main():\n- args = parse_args()\n- # build the model from a config file and a checkpoint file\n- model = init_detector(args.config, args.checkpoint)\n- # fuse conv and bn layers of the model\n- fused_model = fuse_module(model)\n- save_checkpoint(fused_model, args.out)\n-\n-\n-if __name__ == '__main__':\n- main()\n", "issue": "ModuleNotFoundError: No module named 'tools'\n i would like to test the result of training, so i write the next:\r\n(base) zhangshen@zhangshen-X550JX:~/mmdetection$ python tools/test.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --out./result/result_100/pkl --eval bbox\r\n\r\nbut i got:\r\nTraceback (most recent call last):\r\n File \"tools/test.py\", line 9, in <module>\r\n from tools.fuse_conv_bn import fuse_module\r\nModuleNotFoundError: No module named 'tools'\r\n\r\nhow can i solve this problem?\n", "before_files": [{"content": "import argparse\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import save_checkpoint\n\nfrom mmdet.apis import init_detector\n\n\ndef fuse_conv_bn(conv, bn):\n \"\"\"During inference, the functionary of batch norm layers is turned off but\n only the mean and var alone channels are used, which exposes the chance to\n fuse it with the preceding conv layers to save computations and simplify\n network structures.\"\"\"\n conv_w = conv.weight\n conv_b = conv.bias if conv.bias is not None else torch.zeros_like(\n bn.running_mean)\n\n factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)\n conv.weight = nn.Parameter(conv_w *\n factor.reshape([conv.out_channels, 1, 1, 1]))\n 
conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)\n return conv\n\n\ndef fuse_module(m):\n last_conv = None\n last_conv_name = None\n\n for name, child in m.named_children():\n if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):\n if last_conv is None: # only fuse BN that is after Conv\n continue\n fused_conv = fuse_conv_bn(last_conv, child)\n m._modules[last_conv_name] = fused_conv\n # To reduce changes, set BN as Identity instead of deleting it.\n m._modules[name] = nn.Identity()\n last_conv = None\n elif isinstance(child, nn.Conv2d):\n last_conv = child\n last_conv_name = name\n else:\n fuse_module(child)\n return m\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='fuse Conv and BN layers in a model')\n parser.add_argument('config', help='config file path')\n parser.add_argument('checkpoint', help='checkpoint file path')\n parser.add_argument('out', help='output path of the converted model')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n # build the model from a config file and a checkpoint file\n model = init_detector(args.config, args.checkpoint)\n # fuse conv and bn layers of the model\n fused_model = fuse_module(model)\n save_checkpoint(fused_model, args.out)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/fuse_conv_bn.py"}]} | 1,343 | 601 |
gh_patches_debug_59179 | rasdani/github-patches | git_diff | TheAlgorithms__Python-1943 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Same name for an attribute and a function
Hi, I'm new to programming and I'm not sure if it's a problem, but the code
(Python/data_structures/queue/queue_on_list.py) has the same name for an attribute and a function.
```
class Queue:
def __init__(self):
self.entries = []
self.length = 0
self.front = 0
def front(self):
return self.entries[0]
```
When executed it gives me the error:
TypeError: 'int' object is not callable
</issue>
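The reporter's diagnosis is right: `__init__` binds the integer attribute `self.front`, which shadows the `front` method on instances, so `q.front()` attempts to call an `int`. A condensed repro plus the rename that resolves it (the patch in this row uses the name `get_front`):
```
class Queue:
    def __init__(self):
        self.entries = []
        self.front = 0        # instance attribute shadows the method below

    def front(self):
        return self.entries[0]

q = Queue()
# q.front()                   # TypeError: 'int' object is not callable

class FixedQueue(Queue):
    def get_front(self):      # no name collision with the attribute
        return self.entries[0]

fq = FixedQueue()
fq.entries.append("a")
assert fq.get_front() == "a"
```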
<code>
[start of data_structures/queue/queue_on_list.py]
1 """Queue represented by a Python list"""
2
3
4 class Queue:
5 def __init__(self):
6 self.entries = []
7 self.length = 0
8 self.front = 0
9
10 def __str__(self):
11 printed = "<" + str(self.entries)[1:-1] + ">"
12 return printed
13
14 """Enqueues {@code item}
15 @param item
16 item to enqueue"""
17
18 def put(self, item):
19 self.entries.append(item)
20 self.length = self.length + 1
21
22 """Dequeues {@code item}
23 @requirement: |self.length| > 0
24 @return dequeued
25 item that was dequeued"""
26
27 def get(self):
28 self.length = self.length - 1
29 dequeued = self.entries[self.front]
30 # self.front-=1
31 # self.entries = self.entries[self.front:]
32 self.entries = self.entries[1:]
33 return dequeued
34
35 """Rotates the queue {@code rotation} times
36 @param rotation
37 number of times to rotate queue"""
38
39 def rotate(self, rotation):
40 for i in range(rotation):
41 self.put(self.get())
42
43 """Enqueues {@code item}
44 @return item at front of self.entries"""
45
46 def front(self):
47 return self.entries[0]
48
49 """Returns the length of this.entries"""
50
51 def size(self):
52 return self.length
53
[end of data_structures/queue/queue_on_list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py
--- a/data_structures/queue/queue_on_list.py
+++ b/data_structures/queue/queue_on_list.py
@@ -43,7 +43,7 @@
"""Enqueues {@code item}
@return item at front of self.entries"""
- def front(self):
+ def get_front(self):
return self.entries[0]
"""Returns the length of this.entries"""
| {"golden_diff": "diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py\n--- a/data_structures/queue/queue_on_list.py\n+++ b/data_structures/queue/queue_on_list.py\n@@ -43,7 +43,7 @@\n \"\"\"Enqueues {@code item}\r\n @return item at front of self.entries\"\"\"\r\n \r\n- def front(self):\r\n+ def get_front(self):\r\n return self.entries[0]\r\n \r\n \"\"\"Returns the length of this.entries\"\"\"\n", "issue": "Same name for an attribute and a function\nHi, I'm new to programming and I'm not sure if it's a problem, but the code(\r\nPython/data_structures/queue/queue_on_list.py) have the same name for an attribute and a function.\r\n```\r\nclass Queue:\r\n def __init__(self):\r\n self.entries = []\r\n self.length = 0\r\n self.front = 0\r\n\r\n def front(self):\r\n return self.entries[0]\r\n```\r\nWhen executed it gives me the error:\r\nTypeError: 'int' object is not callable\n", "before_files": [{"content": "\"\"\"Queue represented by a Python list\"\"\"\r\n\r\n\r\nclass Queue:\r\n def __init__(self):\r\n self.entries = []\r\n self.length = 0\r\n self.front = 0\r\n\r\n def __str__(self):\r\n printed = \"<\" + str(self.entries)[1:-1] + \">\"\r\n return printed\r\n\r\n \"\"\"Enqueues {@code item}\r\n @param item\r\n item to enqueue\"\"\"\r\n\r\n def put(self, item):\r\n self.entries.append(item)\r\n self.length = self.length + 1\r\n\r\n \"\"\"Dequeues {@code item}\r\n @requirement: |self.length| > 0\r\n @return dequeued\r\n item that was dequeued\"\"\"\r\n\r\n def get(self):\r\n self.length = self.length - 1\r\n dequeued = self.entries[self.front]\r\n # self.front-=1\r\n # self.entries = self.entries[self.front:]\r\n self.entries = self.entries[1:]\r\n return dequeued\r\n\r\n \"\"\"Rotates the queue {@code rotation} times\r\n @param rotation\r\n number of times to rotate queue\"\"\"\r\n\r\n def rotate(self, rotation):\r\n for i in range(rotation):\r\n self.put(self.get())\r\n\r\n \"\"\"Enqueues {@code item}\r\n @return item at front of self.entries\"\"\"\r\n\r\n def front(self):\r\n return self.entries[0]\r\n\r\n \"\"\"Returns the length of this.entries\"\"\"\r\n\r\n def size(self):\r\n return self.length\r\n", "path": "data_structures/queue/queue_on_list.py"}]} | 1,064 | 114 |
gh_patches_debug_31878 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-4817 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Vision: Single-feature functions generated by the Cloud Vision client library do not support the parameter max_results
As specified in [the gRPC reference](https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.AnnotateImageRequest), the AnnotateImageRequest message has three fields, _image_, _features[]_ and _image_context_, in which the _features[]_ field determines what feature the user is requesting (_type_) and the maximum number of returned results (_max_results_). The code for generating single-feature functions (for example, _face_detection()_), however, does not allow the user to specify _max_results_:
```
feature_value = {'type': enum.__dict__[feature]}
def inner(self, image, options=None, **kwargs):
request = dict(
image=image,
features=[feature_value],
**kwargs
)
return self.annotate_image(request, options=options)
```
Reported in https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1173
</issue>
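The resolution in this row's diff threads `max_results` into the per-feature dict (where the API expects it) rather than into the request as a whole. Distilled to the generated closure, the patched shape is roughly:
```
feature_value = {'type': enum.__dict__[feature]}

def inner(self, image, max_results=None, retry=None, timeout=None, **kwargs):
    copied_features = feature_value.copy()     # avoid mutating the shared dict
    if max_results is not None:
        copied_features['max_results'] = max_results
    request = dict(
        image=image,
        features=[copied_features],
        **kwargs
    )
    return self.annotate_image(request, retry=retry, timeout=timeout)
```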
<code>
[start of vision/google/cloud/vision_helpers/decorators.py]
1 # Copyright 2017, Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17
18 def add_single_feature_methods(cls):
19 """Custom decorator intended for :class:`~vision.helpers.VisionHelpers`.
20
21 This metaclass adds a `{feature}` method for every feature
22 defined on the Feature enum.
23 """
24 # Sanity check: This only makes sense if we are building the GAPIC
25 # subclass and have enums already attached.
26 if not hasattr(cls, 'enums'):
27 return cls
28
29 # Iterate over the Feature.Type enum and add get a list of
30 # features which will receive single-feature detection methods.
31 features = [k for k in cls.enums.Feature.Type.__dict__.keys()
32 if k.replace('_', '').isalpha() and k.upper() == k]
33
34 # Add each single-feature method to the class.
35 for feature in features:
36 # Sanity check: Do not make a method for the falsy feature.
37 if feature == 'TYPE_UNSPECIFIED':
38 continue
39
40 # Assign the appropriate metadata to the function.
41 detect = _create_single_feature_method(feature, cls.enums.Feature.Type)
42
43 # Assign a qualified name to the function, and perform module
44 # replacement on the docstring.
45 detect.__qualname__ = '{cls}.{name}'.format(
46 cls=cls.__name__,
47 name=detect.__name__,
48 )
49 detect.__doc__ = detect.__doc__.format(
50 module=cls.__module__,
51 )
52
53 # Place the function on the class being created.
54 setattr(cls, detect.__name__, detect)
55
56 # Done; return the class.
57 return cls
58
59
60 def _create_single_feature_method(feature, enum):
61 """Return a function that will detect a single feature.
62
63 Args:
64 feature (str): A specific feature defined as an attribute on
65 :class:`~enums.Feature.Type`.
66 enum (class): The :class:`~enums.Feature.Type` class.
67
68 Returns:
69 function: A helper function to detect just that feature.
70 """
71 # Define the function properties.
72 fx_name = feature.lower()
73 if 'detection' in fx_name:
74 fx_doc = 'Perform {0}.'.format(fx_name.replace('_', ' '))
75 else:
76 fx_doc = 'Return {desc} information.'.format(
77 desc=fx_name.replace('_', ' '),
78 )
79
80 # Provide a complete docstring with argument and return value
81 # information.
82 fx_doc += """
83
84 Args:
85 image (:class:`~.{module}.types.Image`): The image to analyze.
86 options (:class:`google.gax.CallOptions`): Overrides the
87 default settings for this call, e.g, timeout, retries, etc.
88 kwargs (dict): Additional properties to be set on the
89 :class:`~.{module}.types.AnnotateImageRequest`.
90
91 Returns:
92 :class:`~.{module}.types.AnnotateImageResponse`: The API response.
93 """
94
95 # Get the actual feature value to send.
96 feature_value = {'type': enum.__dict__[feature]}
97
98 # Define the function to be returned.
99 def inner(self, image, retry=None, timeout=None, **kwargs):
100 """Return a single feature annotation for the given image.
101
102 Intended for use with functools.partial, to create the particular
103 single-feature methods.
104 """
105 request = dict(
106 image=image,
107 features=[feature_value],
108 **kwargs
109 )
110 return self.annotate_image(request, retry=retry, timeout=timeout)
111
112 # Set the appropriate function metadata.
113 inner.__name__ = fx_name
114 inner.__doc__ = fx_doc
115
116 # Return the final function.
117 return inner
118
[end of vision/google/cloud/vision_helpers/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vision/google/cloud/vision_helpers/decorators.py b/vision/google/cloud/vision_helpers/decorators.py
--- a/vision/google/cloud/vision_helpers/decorators.py
+++ b/vision/google/cloud/vision_helpers/decorators.py
@@ -83,8 +83,11 @@
Args:
image (:class:`~.{module}.types.Image`): The image to analyze.
- options (:class:`google.gax.CallOptions`): Overrides the
- default settings for this call, e.g, timeout, retries, etc.
+ max_results (int):
+ Number of results to return, does not apply for
+ TEXT_DETECTION, DOCUMENT_TEXT_DETECTION, or CROP_HINTS.
+ retry (int): Number of retries to do before giving up.
+ timeout (int): Number of seconds before timing out.
kwargs (dict): Additional properties to be set on the
:class:`~.{module}.types.AnnotateImageRequest`.
@@ -96,18 +99,23 @@
feature_value = {'type': enum.__dict__[feature]}
# Define the function to be returned.
- def inner(self, image, retry=None, timeout=None, **kwargs):
+ def inner(self, image, max_results=None,
+ retry=None, timeout=None, **kwargs):
"""Return a single feature annotation for the given image.
Intended for use with functools.partial, to create the particular
single-feature methods.
"""
+ copied_features = feature_value.copy()
+ if max_results is not None:
+ copied_features['max_results'] = max_results
request = dict(
image=image,
- features=[feature_value],
+ features=[copied_features],
**kwargs
)
- return self.annotate_image(request, retry=retry, timeout=timeout)
+ response = self.annotate_image(request, retry=retry, timeout=timeout)
+ return response
# Set the appropriate function metadata.
inner.__name__ = fx_name
| {"golden_diff": "diff --git a/vision/google/cloud/vision_helpers/decorators.py b/vision/google/cloud/vision_helpers/decorators.py\n--- a/vision/google/cloud/vision_helpers/decorators.py\n+++ b/vision/google/cloud/vision_helpers/decorators.py\n@@ -83,8 +83,11 @@\n \n Args:\n image (:class:`~.{module}.types.Image`): The image to analyze.\n- options (:class:`google.gax.CallOptions`): Overrides the\n- default settings for this call, e.g, timeout, retries, etc.\n+ max_results (int):\n+ Number of results to return, does not apply for\n+ TEXT_DETECTION, DOCUMENT_TEXT_DETECTION, or CROP_HINTS.\n+ retry (int): Number of retries to do before giving up.\n+ timeout (int): Number of seconds before timing out.\n kwargs (dict): Additional properties to be set on the\n :class:`~.{module}.types.AnnotateImageRequest`.\n \n@@ -96,18 +99,23 @@\n feature_value = {'type': enum.__dict__[feature]}\n \n # Define the function to be returned.\n- def inner(self, image, retry=None, timeout=None, **kwargs):\n+ def inner(self, image, max_results=None,\n+ retry=None, timeout=None, **kwargs):\n \"\"\"Return a single feature annotation for the given image.\n \n Intended for use with functools.partial, to create the particular\n single-feature methods.\n \"\"\"\n+ copied_features = feature_value.copy()\n+ if max_results is not None:\n+ copied_features['max_results'] = max_results\n request = dict(\n image=image,\n- features=[feature_value],\n+ features=[copied_features],\n **kwargs\n )\n- return self.annotate_image(request, retry=retry, timeout=timeout)\n+ response = self.annotate_image(request, retry=retry, timeout=timeout)\n+ return response\n \n # Set the appropriate function metadata.\n inner.__name__ = fx_name\n", "issue": "Vision: Single feature functions generated by cloud vision client library does not support parameter max_results\nAs specified in [the gRPC reference](https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.AnnotateImageRequest), AnnotateImageRequest message have three fields, _image_, _features[]_ and _image_context_, in which the _features[]_ field determines what feature user is request (_type_) and max number of returned results (_max_results_). 
The code for generating single-feature functions (for example, _face_detection()_), however, does not allow user to specify _max_results_:\r\n\r\n```\r\nfeature_value = {'type': enum.__dict__[feature]}\r\ndef inner(self, image, options=None, **kwargs):\r\n request = dict(\r\n image=image,\r\n features=[feature_value],\r\n **kwargs\r\n )\r\n return self.annotate_image(request, options=options)\r\n```\r\n\r\nReported in https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1173\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\n\ndef add_single_feature_methods(cls):\n \"\"\"Custom decorator intended for :class:`~vision.helpers.VisionHelpers`.\n\n This metaclass adds a `{feature}` method for every feature\n defined on the Feature enum.\n \"\"\"\n # Sanity check: This only makes sense if we are building the GAPIC\n # subclass and have enums already attached.\n if not hasattr(cls, 'enums'):\n return cls\n\n # Iterate over the Feature.Type enum and add get a list of\n # features which will receive single-feature detection methods.\n features = [k for k in cls.enums.Feature.Type.__dict__.keys()\n if k.replace('_', '').isalpha() and k.upper() == k]\n\n # Add each single-feature method to the class.\n for feature in features:\n # Sanity check: Do not make a method for the falsy feature.\n if feature == 'TYPE_UNSPECIFIED':\n continue\n\n # Assign the appropriate metadata to the function.\n detect = _create_single_feature_method(feature, cls.enums.Feature.Type)\n\n # Assign a qualified name to the function, and perform module\n # replacement on the docstring.\n detect.__qualname__ = '{cls}.{name}'.format(\n cls=cls.__name__,\n name=detect.__name__,\n )\n detect.__doc__ = detect.__doc__.format(\n module=cls.__module__,\n )\n\n # Place the function on the class being created.\n setattr(cls, detect.__name__, detect)\n\n # Done; return the class.\n return cls\n\n\ndef _create_single_feature_method(feature, enum):\n \"\"\"Return a function that will detect a single feature.\n\n Args:\n feature (str): A specific feature defined as an attribute on\n :class:`~enums.Feature.Type`.\n enum (class): The :class:`~enums.Feature.Type` class.\n\n Returns:\n function: A helper function to detect just that feature.\n \"\"\"\n # Define the function properties.\n fx_name = feature.lower()\n if 'detection' in fx_name:\n fx_doc = 'Perform {0}.'.format(fx_name.replace('_', ' '))\n else:\n fx_doc = 'Return {desc} information.'.format(\n desc=fx_name.replace('_', ' '),\n )\n\n # Provide a complete docstring with argument and return value\n # information.\n fx_doc += \"\"\"\n\n Args:\n image (:class:`~.{module}.types.Image`): The image to analyze.\n options (:class:`google.gax.CallOptions`): Overrides the\n default settings for this call, e.g, timeout, retries, etc.\n kwargs (dict): Additional properties to be set on the\n :class:`~.{module}.types.AnnotateImageRequest`.\n\n Returns:\n 
:class:`~.{module}.types.AnnotateImageResponse`: The API response.\n \"\"\"\n\n # Get the actual feature value to send.\n feature_value = {'type': enum.__dict__[feature]}\n\n # Define the function to be returned.\n def inner(self, image, retry=None, timeout=None, **kwargs):\n \"\"\"Return a single feature annotation for the given image.\n\n Intended for use with functools.partial, to create the particular\n single-feature methods.\n \"\"\"\n request = dict(\n image=image,\n features=[feature_value],\n **kwargs\n )\n return self.annotate_image(request, retry=retry, timeout=timeout)\n\n # Set the appropriate function metadata.\n inner.__name__ = fx_name\n inner.__doc__ = fx_doc\n\n # Return the final function.\n return inner\n", "path": "vision/google/cloud/vision_helpers/decorators.py"}]} | 1,921 | 443 |
gh_patches_debug_25483 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixed ConnectionsPool.[pubsub_channels/pubsub_patterns]
Fixed a bug in `commands.pubsub.PubSubCommandsMixin.subscribe` (and `psubscribe`): the `ConnectionsPool.pubsub_channels` lookup ran before `ConnectionsPool._pubsub_conn` was created, so the returned `pubsub_channels` dict did not yet contain the newly subscribed channels.
</issue>
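The core of the fix (visible in this row's diff) is to stop capturing the channels dict before the command runs: on a connection pool, the dedicated Pub/Sub connection may only be created while (P)SUBSCRIBE executes, so the lookup must happen after awaiting it. Sketched:
```
async def wait_return_channels(fut, conn, field):
    res = await fut                       # (P)SUBSCRIBE completes; the pool's
                                          # pub/sub connection now exists
    channels_dict = getattr(conn, field)  # 'pubsub_channels' or 'pubsub_patterns',
                                          # read only after the command finished
    return [channels_dict[name] for cmd, name, count in res]
```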
<code>
[start of aioredis/commands/pubsub.py]
1 import json
2
3 from aioredis.util import wait_make_dict
4
5
6 class PubSubCommandsMixin:
7 """Pub/Sub commands mixin.
8
9 For commands details see: http://redis.io/commands/#pubsub
10 """
11
12 def publish(self, channel, message):
13 """Post a message to channel."""
14 return self.execute(b'PUBLISH', channel, message)
15
16 def publish_json(self, channel, obj):
17 """Post a JSON-encoded message to channel."""
18 return self.publish(channel, json.dumps(obj))
19
20 def subscribe(self, channel, *channels):
21 """Switch connection to Pub/Sub mode and
22 subscribe to specified channels.
23
24 Arguments can be instances of :class:`~aioredis.Channel`.
25
26 Returns :func:`asyncio.gather()` coroutine which when done will return
27 a list of :class:`~aioredis.Channel` objects.
28 """
29 conn = self._pool_or_conn
30 return wait_return_channels(
31 conn.execute_pubsub(b'SUBSCRIBE', channel, *channels),
32 conn.pubsub_channels)
33
34 def unsubscribe(self, channel, *channels):
35 """Unsubscribe from specific channels.
36
37 Arguments can be instances of :class:`~aioredis.Channel`.
38 """
39 conn = self._pool_or_conn
40 return conn.execute_pubsub(b'UNSUBSCRIBE', channel, *channels)
41
42 def psubscribe(self, pattern, *patterns):
43 """Switch connection to Pub/Sub mode and
44 subscribe to specified patterns.
45
46 Arguments can be instances of :class:`~aioredis.Channel`.
47
48 Returns :func:`asyncio.gather()` coroutine which when done will return
49 a list of subscribed :class:`~aioredis.Channel` objects with
50 ``is_pattern`` property set to ``True``.
51 """
52 conn = self._pool_or_conn
53 return wait_return_channels(
54 conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns),
55 conn.pubsub_patterns)
56
57 def punsubscribe(self, pattern, *patterns):
58 """Unsubscribe from specific patterns.
59
60 Arguments can be instances of :class:`~aioredis.Channel`.
61 """
62 conn = self._pool_or_conn
63 return conn.execute_pubsub(b'PUNSUBSCRIBE', pattern, *patterns)
64
65 def pubsub_channels(self, pattern=None):
66 """Lists the currently active channels."""
67 args = [b'PUBSUB', b'CHANNELS']
68 if pattern is not None:
69 args.append(pattern)
70 return self.execute(*args)
71
72 def pubsub_numsub(self, *channels):
73 """Returns the number of subscribers for the specified channels."""
74 return wait_make_dict(self.execute(
75 b'PUBSUB', b'NUMSUB', *channels))
76
77 def pubsub_numpat(self):
78 """Returns the number of subscriptions to patterns."""
79 return self.execute(b'PUBSUB', b'NUMPAT')
80
81 @property
82 def channels(self):
83 """Returns read-only channels dict.
84
85 See :attr:`~aioredis.RedisConnection.pubsub_channels`
86 """
87 return self._pool_or_conn.pubsub_channels
88
89 @property
90 def patterns(self):
91 """Returns read-only patterns dict.
92
93 See :attr:`~aioredis.RedisConnection.pubsub_patterns`
94 """
95 return self._pool_or_conn.pubsub_patterns
96
97 @property
98 def in_pubsub(self):
99 """Indicates that connection is in PUB/SUB mode.
100
101 Provides the number of subscribed channels.
102 """
103 return self._pool_or_conn.in_pubsub
104
105
106 async def wait_return_channels(fut, channels_dict):
107 return [channels_dict[name]
108 for cmd, name, count in await fut]
109
[end of aioredis/commands/pubsub.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aioredis/commands/pubsub.py b/aioredis/commands/pubsub.py
--- a/aioredis/commands/pubsub.py
+++ b/aioredis/commands/pubsub.py
@@ -29,7 +29,7 @@
conn = self._pool_or_conn
return wait_return_channels(
conn.execute_pubsub(b'SUBSCRIBE', channel, *channels),
- conn.pubsub_channels)
+ conn, 'pubsub_channels')
def unsubscribe(self, channel, *channels):
"""Unsubscribe from specific channels.
@@ -52,7 +52,7 @@
conn = self._pool_or_conn
return wait_return_channels(
conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns),
- conn.pubsub_patterns)
+ conn, 'pubsub_patterns')
def punsubscribe(self, pattern, *patterns):
"""Unsubscribe from specific patterns.
@@ -103,6 +103,7 @@
return self._pool_or_conn.in_pubsub
-async def wait_return_channels(fut, channels_dict):
- return [channels_dict[name]
- for cmd, name, count in await fut]
+async def wait_return_channels(fut, conn, field):
+ res = await fut
+ channels_dict = getattr(conn, field)
+ return [channels_dict[name] for cmd, name, count in res]
| {"golden_diff": "diff --git a/aioredis/commands/pubsub.py b/aioredis/commands/pubsub.py\n--- a/aioredis/commands/pubsub.py\n+++ b/aioredis/commands/pubsub.py\n@@ -29,7 +29,7 @@\n conn = self._pool_or_conn\n return wait_return_channels(\n conn.execute_pubsub(b'SUBSCRIBE', channel, *channels),\n- conn.pubsub_channels)\n+ conn, 'pubsub_channels')\n \n def unsubscribe(self, channel, *channels):\n \"\"\"Unsubscribe from specific channels.\n@@ -52,7 +52,7 @@\n conn = self._pool_or_conn\n return wait_return_channels(\n conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns),\n- conn.pubsub_patterns)\n+ conn, 'pubsub_patterns')\n \n def punsubscribe(self, pattern, *patterns):\n \"\"\"Unsubscribe from specific patterns.\n@@ -103,6 +103,7 @@\n return self._pool_or_conn.in_pubsub\n \n \n-async def wait_return_channels(fut, channels_dict):\n- return [channels_dict[name]\n- for cmd, name, count in await fut]\n+async def wait_return_channels(fut, conn, field):\n+ res = await fut\n+ channels_dict = getattr(conn, field)\n+ return [channels_dict[name] for cmd, name, count in res]\n", "issue": "Fixed ConnectionsPool.[pubsub_channels/pubsub_patterns]\nFixed bug in `commands.pubsub.PubSubCommandsMixin.subscribe` (and psubscribe). `ConnectionsPool.pubsub_channles` code was called before `ConnectionsPool._pubsub_conn` creation, and therefore `pubsub_channles` \n", "before_files": [{"content": "import json\n\nfrom aioredis.util import wait_make_dict\n\n\nclass PubSubCommandsMixin:\n \"\"\"Pub/Sub commands mixin.\n\n For commands details see: http://redis.io/commands/#pubsub\n \"\"\"\n\n def publish(self, channel, message):\n \"\"\"Post a message to channel.\"\"\"\n return self.execute(b'PUBLISH', channel, message)\n\n def publish_json(self, channel, obj):\n \"\"\"Post a JSON-encoded message to channel.\"\"\"\n return self.publish(channel, json.dumps(obj))\n\n def subscribe(self, channel, *channels):\n \"\"\"Switch connection to Pub/Sub mode and\n subscribe to specified channels.\n\n Arguments can be instances of :class:`~aioredis.Channel`.\n\n Returns :func:`asyncio.gather()` coroutine which when done will return\n a list of :class:`~aioredis.Channel` objects.\n \"\"\"\n conn = self._pool_or_conn\n return wait_return_channels(\n conn.execute_pubsub(b'SUBSCRIBE', channel, *channels),\n conn.pubsub_channels)\n\n def unsubscribe(self, channel, *channels):\n \"\"\"Unsubscribe from specific channels.\n\n Arguments can be instances of :class:`~aioredis.Channel`.\n \"\"\"\n conn = self._pool_or_conn\n return conn.execute_pubsub(b'UNSUBSCRIBE', channel, *channels)\n\n def psubscribe(self, pattern, *patterns):\n \"\"\"Switch connection to Pub/Sub mode and\n subscribe to specified patterns.\n\n Arguments can be instances of :class:`~aioredis.Channel`.\n\n Returns :func:`asyncio.gather()` coroutine which when done will return\n a list of subscribed :class:`~aioredis.Channel` objects with\n ``is_pattern`` property set to ``True``.\n \"\"\"\n conn = self._pool_or_conn\n return wait_return_channels(\n conn.execute_pubsub(b'PSUBSCRIBE', pattern, *patterns),\n conn.pubsub_patterns)\n\n def punsubscribe(self, pattern, *patterns):\n \"\"\"Unsubscribe from specific patterns.\n\n Arguments can be instances of :class:`~aioredis.Channel`.\n \"\"\"\n conn = self._pool_or_conn\n return conn.execute_pubsub(b'PUNSUBSCRIBE', pattern, *patterns)\n\n def pubsub_channels(self, pattern=None):\n \"\"\"Lists the currently active channels.\"\"\"\n args = [b'PUBSUB', b'CHANNELS']\n if pattern is not None:\n args.append(pattern)\n return 
self.execute(*args)\n\n def pubsub_numsub(self, *channels):\n \"\"\"Returns the number of subscribers for the specified channels.\"\"\"\n return wait_make_dict(self.execute(\n b'PUBSUB', b'NUMSUB', *channels))\n\n def pubsub_numpat(self):\n \"\"\"Returns the number of subscriptions to patterns.\"\"\"\n return self.execute(b'PUBSUB', b'NUMPAT')\n\n @property\n def channels(self):\n \"\"\"Returns read-only channels dict.\n\n See :attr:`~aioredis.RedisConnection.pubsub_channels`\n \"\"\"\n return self._pool_or_conn.pubsub_channels\n\n @property\n def patterns(self):\n \"\"\"Returns read-only patterns dict.\n\n See :attr:`~aioredis.RedisConnection.pubsub_patterns`\n \"\"\"\n return self._pool_or_conn.pubsub_patterns\n\n @property\n def in_pubsub(self):\n \"\"\"Indicates that connection is in PUB/SUB mode.\n\n Provides the number of subscribed channels.\n \"\"\"\n return self._pool_or_conn.in_pubsub\n\n\nasync def wait_return_channels(fut, channels_dict):\n return [channels_dict[name]\n for cmd, name, count in await fut]\n", "path": "aioredis/commands/pubsub.py"}]} | 1,605 | 310 |
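A standalone sketch of the deferred-lookup pattern the patch above applies: the SUBSCRIBE reply is awaited first, and only then is the (possibly lazily created) pubsub dict read off the connection. Names mirror the diff; the surrounding aioredis plumbing is assumed.

```python
async def wait_return_channels(fut, conn, field):
    # Await the (P)SUBSCRIBE reply before touching the connection's
    # channel registry, which may not exist until the reply arrives.
    res = await fut
    channels_dict = getattr(conn, field)  # 'pubsub_channels' or 'pubsub_patterns'
    return [channels_dict[name] for cmd, name, count in res]
```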
gh_patches_debug_13881 | rasdani/github-patches | git_diff | praw-dev__praw-939 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide "best" sort for front page (models.Front)
The new "best" sort for the front page isn't currently available via PRAW. See [this Reddit thread](https://www.reddit.com/r/redditdev/comments/8h8ijn/how_do_you_sort_best_via_the_api/).
</issue>
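Assuming a `best()` listing is added alongside the existing front-page sorts, usage would mirror `reddit.front.hot()`. A hedged sketch; the credentials are placeholders, and since "best" is a personalized sort it presumably needs an authenticated account:

```python
import praw

reddit = praw.Reddit(client_id="...", client_secret="...",
                     username="...", password="...",
                     user_agent="best-sort-demo")  # placeholder credentials

# Iterate the front page with the "best" sort, like .hot() or .top():
for submission in reddit.front.best(limit=25):
    print(submission.title)
```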
<code>
[start of praw/models/front.py]
1 """Provide the Front class."""
2 from .listing.mixins import SubredditListingMixin
3
4
5 class Front(SubredditListingMixin):
6 """Front is a Listing class that represents the front page."""
7
8 def __init__(self, reddit):
9 """Initialize a Front instance."""
10 super(Front, self).__init__(reddit, None)
11 self._path = '/'
12
[end of praw/models/front.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/praw/models/front.py b/praw/models/front.py
--- a/praw/models/front.py
+++ b/praw/models/front.py
@@ -1,4 +1,6 @@
"""Provide the Front class."""
+from ..const import urljoin
+from .listing.generator import ListingGenerator
from .listing.mixins import SubredditListingMixin
@@ -9,3 +11,13 @@
"""Initialize a Front instance."""
super(Front, self).__init__(reddit, None)
self._path = '/'
+
+ def best(self, **generator_kwargs):
+ """Return a ListingGenerator for best items.
+
+ Additional keyword arguments are passed in the initialization of
+ :class:`.ListingGenerator`.
+
+ """
+ return ListingGenerator(self._reddit, urljoin(self._path, 'best'),
+ **generator_kwargs)
| {"golden_diff": "diff --git a/praw/models/front.py b/praw/models/front.py\n--- a/praw/models/front.py\n+++ b/praw/models/front.py\n@@ -1,4 +1,6 @@\n \"\"\"Provide the Front class.\"\"\"\n+from ..const import urljoin\n+from .listing.generator import ListingGenerator\n from .listing.mixins import SubredditListingMixin\n \n \n@@ -9,3 +11,13 @@\n \"\"\"Initialize a Front instance.\"\"\"\n super(Front, self).__init__(reddit, None)\n self._path = '/'\n+\n+ def best(self, **generator_kwargs):\n+ \"\"\"Return a ListingGenerator for best items.\n+\n+ Additional keyword arguments are passed in the initialization of\n+ :class:`.ListingGenerator`.\n+\n+ \"\"\"\n+ return ListingGenerator(self._reddit, urljoin(self._path, 'best'),\n+ **generator_kwargs)\n", "issue": "Provide \"best\" sort for front page (models.Front)\nThe new \"best\" sort for the front page isn't currently available via PRAW. See [this Reddit thread](https://www.reddit.com/r/redditdev/comments/8h8ijn/how_do_you_sort_best_via_the_api/).\n", "before_files": [{"content": "\"\"\"Provide the Front class.\"\"\"\nfrom .listing.mixins import SubredditListingMixin\n\n\nclass Front(SubredditListingMixin):\n \"\"\"Front is a Listing class that represents the front page.\"\"\"\n\n def __init__(self, reddit):\n \"\"\"Initialize a Front instance.\"\"\"\n super(Front, self).__init__(reddit, None)\n self._path = '/'\n", "path": "praw/models/front.py"}]} | 686 | 188 |
gh_patches_debug_40979 | rasdani/github-patches | git_diff | dotkom__onlineweb4-599 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(Event) Minimum length on event description and ingress
Possibly we should also limit the text to exclude words like TBA.
Events in ow4 demand some text in order to look good, so let's put a minimum requirement on length for the event texts. Making up a description of 200 characters should be no big deal.
I've really had it with "INFO: TBA"
</issue>
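One way to enforce such minimums in Django is to attach validators to the admin form's fields. A minimal sketch, assuming a `description` field and an illustrative 200-character floor:

```python
from django.contrib import admin
from django.core import validators

class EventAdmin(admin.ModelAdmin):
    def get_form(self, request, obj=None, **kwargs):
        form = super(EventAdmin, self).get_form(request, obj, **kwargs)
        # Require a real description and reject the "TBA" placeholder.
        form.base_fields['description'].validators = [
            validators.MinLengthValidator(200),
            validators.RegexValidator(r"^(?:(?!TBA).)*$",
                                      "Description cannot contain 'TBA'.",
                                      "illegal"),
        ]
        return form
```

The same treatment can be applied to the ingress fields with smaller floors.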
<code>
[start of apps/events/admin.py]
1 # -*- coding: utf-8 -*-
2
3 from apps.events.models import Event
4 from apps.events.models import AttendanceEvent
5 from apps.events.models import Attendee
6 from apps.events.models import CompanyEvent
7 from apps.events.models import RuleBundle
8 from apps.events.models import FieldOfStudyRule
9 from apps.events.models import GradeRule
10 from apps.events.models import UserGroupRule
11
12 from apps.feedback.admin import FeedbackRelationInline
13
14 from django.contrib import admin
15
16
17 class AttendeeInline(admin.TabularInline):
18 model = Attendee
19 extra = 1
20
21
22 class CompanyInline(admin.TabularInline):
23 model = CompanyEvent
24 max_num = 20
25 extra = 0
26
27 class RuleBundleInline(admin.TabularInline):
28 model = RuleBundle
29 extra = 1
30 max_num = 20
31
32
33 class AttendanceEventAdmin(admin.ModelAdmin):
34 model = AttendanceEvent
35 inlines = (AttendeeInline, RuleBundleInline)
36
37 class AttendeeAdmin(admin.ModelAdmin):
38 model = Attendee
39 list_display = ('user', 'event')
40
41 class CompanyEventAdmin(admin.ModelAdmin):
42 model = CompanyEvent
43 inlines = (CompanyInline,)
44
45 class RuleBundleAdmin(admin.ModelAdmin):
46 model = RuleBundle
47
48 class FieldOfStudyRuleAdmin(admin.ModelAdmin):
49 model = FieldOfStudyRule
50
51 class GradeRuleAdmin(admin.ModelAdmin):
52 model = GradeRule
53
54 class UserGroupRuleAdmin(admin.ModelAdmin):
55 model = UserGroupRule
56
57 class AttendanceEventInline(admin.StackedInline):
58 model = AttendanceEvent
59 max_num = 1
60 extra = 0
61 filter_horizontal = ('rule_bundles',)
62
63
64 class EventAdmin(admin.ModelAdmin):
65 inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline)
66 exclude = ("author", )
67
68 def save_model(self, request, obj, form, change):
69 if not change: # created
70 obj.author = request.user
71 obj.save()
72
73 def save_formset(self, request, form, formset, change):
74 instances = formset.save(commit=False)
75 for instance in instances:
76 instance.save()
77 formset.save_m2m()
78
79 admin.site.register(Event, EventAdmin)
80 admin.site.register(Attendee, AttendeeAdmin)
81 admin.site.register(AttendanceEvent, AttendanceEventAdmin)
82 admin.site.register(RuleBundle, RuleBundleAdmin)
83 admin.site.register(GradeRule, GradeRuleAdmin)
84 admin.site.register(UserGroupRule, UserGroupRuleAdmin)
85 admin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)
86
[end of apps/events/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/admin.py b/apps/events/admin.py
--- a/apps/events/admin.py
+++ b/apps/events/admin.py
@@ -1,5 +1,10 @@
# -*- coding: utf-8 -*-
+from django import forms
+from django.contrib import admin
+from django.core import validators
+from django.utils.translation import ugettext as _
+
from apps.events.models import Event
from apps.events.models import AttendanceEvent
from apps.events.models import Attendee
@@ -8,10 +13,8 @@
from apps.events.models import FieldOfStudyRule
from apps.events.models import GradeRule
from apps.events.models import UserGroupRule
-
from apps.feedback.admin import FeedbackRelationInline
-from django.contrib import admin
class AttendeeInline(admin.TabularInline):
@@ -24,6 +27,7 @@
max_num = 20
extra = 0
+
class RuleBundleInline(admin.TabularInline):
model = RuleBundle
extra = 1
@@ -34,32 +38,39 @@
model = AttendanceEvent
inlines = (AttendeeInline, RuleBundleInline)
+
class AttendeeAdmin(admin.ModelAdmin):
model = Attendee
list_display = ('user', 'event')
+
class CompanyEventAdmin(admin.ModelAdmin):
model = CompanyEvent
inlines = (CompanyInline,)
+
class RuleBundleAdmin(admin.ModelAdmin):
model = RuleBundle
-
+
+
class FieldOfStudyRuleAdmin(admin.ModelAdmin):
model = FieldOfStudyRule
+
class GradeRuleAdmin(admin.ModelAdmin):
model = GradeRule
+
class UserGroupRuleAdmin(admin.ModelAdmin):
model = UserGroupRule
+
class AttendanceEventInline(admin.StackedInline):
model = AttendanceEvent
max_num = 1
extra = 0
filter_horizontal = ('rule_bundles',)
-
+
class EventAdmin(admin.ModelAdmin):
inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline)
@@ -76,6 +87,16 @@
instance.save()
formset.save_m2m()
+ def get_form(self, request, obj=None, **kwargs):
+ form = super(EventAdmin, self).get_form(request, obj, **kwargs)
+ form.base_fields['ingress_short'].validators=[validators.MinLengthValidator(75)]
+ form.base_fields['ingress'].validators=[validators.MinLengthValidator(100)]
+ form.base_fields['description'].validators=[
+ validators.MinLengthValidator(200),
+ validators.RegexValidator("^(?:(?!TBA).)*$", _("Beskrivelsen kan ikke inneholde 'TBA'."), "ulovlig"),
+ ]
+ return form
+
admin.site.register(Event, EventAdmin)
admin.site.register(Attendee, AttendeeAdmin)
admin.site.register(AttendanceEvent, AttendanceEventAdmin)
| {"golden_diff": "diff --git a/apps/events/admin.py b/apps/events/admin.py\n--- a/apps/events/admin.py\n+++ b/apps/events/admin.py\n@@ -1,5 +1,10 @@\n # -*- coding: utf-8 -*-\n \n+from django import forms\n+from django.contrib import admin\n+from django.core import validators\n+from django.utils.translation import ugettext as _\n+\n from apps.events.models import Event\n from apps.events.models import AttendanceEvent\n from apps.events.models import Attendee\n@@ -8,10 +13,8 @@\n from apps.events.models import FieldOfStudyRule\n from apps.events.models import GradeRule\n from apps.events.models import UserGroupRule\n-\n from apps.feedback.admin import FeedbackRelationInline\n \n-from django.contrib import admin\n \n \n class AttendeeInline(admin.TabularInline):\n@@ -24,6 +27,7 @@\n max_num = 20\n extra = 0\n \n+\n class RuleBundleInline(admin.TabularInline):\n model = RuleBundle\n extra = 1\n@@ -34,32 +38,39 @@\n model = AttendanceEvent\n inlines = (AttendeeInline, RuleBundleInline)\n \n+\n class AttendeeAdmin(admin.ModelAdmin):\n model = Attendee\n list_display = ('user', 'event')\n \n+\n class CompanyEventAdmin(admin.ModelAdmin):\n model = CompanyEvent\n inlines = (CompanyInline,)\n \n+\n class RuleBundleAdmin(admin.ModelAdmin):\n model = RuleBundle\n- \n+\n+\n class FieldOfStudyRuleAdmin(admin.ModelAdmin):\n model = FieldOfStudyRule\n \n+\n class GradeRuleAdmin(admin.ModelAdmin):\n model = GradeRule\n \n+\n class UserGroupRuleAdmin(admin.ModelAdmin):\n model = UserGroupRule\n \n+\n class AttendanceEventInline(admin.StackedInline):\n model = AttendanceEvent\n max_num = 1\n extra = 0\n filter_horizontal = ('rule_bundles',)\n- \n+\n \n class EventAdmin(admin.ModelAdmin):\n inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline)\n@@ -76,6 +87,16 @@\n instance.save()\n formset.save_m2m()\n \n+ def get_form(self, request, obj=None, **kwargs):\n+ form = super(EventAdmin, self).get_form(request, obj, **kwargs)\n+ form.base_fields['ingress_short'].validators=[validators.MinLengthValidator(75)]\n+ form.base_fields['ingress'].validators=[validators.MinLengthValidator(100)]\n+ form.base_fields['description'].validators=[\n+ validators.MinLengthValidator(200),\n+ validators.RegexValidator(\"^(?:(?!TBA).)*$\", _(\"Beskrivelsen kan ikke inneholde 'TBA'.\"), \"ulovlig\"),\n+ ]\n+ return form\n+\n admin.site.register(Event, EventAdmin)\n admin.site.register(Attendee, AttendeeAdmin)\n admin.site.register(AttendanceEvent, AttendanceEventAdmin)\n", "issue": "(Event) Minimum length on event description and ingress\nPossibly we should also limit the text to exclude words like TBA.\n\nEvents in ow4 demand some text in order to look good, so let's put a minimum requirement on length for the event texts. 
Making up a description of 200 characters should be no big deal.\n\nI've really had it with \"INFO: TBA\"\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom apps.events.models import Event\nfrom apps.events.models import AttendanceEvent\nfrom apps.events.models import Attendee\nfrom apps.events.models import CompanyEvent\nfrom apps.events.models import RuleBundle\nfrom apps.events.models import FieldOfStudyRule\nfrom apps.events.models import GradeRule\nfrom apps.events.models import UserGroupRule\n\nfrom apps.feedback.admin import FeedbackRelationInline\n\nfrom django.contrib import admin\n\n\nclass AttendeeInline(admin.TabularInline):\n model = Attendee\n extra = 1\n\n\nclass CompanyInline(admin.TabularInline):\n model = CompanyEvent\n max_num = 20\n extra = 0\n\nclass RuleBundleInline(admin.TabularInline):\n model = RuleBundle\n extra = 1\n max_num = 20\n\n\nclass AttendanceEventAdmin(admin.ModelAdmin):\n model = AttendanceEvent\n inlines = (AttendeeInline, RuleBundleInline)\n\nclass AttendeeAdmin(admin.ModelAdmin):\n model = Attendee\n list_display = ('user', 'event')\n\nclass CompanyEventAdmin(admin.ModelAdmin):\n model = CompanyEvent\n inlines = (CompanyInline,)\n\nclass RuleBundleAdmin(admin.ModelAdmin):\n model = RuleBundle\n \nclass FieldOfStudyRuleAdmin(admin.ModelAdmin):\n model = FieldOfStudyRule\n\nclass GradeRuleAdmin(admin.ModelAdmin):\n model = GradeRule\n\nclass UserGroupRuleAdmin(admin.ModelAdmin):\n model = UserGroupRule\n\nclass AttendanceEventInline(admin.StackedInline):\n model = AttendanceEvent\n max_num = 1\n extra = 0\n filter_horizontal = ('rule_bundles',)\n \n\nclass EventAdmin(admin.ModelAdmin):\n inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline)\n exclude = (\"author\", )\n\n def save_model(self, request, obj, form, change):\n if not change: # created\n obj.author = request.user\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instance in instances:\n instance.save()\n formset.save_m2m()\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Attendee, AttendeeAdmin)\nadmin.site.register(AttendanceEvent, AttendanceEventAdmin)\nadmin.site.register(RuleBundle, RuleBundleAdmin)\nadmin.site.register(GradeRule, GradeRuleAdmin)\nadmin.site.register(UserGroupRule, UserGroupRuleAdmin)\nadmin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)\n", "path": "apps/events/admin.py"}]} | 1,315 | 623 |
gh_patches_debug_593 | rasdani/github-patches | git_diff | projectmesa__mesa-1437 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
v1.1.0 Safford Release
Milestone: https://github.com/projectmesa/mesa/milestone/31
Highlighted changes:
- #1376 > 6x perf speedup for add/remove agent in `ContinuousSpace`
- #1391 correctness fix for `SimultaneousActivation` and `StagedActivation`
- #1399 make `self.running = True` optional. We need to tell existing users that initializing this is no longer necessary, thereby reducing boilerplate code
- #1435 Allow user-specified local dir to be served by Tornado. Needed by Mesa-Geo
- #1413 Allow batch_run to take arbitrary parameters
</issue>
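For context on what the version bump touches, the package derives a `VERSION` tuple from the version string via `int_or_str` (behavior sketched from the module code shown below):

```python
def int_or_str(value):
    try:
        return int(value)
    except ValueError:
        return value

# Plain releases parse to all-int tuples; pre-release tags keep a string part.
assert tuple(map(int_or_str, "1.1.0".split("."))) == (1, 1, 0)
assert tuple(map(int_or_str, "1.1.0rc1".split("."))) == (1, 1, "0rc1")
```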
<code>
[start of mesa/__init__.py]
1 """
2 Mesa Agent-Based Modeling Framework
3
4 Core Objects: Model, and Agent.
5
6 """
7 import datetime
8
9 from mesa.model import Model
10 from mesa.agent import Agent
11
12 import mesa.time as time
13 import mesa.space as space
14 import mesa.flat.visualization as visualization
15 from mesa.datacollection import DataCollector
16 from mesa.batchrunner import batch_run # noqa
17
18 __all__ = [
19 "Model",
20 "Agent",
21 "time",
22 "space",
23 "visualization",
24 "DataCollector",
25 "batch_run",
26 ]
27
28 __title__ = "mesa"
29 __version__ = "1.0.0"
30 __license__ = "Apache 2.0"
31 __copyright__ = f"Copyright {datetime.date.today().year} Project Mesa Team"
32
[end of mesa/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesa/__init__.py b/mesa/__init__.py
--- a/mesa/__init__.py
+++ b/mesa/__init__.py
@@ -26,6 +26,6 @@
]
__title__ = "mesa"
-__version__ = "1.0.0"
+__version__ = "1.1.0"
__license__ = "Apache 2.0"
__copyright__ = f"Copyright {datetime.date.today().year} Project Mesa Team"
| {"golden_diff": "diff --git a/mesa/__init__.py b/mesa/__init__.py\n--- a/mesa/__init__.py\n+++ b/mesa/__init__.py\n@@ -26,6 +26,6 @@\n ]\n \n __title__ = \"mesa\"\n-__version__ = \"1.0.0\"\n+__version__ = \"1.1.0\"\n __license__ = \"Apache 2.0\"\n __copyright__ = f\"Copyright {datetime.date.today().year} Project Mesa Team\"\n", "issue": "v1.1.0 Safford Release\nMilestone: https://github.com/projectmesa/mesa/milestone/31\r\n\r\nHighlighted changes:\r\n- #1376 > 6x perf speedup for add/remove agent in `ContinuousSpace`\r\n- #1391 correctness fix for `SimultaneousActivation` and `StagedActivation`\r\n- #1399 make `self.running = True` optional. We need to tell existing users that initializing this is no longer necessary, and so, reducing the boilerplate code\r\n- #1435 Allow user-specified local dir to be served by Tornado. Needed by Mesa-Geo\r\n- #1413 Allow batch_run to take arbitrary parameters\n", "before_files": [{"content": "\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom mesa.model import Model\nfrom mesa.agent import Agent\n\nimport mesa.time as time\nimport mesa.space as space\nimport mesa.flat.visualization as visualization\nfrom mesa.datacollection import DataCollector\nfrom mesa.batchrunner import batch_run # noqa\n\n__all__ = [\n \"Model\",\n \"Agent\",\n \"time\",\n \"space\",\n \"visualization\",\n \"DataCollector\",\n \"batch_run\",\n]\n\n__title__ = \"mesa\"\n__version__ = \"1.0.0\"\n__license__ = \"Apache 2.0\"\n__copyright__ = f\"Copyright {datetime.date.today().year} Project Mesa Team\"\n", "path": "mesa/__init__.py"}]} | 902 | 112 |
gh_patches_debug_31948 | rasdani/github-patches | git_diff | redis__redis-py-1791 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
redis-py version attribute should be decoupled from the redis module
Following the conversation in https://github.com/redis/redis-py/issues/1625#issuecomment-991744836, it looks like importing the `redis` module prior to installation in `setup.py` (to read the `version` attribute) is not ideal.
Currently there are two places where module version is required.
- `setup.py` for module installation
- `redis/__init__.py` for module level `__version__` attribute
One way to fix this is to maintain a `version.py` file in the top-level directory and use that as the source of truth in both of the above places.
@chayim @hartwork What do you think? I can create a PR for this :)
</issue>
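A minimal sketch of a metadata-based single source of truth, where the installed distribution's metadata supplies the version at import time (the fallback value is illustrative):

```python
import sys

if sys.version_info >= (3, 8):
    from importlib import metadata
else:  # Python 3.6/3.7 need the backport package
    import importlib_metadata as metadata

try:
    __version__ = metadata.version("redis")
except metadata.PackageNotFoundError:
    # e.g. running from a source checkout that was never pip-installed
    __version__ = "0.0.0.dev0"
```

`setup.py` would then carry the literal version string (or read a shared `version.py`) instead of importing the package.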
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from setuptools import find_packages, setup
3
4 import redis
5
6 setup(
7 name="redis",
8 description="Python client for Redis database and key-value store",
9 long_description=open("README.md").read().strip(),
10 long_description_content_type="text/markdown",
11 keywords=["Redis", "key-value store", "database"],
12 license="MIT",
13 version=redis.__version__,
14 packages=find_packages(
15 include=[
16 "redis",
17 "redis.commands",
18 "redis.commands.bf",
19 "redis.commands.json",
20 "redis.commands.search",
21 "redis.commands.timeseries",
22 "redis.commands.graph",
23 ]
24 ),
25 url="https://github.com/redis/redis-py",
26 author="Redis Inc.",
27 author_email="[email protected]",
28 python_requires=">=3.6",
29 setup_requires=[
30 "packaging>=21.3",
31 ],
32 install_requires=[
33 "deprecated>=1.2.3",
34 "packaging>=21.3",
35 ],
36 classifiers=[
37 "Development Status :: 5 - Production/Stable",
38 "Environment :: Console",
39 "Intended Audience :: Developers",
40 "License :: OSI Approved :: MIT License",
41 "Operating System :: OS Independent",
42 "Programming Language :: Python",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3 :: Only",
45 "Programming Language :: Python :: 3.6",
46 "Programming Language :: Python :: 3.7",
47 "Programming Language :: Python :: 3.8",
48 "Programming Language :: Python :: 3.9",
49 "Programming Language :: Python :: 3.10",
50 "Programming Language :: Python :: Implementation :: CPython",
51 "Programming Language :: Python :: Implementation :: PyPy",
52 ],
53 extras_require={
54 "hiredis": ["hiredis>=1.0.0"],
55 },
56 )
57
[end of setup.py]
[start of redis/__init__.py]
1 from redis.client import Redis, StrictRedis
2 from redis.cluster import RedisCluster
3 from redis.connection import (
4 BlockingConnectionPool,
5 Connection,
6 ConnectionPool,
7 SSLConnection,
8 UnixDomainSocketConnection,
9 )
10 from redis.exceptions import (
11 AuthenticationError,
12 AuthenticationWrongNumberOfArgsError,
13 BusyLoadingError,
14 ChildDeadlockedError,
15 ConnectionError,
16 DataError,
17 InvalidResponse,
18 PubSubError,
19 ReadOnlyError,
20 RedisError,
21 ResponseError,
22 TimeoutError,
23 WatchError,
24 )
25 from redis.sentinel import (
26 Sentinel,
27 SentinelConnectionPool,
28 SentinelManagedConnection,
29 SentinelManagedSSLConnection,
30 )
31 from redis.utils import from_url
32
33
34 def int_or_str(value):
35 try:
36 return int(value)
37 except ValueError:
38 return value
39
40
41 __version__ = "4.1.0rc2"
42
43
44 VERSION = tuple(map(int_or_str, __version__.split(".")))
45
46 __all__ = [
47 "AuthenticationError",
48 "AuthenticationWrongNumberOfArgsError",
49 "BlockingConnectionPool",
50 "BusyLoadingError",
51 "ChildDeadlockedError",
52 "Connection",
53 "ConnectionError",
54 "ConnectionPool",
55 "DataError",
56 "from_url",
57 "InvalidResponse",
58 "PubSubError",
59 "ReadOnlyError",
60 "Redis",
61 "RedisCluster",
62 "RedisError",
63 "ResponseError",
64 "Sentinel",
65 "SentinelConnectionPool",
66 "SentinelManagedConnection",
67 "SentinelManagedSSLConnection",
68 "SSLConnection",
69 "StrictRedis",
70 "TimeoutError",
71 "UnixDomainSocketConnection",
72 "WatchError",
73 ]
74
[end of redis/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redis/__init__.py b/redis/__init__.py
--- a/redis/__init__.py
+++ b/redis/__init__.py
@@ -1,3 +1,10 @@
+import sys
+
+if sys.version_info >= (3, 8):
+ from importlib import metadata
+else:
+ import importlib_metadata as metadata
+
from redis.client import Redis, StrictRedis
from redis.cluster import RedisCluster
from redis.connection import (
@@ -38,7 +45,10 @@
return value
-__version__ = "4.1.0rc2"
+try:
+ __version__ = metadata.version("redis")
+except metadata.PackageNotFoundError:
+ __version__ = "99.99.99"
VERSION = tuple(map(int_or_str, __version__.split(".")))
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,6 @@
#!/usr/bin/env python
from setuptools import find_packages, setup
-import redis
-
setup(
name="redis",
description="Python client for Redis database and key-value store",
@@ -10,7 +8,7 @@
long_description_content_type="text/markdown",
keywords=["Redis", "key-value store", "database"],
license="MIT",
- version=redis.__version__,
+ version="4.1.0rc2",
packages=find_packages(
include=[
"redis",
@@ -26,12 +24,10 @@
author="Redis Inc.",
author_email="[email protected]",
python_requires=">=3.6",
- setup_requires=[
- "packaging>=21.3",
- ],
install_requires=[
"deprecated>=1.2.3",
"packaging>=21.3",
+ 'importlib-metadata >= 1.0; python_version < "3.8"',
],
classifiers=[
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/redis/__init__.py b/redis/__init__.py\n--- a/redis/__init__.py\n+++ b/redis/__init__.py\n@@ -1,3 +1,10 @@\n+import sys\n+\n+if sys.version_info >= (3, 8):\n+ from importlib import metadata\n+else:\n+ import importlib_metadata as metadata\n+\n from redis.client import Redis, StrictRedis\n from redis.cluster import RedisCluster\n from redis.connection import (\n@@ -38,7 +45,10 @@\n return value\n \n \n-__version__ = \"4.1.0rc2\"\n+try:\n+ __version__ = metadata.version(\"redis\")\n+except metadata.PackageNotFoundError:\n+ __version__ = \"99.99.99\"\n \n \n VERSION = tuple(map(int_or_str, __version__.split(\".\")))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,6 @@\n #!/usr/bin/env python\n from setuptools import find_packages, setup\n \n-import redis\n-\n setup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n@@ -10,7 +8,7 @@\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n- version=redis.__version__,\n+ version=\"4.1.0rc2\",\n packages=find_packages(\n include=[\n \"redis\",\n@@ -26,12 +24,10 @@\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n- setup_requires=[\n- \"packaging>=21.3\",\n- ],\n install_requires=[\n \"deprecated>=1.2.3\",\n \"packaging>=21.3\",\n+ 'importlib-metadata >= 1.0; python_version < \"3.8\"',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "redis-py version attribute should be decoupled from the redis module\nFollowing the conversation from https://github.com/redis/redis-py/issues/1625#issuecomment-991744836 looks like importing `redis` module prior to installation in `setup.py` for `version` attribute is not ideal.\r\n\r\nCurrently there are two places where module version is required.\r\n- `setup.py` for module installation\r\n- `redis/__init__.py` for module level `__version__` attribute\r\n\r\nOne way to fix this is to maintain a `version.py` file in top level directory and using that as source of truth in both the above places. \r\n\r\n@chayim @hartwork What do you think? 
I can create a PR for this :)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nimport redis\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=redis.__version__,\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n setup_requires=[\n \"packaging>=21.3\",\n ],\n install_requires=[\n \"deprecated>=1.2.3\",\n \"packaging>=21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n },\n)\n", "path": "setup.py"}, {"content": "from redis.client import Redis, StrictRedis\nfrom redis.cluster import RedisCluster\nfrom redis.connection import (\n BlockingConnectionPool,\n Connection,\n ConnectionPool,\n SSLConnection,\n UnixDomainSocketConnection,\n)\nfrom redis.exceptions import (\n AuthenticationError,\n AuthenticationWrongNumberOfArgsError,\n BusyLoadingError,\n ChildDeadlockedError,\n ConnectionError,\n DataError,\n InvalidResponse,\n PubSubError,\n ReadOnlyError,\n RedisError,\n ResponseError,\n TimeoutError,\n WatchError,\n)\nfrom redis.sentinel import (\n Sentinel,\n SentinelConnectionPool,\n SentinelManagedConnection,\n SentinelManagedSSLConnection,\n)\nfrom redis.utils import from_url\n\n\ndef int_or_str(value):\n try:\n return int(value)\n except ValueError:\n return value\n\n\n__version__ = \"4.1.0rc2\"\n\n\nVERSION = tuple(map(int_or_str, __version__.split(\".\")))\n\n__all__ = [\n \"AuthenticationError\",\n \"AuthenticationWrongNumberOfArgsError\",\n \"BlockingConnectionPool\",\n \"BusyLoadingError\",\n \"ChildDeadlockedError\",\n \"Connection\",\n \"ConnectionError\",\n \"ConnectionPool\",\n \"DataError\",\n \"from_url\",\n \"InvalidResponse\",\n \"PubSubError\",\n \"ReadOnlyError\",\n \"Redis\",\n \"RedisCluster\",\n \"RedisError\",\n \"ResponseError\",\n \"Sentinel\",\n \"SentinelConnectionPool\",\n \"SentinelManagedConnection\",\n \"SentinelManagedSSLConnection\",\n \"SSLConnection\",\n \"StrictRedis\",\n \"TimeoutError\",\n \"UnixDomainSocketConnection\",\n \"WatchError\",\n]\n", "path": "redis/__init__.py"}]} | 1,709 | 448 |
gh_patches_debug_32341 | rasdani/github-patches | git_diff | zulip__zulip-3217 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
contrib_bots: Prevent runaway bots with rate limiting
It's possible to create a runaway bot if a bot sends a message in response to certain stimuli that in turn becomes a stimulus for the bot to send another message, and so on and so forth.
We can prevent that in `contrib_bots/run.py` by adding some rate-limiting logic.
Right now RestrictedClient makes self.send_message just be client.send_message, but we can instead have a wrapper like this:
```
def send_message(self, *args, **kwargs):
self.rate_limit()
self.client.send_message(*args, **kwargs)
```
And then have appropriate logic in `rate_limit()` plus some state variable to make sure fewer than N messages have been sent in the last second. It might make sense to have a small class for rate limiting that RestrictedClient uses an instance of.
You can do a pretty naive rate limiting scheme where you just keep the last N timestamps in a Python list and truncate it off the front as new sends come in and the list grows to size N+1.
</issue>
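A self-contained sketch of that naive sliding-window scheme (window parameters are illustrative):

```python
import time

class RateLimit(object):
    def __init__(self, message_limit, interval_limit):
        self.message_limit = message_limit    # max sends tracked in the window
        self.interval_limit = interval_limit  # window length in seconds
        self.message_list = []                # timestamps of recent sends

    def is_legal(self):
        self.message_list.append(time.time())
        if len(self.message_list) > self.message_limit:
            self.message_list.pop(0)  # truncate off the front
            # Legal only if the oldest retained send is old enough.
            return self.message_list[-1] - self.message_list[0] >= self.interval_limit
        return True
```

`RestrictedClient.send_message` can then consult `is_legal()` and bail out (ideally with a loud log message) when it returns False.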
<code>
[start of contrib_bots/run.py]
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import importlib
5 import logging
6 import optparse
7 import os
8 import sys
9
10 our_dir = os.path.dirname(os.path.abspath(__file__))
11
12 # For dev setups, we can find the API in the repo itself.
13 if os.path.exists(os.path.join(our_dir, '../api/zulip')):
14 sys.path.insert(0, '../api')
15
16 from zulip import Client
17
18 class RestrictedClient(object):
19 def __init__(self, client):
20 # Only expose a subset of our Client's functionality
21 user_profile = client.get_profile()
22 self.send_message = client.send_message
23 try:
24 self.full_name = user_profile['full_name']
25 self.email = user_profile['email']
26 except KeyError:
27 logging.error('Cannot fetch user profile, make sure you have set'
28 ' up the zuliprc file correctly.')
29 sys.exit(1)
30
31 def get_lib_module(lib_fn):
32 lib_fn = os.path.abspath(lib_fn)
33 if not os.path.dirname(lib_fn).startswith(os.path.join(our_dir, 'lib')):
34 print('Sorry, we will only import code from contrib_bots/lib.')
35 sys.exit(1)
36
37 if not lib_fn.endswith('.py'):
38 print('Please use a .py extension for library files.')
39 sys.exit(1)
40
41 sys.path.append('lib')
42 base_lib_fn = os.path.basename(os.path.splitext(lib_fn)[0])
43 module_name = 'lib.' + base_lib_fn
44 module = importlib.import_module(module_name)
45 return module
46
47 def run_message_handler_for_bot(lib_module, quiet, config_file):
48 # Make sure you set up your ~/.zuliprc
49 client = Client(config_file=config_file)
50 restricted_client = RestrictedClient(client)
51
52 message_handler = lib_module.handler_class()
53
54 class StateHandler(object):
55 def __init__(self):
56 self.state = None
57
58 def set_state(self, state):
59 self.state = state
60
61 def get_state(self):
62 return self.state
63
64 state_handler = StateHandler()
65
66 if not quiet:
67 print(message_handler.usage())
68
69 def handle_message(message):
70 logging.info('waiting for next message')
71 if message_handler.triage_message(message=message,
72 client=restricted_client):
73 message_handler.handle_message(
74 message=message,
75 client=restricted_client,
76 state_handler=state_handler
77 )
78
79 logging.info('starting message handling...')
80 client.call_on_each_message(handle_message)
81
82 def run():
83 usage = '''
84 ./run.py <lib file>
85
86 Example: ./run.py lib/followup.py
87
88 (This program loads bot-related code from the
89 library code and then runs a message loop,
90 feeding messages to the library code to handle.)
91
92 Please make sure you have a current ~/.zuliprc
93 file with the credentials you want to use for
94 this bot.
95
96 See lib/readme.md for more context.
97 '''
98
99 parser = optparse.OptionParser(usage=usage)
100 parser.add_option('--quiet', '-q',
101 action='store_true',
102 help='Turn off logging output.')
103 parser.add_option('--config-file',
104 action='store',
105 help='(alternate config file to ~/.zuliprc)')
106 (options, args) = parser.parse_args()
107
108 if len(args) == 0:
109 print('You must specify a library!')
110 sys.exit(1)
111
112 lib_module = get_lib_module(lib_fn=args[0])
113
114 if not options.quiet:
115 logging.basicConfig(stream=sys.stdout, level=logging.INFO)
116
117 run_message_handler_for_bot(
118 lib_module=lib_module,
119 config_file=options.config_file,
120 quiet=options.quiet
121 )
122
123 if __name__ == '__main__':
124 run()
125
[end of contrib_bots/run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/contrib_bots/run.py b/contrib_bots/run.py
--- a/contrib_bots/run.py
+++ b/contrib_bots/run.py
@@ -6,6 +6,7 @@
import optparse
import os
import sys
+import time
our_dir = os.path.dirname(os.path.abspath(__file__))
@@ -15,11 +16,27 @@
from zulip import Client
+class RateLimit(object):
+ def __init__(self, message_limit, interval_limit):
+ self.message_limit = message_limit
+ self.interval_limit = interval_limit
+ self.message_list = []
+
+ def is_legal(self):
+ self.message_list.append(time.time())
+ if len(self.message_list) > self.message_limit:
+ self.message_list.pop(0)
+ time_diff = self.message_list[-1] - self.message_list[0]
+ return time_diff >= self.interval_limit
+ else:
+ return True
+
class RestrictedClient(object):
def __init__(self, client):
# Only expose a subset of our Client's functionality
user_profile = client.get_profile()
- self.send_message = client.send_message
+ self.rate_limit = RateLimit(20, 5)
+ self.client = client
try:
self.full_name = user_profile['full_name']
self.email = user_profile['email']
@@ -28,6 +45,15 @@
' up the zuliprc file correctly.')
sys.exit(1)
+ def send_message(self, *args, **kwargs):
+ if self.rate_limit.is_legal():
+ self.client.send_message(*args, **kwargs)
+ else:
+ logging.error('-----> !*!*!*MESSAGE RATE LIMIT REACHED, EXITING*!*!*! <-----\n'
+ 'Is your bot trapped in an infinite loop by reacting to'
+ ' its own messages?')
+ sys.exit(1)
+
def get_lib_module(lib_fn):
lib_fn = os.path.abspath(lib_fn)
if not os.path.dirname(lib_fn).startswith(os.path.join(our_dir, 'lib')):
| {"golden_diff": "diff --git a/contrib_bots/run.py b/contrib_bots/run.py\n--- a/contrib_bots/run.py\n+++ b/contrib_bots/run.py\n@@ -6,6 +6,7 @@\n import optparse\n import os\n import sys\n+import time\n \n our_dir = os.path.dirname(os.path.abspath(__file__))\n \n@@ -15,11 +16,27 @@\n \n from zulip import Client\n \n+class RateLimit(object):\n+ def __init__(self, message_limit, interval_limit):\n+ self.message_limit = message_limit\n+ self.interval_limit = interval_limit\n+ self.message_list = []\n+\n+ def is_legal(self):\n+ self.message_list.append(time.time())\n+ if len(self.message_list) > self.message_limit:\n+ self.message_list.pop(0)\n+ time_diff = self.message_list[-1] - self.message_list[0]\n+ return time_diff >= self.interval_limit\n+ else:\n+ return True\n+\n class RestrictedClient(object):\n def __init__(self, client):\n # Only expose a subset of our Client's functionality\n user_profile = client.get_profile()\n- self.send_message = client.send_message\n+ self.rate_limit = RateLimit(20, 5)\n+ self.client = client\n try:\n self.full_name = user_profile['full_name']\n self.email = user_profile['email']\n@@ -28,6 +45,15 @@\n ' up the zuliprc file correctly.')\n sys.exit(1)\n \n+ def send_message(self, *args, **kwargs):\n+ if self.rate_limit.is_legal():\n+ self.client.send_message(*args, **kwargs)\n+ else:\n+ logging.error('-----> !*!*!*MESSAGE RATE LIMIT REACHED, EXITING*!*!*! <-----\\n'\n+ 'Is your bot trapped in an infinite loop by reacting to'\n+ ' its own messages?')\n+ sys.exit(1)\n+\n def get_lib_module(lib_fn):\n lib_fn = os.path.abspath(lib_fn)\n if not os.path.dirname(lib_fn).startswith(os.path.join(our_dir, 'lib')):\n", "issue": "contrib_bots: Prevent runaway bots with rate limiting\nIt's possible to create a runaway bot if you have a bot send a message to certain stimuli that in turns becomes a stimulus for the bot to send another message and so on and so forth.\r\n\r\nWe can prevent that in `contrib_bots/run.py` by adding some logic to prevent runaway bots.\r\n\r\nRight now RestrictedClient make self.send_message just be client.send_message, but we can instead have a wrapper like this:\r\n\r\n```\r\ndef send_message(self, *args, **kwargs):\r\n self.rate_limit()\r\n self.client.send_message(*args, **kwargs)\r\n```\r\n\r\nAnd then have appropriate logic in `rate_limit()` and some state variable to make sure less than N messages have been sent in the last second. 
It might make sense to have a small class for rate limiting that RestrictedClient uses an instance of.\r\n\r\nYou can do a pretty naive rate limiting scheme where you just keep the last N timestamps in a Python list and truncate it off the front as new sends come in and the list grows to size N+1.\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport importlib\nimport logging\nimport optparse\nimport os\nimport sys\n\nour_dir = os.path.dirname(os.path.abspath(__file__))\n\n# For dev setups, we can find the API in the repo itself.\nif os.path.exists(os.path.join(our_dir, '../api/zulip')):\n sys.path.insert(0, '../api')\n\nfrom zulip import Client\n\nclass RestrictedClient(object):\n def __init__(self, client):\n # Only expose a subset of our Client's functionality\n user_profile = client.get_profile()\n self.send_message = client.send_message\n try:\n self.full_name = user_profile['full_name']\n self.email = user_profile['email']\n except KeyError:\n logging.error('Cannot fetch user profile, make sure you have set'\n ' up the zuliprc file correctly.')\n sys.exit(1)\n\ndef get_lib_module(lib_fn):\n lib_fn = os.path.abspath(lib_fn)\n if not os.path.dirname(lib_fn).startswith(os.path.join(our_dir, 'lib')):\n print('Sorry, we will only import code from contrib_bots/lib.')\n sys.exit(1)\n\n if not lib_fn.endswith('.py'):\n print('Please use a .py extension for library files.')\n sys.exit(1)\n\n sys.path.append('lib')\n base_lib_fn = os.path.basename(os.path.splitext(lib_fn)[0])\n module_name = 'lib.' + base_lib_fn\n module = importlib.import_module(module_name)\n return module\n\ndef run_message_handler_for_bot(lib_module, quiet, config_file):\n # Make sure you set up your ~/.zuliprc\n client = Client(config_file=config_file)\n restricted_client = RestrictedClient(client)\n\n message_handler = lib_module.handler_class()\n\n class StateHandler(object):\n def __init__(self):\n self.state = None\n\n def set_state(self, state):\n self.state = state\n\n def get_state(self):\n return self.state\n\n state_handler = StateHandler()\n\n if not quiet:\n print(message_handler.usage())\n\n def handle_message(message):\n logging.info('waiting for next message')\n if message_handler.triage_message(message=message,\n client=restricted_client):\n message_handler.handle_message(\n message=message,\n client=restricted_client,\n state_handler=state_handler\n )\n\n logging.info('starting message handling...')\n client.call_on_each_message(handle_message)\n\ndef run():\n usage = '''\n ./run.py <lib file>\n\n Example: ./run.py lib/followup.py\n\n (This program loads bot-related code from the\n library code and then runs a message loop,\n feeding messages to the library code to handle.)\n\n Please make sure you have a current ~/.zuliprc\n file with the credentials you want to use for\n this bot.\n\n See lib/readme.md for more context.\n '''\n\n parser = optparse.OptionParser(usage=usage)\n parser.add_option('--quiet', '-q',\n action='store_true',\n help='Turn off logging output.')\n parser.add_option('--config-file',\n action='store',\n help='(alternate config file to ~/.zuliprc)')\n (options, args) = parser.parse_args()\n\n if len(args) == 0:\n print('You must specify a library!')\n sys.exit(1)\n\n lib_module = get_lib_module(lib_fn=args[0])\n\n if not options.quiet:\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n run_message_handler_for_bot(\n lib_module=lib_module,\n config_file=options.config_file,\n quiet=options.quiet\n )\n\nif __name__ == 
'__main__':\n run()\n", "path": "contrib_bots/run.py"}]} | 1,837 | 472 |
gh_patches_debug_38222 | rasdani/github-patches | git_diff | getredash__redash-1394 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Share access permissions for add/remove widgets
Hi @arikfr ,
in our PR #1113 we focused primarily on sharing access permissions for queries, and for dashboards we currently only allow modifying the basic info of the dashboard. What is still missing is the ability to allow other users to add and remove widgets. Is that something you are planning to add, or should we give it a shot?
Another thing that came up - we're currently enforcing `require_admin_or_owner(...)` for managing access permissions (e.g., https://github.com/getredash/redash/blob/master/redash/handlers/permissions.py#L42). This is actually a very restrictive limitation, and we believe that anybody with permissions (owner, admin, other permitted users) should be able to add/remove users. For instance, consider person A creating a dashboard and then giving access to persons B and C, who actively maintain that dashboard. If person A leaves the company, B and C would not be able to make the required changes to add another person D. What do you think?
/cc @rohanpd
</issue>
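One way to relax the owner-only checks is an object-level permission helper that admits the owner, admins, and any user granted modify access. A hedged sketch of a widget handler using such a helper (the helper name is an assumption here):

```python
from redash import models
from redash.handlers.base import BaseResource
from redash.permissions import require_object_modify_permission  # assumed helper

class WidgetResource(BaseResource):
    def delete(self, widget_id):
        widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
        # Owner, admin, or any user with modify permission may proceed.
        require_object_modify_permission(widget.dashboard, self.current_user)
        widget.delete_instance()
        return {'layout': widget.dashboard.layout}
```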
<code>
[start of redash/handlers/widgets.py]
1 import json
2
3 from flask import request
4
5 from redash import models
6 from redash.permissions import require_permission, require_admin_or_owner, require_access, view_only
7 from redash.handlers.base import BaseResource
8
9
10 class WidgetListResource(BaseResource):
11 @require_permission('edit_dashboard')
12 def post(self):
13 widget_properties = request.get_json(force=True)
14 dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
15 require_admin_or_owner(dashboard.user_id)
16
17 widget_properties['options'] = json.dumps(widget_properties['options'])
18 widget_properties.pop('id', None)
19 widget_properties['dashboard'] = dashboard
20
21 visualization_id = widget_properties.pop('visualization_id')
22 if visualization_id:
23 visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
24 require_access(visualization.query.groups, self.current_user, view_only)
25 else:
26 visualization = None
27
28 widget_properties['visualization'] = visualization
29
30 widget = models.Widget.create(**widget_properties)
31
32 layout = json.loads(widget.dashboard.layout)
33 new_row = True
34
35 if len(layout) == 0 or widget.width == 2:
36 layout.append([widget.id])
37 elif len(layout[-1]) == 1:
38 neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])
39 if neighbour_widget.width == 1:
40 layout[-1].append(widget.id)
41 new_row = False
42 else:
43 layout.append([widget.id])
44 else:
45 layout.append([widget.id])
46
47 widget.dashboard.layout = json.dumps(layout)
48 widget.dashboard.save()
49
50 return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}
51
52
53 class WidgetResource(BaseResource):
54 @require_permission('edit_dashboard')
55 def post(self, widget_id):
56 # This method currently handles Text Box widgets only.
57 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
58 require_admin_or_owner(widget.dashboard.user_id)
59 widget_properties = request.get_json(force=True)
60 widget.text = widget_properties['text']
61 widget.save()
62
63 return widget.to_dict()
64
65 @require_permission('edit_dashboard')
66 def delete(self, widget_id):
67 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
68 require_admin_or_owner(widget.dashboard.user_id)
69 widget.delete_instance()
70
71 return {'layout': widget.dashboard.layout}
72
[end of redash/handlers/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -1,10 +1,11 @@
import json
from flask import request
-
from redash import models
-from redash.permissions import require_permission, require_admin_or_owner, require_access, view_only
from redash.handlers.base import BaseResource
+from redash.permissions import (require_access,
+ require_object_modify_permission,
+ require_permission, view_only)
class WidgetListResource(BaseResource):
@@ -12,7 +13,7 @@
def post(self):
widget_properties = request.get_json(force=True)
dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
- require_admin_or_owner(dashboard.user_id)
+ require_object_modify_permission(dashboard, self.current_user)
widget_properties['options'] = json.dumps(widget_properties['options'])
widget_properties.pop('id', None)
@@ -47,7 +48,7 @@
widget.dashboard.layout = json.dumps(layout)
widget.dashboard.save()
- return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}
+ return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version}
class WidgetResource(BaseResource):
@@ -55,7 +56,7 @@
def post(self, widget_id):
# This method currently handles Text Box widgets only.
widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
- require_admin_or_owner(widget.dashboard.user_id)
+ require_object_modify_permission(widget.dashboard, self.current_user)
widget_properties = request.get_json(force=True)
widget.text = widget_properties['text']
widget.save()
@@ -65,7 +66,7 @@
@require_permission('edit_dashboard')
def delete(self, widget_id):
widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
- require_admin_or_owner(widget.dashboard.user_id)
+ require_object_modify_permission(widget.dashboard, self.current_user)
widget.delete_instance()
- return {'layout': widget.dashboard.layout}
+ return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}
| {"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -1,10 +1,11 @@\n import json\n \n from flask import request\n-\n from redash import models\n-from redash.permissions import require_permission, require_admin_or_owner, require_access, view_only\n from redash.handlers.base import BaseResource\n+from redash.permissions import (require_access,\n+ require_object_modify_permission,\n+ require_permission, view_only)\n \n \n class WidgetListResource(BaseResource):\n@@ -12,7 +13,7 @@\n def post(self):\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n- require_admin_or_owner(dashboard.user_id)\n+ require_object_modify_permission(dashboard, self.current_user)\n \n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n@@ -47,7 +48,7 @@\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n \n- return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n+ return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version}\n \n \n class WidgetResource(BaseResource):\n@@ -55,7 +56,7 @@\n def post(self, widget_id):\n # This method currently handles Text Box widgets only.\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n- require_admin_or_owner(widget.dashboard.user_id)\n+ require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.save()\n@@ -65,7 +66,7 @@\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n- require_admin_or_owner(widget.dashboard.user_id)\n+ require_object_modify_permission(widget.dashboard, self.current_user)\n widget.delete_instance()\n \n- return {'layout': widget.dashboard.layout}\n+ return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}\n", "issue": "Share access permissions for add/remove widgets\nHi @arikfr ,\r\n\r\nin our PR #1113 we focused primarily on sharing access permissions for queries, and for dashboards we currently only allow to modify basic info of the dashboard. What is still missing is ability to allow other users to add and remove widgets. Is that something you are planning to add, or should we give it a shot?\r\n\r\nAnother thing that came up - we're currently enforcing `require_admin_or_owner(...)` for managing access permissions (e.g., https://github.com/getredash/redash/blob/master/redash/handlers/permissions.py#L42). This is actually a very restrictive limitation, and we believe that anybody with permissions (owner, admin, other permitted users) should be able to add/remove users. For instance, if you consider person A creating a dashboard, then giving access to persons B and C who are actively maintaining that dashboard. Then, if person A leaves the company, B and C would not be able to make the required changes to add another person D. 
What do you think?\r\n\r\n/cc @rohanpd\n", "before_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.permissions import require_permission, require_admin_or_owner, require_access, view_only\nfrom redash.handlers.base import BaseResource\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_admin_or_owner(dashboard.user_id)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget.create(**widget_properties)\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n # This method currently handles Text Box widgets only.\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_admin_or_owner(widget.dashboard.user_id)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.save()\n\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_admin_or_owner(widget.dashboard.user_id)\n widget.delete_instance()\n\n return {'layout': widget.dashboard.layout}\n", "path": "redash/handlers/widgets.py"}]} | 1,425 | 507 |
gh_patches_debug_25840 | rasdani/github-patches | git_diff | lnbits__lnbits-690 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Split payments shares <1%
Trying to set a payment share below 1% will result in a 500 INTERNAL SERVER ERROR.
This should work: sub-1% shares would be very useful as a fee for hosting lnbits, and a minimum of 1% is too much.

</issue>
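Before the code, a minimal sketch of why fractional shares fail and what the accepted fix (shown later in this record) changes. It is simplified to plain pydantic `Field` constraints; the real models attach the same constraints through FastAPI `Query` defaults, and the exact validation message depends on the pydantic version:

```python
from pydantic import BaseModel, Field, ValidationError

class TargetBefore(BaseModel):
    percent: int = Field(..., ge=1)       # integer shares, minimum 1%

class TargetAfter(BaseModel):
    percent: float = Field(..., ge=0.01)  # fractional shares down to 0.01%

try:
    # rejected outright (pydantic v2) or truncated to 0 and then failing
    # the ge=1 check (pydantic v1); either way a ValidationError
    TargetBefore(percent=0.5)
except ValidationError as exc:
    print("rejected:", exc.errors()[0]["msg"])

print(TargetAfter(percent=0.5).percent)   # 0.5 is accepted after the fix
```

Note that the model change alone is not enough: the `percent` column was created as `INTEGER` with a matching CHECK constraint, so the fix also needs the schema migration visible in the golden diff.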
<code>
[start of lnbits/extensions/splitpayments/migrations.py]
1 async def m001_initial(db):
2 """
3 Initial split payment table.
4 """
5 await db.execute(
6 """
7 CREATE TABLE splitpayments.targets (
8 wallet TEXT NOT NULL,
9 source TEXT NOT NULL,
10 percent INTEGER NOT NULL CHECK (percent >= 0 AND percent <= 100),
11 alias TEXT,
12
13 UNIQUE (source, wallet)
14 );
15 """
16 )
17
[end of lnbits/extensions/splitpayments/migrations.py]
[start of lnbits/extensions/splitpayments/models.py]
1 from typing import List, Optional
2
3 from fastapi.param_functions import Query
4 from pydantic import BaseModel
5
6
7 class Target(BaseModel):
8 wallet: str
9 source: str
10 percent: int
11 alias: Optional[str]
12
13
14 class TargetPutList(BaseModel):
15 wallet: str = Query(...)
16 alias: str = Query("")
17 percent: int = Query(..., ge=1)
18
19
20 class TargetPut(BaseModel):
21 __root__: List[TargetPutList]
22
[end of lnbits/extensions/splitpayments/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lnbits/extensions/splitpayments/migrations.py b/lnbits/extensions/splitpayments/migrations.py
--- a/lnbits/extensions/splitpayments/migrations.py
+++ b/lnbits/extensions/splitpayments/migrations.py
@@ -14,3 +14,41 @@
);
"""
)
+
+
+async def m002_float_percent(db):
+ """
+ Add float percent and migrates the existing data.
+ """
+ await db.execute("ALTER TABLE splitpayments.targets RENAME TO splitpayments_old")
+ await db.execute(
+ """
+ CREATE TABLE splitpayments.targets (
+ wallet TEXT NOT NULL,
+ source TEXT NOT NULL,
+ percent REAL NOT NULL CHECK (percent >= 0 AND percent <= 100),
+ alias TEXT,
+
+ UNIQUE (source, wallet)
+ );
+ """
+ )
+
+ for row in [
+ list(row)
+ for row in await db.fetchall("SELECT * FROM splitpayments.splitpayments_old")
+ ]:
+ await db.execute(
+ """
+ INSERT INTO splitpayments.targets (
+ wallet,
+ source,
+ percent,
+ alias
+ )
+ VALUES (?, ?, ?, ?)
+ """,
+ (row[0], row[1], row[2], row[3]),
+ )
+
+ await db.execute("DROP TABLE splitpayments.splitpayments_old")
diff --git a/lnbits/extensions/splitpayments/models.py b/lnbits/extensions/splitpayments/models.py
--- a/lnbits/extensions/splitpayments/models.py
+++ b/lnbits/extensions/splitpayments/models.py
@@ -7,14 +7,14 @@
class Target(BaseModel):
wallet: str
source: str
- percent: int
+ percent: float
alias: Optional[str]
class TargetPutList(BaseModel):
wallet: str = Query(...)
alias: str = Query("")
- percent: int = Query(..., ge=1)
+ percent: float = Query(..., ge=0.01)
class TargetPut(BaseModel):
| {"golden_diff": "diff --git a/lnbits/extensions/splitpayments/migrations.py b/lnbits/extensions/splitpayments/migrations.py\n--- a/lnbits/extensions/splitpayments/migrations.py\n+++ b/lnbits/extensions/splitpayments/migrations.py\n@@ -14,3 +14,41 @@\n );\n \"\"\"\n )\n+\n+\n+async def m002_float_percent(db):\n+ \"\"\"\n+ Add float percent and migrates the existing data.\n+ \"\"\"\n+ await db.execute(\"ALTER TABLE splitpayments.targets RENAME TO splitpayments_old\")\n+ await db.execute(\n+ \"\"\"\n+ CREATE TABLE splitpayments.targets (\n+ wallet TEXT NOT NULL,\n+ source TEXT NOT NULL,\n+ percent REAL NOT NULL CHECK (percent >= 0 AND percent <= 100),\n+ alias TEXT,\n+\n+ UNIQUE (source, wallet)\n+ );\n+ \"\"\"\n+ )\n+\n+ for row in [\n+ list(row)\n+ for row in await db.fetchall(\"SELECT * FROM splitpayments.splitpayments_old\")\n+ ]:\n+ await db.execute(\n+ \"\"\"\n+ INSERT INTO splitpayments.targets (\n+ wallet,\n+ source,\n+ percent,\n+ alias\n+ )\n+ VALUES (?, ?, ?, ?)\n+ \"\"\",\n+ (row[0], row[1], row[2], row[3]),\n+ )\n+\n+ await db.execute(\"DROP TABLE splitpayments.splitpayments_old\")\ndiff --git a/lnbits/extensions/splitpayments/models.py b/lnbits/extensions/splitpayments/models.py\n--- a/lnbits/extensions/splitpayments/models.py\n+++ b/lnbits/extensions/splitpayments/models.py\n@@ -7,14 +7,14 @@\n class Target(BaseModel):\n wallet: str\n source: str\n- percent: int\n+ percent: float\n alias: Optional[str]\n \n \n class TargetPutList(BaseModel):\n wallet: str = Query(...)\n alias: str = Query(\"\")\n- percent: int = Query(..., ge=1)\n+ percent: float = Query(..., ge=0.01)\n \n \n class TargetPut(BaseModel):\n", "issue": "Split payments shares <1%\nTrying to set a payment share <1% will result in 500 INTERNAL SERVER ERROR.\r\nThis should work as it would be very useful as a fee for hosting lnbits but 1% is too much.\r\n\r\n\n", "before_files": [{"content": "async def m001_initial(db):\n \"\"\"\n Initial split payment table.\n \"\"\"\n await db.execute(\n \"\"\"\n CREATE TABLE splitpayments.targets (\n wallet TEXT NOT NULL,\n source TEXT NOT NULL,\n percent INTEGER NOT NULL CHECK (percent >= 0 AND percent <= 100),\n alias TEXT,\n\n UNIQUE (source, wallet)\n );\n \"\"\"\n )\n", "path": "lnbits/extensions/splitpayments/migrations.py"}, {"content": "from typing import List, Optional\n\nfrom fastapi.param_functions import Query\nfrom pydantic import BaseModel\n\n\nclass Target(BaseModel):\n wallet: str\n source: str\n percent: int\n alias: Optional[str]\n\n\nclass TargetPutList(BaseModel):\n wallet: str = Query(...)\n alias: str = Query(\"\")\n percent: int = Query(..., ge=1)\n\n\nclass TargetPut(BaseModel):\n __root__: List[TargetPutList]\n", "path": "lnbits/extensions/splitpayments/models.py"}]} | 928 | 465 |
gh_patches_debug_24470 | rasdani/github-patches | git_diff | talonhub__community-889 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve VSCode draft editor robustness
Users can lose drafts if window switch is too slow. To address this issue, we propose the following:
- [x] Increase sleep timeout in https://github.com/knausj85/knausj_talon/blob/0d4ad8523b87c2fe10457b7fae7f2ba2f22ad735/draft_editor/draft_editor.py#L112
- [x] Keep last draft in memory, and have "draft submit" when outside of VSCode just submit the most recent draft. That way if initial draft submit doesn't work, user can just say "draft submit" again
- [x] While we're here, add "draft top", which selects from cursor to start of document
</issue>
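A sketch of the second bullet: remember the last submitted draft so the submit can be retried from anywhere. The shape mirrors the accepted patch further down in this record, trimmed to the draft-tracking logic; tab and window handling from the real `close_editor` is omitted, and all `actions.*` calls are Talon's standard action registry as used in the listing below:

```python
from talon import actions

last_draft = None  # most recently submitted draft text


def close_editor(submit_draft: bool):
    global last_draft
    actions.edit.select_all()
    selected_text = actions.edit.selected_text()
    actions.edit.delete()
    if submit_draft:
        last_draft = selected_text
        actions.user.paste(selected_text)


def draft_editor_paste_last():
    """Paste the last submitted draft again, e.g. if the first paste was lost."""
    if last_draft:
        actions.user.paste(last_draft)
```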
<code>
[start of draft_editor/draft_editor.py]
1 from talon import Context, Module, actions, ui
2
3 mod = Module()
4 mod.tag("draft_editor_active", "Indicates whether the draft editor has been activated")
5 mod.tag(
6 "draft_editor_app_focused",
7 "Indicates that the draft editor app currently has focus",
8 )
9
10 ctx = Context()
11 tags: set[str] = set()
12
13
14 def add_tag(tag: str):
15 tags.add(tag)
16 ctx.tags = list(tags)
17
18
19 def remove_tag(tag: str):
20 tags.discard(tag)
21 ctx.tags = list(tags)
22
23
24 default_names = ["Visual Studio Code", "Code", "VSCodium", "Codium", "code-oss"]
25
26 setting_editor_names = mod.setting(
27 "draft_editor",
28 type=str,
29 default=None,
30 desc="List of application names to use for draft editor",
31 )
32
33
34 def get_editor_names():
35 names_csv = setting_editor_names.get()
36 return names_csv.split(", ") if names_csv else default_names
37
38
39 @mod.scope
40 def scope():
41 editor_names = get_editor_names()
42
43 for app in ui.apps(background=False):
44 if app.name in editor_names:
45 return {"draft_editor_running": True}
46
47 return {"draft_editor_running": False}
48
49
50 def handle_app_activate(app):
51 if app.name in get_editor_names():
52 add_tag("user.draft_editor_app_focused")
53 else:
54 remove_tag("user.draft_editor_app_focused")
55
56
57 ui.register("app_launch", scope.update)
58 ui.register("app_close", scope.update)
59 ui.register("app_activate", handle_app_activate)
60
61
62 original_window = None
63
64
65 @mod.action_class
66 class Actions:
67 def draft_editor_open():
68 """Open draft editor"""
69 global original_window
70 original_window = ui.active_window()
71 editor_app = get_editor_app()
72 selected_text = actions.edit.selected_text()
73 actions.user.switcher_focus_app(editor_app)
74 # Wait additional time for talon context to update.
75 actions.sleep("200ms")
76 actions.app.tab_open()
77 if selected_text != "":
78 actions.user.paste(selected_text)
79 add_tag("user.draft_editor_active")
80
81 def draft_editor_submit():
82 """Submit/save draft editor"""
83 close_editor(submit_draft=True)
84
85 def draft_editor_discard():
86 """Discard draft editor"""
87 close_editor(submit_draft=False)
88
89
90 def get_editor_app() -> ui.App:
91 editor_names = get_editor_names()
92
93 for app in ui.apps(background=False):
94 if app.name in editor_names:
95 return app
96
97 raise RuntimeError("Draft editor is not running")
98
99
100 def close_editor(submit_draft: bool):
101 remove_tag("user.draft_editor_active")
102 actions.edit.select_all()
103 selected_text = actions.edit.selected_text()
104 actions.edit.delete()
105 actions.app.tab_close()
106 actions.user.switcher_focus_window(original_window)
107 actions.sleep("200ms")
108 if submit_draft:
109 actions.user.paste(selected_text)
110
[end of draft_editor/draft_editor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/draft_editor/draft_editor.py b/draft_editor/draft_editor.py
--- a/draft_editor/draft_editor.py
+++ b/draft_editor/draft_editor.py
@@ -61,6 +61,8 @@
original_window = None
+last_draft = None
+
@mod.action_class
class Actions:
@@ -86,6 +88,11 @@
"""Discard draft editor"""
close_editor(submit_draft=False)
+ def draft_editor_paste_last():
+ """Paste last submitted draft"""
+ if last_draft:
+ actions.user.paste(last_draft)
+
def get_editor_app() -> ui.App:
editor_names = get_editor_names()
@@ -98,12 +105,14 @@
def close_editor(submit_draft: bool):
+ global last_draft
remove_tag("user.draft_editor_active")
actions.edit.select_all()
selected_text = actions.edit.selected_text()
actions.edit.delete()
actions.app.tab_close()
actions.user.switcher_focus_window(original_window)
- actions.sleep("200ms")
+ actions.sleep("300ms")
if submit_draft:
+ last_draft = selected_text
actions.user.paste(selected_text)
| {"golden_diff": "diff --git a/draft_editor/draft_editor.py b/draft_editor/draft_editor.py\n--- a/draft_editor/draft_editor.py\n+++ b/draft_editor/draft_editor.py\n@@ -61,6 +61,8 @@\n \n original_window = None\n \n+last_draft = None\n+\n \n @mod.action_class\n class Actions:\n@@ -86,6 +88,11 @@\n \"\"\"Discard draft editor\"\"\"\n close_editor(submit_draft=False)\n \n+ def draft_editor_paste_last():\n+ \"\"\"Paste last submitted draft\"\"\"\n+ if last_draft:\n+ actions.user.paste(last_draft)\n+\n \n def get_editor_app() -> ui.App:\n editor_names = get_editor_names()\n@@ -98,12 +105,14 @@\n \n \n def close_editor(submit_draft: bool):\n+ global last_draft\n remove_tag(\"user.draft_editor_active\")\n actions.edit.select_all()\n selected_text = actions.edit.selected_text()\n actions.edit.delete()\n actions.app.tab_close()\n actions.user.switcher_focus_window(original_window)\n- actions.sleep(\"200ms\")\n+ actions.sleep(\"300ms\")\n if submit_draft:\n+ last_draft = selected_text\n actions.user.paste(selected_text)\n", "issue": "Improve VSCode draft editor robustness\nUsers can lose drafts if window switch is too slow. To address this issue, we propose the following:\r\n\r\n- [x] Increase sleep timeout in https://github.com/knausj85/knausj_talon/blob/0d4ad8523b87c2fe10457b7fae7f2ba2f22ad735/draft_editor/draft_editor.py#L112\r\n- [x] Keep last draft in memory, and have \"draft submit\" when outside of VSCode just submit the most recent draft. That way if initial draft submit doesn't work, user can just say \"draft submit\" again\r\n- [x] While we're here, add \"draft top\", which selects from cursor to start of document\n", "before_files": [{"content": "from talon import Context, Module, actions, ui\n\nmod = Module()\nmod.tag(\"draft_editor_active\", \"Indicates whether the draft editor has been activated\")\nmod.tag(\n \"draft_editor_app_focused\",\n \"Indicates that the draft editor app currently has focus\",\n)\n\nctx = Context()\ntags: set[str] = set()\n\n\ndef add_tag(tag: str):\n tags.add(tag)\n ctx.tags = list(tags)\n\n\ndef remove_tag(tag: str):\n tags.discard(tag)\n ctx.tags = list(tags)\n\n\ndefault_names = [\"Visual Studio Code\", \"Code\", \"VSCodium\", \"Codium\", \"code-oss\"]\n\nsetting_editor_names = mod.setting(\n \"draft_editor\",\n type=str,\n default=None,\n desc=\"List of application names to use for draft editor\",\n)\n\n\ndef get_editor_names():\n names_csv = setting_editor_names.get()\n return names_csv.split(\", \") if names_csv else default_names\n\n\[email protected]\ndef scope():\n editor_names = get_editor_names()\n\n for app in ui.apps(background=False):\n if app.name in editor_names:\n return {\"draft_editor_running\": True}\n\n return {\"draft_editor_running\": False}\n\n\ndef handle_app_activate(app):\n if app.name in get_editor_names():\n add_tag(\"user.draft_editor_app_focused\")\n else:\n remove_tag(\"user.draft_editor_app_focused\")\n\n\nui.register(\"app_launch\", scope.update)\nui.register(\"app_close\", scope.update)\nui.register(\"app_activate\", handle_app_activate)\n\n\noriginal_window = None\n\n\[email protected]_class\nclass Actions:\n def draft_editor_open():\n \"\"\"Open draft editor\"\"\"\n global original_window\n original_window = ui.active_window()\n editor_app = get_editor_app()\n selected_text = actions.edit.selected_text()\n actions.user.switcher_focus_app(editor_app)\n # Wait additional time for talon context to update.\n actions.sleep(\"200ms\")\n actions.app.tab_open()\n if selected_text != \"\":\n actions.user.paste(selected_text)\n 
add_tag(\"user.draft_editor_active\")\n\n def draft_editor_submit():\n \"\"\"Submit/save draft editor\"\"\"\n close_editor(submit_draft=True)\n\n def draft_editor_discard():\n \"\"\"Discard draft editor\"\"\"\n close_editor(submit_draft=False)\n\n\ndef get_editor_app() -> ui.App:\n editor_names = get_editor_names()\n\n for app in ui.apps(background=False):\n if app.name in editor_names:\n return app\n\n raise RuntimeError(\"Draft editor is not running\")\n\n\ndef close_editor(submit_draft: bool):\n remove_tag(\"user.draft_editor_active\")\n actions.edit.select_all()\n selected_text = actions.edit.selected_text()\n actions.edit.delete()\n actions.app.tab_close()\n actions.user.switcher_focus_window(original_window)\n actions.sleep(\"200ms\")\n if submit_draft:\n actions.user.paste(selected_text)\n", "path": "draft_editor/draft_editor.py"}]} | 1,569 | 273 |
gh_patches_debug_5922 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV2 cloudformation checks are not packaged into the whl file
**Describe the issue**
CKV2 cloudformation checks are not packaged into the whl file.
I don't see them in the list command or the policy index.
Checks are stored at: https://github.com/bridgecrewio/checkov/tree/master/checkov/cloudformation/checks/graph_checks/aws
**Additional context**
A solution should be similar to https://github.com/bridgecrewio/checkov/pull/2255
The impact is that the CKV2 CloudFormation (cfn) policies are not running.
</issue>
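The accepted fix (diff below) registers the CloudFormation graph-check package with setuptools, mirroring the Bicep and Terraform entries that already exist in `setup.py`. A sketch of the relevant `setup()` fragment; the `package_data` glob is an assumption about where the YAML files live and is not part of the recorded diff:

```python
from setuptools import setup

setup(
    # ...all other arguments unchanged...
    package_dir={
        "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
        "checkov.cloudformation.checks.graph_checks": "checkov/cloudformation/checks/graph_checks",
        "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
    },
    package_data={
        # assumption: the per-provider YAML graph checks must also be listed
        # so they are shipped inside the wheel
        "checkov.cloudformation.checks.graph_checks": ["aws/*.yaml"],
    },
)
```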
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2==0.3.42",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions>=4.1.0",
60 "cachetools",
61 "cyclonedx-python-lib>=2.4.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0",
68 "prettytable>=3.0.0",
69 "pycep-parser==0.3.7",
70 "charset-normalizer",
71 ],
72 license="Apache License 2.0",
73 name="checkov",
74 version=version,
75 python_requires=">=3.7",
76 description="Infrastructure as code static analysis",
77 author="bridgecrew",
78 author_email="[email protected]",
79 url="https://github.com/bridgecrewio/checkov",
80 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
81 include_package_data=True,
82 package_dir={
83 "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
84 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
85 },
86 package_data={
87 "checkov": ["py.typed"],
88 "checkov.bicep.checks.graph_checks": ["*.yaml"],
89 "checkov.common.util.templates": ["*.jinja2"],
90 "checkov.terraform.checks.graph_checks": [
91 "aws/*.yaml",
92 "gcp/*.yaml",
93 "azure/*.yaml",
94 ],
95 },
96 scripts=["bin/checkov", "bin/checkov.cmd"],
97 long_description=long_description,
98 long_description_content_type="text/markdown",
99 classifiers=[
100 "Environment :: Console",
101 "Intended Audience :: Developers",
102 "Intended Audience :: System Administrators",
103 "License :: OSI Approved :: Apache Software License",
104 "Programming Language :: Python :: 3 :: Only",
105 "Programming Language :: Python :: 3.7",
106 "Programming Language :: Python :: 3.8",
107 "Programming Language :: Python :: 3.9",
108 "Programming Language :: Python :: 3.10",
109 "Topic :: Security",
110 "Topic :: Software Development :: Build Tools",
111 "Typing :: Typed",
112 ],
113 )
114
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -81,6 +81,7 @@
include_package_data=True,
package_dir={
"checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
+ "checkov.cloudformation.checks.graph_checks": "checkov/cloudformation/checks/graph_checks",
"checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
},
package_data={
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -81,6 +81,7 @@\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n+ \"checkov.cloudformation.checks.graph_checks\": \"checkov/cloudformation/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n", "issue": "CKV2 cloudformation checks are not packaged into the whl file \n**Describe the issue**\r\nCKV2 cloudformation checks are not packaged into the whl file.\r\nI don't see them in the list command or the policy index.\r\nChecks are stored at: https://github.com/bridgecrewio/checkov/tree/master/checkov/cloudformation/checks/graph_checks/aws\r\n**Additional context**\r\nA solution should be similar to https://github.com/bridgecrewio/checkov/pull/2255\r\n\r\nThe impact is that ckv2 cfn policies are not running\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.42\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.7\",\n \"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n 
long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]} | 1,745 | 115 |
gh_patches_debug_17020 | rasdani/github-patches | git_diff | pypa__pip-9779 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip 21.0.1 fails when run with warnings converted to errors
**Environment**
* pip version: 21.0.1
* Python version: 3.9.1
* OS: Windows
**Description**
With the latest version of packaging (vendored in 21.0.1) a DeprecationWarning is issued when parsing a "legacy version". If pip is run with warnings converted to errors, this causes a failure.
**Expected behavior**
No error
**How to Reproduce**
`py -wE -m pip --version`
Or to pinpoint it further,
```
py -wE
>>> from pip._vendor import pkg_resources
```
This does *not* happen with setuptools 52.0.0; it appears to be related to the version of setuptools (44.0.0) that we vendor.
**Output**
```
Traceback (most recent call last):
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\packaging\version.py", line 57, in parse
return Version(version)
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\packaging\version.py", line 298, in __init__
raise InvalidVersion("Invalid version: '{0}'".format(version))
pip._vendor.packaging.version.InvalidVersion: Invalid version: 'pip'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 3252, in <module>
def _initialize_master_working_set():
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 3235, in _call_aside
f(*args, **kwargs)
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 3264, in _initialize_master_working_set
working_set = WorkingSet._build_master()
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 574, in _build_master
ws = cls()
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 567, in __init__
self.add_entry(entry)
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 623, in add_entry
for dist in find_distributions(entry, True):
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 2061, in find_on_path
path_item_entries = _by_version_descending(filtered)
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 2034, in _by_version_descending
return sorted(names, key=_by_version, reverse=True)
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 2032, in _by_version
return [packaging.version.parse(part) for part in parts]
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\pkg_resources\__init__.py", line 2032, in <listcomp>
return [packaging.version.parse(part) for part in parts]
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\packaging\version.py", line 59, in parse
return LegacyVersion(version)
File "C:\Users\Gustav\AppData\Local\Programs\Python\Python39\lib\site-packages\pip\_vendor\packaging\version.py", line 127, in __init__
warnings.warn(
DeprecationWarning: Creating a LegacyVersion has been deprecated and will be removed in the next major release
```
</issue>
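A self-contained way to trigger the same failure without the `-W` flag (a sketch; the import path is the one from the traceback, and the filter mirrors what running Python with warnings-as-errors sets up):

```python
import warnings

# Escalate DeprecationWarning to an error, as running pip under -W error does.
warnings.simplefilter("error", DeprecationWarning)

# Importing the vendored pkg_resources scans sys.path and parses "legacy"
# version strings at import time, which is where the traceback above arises.
from pip._vendor import pkg_resources  # noqa: F401
```

The accepted fix (diff below) takes the mirror-image approach: it installs an `ignore` filter for that warning before `pip._internal` is imported, precisely because the deprecation fires at import time rather than at runtime.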
<code>
[start of src/pip/__main__.py]
1 import os
2 import sys
3
4 # Remove '' and current working directory from the first entry
5 # of sys.path, if present to avoid using current directory
6 # in pip commands check, freeze, install, list and show,
7 # when invoked as python -m pip <command>
8 if sys.path[0] in ("", os.getcwd()):
9 sys.path.pop(0)
10
11 # If we are running from a wheel, add the wheel to sys.path
12 # This allows the usage python pip-*.whl/pip install pip-*.whl
13 if __package__ == "":
14 # __file__ is pip-*.whl/pip/__main__.py
15 # first dirname call strips of '/__main__.py', second strips off '/pip'
16 # Resulting path is the name of the wheel itself
17 # Add that to sys.path so we can import pip
18 path = os.path.dirname(os.path.dirname(__file__))
19 sys.path.insert(0, path)
20
21 from pip._internal.cli.main import main as _main
22
23 if __name__ == "__main__":
24 sys.exit(_main())
25
[end of src/pip/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/__main__.py b/src/pip/__main__.py
--- a/src/pip/__main__.py
+++ b/src/pip/__main__.py
@@ -1,5 +1,6 @@
import os
import sys
+import warnings
# Remove '' and current working directory from the first entry
# of sys.path, if present to avoid using current directory
@@ -18,7 +19,13 @@
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
-from pip._internal.cli.main import main as _main
-
if __name__ == "__main__":
+ # Work around the error reported in #9540, pending a proper fix.
+ # Note: It is essential the warning filter is set *before* importing
+ # pip, as the deprecation happens at import time, not runtime.
+ warnings.filterwarnings(
+ "ignore", category=DeprecationWarning, module=".*packaging\\.version"
+ )
+ from pip._internal.cli.main import main as _main
+
sys.exit(_main())
| {"golden_diff": "diff --git a/src/pip/__main__.py b/src/pip/__main__.py\n--- a/src/pip/__main__.py\n+++ b/src/pip/__main__.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+import warnings\n \n # Remove '' and current working directory from the first entry\n # of sys.path, if present to avoid using current directory\n@@ -18,7 +19,13 @@\n path = os.path.dirname(os.path.dirname(__file__))\n sys.path.insert(0, path)\n \n-from pip._internal.cli.main import main as _main\n-\n if __name__ == \"__main__\":\n+ # Work around the error reported in #9540, pending a proper fix.\n+ # Note: It is essential the warning filter is set *before* importing\n+ # pip, as the deprecation happens at import time, not runtime.\n+ warnings.filterwarnings(\n+ \"ignore\", category=DeprecationWarning, module=\".*packaging\\\\.version\"\n+ )\n+ from pip._internal.cli.main import main as _main\n+\n sys.exit(_main())\n", "issue": "pip 21.0.1 fails when run with warnings converted to errors\n**Environment**\r\n\r\n* pip version: 21.0.1\r\n* Python version: 3.9.1\r\n* OS: Windows\r\n\r\n**Description**\r\nWith the latest version of packaging (vendored in 21.0.1) a DeprecationWarning is issued when parsing a \"legacy version\". If pip is run with warnings converted to errors, this causes a failure.\r\n\r\n**Expected behavior**\r\nNo error\r\n\r\n**How to Reproduce**\r\n`py -wE -m pip --version`\r\n\r\nOr to pinpoint it further,\r\n\r\n```\r\npy -wE\r\n>>> from pip._vendor import pkg_resources\r\n```\r\n\r\nThis does *not* happen with setuptools 52.0.0, it appears to be related to the version of setuptools (44.0.0) that we vendor.\r\n\r\n**Output**\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\packaging\\version.py\", line 57, in parse\r\n return Version(version)\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\packaging\\version.py\", line 298, in __init__\r\n raise InvalidVersion(\"Invalid version: '{0}'\".format(version))\r\npip._vendor.packaging.version.InvalidVersion: Invalid version: 'pip'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 3252, in <module>\r\n def _initialize_master_working_set():\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 3235, in _call_aside\r\n f(*args, **kwargs)\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 3264, in _initialize_master_working_set\r\n working_set = WorkingSet._build_master()\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 574, in _build_master\r\n ws = cls()\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 567, in __init__\r\n self.add_entry(entry)\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 623, in add_entry\r\n for dist in find_distributions(entry, True):\r\n File 
\"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 2061, in find_on_path\r\n path_item_entries = _by_version_descending(filtered)\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 2034, in _by_version_descending\r\n return sorted(names, key=_by_version, reverse=True)\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 2032, in _by_version\r\n return [packaging.version.parse(part) for part in parts]\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\pkg_resources\\__init__.py\", line 2032, in <listcomp>\r\n return [packaging.version.parse(part) for part in parts]\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\packaging\\version.py\", line 59, in parse\r\n return LegacyVersion(version)\r\n File \"C:\\Users\\Gustav\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\pip\\_vendor\\packaging\\version.py\", line 127, in __init__\r\n warnings.warn(\r\nDeprecationWarning: Creating a LegacyVersion has been deprecated and will be removed in the next major release\r\n```\r\n\n", "before_files": [{"content": "import os\nimport sys\n\n# Remove '' and current working directory from the first entry\n# of sys.path, if present to avoid using current directory\n# in pip commands check, freeze, install, list and show,\n# when invoked as python -m pip <command>\nif sys.path[0] in (\"\", os.getcwd()):\n sys.path.pop(0)\n\n# If we are running from a wheel, add the wheel to sys.path\n# This allows the usage python pip-*.whl/pip install pip-*.whl\nif __package__ == \"\":\n # __file__ is pip-*.whl/pip/__main__.py\n # first dirname call strips of '/__main__.py', second strips off '/pip'\n # Resulting path is the name of the wheel itself\n # Add that to sys.path so we can import pip\n path = os.path.dirname(os.path.dirname(__file__))\n sys.path.insert(0, path)\n\nfrom pip._internal.cli.main import main as _main\n\nif __name__ == \"__main__\":\n sys.exit(_main())\n", "path": "src/pip/__main__.py"}]} | 1,923 | 248 |
gh_patches_debug_33257 | rasdani/github-patches | git_diff | localstack__localstack-2244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect response content-type header from cloudwatch logs
When using the .NET AWSSDK connected to localstack and querying cloudwatch logs e.g.
var logClient = new AmazonCloudWatchLogsClient(new AmazonCloudWatchLogsConfig()
{
ServiceURL = "http://localhost:4586",
UseHttp = true,
AuthenticationRegion = "eu-central-1",
});
var logGroupName = @"/aws/lambda/f1";
var events = logClient.FilterLogEventsAsync(new FilterLogEventsRequest()
{
LogGroupName = logGroupName,
}).GetAwaiter().GetResult();
The response is returned from the server, but it cannot be parsed by the SDK client code because the response's content-type header is text/html when it should be application/x-amz-json-1.1. This was confirmed using Fiddler traces, comparing the response from localstack to the response from AWS in the cloud. Can this be fixed easily?
</issue>
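The header can be checked without the .NET SDK at all. A quick probe sketch: the port and log group come from the issue, and the `X-Amz-Target` value is the standard CloudWatch Logs JSON-1.1 target, stated here as an assumption about how the local endpoint is addressed:

```python
import requests

resp = requests.post(
    "http://localhost:4586/",
    headers={
        "Content-Type": "application/x-amz-json-1.1",
        "X-Amz-Target": "Logs_20140328.FilterLogEvents",
    },
    data='{"logGroupName": "/aws/lambda/f1"}',
)
# text/html before the fix; application/x-amz-json-1.1 afterwards
print(resp.headers.get("Content-Type"))
```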
<code>
[start of localstack/services/logs/logs_listener.py]
1 import re
2 from requests.models import Request
3 from localstack.utils.common import to_str
4 from localstack.services.generic_proxy import ProxyListener
5
6
7 class ProxyListenerCloudWatchLogs(ProxyListener):
8
9 def forward_request(self, method, path, data, headers):
10 if method == 'POST' and path == '/':
11 if 'nextToken' in to_str(data or ''):
12 data = self._fix_next_token_request(data)
13 headers['content-length'] = str(len(data))
14 return Request(data=data, headers=headers, method=method)
15
16 return True
17
18 def return_response(self, method, path, data, headers, response):
19 if 'nextToken' in to_str(response.content or ''):
20 self._fix_next_token_response(response)
21 response.headers['content-length'] = str(len(response._content))
22
23 def _fix_next_token_request(self, data):
24 # Fix for https://github.com/localstack/localstack/issues/1527
25 pattern = r'"nextToken":\s*"([0-9]+)"'
26 replacement = r'"nextToken": \1'
27 return re.sub(pattern, replacement, to_str(data))
28
29 def _fix_next_token_response(self, response):
30 # Fix for https://github.com/localstack/localstack/issues/1527
31 pattern = r'"nextToken":\s*([0-9]+)'
32 replacement = r'"nextToken": "\1"'
33 response._content = re.sub(pattern, replacement, to_str(response.content))
34
35
36 # instantiate listener
37 UPDATE_LOGS = ProxyListenerCloudWatchLogs()
38
[end of localstack/services/logs/logs_listener.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/services/logs/logs_listener.py b/localstack/services/logs/logs_listener.py
--- a/localstack/services/logs/logs_listener.py
+++ b/localstack/services/logs/logs_listener.py
@@ -1,11 +1,11 @@
import re
from requests.models import Request
from localstack.utils.common import to_str
+from localstack.constants import APPLICATION_AMZ_JSON_1_1
from localstack.services.generic_proxy import ProxyListener
class ProxyListenerCloudWatchLogs(ProxyListener):
-
def forward_request(self, method, path, data, headers):
if method == 'POST' and path == '/':
if 'nextToken' in to_str(data or ''):
@@ -16,17 +16,22 @@
return True
def return_response(self, method, path, data, headers, response):
+ # Fix Incorrect response content-type header from cloudwatch logs #1343
+ response.headers['content-type'] = APPLICATION_AMZ_JSON_1_1
+
if 'nextToken' in to_str(response.content or ''):
self._fix_next_token_response(response)
response.headers['content-length'] = str(len(response._content))
- def _fix_next_token_request(self, data):
+ @staticmethod
+ def _fix_next_token_request(data):
# Fix for https://github.com/localstack/localstack/issues/1527
pattern = r'"nextToken":\s*"([0-9]+)"'
replacement = r'"nextToken": \1'
return re.sub(pattern, replacement, to_str(data))
- def _fix_next_token_response(self, response):
+ @staticmethod
+ def _fix_next_token_response(response):
# Fix for https://github.com/localstack/localstack/issues/1527
pattern = r'"nextToken":\s*([0-9]+)'
replacement = r'"nextToken": "\1"'
| {"golden_diff": "diff --git a/localstack/services/logs/logs_listener.py b/localstack/services/logs/logs_listener.py\n--- a/localstack/services/logs/logs_listener.py\n+++ b/localstack/services/logs/logs_listener.py\n@@ -1,11 +1,11 @@\n import re\n from requests.models import Request\n from localstack.utils.common import to_str\n+from localstack.constants import APPLICATION_AMZ_JSON_1_1\n from localstack.services.generic_proxy import ProxyListener\n \n \n class ProxyListenerCloudWatchLogs(ProxyListener):\n-\n def forward_request(self, method, path, data, headers):\n if method == 'POST' and path == '/':\n if 'nextToken' in to_str(data or ''):\n@@ -16,17 +16,22 @@\n return True\n \n def return_response(self, method, path, data, headers, response):\n+ # Fix Incorrect response content-type header from cloudwatch logs #1343\n+ response.headers['content-type'] = APPLICATION_AMZ_JSON_1_1\n+\n if 'nextToken' in to_str(response.content or ''):\n self._fix_next_token_response(response)\n response.headers['content-length'] = str(len(response._content))\n \n- def _fix_next_token_request(self, data):\n+ @staticmethod\n+ def _fix_next_token_request(data):\n # Fix for https://github.com/localstack/localstack/issues/1527\n pattern = r'\"nextToken\":\\s*\"([0-9]+)\"'\n replacement = r'\"nextToken\": \\1'\n return re.sub(pattern, replacement, to_str(data))\n \n- def _fix_next_token_response(self, response):\n+ @staticmethod\n+ def _fix_next_token_response(response):\n # Fix for https://github.com/localstack/localstack/issues/1527\n pattern = r'\"nextToken\":\\s*([0-9]+)'\n replacement = r'\"nextToken\": \"\\1\"'\n", "issue": "Incorrect response content-type header from cloudwatch logs\nWhen using the .NET AWSSDK connected to localstack and querying cloudwatch logs e.g. \r\n var logClient = new AmazonCloudWatchLogsClient(new AmazonCloudWatchLogsConfig()\r\n {\r\n ServiceURL = \"http://localhost:4586\",\r\n UseHttp = true,\r\n AuthenticationRegion = \"eu-central-1\",\r\n });\r\n var logGroupName = @\"/aws/lambda/f1\";\r\n var events = logClient.FilterLogEventsAsync(new FilterLogEventsRequest()\r\n {\r\n LogGroupName = logGroupName,\r\n }).GetAwaiter().GetResult();\r\n\r\nThe response is returned from the server but cannot be parsed by the SDK client code because the response's content-type header is text/html when it should be application/x-amz-json-1.1. This confirmed using Fiddler traces comparing the response from localstack to the response from AWS in the cloud. 
Can this be fixed easily?\n", "before_files": [{"content": "import re\nfrom requests.models import Request\nfrom localstack.utils.common import to_str\nfrom localstack.services.generic_proxy import ProxyListener\n\n\nclass ProxyListenerCloudWatchLogs(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n if method == 'POST' and path == '/':\n if 'nextToken' in to_str(data or ''):\n data = self._fix_next_token_request(data)\n headers['content-length'] = str(len(data))\n return Request(data=data, headers=headers, method=method)\n\n return True\n\n def return_response(self, method, path, data, headers, response):\n if 'nextToken' in to_str(response.content or ''):\n self._fix_next_token_response(response)\n response.headers['content-length'] = str(len(response._content))\n\n def _fix_next_token_request(self, data):\n # Fix for https://github.com/localstack/localstack/issues/1527\n pattern = r'\"nextToken\":\\s*\"([0-9]+)\"'\n replacement = r'\"nextToken\": \\1'\n return re.sub(pattern, replacement, to_str(data))\n\n def _fix_next_token_response(self, response):\n # Fix for https://github.com/localstack/localstack/issues/1527\n pattern = r'\"nextToken\":\\s*([0-9]+)'\n replacement = r'\"nextToken\": \"\\1\"'\n response._content = re.sub(pattern, replacement, to_str(response.content))\n\n\n# instantiate listener\nUPDATE_LOGS = ProxyListenerCloudWatchLogs()\n", "path": "localstack/services/logs/logs_listener.py"}]} | 1,142 | 419 |
gh_patches_debug_28173 | rasdani/github-patches | git_diff | CMSgov__bluebutton-web-server-5 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When editing an application, a user can select capabilities that are not allowed by their groups.
</issue>
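The accepted change (diff below) points `ApplicationUpdate` at a `CustomRegisterApplicationForm` and feeds `request.user` into it via `get_form_kwargs`. The form body itself is not part of this record, so the sketch below is an assumption about what such a form plausibly does with that user; the `scope` field name is taken from the registration view's field list, and `capability_choices_for()` is a hypothetical helper:

```python
from django import forms
from oauth2_provider.models import get_application_model


class CustomRegisterApplicationForm(forms.ModelForm):
    class Meta:
        model = get_application_model()
        fields = ("name", "client_type", "authorization_grant_type",
                  "scope", "redirect_uris")

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Assumption: restrict the selectable capabilities to those granted
        # through the user's groups, so editing cannot escalate beyond them.
        self.fields["scope"].choices = capability_choices_for(user)  # hypothetical helper
```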
<code>
[start of apps/dot_ext/views/application.py]
1 from django.core.urlresolvers import reverse_lazy
2 from django.forms.models import modelform_factory
3 from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
4
5 from braces.views import LoginRequiredMixin
6
7 from oauth2_provider.models import get_application_model
8
9
10 class ApplicationOwnerIsUserMixin(LoginRequiredMixin):
11 """
12 This mixin is used to provide an Application queryset filtered by the current request.user.
13 """
14 fields = '__all__'
15
16 def get_queryset(self):
17 return get_application_model().objects.filter(user=self.request.user)
18
19
20 class ApplicationRegistration(LoginRequiredMixin, CreateView):
21 """
22 View used to register a new Application for the request.user
23 """
24 template_name = "application_registration_form.html"
25
26 def get_form_class(self):
27 """
28 Returns the form class for the application model
29 """
30
31 mff = modelform_factory(
32 get_application_model(),
33 fields=('name', 'client_id', 'client_secret', 'client_type',
34 'authorization_grant_type', 'scope', 'redirect_uris', )
35 )
36 return mff
37
38
39
40 def form_valid(self, form):
41 form.instance.user = self.request.user
42 return super(ApplicationRegistration, self).form_valid(form)
43
44
45 class ApplicationDetail(ApplicationOwnerIsUserMixin, DetailView):
46 """
47 Detail view for an application instance owned by the request.user
48 """
49 context_object_name = 'application'
50 template_name = "application_detail.html"
51
52
53 class ApplicationList(ApplicationOwnerIsUserMixin, ListView):
54 """
55 List view for all the applications owned by the request.user
56 """
57 context_object_name = 'applications'
58 template_name = "application_list.html"
59
60
61 class ApplicationDelete(ApplicationOwnerIsUserMixin, DeleteView):
62 """
63 View used to delete an application owned by the request.user
64 """
65 context_object_name = 'application'
66 success_url = reverse_lazy('dote_list')
67 template_name = "application_confirm_delete.html"
68
69
70 class ApplicationUpdate(ApplicationOwnerIsUserMixin, UpdateView):
71 """
72 View used to update an application owned by the request.user
73 """
74 context_object_name = 'application'
75 template_name = "application_form.html"
76
[end of apps/dot_ext/views/application.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/dot_ext/views/application.py b/apps/dot_ext/views/application.py
--- a/apps/dot_ext/views/application.py
+++ b/apps/dot_ext/views/application.py
@@ -6,6 +6,8 @@
from oauth2_provider.models import get_application_model
+from ..forms import CustomRegisterApplicationForm
+
class ApplicationOwnerIsUserMixin(LoginRequiredMixin):
"""
@@ -27,15 +29,15 @@
"""
Returns the form class for the application model
"""
-
+
mff = modelform_factory(
get_application_model(),
fields=('name', 'client_id', 'client_secret', 'client_type',
'authorization_grant_type', 'scope', 'redirect_uris', )
)
return mff
-
-
+
+
def form_valid(self, form):
form.instance.user = self.request.user
@@ -73,3 +75,15 @@
"""
context_object_name = 'application'
template_name = "application_form.html"
+
+ fields = None
+ form_class = CustomRegisterApplicationForm
+
+ def get_form_kwargs(self):
+ """
+ Add `user` to kwargs because it is required by the constructor of
+ CustomRegisterApplicationForm class.
+ """
+ kwargs = super(ApplicationUpdate, self).get_form_kwargs()
+ kwargs['user'] = self.request.user
+ return kwargs
| {"golden_diff": "diff --git a/apps/dot_ext/views/application.py b/apps/dot_ext/views/application.py\n--- a/apps/dot_ext/views/application.py\n+++ b/apps/dot_ext/views/application.py\n@@ -6,6 +6,8 @@\n \n from oauth2_provider.models import get_application_model\n \n+from ..forms import CustomRegisterApplicationForm\n+\n \n class ApplicationOwnerIsUserMixin(LoginRequiredMixin):\n \"\"\"\n@@ -27,15 +29,15 @@\n \"\"\"\n Returns the form class for the application model\n \"\"\"\n- \n+\n mff = modelform_factory(\n get_application_model(),\n fields=('name', 'client_id', 'client_secret', 'client_type',\n 'authorization_grant_type', 'scope', 'redirect_uris', )\n )\n return mff\n- \n- \n+\n+\n \n def form_valid(self, form):\n form.instance.user = self.request.user\n@@ -73,3 +75,15 @@\n \"\"\"\n context_object_name = 'application'\n template_name = \"application_form.html\"\n+\n+ fields = None\n+ form_class = CustomRegisterApplicationForm\n+\n+ def get_form_kwargs(self):\n+ \"\"\"\n+ Add `user` to kwargs because it is required by the constructor of\n+ CustomRegisterApplicationForm class.\n+ \"\"\"\n+ kwargs = super(ApplicationUpdate, self).get_form_kwargs()\n+ kwargs['user'] = self.request.user\n+ return kwargs\n", "issue": "When editing an application user can select capabilities not allowed by his groups\n\n", "before_files": [{"content": "from django.core.urlresolvers import reverse_lazy\nfrom django.forms.models import modelform_factory\nfrom django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView\n\nfrom braces.views import LoginRequiredMixin\n\nfrom oauth2_provider.models import get_application_model\n\n\nclass ApplicationOwnerIsUserMixin(LoginRequiredMixin):\n \"\"\"\n This mixin is used to provide an Application queryset filtered by the current request.user.\n \"\"\"\n fields = '__all__'\n\n def get_queryset(self):\n return get_application_model().objects.filter(user=self.request.user)\n\n\nclass ApplicationRegistration(LoginRequiredMixin, CreateView):\n \"\"\"\n View used to register a new Application for the request.user\n \"\"\"\n template_name = \"application_registration_form.html\"\n\n def get_form_class(self):\n \"\"\"\n Returns the form class for the application model\n \"\"\"\n \n mff = modelform_factory(\n get_application_model(),\n fields=('name', 'client_id', 'client_secret', 'client_type',\n 'authorization_grant_type', 'scope', 'redirect_uris', )\n )\n return mff\n \n \n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super(ApplicationRegistration, self).form_valid(form)\n\n\nclass ApplicationDetail(ApplicationOwnerIsUserMixin, DetailView):\n \"\"\"\n Detail view for an application instance owned by the request.user\n \"\"\"\n context_object_name = 'application'\n template_name = \"application_detail.html\"\n\n\nclass ApplicationList(ApplicationOwnerIsUserMixin, ListView):\n \"\"\"\n List view for all the applications owned by the request.user\n \"\"\"\n context_object_name = 'applications'\n template_name = \"application_list.html\"\n\n\nclass ApplicationDelete(ApplicationOwnerIsUserMixin, DeleteView):\n \"\"\"\n View used to delete an application owned by the request.user\n \"\"\"\n context_object_name = 'application'\n success_url = reverse_lazy('dote_list')\n template_name = \"application_confirm_delete.html\"\n\n\nclass ApplicationUpdate(ApplicationOwnerIsUserMixin, UpdateView):\n \"\"\"\n View used to update an application owned by the request.user\n \"\"\"\n context_object_name = 'application'\n template_name = 
\"application_form.html\"\n", "path": "apps/dot_ext/views/application.py"}]} | 1,164 | 313 |
gh_patches_debug_34316 | rasdani/github-patches | git_diff | meltano__meltano-6856 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: Flaky test `tests/meltano/api/test_workers.py::TestUIAvailableWorker::test_open_browser`
### Meltano Version
N/A
### Python Version
N/A
### Bug scope
API
### Operating System
N/A
### Description
From https://github.com/meltano/meltano/issues/6827
- https://github.com/meltano/meltano/actions/runs/3175819389/jobs/5174350171
- https://github.com/meltano/meltano/actions/runs/3159179840/jobs/5142085679
- https://github.com/meltano/meltano/actions/runs/3056794521/jobs/4931297918
- https://github.com/meltano/meltano/actions/runs/3182662282/jobs/5188896027
- https://github.com/meltano/meltano/actions/runs/3183540252/jobs/5190883701
- https://github.com/meltano/meltano/actions/runs/3184585185/jobs/5193156601
Of the recorded instances, we observe:
- They are all using `ubuntu-latest`
- They are all using Python 3.7
- They are using a variety of database backends
The error occurs on the assert on the last line of the following code block. We see that `requests_get` is always called 3 times, but `sleep` is called 300-500 times.
```python
@mock.patch("webbrowser.open")
@mock.patch("requests.get")
def test_open_browser(self, requests_get, webbrowser_open, subject):
error = mock.Mock(status_code=400)
ok = mock.Mock(status_code=200)
requests_get.side_effect = [error, error, ok]
with mock.patch("time.sleep") as sleep:
sleep.return_value = None
subject.run()
webbrowser_open.assert_called_with("http://localhost:5000")
assert requests_get.call_count == sleep.call_count
```
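
One way to keep such a polling loop testable is to bound the number of attempts explicitly, so a mocked `time.sleep` can never spin hundreds of times. A minimal sketch — the `wait_until_available` helper and its parameters are illustrative, not part of Meltano's actual API:

```python
import time

import requests


def wait_until_available(url, headers=None, max_attempts=60, interval=2.0):
    """Poll `url` until it answers with HTTP 200 or attempts run out."""
    for _ in range(max_attempts):
        try:
            if requests.get(url, headers=headers).status_code == 200:
                return True
        except requests.RequestException:
            pass  # server not up yet; retry after the sleep below
        time.sleep(interval)
    return False
```

With a hard upper bound, a test can assert an exact relationship between `requests.get` calls and `time.sleep` calls without depending on scheduler timing.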
### Code
_No response_
</issue>
<code>
[start of src/meltano/api/workers/ui_available_worker.py]
1 from __future__ import annotations
2
3 import logging
4 import threading
5 import time
6 import traceback
7 import webbrowser
8
9 import click
10 import requests
11
12 from meltano.core.project_settings_service import ProjectSettingsService
13
14 logger = logging.getLogger(__name__)
15
16 SUCCESS_STATUS_CODE = 200
17
18
19 class UIAvailableWorker(threading.Thread):
20 def __init__(self, project, open_browser=False):
21 super().__init__()
22 self.project = project
23 self.open_browser = open_browser
24 self.settings_service = ProjectSettingsService(self.project)
25 self._terminate = False
26
27 def run(self):
28 url = f"http://localhost:{self.settings_service.get('ui.bind_port')}"
29 headers = {"Host": self.settings_service.get("ui.server_name")}
30
31 while not self._terminate:
32 try:
33 response = requests.get(url, headers=headers)
34 if response.status_code == SUCCESS_STATUS_CODE:
35 click.secho(f"Meltano UI is now available at {url}", fg="green")
36 if self.open_browser:
37 webbrowser.open(url)
38 self._terminate = True
39 except Exception:
40 logger.debug(
41 f"Exception encountered while trying to run Meltano UI:\n{traceback.format_exc()}"
42 )
43
44 time.sleep(2)
45
46 def stop(self):
47 self._terminate = True
48
[end of src/meltano/api/workers/ui_available_worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/meltano/api/workers/ui_available_worker.py b/src/meltano/api/workers/ui_available_worker.py
--- a/src/meltano/api/workers/ui_available_worker.py
+++ b/src/meltano/api/workers/ui_available_worker.py
@@ -1,10 +1,11 @@
+"""Meltano UI worker thread definition."""
+
from __future__ import annotations
import logging
import threading
import time
import traceback
-import webbrowser
import click
import requests
@@ -17,14 +18,21 @@
class UIAvailableWorker(threading.Thread):
- def __init__(self, project, open_browser=False):
+ """A thread subclass for Meltano UI workers."""
+
+ def __init__(self, project):
+ """Initialize the `UIAvailableWorker` thread.
+
+ Args:
+ project: The Meltano project.
+ """
super().__init__()
self.project = project
- self.open_browser = open_browser
self.settings_service = ProjectSettingsService(self.project)
self._terminate = False
- def run(self):
+ def run(self) -> None:
+ """Run the thread, and report when the Meltano UI becomes available."""
url = f"http://localhost:{self.settings_service.get('ui.bind_port')}"
headers = {"Host": self.settings_service.get("ui.server_name")}
@@ -33,8 +41,6 @@
response = requests.get(url, headers=headers)
if response.status_code == SUCCESS_STATUS_CODE:
click.secho(f"Meltano UI is now available at {url}", fg="green")
- if self.open_browser:
- webbrowser.open(url)
self._terminate = True
except Exception:
logger.debug(
@@ -44,4 +50,5 @@
time.sleep(2)
def stop(self):
+ """Stop the thread."""
self._terminate = True
| {"golden_diff": "diff --git a/src/meltano/api/workers/ui_available_worker.py b/src/meltano/api/workers/ui_available_worker.py\n--- a/src/meltano/api/workers/ui_available_worker.py\n+++ b/src/meltano/api/workers/ui_available_worker.py\n@@ -1,10 +1,11 @@\n+\"\"\"Meltano UI worker thread definition.\"\"\"\n+\n from __future__ import annotations\n \n import logging\n import threading\n import time\n import traceback\n-import webbrowser\n \n import click\n import requests\n@@ -17,14 +18,21 @@\n \n \n class UIAvailableWorker(threading.Thread):\n- def __init__(self, project, open_browser=False):\n+ \"\"\"A thread subclass for Meltano UI workers.\"\"\"\n+\n+ def __init__(self, project):\n+ \"\"\"Initialize the `UIAvailableWorker` thread.\n+\n+ Args:\n+ project: The Meltano project.\n+ \"\"\"\n super().__init__()\n self.project = project\n- self.open_browser = open_browser\n self.settings_service = ProjectSettingsService(self.project)\n self._terminate = False\n \n- def run(self):\n+ def run(self) -> None:\n+ \"\"\"Run the thread, and report when the Meltano UI becomes available.\"\"\"\n url = f\"http://localhost:{self.settings_service.get('ui.bind_port')}\"\n headers = {\"Host\": self.settings_service.get(\"ui.server_name\")}\n \n@@ -33,8 +41,6 @@\n response = requests.get(url, headers=headers)\n if response.status_code == SUCCESS_STATUS_CODE:\n click.secho(f\"Meltano UI is now available at {url}\", fg=\"green\")\n- if self.open_browser:\n- webbrowser.open(url)\n self._terminate = True\n except Exception:\n logger.debug(\n@@ -44,4 +50,5 @@\n time.sleep(2)\n \n def stop(self):\n+ \"\"\"Stop the thread.\"\"\"\n self._terminate = True\n", "issue": "bug: Flaky test `tests/meltano/api/test_workers.py::TestUIAvailableWorker::test_open_browser`\n### Meltano Version\n\nN/A\n\n### Python Version\n\nNA\n\n### Bug scope\n\nAPI\n\n### Operating System\n\nN/A\n\n### Description\n\nFrom https://github.com/meltano/meltano/issues/6827\r\n\r\n- https://github.com/meltano/meltano/actions/runs/3175819389/jobs/5174350171\r\n- https://github.com/meltano/meltano/actions/runs/3159179840/jobs/5142085679\r\n- https://github.com/meltano/meltano/actions/runs/3056794521/jobs/4931297918\r\n- https://github.com/meltano/meltano/actions/runs/3182662282/jobs/5188896027\r\n- https://github.com/meltano/meltano/actions/runs/3183540252/jobs/5190883701\r\n- https://github.com/meltano/meltano/actions/runs/3184585185/jobs/5193156601\r\n\r\nOf the recorded instances, we observe:\r\n- They are all using `ubuntu-latest`\r\n- They are all using Python 3.7\r\n- They are using a variety of database backends\r\n\r\nThe error occurs on the assert on the last line of the following code block. 
We see that `requests_get` is always called 3 times, but `sleep` is called 300-500 times.\r\n\r\n```python\r\n @mock.patch(\"webbrowser.open\")\r\n @mock.patch(\"requests.get\")\r\n def test_open_browser(self, requests_get, webbrowser_open, subject):\r\n error = mock.Mock(status_code=400)\r\n ok = mock.Mock(status_code=200)\r\n requests_get.side_effect = [error, error, ok]\r\n with mock.patch(\"time.sleep\") as sleep:\r\n sleep.return_value = None\r\n subject.run()\r\n webbrowser_open.assert_called_with(\"http://localhost:5000\")\r\n assert requests_get.call_count == sleep.call_count\r\n```\n\n### Code\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport threading\nimport time\nimport traceback\nimport webbrowser\n\nimport click\nimport requests\n\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\nlogger = logging.getLogger(__name__)\n\nSUCCESS_STATUS_CODE = 200\n\n\nclass UIAvailableWorker(threading.Thread):\n def __init__(self, project, open_browser=False):\n super().__init__()\n self.project = project\n self.open_browser = open_browser\n self.settings_service = ProjectSettingsService(self.project)\n self._terminate = False\n\n def run(self):\n url = f\"http://localhost:{self.settings_service.get('ui.bind_port')}\"\n headers = {\"Host\": self.settings_service.get(\"ui.server_name\")}\n\n while not self._terminate:\n try:\n response = requests.get(url, headers=headers)\n if response.status_code == SUCCESS_STATUS_CODE:\n click.secho(f\"Meltano UI is now available at {url}\", fg=\"green\")\n if self.open_browser:\n webbrowser.open(url)\n self._terminate = True\n except Exception:\n logger.debug(\n f\"Exception encountered while trying to run Meltano UI:\\n{traceback.format_exc()}\"\n )\n\n time.sleep(2)\n\n def stop(self):\n self._terminate = True\n", "path": "src/meltano/api/workers/ui_available_worker.py"}]} | 1,442 | 424 |
gh_patches_debug_167 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-1526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jupyterhub 0.8.0 radio buttons unclickable or ugly due to form-control class
```
jupyterhub --version
0.8.0
```
I have some radio buttons in my spawner's `_option_form_default`:
```
return """<label for="type">Which type of instance do you want to launch?</label>
<table>
<tr>
<td><input type="radio" name="type" value="c4.8xlarge" checked="checked"></td>
<td> c4.8xlarge (36 CPU, 60GB RAM, $1.591/h)</td>
</tr>
<tr>
<td><input type="radio" name="type" value="r4.8xlarge"></td>
<td> r4.8xlarge (32 CPU, 244GB RAM, $2.341/h)</td>
</tr>
</table><br>
"""
```
In version `0.8.0` these are unclickable. Removing the `form-control` class introduced [here](https://github.com/jupyterhub/jupyterhub/blob/master/share/jupyter/hub/templates/spawn.html) fixes the issue for me. 
I also tried buttons like this:
```
<tr>
<td><label>
<input type="radio" name="type" value="c4.8xlarge">
c4.8xlarge (36 CPU, 60GB RAM, $1.591/h)
</label></td>
</tr>
```
These are clickable but look ugly with the `form-control` class.
Removing the `form-control` class makes them both clickable and pretty :)
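
For comparison, a spawner options form can emit Bootstrap's `.radio` wrapper markup, which stays clickable regardless of whether a `form-control` class is applied to inputs. A rough sketch — the instance choices come from the report above, but the helper itself is illustrative, not JupyterHub code:

```python
def _options_form_default(self):
    choices = [
        ("c4.8xlarge", "c4.8xlarge (36 CPU, 60GB RAM, $1.591/h)", True),
        ("r4.8xlarge", "r4.8xlarge (32 CPU, 244GB RAM, $2.341/h)", False),
    ]
    rows = [
        '<div class="radio"><label>'
        '<input type="radio" name="type" value="{0}"{1}> {2}'
        "</label></div>".format(value, " checked" if checked else "", text)
        for value, text, checked in choices
    ]
    return "<label>Which type of instance do you want to launch?</label>" + "".join(rows)
```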
</issue>
<code>
[start of jupyterhub/_version.py]
1 """JupyterHub version info"""
2
3 # Copyright (c) Jupyter Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 version_info = (
7 0,
8 8,
9 2,
10 'dev',
11 )
12
13 __version__ = '.'.join(map(str, version_info))
14
15
16 def _check_version(hub_version, singleuser_version, log):
17 """Compare Hub and single-user server versions"""
18 if not hub_version:
19 log.warning("Hub has no version header, which means it is likely < 0.8. Expected %s", __version__)
20 return
21
22 if not singleuser_version:
23 log.warning("Single-user server has no version header, which means it is likely < 0.8. Expected %s", __version__)
24 return
25
26 # compare minor X.Y versions
27 if hub_version != singleuser_version:
28 from distutils.version import LooseVersion as V
29 hub_major_minor = V(hub_version).version[:2]
30 singleuser_major_minor = V(singleuser_version).version[:2]
31 extra = ""
32 if singleuser_major_minor == hub_major_minor:
33 # patch-level mismatch or lower, log difference at debug-level
34 # because this should be fine
35 log_method = log.debug
36 else:
37 # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.
38 log_method = log.warning
39 extra = " This could cause failure to authenticate and result in redirect loops!"
40 log_method(
41 "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
42 hub_version,
43 singleuser_version,
44 )
45 else:
46 log.debug("jupyterhub and jupyterhub-singleuser both on version %s" % hub_version)
47
[end of jupyterhub/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py
--- a/jupyterhub/_version.py
+++ b/jupyterhub/_version.py
@@ -6,8 +6,8 @@
version_info = (
0,
8,
- 1,
- # 'dev',
+ 2,
+ 'dev',
)
__version__ = '.'.join(map(str, version_info))
| {"golden_diff": "diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py\n--- a/jupyterhub/_version.py\n+++ b/jupyterhub/_version.py\n@@ -6,8 +6,8 @@\n version_info = (\n 0,\n 8,\n- 1,\n- # 'dev',\n+ 2,\n+ 'dev',\n )\n \n __version__ = '.'.join(map(str, version_info))\n", "issue": "Jupyterhub 0.8.0 radio buttons unclickable or ugly due to form-control class\n```\r\njupyterhub --version\r\n0.8.0\r\n```\r\n\r\nI have some radio buttons in my spawner's `_option_form_default`:\r\n\r\n```\r\nreturn \"\"\"<label for=\"type\">Which type of instance do you want to launch?</label>\r\n <table>\r\n <tr>\r\n <td><input type=\"radio\" name=\"type\" value=\"c4.8xlarge\" checked=\"checked\"></td>\r\n <td> c4.8xlarge (36 CPU, 60GB RAM, $1.591/h)</td>\r\n </tr>\r\n <tr>\r\n <td><input type=\"radio\" name=\"type\" value=\"r4.8xlarge\"></td>\r\n <td> r4.8xlarge (32 CPU, 244GB RAM, $2.341/h)</td>\r\n </tr>\r\n </table><br>\r\n \"\"\"\r\n```\r\n\r\nIn `0.8.0` version these are unclickable. Removing `form-control` class introduced [here](https://github.com/jupyterhub/jupyterhub/blob/master/share/jupyter/hub/templates/spawn.html) fixes the issue for me. \r\n\r\nI also tried buttons like this:\r\n\r\n```\r\n <tr>\r\n <td><label>\r\n <input type=\"radio\" name=\"type\" value=\"c4.8xlarge\">\r\n c4.8xlarge (36 CPU, 60GB RAM, $1.591/h)\r\n </label></td>\r\n </tr>\r\n```\r\n\r\nThese are clickable but look ugly with the `form-control` class. \r\n\r\nRemoving the `form-control` class makes them both clickable and pretty :) \n", "before_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 0,\n 8,\n 2,\n 'dev',\n)\n\n__version__ = '.'.join(map(str, version_info))\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\"Hub has no version header, which means it is likely < 0.8. Expected %s\", __version__)\n return\n\n if not singleuser_version:\n log.warning(\"Single-user server has no version header, which means it is likely < 0.8. Expected %s\", __version__)\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version)\n", "path": "jupyterhub/_version.py"}]} | 1,392 | 95 |
gh_patches_debug_13267 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-1025 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bot unnecessarily walks to center of Pokestops
This issue may also increase detectability since it's unlikely that users would walk to the exact center of every stop they visit.
Also, most stops are spinnable from the road or some other more accessible place. Once we start following roads, this will need to be fixed.
### Expected Behavior
Bot should walk to anywhere within the range of the stop.
### Actual Behavior
Bot walks to exactly the center of a stop.
### Steps to Reproduce
Run the bot and observe its path.
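
A distance-bounded walking loop, rather than an unconditional `while True`, captures the expected behavior. A sketch, assuming `distance` (the same helper the worker module imports from `utils`) returns meters and that a Pokestop is spinnable within roughly 10 m — the function and parameter names here are illustrative:

```python
from utils import distance  # same helper the worker module already uses

SPIN_RANGE_M = 10  # assumed spin radius in meters


def walk_until_in_range(step_walker, get_position, lat, lng):
    """Step toward (lat, lng) but stop once the stop is within spin range."""
    while True:
        cur_lat, cur_lng = get_position()
        if distance(cur_lat, cur_lng, lat, lng) <= SPIN_RANGE_M:
            return True  # close enough to spin; no need to reach the center
        if step_walker.step():  # step() reports True when the walker arrives
            return False
```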
</issue>
<code>
[start of pokemongo_bot/cell_workers/move_to_fort_worker.py]
1 from utils import distance, format_dist
2 from pokemongo_bot.human_behaviour import sleep
3 from pokemongo_bot import logger
4 from pokemongo_bot.step_walker import StepWalker
5
6 class MoveToFortWorker(object):
7 def __init__(self, fort, bot):
8 self.bot = bot
9 self.fort = fort
10 self.api = bot.api
11 self.config = bot.config
12 self.navigator = bot.navigator
13 self.position = bot.position
14
15 def work(self):
16 lat = self.fort['latitude']
17 lng = self.fort['longitude']
18 fortID = self.fort['id']
19 unit = self.config.distance_unit # Unit to use when printing formatted distance
20
21 dist = distance(self.position[0], self.position[1], lat, lng)
22
23 # print('Found fort {} at distance {}m'.format(fortID, dist))
24 logger.log('Found fort {} at distance {}'.format(
25 fortID, format_dist(dist, unit)))
26
27 if dist > 10:
28 logger.log('Need to move closer to Pokestop')
29 position = (lat, lng, 0.0)
30
31 if self.config.walk > 0:
32 step_walker = StepWalker(
33 self.bot,
34 self.config.walk,
35 self.api._position_lat,
36 self.api._position_lng,
37 position[0],
38 position[1]
39 )
40
41 while True:
42 if step_walker.step():
43 break
44
45 else:
46 self.api.set_position(*position)
47
48 self.api.player_update(latitude=lat, longitude=lng)
49 response_dict = self.api.call()
50 logger.log('Arrived at Pokestop')
51 sleep(2)
52 return response_dict
53
54 return None
55
[end of pokemongo_bot/cell_workers/move_to_fort_worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/cell_workers/move_to_fort_worker.py b/pokemongo_bot/cell_workers/move_to_fort_worker.py
--- a/pokemongo_bot/cell_workers/move_to_fort_worker.py
+++ b/pokemongo_bot/cell_workers/move_to_fort_worker.py
@@ -1,4 +1,4 @@
-from utils import distance, format_dist
+from utils import distance, format_dist, i2f
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot import logger
from pokemongo_bot.step_walker import StepWalker
@@ -38,7 +38,7 @@
position[1]
)
- while True:
+ while distance(i2f(self.api._position_lat), i2f(self.api._position_lng), lat, lng) > 10:
if step_walker.step():
break
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/move_to_fort_worker.py b/pokemongo_bot/cell_workers/move_to_fort_worker.py\n--- a/pokemongo_bot/cell_workers/move_to_fort_worker.py\n+++ b/pokemongo_bot/cell_workers/move_to_fort_worker.py\n@@ -1,4 +1,4 @@\n-from utils import distance, format_dist\n+from utils import distance, format_dist, i2f\n from pokemongo_bot.human_behaviour import sleep\n from pokemongo_bot import logger\n from pokemongo_bot.step_walker import StepWalker\n@@ -38,7 +38,7 @@\n position[1]\n )\n \n- while True:\n+ while distance(i2f(self.api._position_lat), i2f(self.api._position_lng), lat, lng) > 10:\n if step_walker.step():\n break\n", "issue": "Bot unnecessarily walks to center of Pokestops\nThis issue may also increase detectability since it's unlikely that users would walk to the exact center of every stop they visit.\n\nAlso, most stops are spinnable from the road or other more accessible place. Once we start following roads, this will need fixed.\n### Expected Behavior\n\nBot should walk to anywhere within the range of the stop.\n### Actual Behavior\n\nBot walks to exactly the center of a stop.\n### Steps to Reproduce\n\nRun the bot and observe its path.\n\n", "before_files": [{"content": "from utils import distance, format_dist\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot import logger\nfrom pokemongo_bot.step_walker import StepWalker\n\nclass MoveToFortWorker(object):\n def __init__(self, fort, bot):\n self.bot = bot\n self.fort = fort\n self.api = bot.api\n self.config = bot.config\n self.navigator = bot.navigator\n self.position = bot.position\n\n def work(self):\n lat = self.fort['latitude']\n lng = self.fort['longitude']\n fortID = self.fort['id']\n unit = self.config.distance_unit # Unit to use when printing formatted distance\n\n dist = distance(self.position[0], self.position[1], lat, lng)\n\n # print('Found fort {} at distance {}m'.format(fortID, dist))\n logger.log('Found fort {} at distance {}'.format(\n fortID, format_dist(dist, unit)))\n\n if dist > 10:\n logger.log('Need to move closer to Pokestop')\n position = (lat, lng, 0.0)\n\n if self.config.walk > 0:\n step_walker = StepWalker(\n self.bot,\n self.config.walk,\n self.api._position_lat,\n self.api._position_lng,\n position[0],\n position[1]\n )\n\n while True:\n if step_walker.step():\n break\n\n else:\n self.api.set_position(*position)\n\n self.api.player_update(latitude=lat, longitude=lng)\n response_dict = self.api.call()\n logger.log('Arrived at Pokestop')\n sleep(2)\n return response_dict\n\n return None\n", "path": "pokemongo_bot/cell_workers/move_to_fort_worker.py"}]} | 1,134 | 202 |
gh_patches_debug_48993 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1030 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
googleapiclient.discovery.build fails with module 'six.moves' has no attribute 'collections_abc' since version 1.12.0
#### Environment details
- OS type and version: 18.04.1-Ubuntu
- Python version: Python 3.6.9
- pip version: `pip --version` pip 9.0.1
- `google-api-python-client` version: `pip show google-api-python-client`: Version: 1.12.0
#### Code example
googleapiclient.discovery.build() fails with the message: `module 'six.moves' has no attribute 'collections_abc'`
We only see this problem with google-api-python-client 1.12.0; version 1.11.0 is fine.
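
The `six.moves.collections_abc` alias appears to have been added in six 1.13.0, which matches the lower bound the fix pins, so environments with an older six break on this release. A defensive runtime check along these lines (illustrative, not part of the library) makes the failure explicit instead of surfacing as an `AttributeError`:

```python
import pkg_resources

six_version = pkg_resources.get_distribution("six").version
if pkg_resources.parse_version(six_version) < pkg_resources.parse_version("1.13.0"):
    raise RuntimeError(
        "google-api-python-client 1.12.0 requires six>=1.13.0; found six %s"
        % six_version
    )
```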
</issue>
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 7):
25 print("google-api-python-client requires python version >= 2.7.", file=sys.stderr)
26 sys.exit(1)
27 if (3, 1) <= sys.version_info < (3, 4):
28 print("google-api-python-client requires python3 version >= 3.4.", file=sys.stderr)
29 sys.exit(1)
30
31 import io
32 import os
33 from setuptools import setup
34
35 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
36
37 install_requires = [
38 # NOTE: Apache Beam tests depend on this library and cannot
39 # currently upgrade their httplib2 version.
40 # Please see https://github.com/googleapis/google-api-python-client/pull/841
41 "httplib2>=0.9.2,<1dev",
42 "google-auth>=1.16.0",
43 "google-auth-httplib2>=0.0.3",
44 "google-api-core>=1.21.0,<2dev",
45 "six>=1.6.1,<2dev",
46 "uritemplate>=3.0.0,<4dev",
47 ]
48
49 package_root = os.path.abspath(os.path.dirname(__file__))
50
51 readme_filename = os.path.join(package_root, "README.md")
52 with io.open(readme_filename, encoding="utf-8") as readme_file:
53 readme = readme_file.read()
54
55 version = "1.12.0"
56
57 setup(
58 name="google-api-python-client",
59 version=version,
60 description="Google API Client Library for Python",
61 long_description=readme,
62 long_description_content_type='text/markdown',
63 author="Google LLC",
64 author_email="[email protected]",
65 url="https://github.com/googleapis/google-api-python-client/",
66 install_requires=install_requires,
67 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
68 packages=packages,
69 package_data={},
70 license="Apache 2.0",
71 keywords="google api client",
72 classifiers=[
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.5",
77 "Programming Language :: Python :: 3.6",
78 "Programming Language :: Python :: 3.7",
79 "Development Status :: 5 - Production/Stable",
80 "Intended Audience :: Developers",
81 "License :: OSI Approved :: Apache Software License",
82 "Operating System :: OS Independent",
83 "Topic :: Internet :: WWW/HTTP",
84 ],
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@
"google-auth>=1.16.0",
"google-auth-httplib2>=0.0.3",
"google-api-core>=1.21.0,<2dev",
- "six>=1.6.1,<2dev",
+ "six>=1.13.0,<2dev",
"uritemplate>=3.0.0,<4dev",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n- \"six>=1.6.1,<2dev\",\n+ \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n ]\n", "issue": "googleapiclient.discovery.build fails with module 'six.moves' has no attribute 'collections_abc' since version 1.12.0\n#### Environment details\r\n\r\n - OS type and version: 18.04.1-Ubuntu\r\n - Python version: Python 3.6.9\r\n - pip version: `pip --version` pip 9.0.1\r\n - `google-api-python-client` version: `pip show google-api-python-client`: Version: 1.12.0\r\n\r\n#### Code example\r\ngoogleapiclient.discovery.build() fails with message: module 'six.moves' has no attribute 'collections_abc'\r\n\r\nWe only see this problem with google-api-python-client 1.12.0. 1.11.0 is fine.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print(\"google-api-python-client requires python version >= 2.7.\", file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 4):\n print(\"google-api-python-client requires python3 version >= 3.4.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n # NOTE: Apache Beam tests depend on this library and cannot\n # currently upgrade their httplib2 version.\n # Please see https://github.com/googleapis/google-api-python-client/pull/841\n \"httplib2>=0.9.2,<1dev\",\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.6.1,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"1.12.0\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n 
\"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,618 | 121 |
gh_patches_debug_9665 | rasdani/github-patches | git_diff | great-expectations__great_expectations-2958 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` over the `1.*x/y` idiom.
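
A short illustration of the difference — plain Python, runnable under both 2 and 3 once the import is present:

```python
from __future__ import division  # makes / mean true division on Python 2

x, y = 7, 2
print(x / y)       # 3.5 with the import; 3 on Python 2 without it
print(x // y)      # 3 -- floor division is still available explicitly
print(1. * x / y)  # 3.5 -- the older workaround the import replaces
```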
</issue>
<code>
[start of great_expectations/rule_based_profiler/profiler.py]
1 import uuid
2 from typing import Dict, List, Optional, Union
3
4 import great_expectations.exceptions as ge_exceptions
5 from great_expectations import DataContext
6 from great_expectations.core import ExpectationConfiguration, ExpectationSuite
7 from great_expectations.data_context.util import instantiate_class_from_config
8 from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
9 DomainBuilder,
10 )
11 from great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (
12 ExpectationConfigurationBuilder,
13 )
14 from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (
15 ParameterBuilder,
16 )
17 from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
18 ParameterContainer,
19 build_parameter_container_for_variables,
20 )
21 from great_expectations.rule_based_profiler.rule.rule import Rule
22
23
24 class Profiler:
25 """
26 Profiler object serves to profile, or automatically evaluate a set of rules, upon a given
27 batch / multiple batches of data.
28 """
29
30 def __init__(
31 self,
32 *,
33 profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,
34 data_context: Optional[DataContext] = None,
35 ):
36 """
37 Create a new Profiler using configured rules.
38 For a rule or an item in a rule configuration, instantiates the following if
39 available: a domain builder, a parameter builder, and a configuration builder.
40 These will be used to define profiler computation patterns.
41
42 Args:
43 variables_configs: Variables from a profiler configuration
44 rules_configs: Rule configuration as a dictionary
45 data_context: DataContext object that defines a full runtime environment (data access, etc.)
46 """
47 self._data_context = data_context
48 self._rules = []
49
50 rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
51 rule_name: str
52 rule_config: dict
53
54 for rule_name, rule_config in rules_configs.items():
55 domain_builder_config: dict = rule_config.get("domain_builder")
56
57 if domain_builder_config is None:
58 raise ge_exceptions.ProfilerConfigurationError(
59 message=f'Invalid rule "{rule_name}": no domain_builder found.'
60 )
61
62 domain_builder: DomainBuilder = instantiate_class_from_config(
63 config=domain_builder_config,
64 runtime_environment={"data_context": data_context},
65 config_defaults={
66 "module_name": "great_expectations.rule_based_profiler.domain_builder"
67 },
68 )
69
70 parameter_builders: List[ParameterBuilder] = []
71
72 parameter_builder_configs: dict = rule_config.get("parameter_builders")
73
74 if parameter_builder_configs:
75 parameter_builder_config: dict
76 for parameter_builder_config in parameter_builder_configs:
77 parameter_builders.append(
78 instantiate_class_from_config(
79 config=parameter_builder_config,
80 runtime_environment={"data_context": data_context},
81 config_defaults={
82 "module_name": "great_expectations.rule_based_profiler.parameter_builder"
83 },
84 )
85 )
86
87 expectation_configuration_builders: List[
88 ExpectationConfigurationBuilder
89 ] = []
90
91 expectation_configuration_builder_configs: dict = rule_config.get(
92 "expectation_configuration_builders"
93 )
94
95 if expectation_configuration_builder_configs:
96 expectation_configuration_builder_config: dict
97 for (
98 expectation_configuration_builder_config
99 ) in expectation_configuration_builder_configs:
100 expectation_configuration_builders.append(
101 instantiate_class_from_config(
102 config=expectation_configuration_builder_config,
103 runtime_environment={},
104 config_defaults={
105 "class_name": "DefaultExpectationConfigurationBuilder",
106 "module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
107 },
108 )
109 )
110
111 variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
112 variables: Optional[ParameterContainer] = None
113
114 if variables_configs:
115 variables = build_parameter_container_for_variables(
116 variables_configs=variables_configs
117 )
118
119 self._rules.append(
120 Rule(
121 name=rule_name,
122 domain_builder=domain_builder,
123 parameter_builders=parameter_builders,
124 expectation_configuration_builders=expectation_configuration_builders,
125 variables=variables,
126 )
127 )
128
129 def profile(
130 self,
131 *,
132 expectation_suite_name: Optional[str] = None,
133 ) -> ExpectationSuite:
134 """
135 Args:
136 :param expectation_suite_name: A name for returned Expectation suite.
137 :return: Set of rule evaluation results in the form of an ExpectationSuite
138 """
139 if expectation_suite_name is None:
140 expectation_suite_name = (
141 f"tmp_suite_{self.__class__.__name__}_{str(uuid.uuid4())[:8]}"
142 )
143
144 expectation_suite: ExpectationSuite = ExpectationSuite(
145 expectation_suite_name=expectation_suite_name
146 )
147
148 rule: Rule
149 for rule in self._rules:
150 expectation_configurations: List[ExpectationConfiguration] = rule.generate()
151 expectation_configuration: ExpectationConfiguration
152 for expectation_configuration in expectation_configurations:
153 expectation_suite.add_expectation(
154 expectation_configuration=expectation_configuration
155 )
156
157 return expectation_suite
158
[end of great_expectations/rule_based_profiler/profiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py
--- a/great_expectations/rule_based_profiler/profiler.py
+++ b/great_expectations/rule_based_profiler/profiler.py
@@ -40,8 +40,7 @@
These will be used to define profiler computation patterns.
Args:
- variables_configs: Variables from a profiler configuration
- rules_configs: Rule configuration as a dictionary
+ profiler_config: Variables and Rules configuration as a dictionary
data_context: DataContext object that defines a full runtime environment (data access, etc.)
"""
self._data_context = data_context
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py\n--- a/great_expectations/rule_based_profiler/profiler.py\n+++ b/great_expectations/rule_based_profiler/profiler.py\n@@ -40,8 +40,7 @@\n These will be used to define profiler computation patterns.\n \n Args:\n- variables_configs: Variables from a profiler configuration\n- rules_configs: Rule configuration as a dictionary\n+ profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._data_context = data_context\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import uuid\nfrom typing import Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, ExpectationSuite\nfrom great_expectations.data_context.util import instantiate_class_from_config\nfrom great_expectations.rule_based_profiler.domain_builder.domain_builder import (\n DomainBuilder,\n)\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n ParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass Profiler:\n \"\"\"\n Profiler object serves to profile, or automatically evaluate a set of rules, upon a given\n batch / multiple batches of data.\n \"\"\"\n\n def __init__(\n self,\n *,\n profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,\n data_context: Optional[DataContext] = None,\n ):\n \"\"\"\n Create a new Profiler using configured rules.\n For a rule or an item in a rule configuration, instantiates the following if\n available: a domain builder, a parameter builder, and a configuration builder.\n These will be used to define profiler computation patterns.\n\n Args:\n variables_configs: Variables from a profiler configuration\n rules_configs: Rule configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._data_context = data_context\n self._rules = []\n\n rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n\n for rule_name, rule_config in rules_configs.items():\n domain_builder_config: dict = rule_config.get(\"domain_builder\")\n\n if domain_builder_config is None:\n raise ge_exceptions.ProfilerConfigurationError(\n message=f'Invalid rule \"{rule_name}\": no domain_builder found.'\n )\n\n domain_builder: DomainBuilder = instantiate_class_from_config(\n config=domain_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.domain_builder\"\n },\n )\n\n parameter_builders: List[ParameterBuilder] = []\n\n parameter_builder_configs: dict = rule_config.get(\"parameter_builders\")\n\n if parameter_builder_configs:\n parameter_builder_config: dict\n for parameter_builder_config in parameter_builder_configs:\n 
parameter_builders.append(\n instantiate_class_from_config(\n config=parameter_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.parameter_builder\"\n },\n )\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = []\n\n expectation_configuration_builder_configs: dict = rule_config.get(\n \"expectation_configuration_builders\"\n )\n\n if expectation_configuration_builder_configs:\n expectation_configuration_builder_config: dict\n for (\n expectation_configuration_builder_config\n ) in expectation_configuration_builder_configs:\n expectation_configuration_builders.append(\n instantiate_class_from_config(\n config=expectation_configuration_builder_config,\n runtime_environment={},\n config_defaults={\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"module_name\": \"great_expectations.rule_based_profiler.expectation_configuration_builder\",\n },\n )\n )\n\n variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n variables: Optional[ParameterContainer] = None\n\n if variables_configs:\n variables = build_parameter_container_for_variables(\n variables_configs=variables_configs\n )\n\n self._rules.append(\n Rule(\n name=rule_name,\n domain_builder=domain_builder,\n parameter_builders=parameter_builders,\n expectation_configuration_builders=expectation_configuration_builders,\n variables=variables,\n )\n )\n\n def profile(\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n expectation_suite_name = (\n f\"tmp_suite_{self.__class__.__name__}_{str(uuid.uuid4())[:8]}\"\n )\n\n expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n expectation_suite.add_expectation(\n expectation_configuration=expectation_configuration\n )\n\n return expectation_suite\n", "path": "great_expectations/rule_based_profiler/profiler.py"}]} | 2,003 | 152 |
gh_patches_debug_567 | rasdani/github-patches | git_diff | pex-tool__pex-891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.3
On the docket:
+ [x] Error eagerly if an interpreter binary doesn't exist #886
+ [x] The pip-powered resolve in pex 2 will re-tokenize --find-links pages on each transitive requirement #887
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.2'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.2'
+__version__ = '2.1.3'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.2'\n+__version__ = '2.1.3'\n", "issue": "Release 2.1.3\nOn the docket:\r\n+ [x] Error eagerly if an interpreter binary doesn't exist #886 \r\n+ [x] The pip-powered resolve in pex 2 will re-tokenize --find-links pages on each transitive requirement #887 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.2'\n", "path": "pex/version.py"}]} | 644 | 95 |
gh_patches_debug_24149 | rasdani/github-patches | git_diff | lightly-ai__lightly-482 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
when the installed version is older than the newest version, lightly calls the API on every command rather than caching the information
When I use an older lightly version installed via pip, I see multiple lines of output like this when I run `lightly-magic`:
```
...Python/3.8/lib/python/site-packages/lightly/api/version_checking.py:57: Warning: You are using lightly version 1.1.17. There is a newer version of the package available. For compatability reasons, please upgrade your current version: pip install lightly==1.1.18
warnings.warn(Warning(warning))
```
Also, tracking the connections it makes, it calls the API for **every** image I want to upload. So the package does not cache the information that it is an outdated version. This is no bueno
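
One way to avoid the repeated lookups is to memoize the check for the lifetime of the process. A sketch that reuses `get_latest_version` from `lightly.api.version_checking` (shown in the code below); the decorator-based wrapper itself is illustrative, not lightly's implementation:

```python
from functools import lru_cache  # Python 3 only, which lightly requires

from lightly.api.version_checking import get_latest_version


@lru_cache(maxsize=1)
def latest_version_once(current_version):
    # Hits the Lightly versioning API at most once per process.
    return get_latest_version(current_version)
```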
</issue>
<code>
[start of lightly/__init__.py]
1 """Lightly is a computer vision framework for self-supervised learning.
2
3 With Lightly you can train deep learning models using
4 self-supervision. This means, that you don't require
5 any labels to train a model. Lightly has been built
6 to help you understand and work with large unlabeled datasets.
7 It is built on top of PyTorch and therefore fully compatible
8 with other frameworks such as Fast.ai.
9
10 The framework is structured into the following modules:
11
12 - **api**:
13
14 The lightly.api module handles communication with the Lightly web-app.
15
16 - **cli**:
17
18 The lightly.cli module provides a command-line interface for training
19 self-supervised models and embedding images. Furthermore, the command-line
20 tool can be used to upload and download images from/to the Lightly web-app.
21
22 - **core**:
23
24 The lightly.core module offers one-liners for simple self-supervised learning.
25
26 - **data**:
27
28 The lightly.data module provides a dataset wrapper and collate functions. The
29 collate functions are in charge of the data augmentations which are crucial for
30 self-supervised learning.
31
32 - **embedding**:
33
34 The lightly.embedding module combines the self-supervised models with a dataloader,
35 optimizer, and loss function to provide a simple pytorch-lightning trainable.
36
37 - **loss**:
38
39 The lightly.loss module contains implementations of popular self-supervised training
40 loss functions.
41
42 - **models**:
43
44 The lightly.models module holds the implementation of the ResNet as well as self-
45 supervised methods. Currently implements:
46
47 - SimCLR
48
49 - MoCo
50
51 - SimSiam
52
53 - Barlow Twins
54
55 - BYOL
56
57 - NNCLR
58
59 - **transforms**:
60
61 The lightly.transforms module implements custom data transforms. Currently implements:
62
63 - Gaussian Blur
64
65 - Random Rotation
66
67 - Random Solarization
68
69 - **utils**:
70
71 The lightly.utils package provides global utility methods.
72 The io module contains utility to save and load embeddings in a format which is
73 understood by the Lightly library.
74
75 """
76
77 # Copyright (c) 2020. Lightly AG and its affiliates.
78 # All Rights Reserved
79
80 __name__ = 'lightly'
81 __version__ = '1.1.18'
82
83
84 try:
85 # See (https://github.com/PyTorchLightning/pytorch-lightning)
86 # This variable is injected in the __builtins__ by the build
87 # process. It used to enable importing subpackages of skimage when
88 # the binaries are not built
89 __LIGHTLY_SETUP__
90 except NameError:
91 __LIGHTLY_SETUP__ = False
92
93
94 if __LIGHTLY_SETUP__:
95 # setting up lightly
96 msg = f'Partial import of {__name__}=={__version__} during build process.'
97 print(msg)
98 else:
99 # see if prefetch_generator is available
100 try:
101 import prefetch_generator
102 except ImportError:
103 _prefetch_generator_available = False
104 else:
105 _prefetch_generator_available = True
106
107 def _is_prefetch_generator_available():
108 return _prefetch_generator_available
109
110 from lightly.core import *
111 from lightly import active_learning
112 from lightly import api
113 from lightly import data
114 from lightly import embedding
115 from lightly import loss
116 from lightly import models
117 from lightly import openapi_generated
118 from lightly import transforms
119 from lightly import utils
120
121
122 # check for latest version
123 from lightly.api.version_checking import get_latest_version
124 from lightly.api.version_checking import version_compare
125 from lightly.api.version_checking import pretty_print_latest_version
126
127 latest_version = get_latest_version(__version__)
128 if latest_version is not None:
129 if version_compare(__version__, latest_version) < 0:
130 # local version is behind latest version
131 pretty_print_latest_version(latest_version)
132
[end of lightly/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/__init__.py b/lightly/__init__.py
--- a/lightly/__init__.py
+++ b/lightly/__init__.py
@@ -80,6 +80,7 @@
__name__ = 'lightly'
__version__ = '1.1.18'
+from multiprocessing import current_process
try:
# See (https://github.com/PyTorchLightning/pytorch-lightning)
@@ -118,14 +119,15 @@
from lightly import transforms
from lightly import utils
+ if current_process().name == 'MainProcess':
+ # check for latest version
+ from lightly.api.version_checking import get_latest_version
+ from lightly.api.version_checking import version_compare
+ from lightly.api.version_checking import pretty_print_latest_version
- # check for latest version
- from lightly.api.version_checking import get_latest_version
- from lightly.api.version_checking import version_compare
- from lightly.api.version_checking import pretty_print_latest_version
+ latest_version = get_latest_version(__version__)
+ if latest_version is not None:
+ if version_compare(__version__, latest_version) < 0:
+ # local version is behind latest version
+ pretty_print_latest_version(latest_version)
- latest_version = get_latest_version(__version__)
- if latest_version is not None:
- if version_compare(__version__, latest_version) < 0:
- # local version is behind latest version
- pretty_print_latest_version(latest_version)
| {"golden_diff": "diff --git a/lightly/__init__.py b/lightly/__init__.py\n--- a/lightly/__init__.py\n+++ b/lightly/__init__.py\n@@ -80,6 +80,7 @@\n __name__ = 'lightly'\n __version__ = '1.1.18'\n \n+from multiprocessing import current_process\n \n try:\n # See (https://github.com/PyTorchLightning/pytorch-lightning)\n@@ -118,14 +119,15 @@\n from lightly import transforms\n from lightly import utils\n \n+ if current_process().name == 'MainProcess':\n+ # check for latest version\n+ from lightly.api.version_checking import get_latest_version\n+ from lightly.api.version_checking import version_compare\n+ from lightly.api.version_checking import pretty_print_latest_version\n \n- # check for latest version\n- from lightly.api.version_checking import get_latest_version\n- from lightly.api.version_checking import version_compare\n- from lightly.api.version_checking import pretty_print_latest_version\n+ latest_version = get_latest_version(__version__)\n+ if latest_version is not None:\n+ if version_compare(__version__, latest_version) < 0:\n+ # local version is behind latest version\n+ pretty_print_latest_version(latest_version)\n \n- latest_version = get_latest_version(__version__)\n- if latest_version is not None:\n- if version_compare(__version__, latest_version) < 0:\n- # local version is behind latest version\n- pretty_print_latest_version(latest_version)\n", "issue": "when pip version older than newest version, it calls API on every command rather than caching the information\nWhen I use an older pip version, I see multiple lines of output like this when I run `lightly-magic`\r\n```\r\n...Python/3.8/lib/python/site-packages/lightly/api/version_checking.py:57: Warning: You are using lightly version 1.1.17. There is a newer version of the package available. For compatability reasons, please upgrade your current version: pip install lightly==1.1.18\r\n warnings.warn(Warning(warning))\r\n```\r\n\r\nAlso tracking the connections it makes, it calls the API for **every** images I want to upload. So the pip does not cache the information that it is an outdated version. This is no bueno\r\n \r\n \n", "before_files": [{"content": "\"\"\"Lightly is a computer vision framework for self-supervised learning.\n\nWith Lightly you can train deep learning models using\nself-supervision. This means, that you don't require\nany labels to train a model. Lightly has been built\nto help you understand and work with large unlabeled datasets.\nIt is built on top of PyTorch and therefore fully compatible \nwith other frameworks such as Fast.ai.\n\nThe framework is structured into the following modules:\n\n- **api**: \n\n The lightly.api module handles communication with the Lightly web-app.\n\n- **cli**:\n\n The lightly.cli module provides a command-line interface for training \n self-supervised models and embedding images. Furthermore, the command-line\n tool can be used to upload and download images from/to the Lightly web-app.\n\n- **core**:\n\n The lightly.core module offers one-liners for simple self-supervised learning.\n\n- **data**:\n\n The lightly.data module provides a dataset wrapper and collate functions. 
The\n collate functions are in charge of the data augmentations which are crucial for\n self-supervised learning.\n\n- **embedding**:\n\n The lightly.embedding module combines the self-supervised models with a dataloader,\n optimizer, and loss function to provide a simple pytorch-lightning trainable.\n\n- **loss**:\n\n The lightly.loss module contains implementations of popular self-supervised training\n loss functions.\n\n- **models**:\n\n The lightly.models module holds the implementation of the ResNet as well as self-\n supervised methods. Currently implements:\n\n - SimCLR\n\n - MoCo\n\n - SimSiam\n\n - Barlow Twins\n\n - BYOL\n\n - NNCLR\n\n- **transforms**:\n\n The lightly.transforms module implements custom data transforms. Currently implements:\n\n - Gaussian Blur\n\n - Random Rotation\n\n - Random Solarization\n\n- **utils**:\n\n The lightly.utils package provides global utility methods.\n The io module contains utility to save and load embeddings in a format which is\n understood by the Lightly library.\n\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\n__name__ = 'lightly'\n__version__ = '1.1.18'\n\n\ntry:\n # See (https://github.com/PyTorchLightning/pytorch-lightning)\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __LIGHTLY_SETUP__\nexcept NameError:\n __LIGHTLY_SETUP__ = False\n\n\nif __LIGHTLY_SETUP__:\n # setting up lightly\n msg = f'Partial import of {__name__}=={__version__} during build process.' \n print(msg)\nelse:\n # see if prefetch_generator is available\n try:\n import prefetch_generator\n except ImportError:\n _prefetch_generator_available = False\n else:\n _prefetch_generator_available = True\n\n def _is_prefetch_generator_available():\n return _prefetch_generator_available\n\n from lightly.core import *\n from lightly import active_learning\n from lightly import api\n from lightly import data\n from lightly import embedding\n from lightly import loss\n from lightly import models\n from lightly import openapi_generated\n from lightly import transforms\n from lightly import utils\n\n\n # check for latest version\n from lightly.api.version_checking import get_latest_version\n from lightly.api.version_checking import version_compare\n from lightly.api.version_checking import pretty_print_latest_version\n\n latest_version = get_latest_version(__version__)\n if latest_version is not None:\n if version_compare(__version__, latest_version) < 0:\n # local version is behind latest version\n pretty_print_latest_version(latest_version)\n", "path": "lightly/__init__.py"}]} | 1,839 | 342 |
gh_patches_debug_30371 | rasdani/github-patches | git_diff | Flexget__Flexget-2222 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot use variables with integer-only values
### Expected behaviour:
Variables should be able to handle integer-only values.
### Actual behaviour:
Configuration parsing error (when using ``'{? deluge.port ?}'``): ``Got `50586`, expected: integer``
Configuration parsing error (when using ``{? deluge.port ?}``):
```
while parsing a flow mapping
  in "<unicode string>", line 16, column 13:
        port: {? deluge.port ?}
              ^
expected ',' or '}', but got '?'
  in "<unicode string>", line 16, column 28:
        port: {? deluge.port ?}
                             ^
```
### Steps to reproduce:
- Step 1: Try to use the config below.
#### Config:
```
from_deluge:
host: '{? deluge.host ?}'
port: '{? deluge.port ?}'
```
#### Log:
See above.
### Additional information:
- Flexget Version: 2.10.24
- Python Version: 2.7.9
- Installation method: pip
- OS and version: macOS El Capitan 10.11.6
- Link to crash log: n/a
</issue>
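For context, here is a minimal editorial sketch (assuming Jinja2 >= 2.10, which ships `NativeEnvironment`) of why a plain `Environment` always renders to a string, so the schema sees `'50586'` rather than `50586`, while `NativeEnvironment` preserves the native type:
```
from jinja2 import Environment
from jinja2.nativetypes import NativeEnvironment

params = {'variable_start_string': '{?', 'variable_end_string': '?}'}

plain = Environment(**params).from_string('{? port ?}').render(port=50586)
print(type(plain), plain)    # <class 'str'> 50586 -- always a string

native = NativeEnvironment(**params).from_string('{? port ?}').render(port=50586)
print(type(native), native)  # <class 'int'> 50586 -- the integer survives
```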
<code>
[start of flexget/plugins/modify/variables.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import codecs
5 import logging
6 import os
7 from datetime import datetime
8
9 import yaml
10
11 from jinja2 import Environment, TemplateError
12
13 from sqlalchemy import Column
14 from sqlalchemy.sql.sqltypes import Unicode, DateTime, Integer
15
16 from flexget import db_schema
17 from flexget.config_schema import register_config_key
18 from flexget.event import event
19 from flexget.manager import Session
20 from flexget.plugin import PluginError
21 from flexget.utils.database import json_synonym
22
23 log = logging.getLogger('variables')
24
25 DB_VERSION = 0
26 Base = db_schema.versioned_base('variables', DB_VERSION)
27
28
29 class Variables(Base):
30 __tablename__ = 'variables'
31
32 id = Column(Integer, primary_key=True)
33 _variables = Column('variables', Unicode)
34 variables = json_synonym('_variables')
35 added = Column(DateTime, default=datetime.now)
36
37
38 def variables_from_file(config_base, filename):
39 variables_file = os.path.join(config_base, filename)
40 if not os.path.exists(variables_file):
41 raise PluginError('File %s does not exist!' % variables_file)
42 try:
43 with codecs.open(variables_file, 'rb', 'utf-8') as f:
44 variables_dict = yaml.safe_load(f.read())
45 except yaml.YAMLError as e:
46 raise PluginError('Invalid variables file: %s' % e)
47 return variables_dict or {}
48
49
50 def variables_from_db():
51 with Session() as session:
52 variables = session.query(Variables).first()
53 if variables:
54 return variables.variables
55 else:
56 return {}
57
58
59 def variables_to_db(variables_dict):
60 with Session() as session:
61 variables = session.query(Variables).first()
62 if not variables:
63 variables = Variables()
64 variables.variables = variables_dict
65 session.merge(variables)
66
67
68 @event('manager.before_config_validate')
69 def process_variables(config, manager):
70 """Render all string elements of the config against defined variables."""
71 env_params = {
72 'block_start_string': '^^disabled^^',
73 'block_end_string': '^^disabled^^',
74 'variable_start_string': '{?',
75 'variable_end_string': '?}'
76 }
77 if 'variables' not in config or config.get('variables') is False:
78 return
79 env = Environment(**env_params)
80 if isinstance(config['variables'], bool):
81 log.debug('trying to load variables from DB')
82 variables = variables_from_db()
83 else:
84 log.debug('trying to load variables from file')
85 variables = variables_from_file(manager.config_base, config['variables'])
86 log.debug('updating DB with variable file contents')
87 variables_to_db(variables)
88 env.globals = variables
89 _process(config, env)
90 return config
91
92
93 def _process(element, environment):
94 if isinstance(element, dict):
95 for k, v in element.items():
96 new_key = _process(k, environment)
97 if new_key:
98 element[new_key] = element.pop(k)
99 k = new_key
100 val = _process(element[k], environment)
101 if val:
102 element[k] = val
103 elif isinstance(element, list):
104 for i, v in enumerate(element):
105 val = _process(v, environment)
106 if val:
107 element[i] = val
108 elif isinstance(element, str) and '{?' in element:
109 try:
110 template = environment.from_string(element)
111 return template.render()
112 except (TemplateError, TypeError):
113 return None
114
115
116 variables_config_schema = {'type': ['string', 'boolean']}
117
118
119 @event('config.register')
120 def register_config():
121 register_config_key('variables', variables_config_schema)
122
[end of flexget/plugins/modify/variables.py]
[start of flexget/_version.py]
1 """
2 Current FlexGet version.
3 This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
4 release scripts in continuous integration. Should (almost) never be set manually.
5
6 The version should always be set to the <next release version>.dev
7 The jenkins release job will automatically strip the .dev for release,
8 and update the version again for continued development.
9 """
10 __version__ = '2.15.2.dev'
11
[end of flexget/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/_version.py b/flexget/_version.py
--- a/flexget/_version.py
+++ b/flexget/_version.py
@@ -7,4 +7,4 @@
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
-__version__ = '2.15.2.dev'
+__version__ = '2.16.0.dev'
diff --git a/flexget/plugins/modify/variables.py b/flexget/plugins/modify/variables.py
--- a/flexget/plugins/modify/variables.py
+++ b/flexget/plugins/modify/variables.py
@@ -8,7 +8,8 @@
import yaml
-from jinja2 import Environment, TemplateError
+from jinja2 import TemplateError
+from jinja2.nativetypes import NativeEnvironment
from sqlalchemy import Column
from sqlalchemy.sql.sqltypes import Unicode, DateTime, Integer
@@ -76,10 +77,13 @@
}
if 'variables' not in config or config.get('variables') is False:
return
- env = Environment(**env_params)
+ env = NativeEnvironment(**env_params)
if isinstance(config['variables'], bool):
log.debug('trying to load variables from DB')
variables = variables_from_db()
+ elif isinstance(config['variables'], dict):
+ log.debug('loading variables from config')
+ variables = config['variables']
else:
log.debug('trying to load variables from file')
variables = variables_from_file(manager.config_base, config['variables'])
@@ -113,7 +117,7 @@
return None
-variables_config_schema = {'type': ['string', 'boolean']}
+variables_config_schema = {'type': ['string', 'boolean', 'object']}
@event('config.register')
| {"golden_diff": "diff --git a/flexget/_version.py b/flexget/_version.py\n--- a/flexget/_version.py\n+++ b/flexget/_version.py\n@@ -7,4 +7,4 @@\n The jenkins release job will automatically strip the .dev for release,\n and update the version again for continued development.\n \"\"\"\n-__version__ = '2.15.2.dev'\n+__version__ = '2.16.0.dev'\ndiff --git a/flexget/plugins/modify/variables.py b/flexget/plugins/modify/variables.py\n--- a/flexget/plugins/modify/variables.py\n+++ b/flexget/plugins/modify/variables.py\n@@ -8,7 +8,8 @@\n \n import yaml\n \n-from jinja2 import Environment, TemplateError\n+from jinja2 import TemplateError\n+from jinja2.nativetypes import NativeEnvironment\n \n from sqlalchemy import Column\n from sqlalchemy.sql.sqltypes import Unicode, DateTime, Integer\n@@ -76,10 +77,13 @@\n }\n if 'variables' not in config or config.get('variables') is False:\n return\n- env = Environment(**env_params)\n+ env = NativeEnvironment(**env_params)\n if isinstance(config['variables'], bool):\n log.debug('trying to load variables from DB')\n variables = variables_from_db()\n+ elif isinstance(config['variables'], dict):\n+ log.debug('loading variables from config')\n+ variables = config['variables']\n else:\n log.debug('trying to load variables from file')\n variables = variables_from_file(manager.config_base, config['variables'])\n@@ -113,7 +117,7 @@\n return None\n \n \n-variables_config_schema = {'type': ['string', 'boolean']}\n+variables_config_schema = {'type': ['string', 'boolean', 'object']}\n \n \n @event('config.register')\n", "issue": "Cannot use variables with integer-only values\n### Expected behaviour:\r\nVariables should be able to handle integer-only values.\r\n\r\n### Actual behaviour:\r\nConfiguration parsing error (when using ``'{? deluge.port ?}'``): ``Got `50586`, expected: integer``\r\nConfiguration parsing error (when using ``{? deluge.port ?}``):\r\n```\r\nwhile parsing a flow mapping in \"<unicode string>\", line 16, column 13: port: {? deluge.port ?} ^ expected ',' or '}', but got '?' in \"<unicode string>\", line 16, column 28: port: {? deluge.port ?} ^\r\n```\r\n\r\n### Steps to reproduce:\r\n- Step 1: Try to use below config.\r\n\r\n#### Config:\r\n```\r\n from_deluge:\r\n host: '{? deluge.host ?}'\r\n port: '{? 
deluge.port ?}'\r\n\r\n```\r\n \r\n#### Log:\r\nSee above.\r\n\r\n### Additional information:\r\n\r\n- Flexget Version: 2.10.24\r\n- Python Version: 2.7.9\r\n- Installation method: pip\r\n- OS and version: macOS El Capitan 10.11.6\r\n- Link to crash log: n/a\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport codecs\nimport logging\nimport os\nfrom datetime import datetime\n\nimport yaml\n\nfrom jinja2 import Environment, TemplateError\n\nfrom sqlalchemy import Column\nfrom sqlalchemy.sql.sqltypes import Unicode, DateTime, Integer\n\nfrom flexget import db_schema\nfrom flexget.config_schema import register_config_key\nfrom flexget.event import event\nfrom flexget.manager import Session\nfrom flexget.plugin import PluginError\nfrom flexget.utils.database import json_synonym\n\nlog = logging.getLogger('variables')\n\nDB_VERSION = 0\nBase = db_schema.versioned_base('variables', DB_VERSION)\n\n\nclass Variables(Base):\n __tablename__ = 'variables'\n\n id = Column(Integer, primary_key=True)\n _variables = Column('variables', Unicode)\n variables = json_synonym('_variables')\n added = Column(DateTime, default=datetime.now)\n\n\ndef variables_from_file(config_base, filename):\n variables_file = os.path.join(config_base, filename)\n if not os.path.exists(variables_file):\n raise PluginError('File %s does not exist!' % variables_file)\n try:\n with codecs.open(variables_file, 'rb', 'utf-8') as f:\n variables_dict = yaml.safe_load(f.read())\n except yaml.YAMLError as e:\n raise PluginError('Invalid variables file: %s' % e)\n return variables_dict or {}\n\n\ndef variables_from_db():\n with Session() as session:\n variables = session.query(Variables).first()\n if variables:\n return variables.variables\n else:\n return {}\n\n\ndef variables_to_db(variables_dict):\n with Session() as session:\n variables = session.query(Variables).first()\n if not variables:\n variables = Variables()\n variables.variables = variables_dict\n session.merge(variables)\n\n\n@event('manager.before_config_validate')\ndef process_variables(config, manager):\n \"\"\"Render all string elements of the config against defined variables.\"\"\"\n env_params = {\n 'block_start_string': '^^disabled^^',\n 'block_end_string': '^^disabled^^',\n 'variable_start_string': '{?',\n 'variable_end_string': '?}'\n }\n if 'variables' not in config or config.get('variables') is False:\n return\n env = Environment(**env_params)\n if isinstance(config['variables'], bool):\n log.debug('trying to load variables from DB')\n variables = variables_from_db()\n else:\n log.debug('trying to load variables from file')\n variables = variables_from_file(manager.config_base, config['variables'])\n log.debug('updating DB with variable file contents')\n variables_to_db(variables)\n env.globals = variables\n _process(config, env)\n return config\n\n\ndef _process(element, environment):\n if isinstance(element, dict):\n for k, v in element.items():\n new_key = _process(k, environment)\n if new_key:\n element[new_key] = element.pop(k)\n k = new_key\n val = _process(element[k], environment)\n if val:\n element[k] = val\n elif isinstance(element, list):\n for i, v in enumerate(element):\n val = _process(v, environment)\n if val:\n element[i] = val\n elif isinstance(element, str) and '{?' 
in element:\n try:\n template = environment.from_string(element)\n return template.render()\n except (TemplateError, TypeError):\n return None\n\n\nvariables_config_schema = {'type': ['string', 'boolean']}\n\n\n@event('config.register')\ndef register_config():\n register_config_key('variables', variables_config_schema)\n", "path": "flexget/plugins/modify/variables.py"}, {"content": "\"\"\"\nCurrent FlexGet version.\nThis is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by\nrelease scripts in continuous integration. Should (almost) never be set manually.\n\nThe version should always be set to the <next release version>.dev\nThe jenkins release job will automatically strip the .dev for release,\nand update the version again for continued development.\n\"\"\"\n__version__ = '2.15.2.dev'\n", "path": "flexget/_version.py"}]} | 1,985 | 404 |
gh_patches_debug_21179 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-903 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a new mechanism to avoid calling the legacy memcache API
This API is only supported on the python27 runtime. Let's only try to import it in environments that actually support it.
The problem I'm running into is a bit esoteric, but here goes. We've built some GAE API shims to help move our apps to newer App Engine runtimes (like python3) that don't include these legacy APIs. Because of that, when this library tries to import google.appengine.api.memcache, it imports and uses our shim, and that isn't always desirable. Having some way to configure googleapiclient to not use this legacy API even if it is importable would be useful.
Despite this sorta niche use case, I figured I'd propose this change upstream since reducing reliance on a py2-only API shouldn't be too controversial these days.
</issue>
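A sketch of the gating the issue proposes: only attempt the legacy import when the process actually looks like App Engine. Treat `APPENGINE_RUNTIME` as an assumption here (the python27 standard runtime sets it); the import targets mirror the module below:
```
import logging
import os

LOGGER = logging.getLogger(__name__)


def autodetect():
    # Assumption: the App Engine python27 runtime sets APPENGINE_RUNTIME,
    # so only there is the legacy memcache API worth probing.
    if 'APPENGINE_RUNTIME' in os.environ:
        try:
            from google.appengine.api import memcache  # noqa: F401
            from googleapiclient.discovery_cache import appengine_memcache
            return appengine_memcache.cache
        except Exception:
            pass  # fall through to the file cache
    try:
        from googleapiclient.discovery_cache import file_cache
        return file_cache.cache
    except Exception as exc:
        LOGGER.warning(exc, exc_info=True)
        return None
```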
<code>
[start of googleapiclient/discovery_cache/__init__.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Caching utility for the discovery document."""
16
17 from __future__ import absolute_import
18
19 import logging
20 import datetime
21
22
23 LOGGER = logging.getLogger(__name__)
24
25 DISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day
26
27
28 def autodetect():
29 """Detects an appropriate cache module and returns it.
30
31 Returns:
32 googleapiclient.discovery_cache.base.Cache, a cache object which
33 is auto detected, or None if no cache object is available.
34 """
35 try:
36 from google.appengine.api import memcache
37 from . import appengine_memcache
38
39 return appengine_memcache.cache
40 except Exception:
41 try:
42 from . import file_cache
43
44 return file_cache.cache
45 except Exception as e:
46 LOGGER.warning(e, exc_info=True)
47 return None
48
[end of googleapiclient/discovery_cache/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py
--- a/googleapiclient/discovery_cache/__init__.py
+++ b/googleapiclient/discovery_cache/__init__.py
@@ -18,7 +18,7 @@
import logging
import datetime
-
+import os
LOGGER = logging.getLogger(__name__)
@@ -32,16 +32,18 @@
googleapiclient.discovery_cache.base.Cache, a cache object which
is auto detected, or None if no cache object is available.
"""
- try:
- from google.appengine.api import memcache
- from . import appengine_memcache
-
- return appengine_memcache.cache
- except Exception:
+ if 'APPENGINE_RUNTIME' in os.environ:
try:
- from . import file_cache
+ from google.appengine.api import memcache
+ from . import appengine_memcache
+
+ return appengine_memcache.cache
+ except Exception:
+ pass
+ try:
+ from . import file_cache
- return file_cache.cache
- except Exception as e:
- LOGGER.warning(e, exc_info=True)
- return None
+ return file_cache.cache
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+ return None
| {"golden_diff": "diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py\n--- a/googleapiclient/discovery_cache/__init__.py\n+++ b/googleapiclient/discovery_cache/__init__.py\n@@ -18,7 +18,7 @@\n \n import logging\n import datetime\n-\n+import os\n \n LOGGER = logging.getLogger(__name__)\n \n@@ -32,16 +32,18 @@\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n- try:\n- from google.appengine.api import memcache\n- from . import appengine_memcache\n-\n- return appengine_memcache.cache\n- except Exception:\n+ if 'APPENGINE_RUNTIME' in os.environ:\n try:\n- from . import file_cache\n+ from google.appengine.api import memcache\n+ from . import appengine_memcache\n+\n+ return appengine_memcache.cache\n+ except Exception:\n+ pass\n+ try:\n+ from . import file_cache\n \n- return file_cache.cache\n- except Exception as e:\n- LOGGER.warning(e, exc_info=True)\n- return None\n+ return file_cache.cache\n+ except Exception as e:\n+ LOGGER.warning(e, exc_info=True)\n+ return None\n", "issue": "Add a new mechanism to avoid calling the legacy memcache API\nThis API is only supported on the python27 runtime. Lets only try to import it in environments that actually support it.\r\n\r\nThe problem I'm running into is a bit esoteric, but here goes. We've built some GAE API shims to help move our apps to newer App Engine runtimes (like python3) that don't include these legacy APIs. Because of that, when this library tries to import google.appengine.api.memcache, it imports and uses our shim, and that isn't always desirable. Having some way to configure googleapiclient to not use this legacy API even if it is importable would be useful.\r\n\r\nDespite this sorta niche use case, I figured I'd propose this change upstream since reducing reliance on a py2-only API shouldn't be too controversial these days.\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Caching utility for the discovery document.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport datetime\n\n\nLOGGER = logging.getLogger(__name__)\n\nDISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day\n\n\ndef autodetect():\n \"\"\"Detects an appropriate cache module and returns it.\n\n Returns:\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n try:\n from google.appengine.api import memcache\n from . import appengine_memcache\n\n return appengine_memcache.cache\n except Exception:\n try:\n from . import file_cache\n\n return file_cache.cache\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n return None\n", "path": "googleapiclient/discovery_cache/__init__.py"}]} | 1,135 | 308 |
gh_patches_debug_686 | rasdani/github-patches | git_diff | projectmesa__mesa-398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
error launching Flockers
I have Anaconda with Python 3.6 and Mesa 0.8.1.
I launch the Flockers example's run.py and get this error:
```
Flockers e$ python run.py
Traceback (most recent call last):
File "run.py", line 1, in <module>
from flockers.server import server
File "/Users/e/Dropbox/devlib/notebooks/mesa-master/examples/Flockers/flockers/server.py", line 20, in <module>
server = ModularServer(BoidModel, [boid_canvas], "Boids", model_params)
File "/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py", line 287, in __init__
self.reset_model()
File "/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py", line 313, in reset_model
self.model = self.model_cls(**model_params)
TypeError: __init__() got an unexpected keyword argument 'N'
```
</issue>
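The traceback pinpoints the mechanism: `ModularServer` ends up calling `self.model_cls(**model_params)`, so every key in `model_params` must match a keyword that the model's `__init__` accepts. A minimal reproduction with an illustrative constructor (the installed `BoidModel` evidently renamed or dropped its `N` parameter):
```
class FakeBoidModel(object):
    # Illustrative signature only; not the real flockers model.
    def __init__(self, population=100, width=100, height=100,
                 speed=5, vision=10, separation=2):
        self.population = population


model_params = {"N": 100, "width": 100, "height": 100,
                "speed": 5, "vision": 10, "separation": 2}

FakeBoidModel(**model_params)  # TypeError: unexpected keyword argument 'N'
```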
<code>
[start of examples/Flockers/flockers/server.py]
1 from mesa.visualization.ModularVisualization import ModularServer
2
3 from .model import BoidModel
4 from .SimpleContinuousModule import SimpleCanvas
5
6
7 def boid_draw(agent):
8 return {"Shape": "circle", "r": 2, "Filled": "true", "Color": "Red"}
9
10 boid_canvas = SimpleCanvas(boid_draw, 500, 500)
11 model_params = {
12 "N": 100,
13 "width": 100,
14 "height": 100,
15 "speed": 5,
16 "vision": 10,
17 "separation": 2
18 }
19
20 server = ModularServer(BoidModel, [boid_canvas], "Boids", model_params)
21
[end of examples/Flockers/flockers/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/Flockers/flockers/server.py b/examples/Flockers/flockers/server.py
--- a/examples/Flockers/flockers/server.py
+++ b/examples/Flockers/flockers/server.py
@@ -9,7 +9,7 @@
boid_canvas = SimpleCanvas(boid_draw, 500, 500)
model_params = {
- "N": 100,
+ "population": 100,
"width": 100,
"height": 100,
"speed": 5,
| {"golden_diff": "diff --git a/examples/Flockers/flockers/server.py b/examples/Flockers/flockers/server.py\n--- a/examples/Flockers/flockers/server.py\n+++ b/examples/Flockers/flockers/server.py\n@@ -9,7 +9,7 @@\n \n boid_canvas = SimpleCanvas(boid_draw, 500, 500)\n model_params = {\n- \"N\": 100,\n+ \"population\": 100,\n \"width\": 100,\n \"height\": 100,\n \"speed\": 5,\n", "issue": "error launching Flocker\nI've Anaconda with python 3.6 & Mesa 0.8.1\r\n\r\nI launch Flocker's run.py and I get this error:\r\n```\r\nFlockers e$ python run.py\r\nTraceback (most recent call last):\r\n File \"run.py\", line 1, in <module>\r\n from flockers.server import server\r\n File \"/Users/e/Dropbox/devlib/notebooks/mesa-master/examples/Flockers/flockers/server.py\", line 20, in <module>\r\n server = ModularServer(BoidModel, [boid_canvas], \"Boids\", model_params)\r\n File \"/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py\", line 287, in __init__\r\n self.reset_model()\r\n File \"/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py\", line 313, in reset_model\r\n self.model = self.model_cls(**model_params)\r\nTypeError: __init__() got an unexpected keyword argument 'N'\r\n```\n", "before_files": [{"content": "from mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import BoidModel\nfrom .SimpleContinuousModule import SimpleCanvas\n\n\ndef boid_draw(agent):\n return {\"Shape\": \"circle\", \"r\": 2, \"Filled\": \"true\", \"Color\": \"Red\"}\n\nboid_canvas = SimpleCanvas(boid_draw, 500, 500)\nmodel_params = {\n \"N\": 100,\n \"width\": 100,\n \"height\": 100,\n \"speed\": 5,\n \"vision\": 10,\n \"separation\": 2\n}\n\nserver = ModularServer(BoidModel, [boid_canvas], \"Boids\", model_params)\n", "path": "examples/Flockers/flockers/server.py"}]} | 973 | 129 |
gh_patches_debug_39717 | rasdani/github-patches | git_diff | kserve__kserve-156 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KFServing should have a consistent way of supporting model download across inference server implementations
/kind feature
**Describe the solution you'd like**
KFServing should expose a consistent way to download models across inference servers and clouds. The current implementation depends on the features that individual inference servers expose. E.g. see #137 
**Anything else you would like to add:**
Proposed solution design is documented here: https://docs.google.com/document/d/1xqBOkoQ6Vzc5gv4O5MgVVNE3qILbKuMkC-DN5zp5w28/edit?usp=sharing
</issue>
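To make the proposal concrete, a hedged sketch of what a consistent download contract could look like from an inference server's point of view. The `out_dir` parameter is hypothetical: it does not exist in the code below and only illustrates the direction of the linked design doc:
```
from kfserving.storage import Storage

# Today: every call unpacks the model into a fresh temp dir chosen by Storage.
model_dir = Storage.download("gs://my-bucket/models/flowers")

# Proposed direction: let the caller pick the destination it already
# watches, with the same call shape for every storage prefix.
model_dir = Storage.download("s3://my-bucket/models/flowers",
                             out_dir="/mnt/models")  # hypothetical parameter
```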
<code>
[start of python/kfserving/kfserving/storage.py]
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import tempfile
17 import os
18 import re
19 from minio import Minio
20 from google.cloud import storage
21 from google.auth import exceptions
22
23 _GCS_PREFIX = "gs://"
24 _S3_PREFIX = "s3://"
25 _LOCAL_PREFIX = "file://"
26
27
28 class Storage(object): # pylint: disable=too-few-public-methods
29 @staticmethod
30 def download(uri: str) -> str:
31 logging.info("Copying contents of %s to local", uri)
32 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
33 return Storage._download_local(uri)
34
35 temp_dir = tempfile.mkdtemp()
36 if uri.startswith(_GCS_PREFIX):
37 Storage._download_gcs(uri, temp_dir)
38 elif uri.startswith(_S3_PREFIX):
39 Storage._download_s3(uri, temp_dir)
40 else:
41 raise Exception("Cannot recognize storage type for " + uri +
42 "\n'%s', '%s', and '%s' are the current available storage type." %
43 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
44
45 logging.info("Successfully copied %s to %s", uri, temp_dir)
46 return temp_dir
47
48 @staticmethod
49 def _download_s3(uri, temp_dir: str):
50 client = Storage._create_minio_client()
51 bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
52 bucket_name = bucket_args[0]
53 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
54 objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
55 for obj in objects:
56 # Replace any prefix from the object key with temp_dir
57 subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
58 client.fget_object(bucket_name, obj.object_name,
59 os.path.join(temp_dir, subdir_object_key))
60
61 @staticmethod
62 def _download_gcs(uri, temp_dir: str):
63 try:
64 storage_client = storage.Client()
65 except exceptions.DefaultCredentialsError:
66 storage_client = storage.Client.create_anonymous_client()
67 bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
68 bucket_name = bucket_args[0]
69 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
70 bucket = storage_client.bucket(bucket_name)
71 blobs = bucket.list_blobs(prefix=bucket_path)
72 for blob in blobs:
73 # Replace any prefix from the object key with temp_dir
74 subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
75 # Create necessary subdirectory to store the object locally
76 if "/" in subdir_object_key:
77 local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
78 if not os.path.isdir(local_object_dir):
79 os.makedirs(local_object_dir, exist_ok=True)
80 blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))
81
82 @staticmethod
83 def _download_local(uri):
84 local_path = uri.replace(_LOCAL_PREFIX, "", 1)
85 if not os.path.exists(local_path):
86 raise Exception("Local path %s does not exist." % (uri))
87 return local_path
88
89 @staticmethod
90 def _create_minio_client():
91 # Remove possible http scheme for Minio
92 url = re.compile(r"https?://")
93 minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")),
94 access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
95 secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
96 secure=True)
97 return minioClient
98
[end of python/kfserving/kfserving/storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py
--- a/python/kfserving/kfserving/storage.py
+++ b/python/kfserving/kfserving/storage.py
@@ -27,23 +27,25 @@
class Storage(object): # pylint: disable=too-few-public-methods
@staticmethod
- def download(uri: str) -> str:
+ def download(uri: str, out_dir: str = None) -> str:
logging.info("Copying contents of %s to local", uri)
if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
return Storage._download_local(uri)
- temp_dir = tempfile.mkdtemp()
+ if out_dir is None:
+ out_dir = tempfile.mkdtemp()
+
if uri.startswith(_GCS_PREFIX):
- Storage._download_gcs(uri, temp_dir)
+ Storage._download_gcs(uri, out_dir)
elif uri.startswith(_S3_PREFIX):
- Storage._download_s3(uri, temp_dir)
+ Storage._download_s3(uri, out_dir)
else:
raise Exception("Cannot recognize storage type for " + uri +
"\n'%s', '%s', and '%s' are the current available storage type." %
(_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
- logging.info("Successfully copied %s to %s", uri, temp_dir)
- return temp_dir
+ logging.info("Successfully copied %s to %s", uri, out_dir)
+ return out_dir
@staticmethod
def _download_s3(uri, temp_dir: str):
@@ -68,16 +70,23 @@
bucket_name = bucket_args[0]
bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
bucket = storage_client.bucket(bucket_name)
- blobs = bucket.list_blobs(prefix=bucket_path)
+ prefix = bucket_path
+ if not prefix.endswith("/"):
+ prefix = prefix + "/"
+ blobs = bucket.list_blobs(prefix=prefix)
for blob in blobs:
# Replace any prefix from the object key with temp_dir
subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
+
# Create necessary subdirectory to store the object locally
if "/" in subdir_object_key:
local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
if not os.path.isdir(local_object_dir):
os.makedirs(local_object_dir, exist_ok=True)
- blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))
+ if subdir_object_key.strip() != "":
+ dest_path = os.path.join(temp_dir, subdir_object_key)
+ logging.info("Downloading: %s", dest_path)
+ blob.download_to_filename(dest_path)
@staticmethod
def _download_local(uri):
| {"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -27,23 +27,25 @@\n \n class Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n- def download(uri: str) -> str:\n+ def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n \n- temp_dir = tempfile.mkdtemp()\n+ if out_dir is None:\n+ out_dir = tempfile.mkdtemp()\n+\n if uri.startswith(_GCS_PREFIX):\n- Storage._download_gcs(uri, temp_dir)\n+ Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n- Storage._download_s3(uri, temp_dir)\n+ Storage._download_s3(uri, out_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n \n- logging.info(\"Successfully copied %s to %s\", uri, temp_dir)\n- return temp_dir\n+ logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n+ return out_dir\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n@@ -68,16 +70,23 @@\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n- blobs = bucket.list_blobs(prefix=bucket_path)\n+ prefix = bucket_path\n+ if not prefix.endswith(\"/\"):\n+ prefix = prefix + \"/\"\n+ blobs = bucket.list_blobs(prefix=prefix)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n+\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n- blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n+ if subdir_object_key.strip() != \"\":\n+ dest_path = os.path.join(temp_dir, subdir_object_key)\n+ logging.info(\"Downloading: %s\", dest_path)\n+ blob.download_to_filename(dest_path)\n \n @staticmethod\n def _download_local(uri):\n", "issue": "KFServing should have a consistent way of supporting model download across inference server implementations\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nKFServing should expose a consistent way to download models across inference servers and clouds. The current implementation depends on the features of individual inference servers expose. E.g. 
see #137 \r\n\r\n**Anything else you would like to add:**\r\nProposed solution design is documented here: https://docs.google.com/document/d/1xqBOkoQ6Vzc5gv4O5MgVVNE3qILbKuMkC-DN5zp5w28/edit?usp=sharing\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\nimport re\nfrom minio import Minio\nfrom google.cloud import storage\nfrom google.auth import exceptions\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, temp_dir)\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n client.fget_object(bucket_name, obj.object_name,\n os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n blobs = bucket.list_blobs(prefix=bucket_path)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return 
local_path\n\n @staticmethod\n def _create_minio_client():\n # Remove possible http scheme for Minio\n url = re.compile(r\"https?://\")\n minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n secure=True)\n return minioClient\n", "path": "python/kfserving/kfserving/storage.py"}]} | 1,777 | 636 |
gh_patches_debug_11230 | rasdani/github-patches | git_diff | spack__spack-12009 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issue: py-jsonschema (No checksum provided for @2.6.0 requested by nrm)
The nrm package specifically requests [email protected]. Attempting to install this package results in the following error:
==> Warning: There is no checksum on file to fetch [email protected] safely.
==> Error: Will not fetch [email protected]
Add a checksum or use --no-checksum to skip this check.
</issue>
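The fix amounts to adding a checksummed `version()` directive for 2.6.0 to the recipe. A sketch follows; the digest is a placeholder, and the real value comes from running `spack checksum py-jsonschema 2.6.0` or from hashing the PyPI tarball yourself:
```
# var/spack/repos/builtin/packages/py-jsonschema/package.py
version('2.6.0', sha256='<sha256 of jsonschema-2.6.0.tar.gz>')  # placeholder digest
version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')
```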
<code>
[start of var/spack/repos/builtin/packages/py-jsonschema/package.py]
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class PyJsonschema(PythonPackage):
10 """Jsonschema: An(other) implementation of JSON Schema for Python."""
11
12 homepage = "http://github.com/Julian/jsonschema"
13 url = "https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz"
14
15 version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')
16
17 depends_on('py-setuptools', type='build')
18 depends_on('py-vcversioner', type=('build', 'run'))
19 depends_on('py-functools32', when="^[email protected]:2.7.999", type=('build', 'run'))
20
[end of var/spack/repos/builtin/packages/py-jsonschema/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/py-jsonschema/package.py b/var/spack/repos/builtin/packages/py-jsonschema/package.py
--- a/var/spack/repos/builtin/packages/py-jsonschema/package.py
+++ b/var/spack/repos/builtin/packages/py-jsonschema/package.py
@@ -10,8 +10,9 @@
"""Jsonschema: An(other) implementation of JSON Schema for Python."""
homepage = "http://github.com/Julian/jsonschema"
- url = "https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz"
+ url = "https://pypi.io/packages/source/j/jsonschema/jsonschema-2.6.0.tar.gz"
+ version('2.6.0', sha256='6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02')
version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')
depends_on('py-setuptools', type='build')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-jsonschema/package.py b/var/spack/repos/builtin/packages/py-jsonschema/package.py\n--- a/var/spack/repos/builtin/packages/py-jsonschema/package.py\n+++ b/var/spack/repos/builtin/packages/py-jsonschema/package.py\n@@ -10,8 +10,9 @@\n \"\"\"Jsonschema: An(other) implementation of JSON Schema for Python.\"\"\"\n \n homepage = \"http://github.com/Julian/jsonschema\"\n- url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz\"\n+ url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.6.0.tar.gz\"\n \n+ version('2.6.0', sha256='6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02')\n version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')\n \n depends_on('py-setuptools', type='build')\n", "issue": "Installation issue: py-jsonchema (No checksum provided for @2.6.0 requested by nrm)\nThe nrm package specifically requests [email protected]. Attempting to install this package results in the following error:\r\n\r\n==> Warning: There is no checksum on file to fetch [email protected] safely.\r\n==> Error: Will not fetch [email protected]\r\nAdd a checksum or use --no-checksum to skip this check.\r\n\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyJsonschema(PythonPackage):\n \"\"\"Jsonschema: An(other) implementation of JSON Schema for Python.\"\"\"\n\n homepage = \"http://github.com/Julian/jsonschema\"\n url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz\"\n\n version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')\n\n depends_on('py-setuptools', type='build')\n depends_on('py-vcversioner', type=('build', 'run'))\n depends_on('py-functools32', when=\"^[email protected]:2.7.999\", type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-jsonschema/package.py"}]} | 917 | 281 |
gh_patches_debug_18554 | rasdani/github-patches | git_diff | praw-dev__praw-846 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeEncodeError is raised if reddit returns localized error message
## Issue Description
Context: [[PRAW] UnicodeEncodeError when submitting non-unicode text : redditdev](https://www.reddit.com/r/redditdev/comments/6xf600/praw_unicodeencodeerror_when_submitting/)
Reddit may return localized error messages depending on the user's preference settings. Since
localized error messages may contain non-ASCII characters (and the underlying requests library
converts the error message to the unicode type), running this code in Python 2 may raise UnicodeEncodeError:
https://github.com/praw-dev/praw/blob/efbe90f8c01a8afcda1fa09a59d1d89ed0da0f6b/praw/exceptions.py#L25
Here is an example of the localized message:
```
File "/usr/local/lib/python2.7/site-packages/praw/exceptions.py", line 25, in __init__
error_str = '{}: \'{}\''.format(error_type, message)
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-10: ordinal not in range(128)
Uncaught exception. Entering post mortem debugging
Running 'cont' or 'step' will restart the program
> /usr/local/lib/python2.7/site-packages/praw/exceptions.py(25)__init__()
-> error_str = '{}: \'{}\''.format(error_type, message)
(Pdb) p error_type
u'RATELIMIT'
(Pdb) print message
実行回数が多すぎます。9分経ってから再試行してください。
```
I think this issue only affects Python 2 users because Python 3's str type is a unicode string.
## System Information
PRAW Version: 5.0.0
Python Version: Python 2.7.13
Operating System: OS X El Capitan 10.11.6
</issue>
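A Python 2 reproduction sketch. The quoted message is reddit's Japanese rate-limit text (roughly: "Too many requests. Please try again after 9 minutes."); formatting it into a byte-string template triggers the implicit ASCII encode seen in the traceback, while a unicode template plus an explicit escape stays ASCII-safe. This is one possible fix, not necessarily the one the project adopts:
```
# -*- coding: utf-8 -*-
error_type = u'RATELIMIT'
message = u'\u5b9f\u884c\u56de\u6570\u304c\u591a\u3059\u304e\u307e\u3059\u3002'

# Fails on Python 2: a str template ASCII-encodes unicode arguments.
# error_str = '{}: \'{}\''.format(error_type, message)  # UnicodeEncodeError

# Works: keep the template unicode, escape only where ASCII is required.
error_str = u'{}: \'{}\''.format(error_type, message)
ascii_safe = error_str.encode('unicode_escape')
print(ascii_safe)
```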
<code>
[start of praw/exceptions.py]
1 """PRAW exception classes.
2
3 Includes two main exceptions: :class:`.APIException` for when something goes
4 wrong on the server side, and :class:`.ClientException` when something goes
5 wrong on the client side. Both of these classes extend :class:`.PRAWException`.
6
7 """
8
9
10 class PRAWException(Exception):
11 """The base PRAW Exception that all other exception classes extend."""
12
13
14 class APIException(PRAWException):
15 """Indicate exception that involve responses from Reddit's API."""
16
17 def __init__(self, error_type, message, field):
18 """Initialize an instance of APIException.
19
20 :param error_type: The error type set on Reddit's end.
21 :param message: The associated message for the error.
22 :param field: The input field associated with the error if available.
23
24 """
25 error_str = '{}: \'{}\''.format(error_type, message)
26 if field:
27 error_str += ' on field \'{}\''.format(field)
28 super(APIException, self).__init__(error_str)
29 self.error_type = error_type
30 self.message = message
31 self.field = field
32
33
34 class ClientException(PRAWException):
35 """Indicate exceptions that don't involve interaction with Reddit's API."""
36
[end of praw/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/praw/exceptions.py b/praw/exceptions.py
--- a/praw/exceptions.py
+++ b/praw/exceptions.py
@@ -21,10 +21,17 @@
:param message: The associated message for the error.
:param field: The input field associated with the error if available.
+ .. note: Calling `str()` on the instance returns `unicode_escape`d
+ ASCII string because the message may be localized and may contain
+ UNICODE characters. If you want a non-escaped message, access
+ the `message` atribute on the instance.
+
"""
- error_str = '{}: \'{}\''.format(error_type, message)
+ error_str = u'{}: \'{}\''.format(error_type, message)
if field:
- error_str += ' on field \'{}\''.format(field)
+ error_str += u' on field \'{}\''.format(field)
+ error_str = error_str.encode('unicode_escape').decode('ascii')
+
super(APIException, self).__init__(error_str)
self.error_type = error_type
self.message = message
| {"golden_diff": "diff --git a/praw/exceptions.py b/praw/exceptions.py\n--- a/praw/exceptions.py\n+++ b/praw/exceptions.py\n@@ -21,10 +21,17 @@\n :param message: The associated message for the error.\n :param field: The input field associated with the error if available.\n \n+ .. note: Calling `str()` on the instance returns `unicode_escape`d\n+ ASCII string because the message may be localized and may contain\n+ UNICODE characters. If you want a non-escaped message, access\n+ the `message` atribute on the instance.\n+\n \"\"\"\n- error_str = '{}: \\'{}\\''.format(error_type, message)\n+ error_str = u'{}: \\'{}\\''.format(error_type, message)\n if field:\n- error_str += ' on field \\'{}\\''.format(field)\n+ error_str += u' on field \\'{}\\''.format(field)\n+ error_str = error_str.encode('unicode_escape').decode('ascii')\n+\n super(APIException, self).__init__(error_str)\n self.error_type = error_type\n self.message = message\n", "issue": "UnicodeEncodeError is raised if reddit returns localized error message\n## Issue Description\r\n\r\nContext: [[PRAW] UnicodeEncodeError when submitting non-unicode text : redditdev](https://www.reddit.com/r/redditdev/comments/6xf600/praw_unicodeencodeerror_when_submitting/)\r\n\r\nReddit may return localized error messages depends on the user's preference settings. Since\r\nlocalized error messages may contain non-ascii characters (and underlying requests library\r\nconverts the errror message to unicode type), running this code in Python2 may raise UnicodeEncodeError:\r\n\r\nhttps://github.com/praw-dev/praw/blob/efbe90f8c01a8afcda1fa09a59d1d89ed0da0f6b/praw/exceptions.py#L25\r\n\r\nHere is an example of the localized message:\r\n\r\n```\r\n File \"/usr/local/lib/python2.7/site-packages/praw/exceptions.py\", line 25, in __init__\r\n error_str = '{}: \\'{}\\''.format(error_type, message)\r\nUnicodeEncodeError: 'ascii' codec can't encode characters in position 0-10: ordinal not in range(128)\r\nUncaught exception. Entering post mortem debugging\r\nRunning 'cont' or 'step' will restart the program\r\n> /usr/local/lib/python2.7/site-packages/praw/exceptions.py(25)__init__()\r\n-> error_str = '{}: \\'{}\\''.format(error_type, message)\r\n(Pdb) p error_type\r\nu'RATELIMIT'\r\n(Pdb) print message\r\n\u5b9f\u884c\u56de\u6570\u304c\u591a\u3059\u304e\u307e\u3059\u30029\u5206\u7d4c\u3063\u3066\u304b\u3089\u518d\u8a66\u884c\u3057\u3066\u304f\u3060\u3055\u3044\u3002\r\n```\r\n\r\nI think this issue is only affect to Python2 users because Python3's str type is unicode string. \r\n\r\n## System Information\r\n\r\n PRAW Version: 5.0.0\r\n Python Version: Python 2.7.13\r\n Operating System: OS X El Capitan 10.11.6\n", "before_files": [{"content": "\"\"\"PRAW exception classes.\n\nIncludes two main exceptions: :class:`.APIException` for when something goes\nwrong on the server side, and :class:`.ClientException` when something goes\nwrong on the client side. 
Both of these classes extend :class:`.PRAWException`.\n\n\"\"\"\n\n\nclass PRAWException(Exception):\n \"\"\"The base PRAW Exception that all other exception classes extend.\"\"\"\n\n\nclass APIException(PRAWException):\n \"\"\"Indicate exception that involve responses from Reddit's API.\"\"\"\n\n def __init__(self, error_type, message, field):\n \"\"\"Initialize an instance of APIException.\n\n :param error_type: The error type set on Reddit's end.\n :param message: The associated message for the error.\n :param field: The input field associated with the error if available.\n\n \"\"\"\n error_str = '{}: \\'{}\\''.format(error_type, message)\n if field:\n error_str += ' on field \\'{}\\''.format(field)\n super(APIException, self).__init__(error_str)\n self.error_type = error_type\n self.message = message\n self.field = field\n\n\nclass ClientException(PRAWException):\n \"\"\"Indicate exceptions that don't involve interaction with Reddit's API.\"\"\"\n", "path": "praw/exceptions.py"}]} | 1,286 | 253 |
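An aside on the record above: the patch works because `unicode_escape` maps every non-ASCII code point to a plain-ASCII `\uXXXX` sequence, so the formatted string can no longer trip Python 2's implicit ASCII encoding. A minimal, hedged sketch (the Japanese sample text is an illustrative stand-in for the localized message in the traceback, not data from the record):

```python
# -*- coding: utf-8 -*-
# Runs on Python 2 or 3; the sample message is illustrative only.
message = u"\u5b9f\u884c\u56de\u6570\u304c\u591a\u3059\u304e\u307e\u3059"  # a localized rate-limit message
error_str = u"{}: '{}'".format(u"RATELIMIT", message)
# unicode_escape yields ASCII-only bytes such as b"\\u5b9f...";
# decoding those bytes as ASCII therefore always succeeds.
safe = error_str.encode("unicode_escape").decode("ascii")
print(safe)  # RATELIMIT: '\u5b9f\u884c\u56de\u6570...'
```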
gh_patches_debug_1683 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2085 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dmesg combiner always succeeds
The [Dmesg combiner has only optional dependencies](https://github.com/RedHatInsights/insights-core/blob/master/insights/combiners/dmesg.py#L51), which means it always succeeds. This is an anti-pattern.
</issue>
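For orientation before the code: a hedged sketch of the `@combiner` dependency semantics this issue turns on, based on a plain reading of insights-core's conventions rather than anything guaranteed by this record. Passing the parsers in a positional list gates firing on at least one of them succeeding; `optional=` never gates firing at all, which is why the combiner below always runs.

```python
from insights.core.plugins import combiner
from insights.parsers.dmesg import DmesgLineList
from insights.parsers.dmesg_log import DmesgLog


@combiner([DmesgLineList, DmesgLog])
class FiresOnlyWithData(object):
    """Instantiated only when at least one parser produced a result."""

    def __init__(self, dmesg_cmd, dmesg_log):
        self.has_any = dmesg_cmd is not None or dmesg_log is not None


@combiner(optional=[DmesgLineList, DmesgLog])
class AlwaysFires(object):
    """Instantiated unconditionally -- the anti-pattern described above."""

    def __init__(self, dmesg_cmd, dmesg_log):
        self.has_any = dmesg_cmd is not None or dmesg_log is not None
```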
<code>
[start of insights/combiners/dmesg.py]
1 """
2 Dmesg
3 =====
4
5 Combiner for Dmesg information. It uses the results of the following parsers (if they are present):
6 :class:`insights.parsers.dmesg.DmesgLineList`,
7 :class:`insights.parsers.dmesg_log.DmesgLog`
8
9 Typical output of the ``/var/log/dmesg`` file is::
10
11 [ 0.000000] Initializing cgroup subsys cpu
12 [ 0.000000] Linux version 3.10.0-862.el7.x86_64 ([email protected]) \
13 (gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018
14 [ 2.090905] SELinux: Completing initialization.
15 [ 2.090907] SELinux: Setting up existing superblocks.
16 [ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms.
17 [ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team
18 [ 2.117429] systemd[1]: Inserted module 'ip_tables'
19 [ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1
20 [ 2.716874] cryptd: max_cpu_qlen set to 100
21 [ 2.804152] AES CTR mode by8 optimization enabled
22
23 Typical output of the ``dmesg`` command is::
24
25 [ 2.939498] [TTM] Initializing pool allocator
26 [ 2.939502] [TTM] Initializing DMA pool allocator
27 [ 2.940800] [drm] fb mappable at 0xFC000000
28 [ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device
29 [ 2.957375] Console: switching to colour frame buffer device 128x48
30 [ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device
31 [ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0
32 [ 3.062459] XFS (vda1): Ending clean mount
33 [ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team
34 [ 5.102434] Ebtables v2.0 registered
35
36
37 Examples:
38 >>> dmesg.dmesg_cmd_available
39 True
40 >>> dmesg.dmesg_log_available
41 True
42 >>> dmesg.dmesg_log_wrapped
43 False
44 """
45
46 from insights.core.plugins import combiner
47 from insights.parsers.dmesg import DmesgLineList
48 from insights.parsers.dmesg_log import DmesgLog
49
50
51 @combiner(optional=[DmesgLineList, DmesgLog])
52 class Dmesg(object):
53 """
54 Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.
55 """
56
57 def __init__(self, dmesg_cmd, dmesg_log):
58 if dmesg_cmd is not None:
59 self.dmesg_cmd_available = True
60 self.dmesg_cmd = dmesg_cmd
61 self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False
62 else:
63 self.dmesg_cmd_available = False
64
65 if dmesg_log is not None:
66 self.dmesg_log_available = True
67 self.dmesg_log = dmesg_log
68 self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False
69 else:
70 self.dmesg_log_available = False
71
[end of insights/combiners/dmesg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/combiners/dmesg.py b/insights/combiners/dmesg.py
--- a/insights/combiners/dmesg.py
+++ b/insights/combiners/dmesg.py
@@ -48,7 +48,7 @@
from insights.parsers.dmesg_log import DmesgLog
-@combiner(optional=[DmesgLineList, DmesgLog])
+@combiner([DmesgLineList, DmesgLog])
class Dmesg(object):
"""
Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.
| {"golden_diff": "diff --git a/insights/combiners/dmesg.py b/insights/combiners/dmesg.py\n--- a/insights/combiners/dmesg.py\n+++ b/insights/combiners/dmesg.py\n@@ -48,7 +48,7 @@\n from insights.parsers.dmesg_log import DmesgLog\n \n \n-@combiner(optional=[DmesgLineList, DmesgLog])\n+@combiner([DmesgLineList, DmesgLog])\n class Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n", "issue": "Dmesg combiner always succeeds\nThe [Dmesg combiner has only optional dependencies](https://github.com/RedHatInsights/insights-core/blob/master/insights/combiners/dmesg.py#L51), which means it always succeeds. This is an anti-pattern.\n", "before_files": [{"content": "\"\"\"\nDmesg\n=====\n\nCombiner for Dmesg information. It uses the results of the following parsers (if they are present):\n:class:`insights.parsers.dmesg.DmesgLineList`,\n:class:`insights.parsers.dmesg_log.DmesgLog`\n\nTypical output of the ``/var/log/dmesg`` file is::\n\n[ 0.000000] Initializing cgroup subsys cpu\n[ 0.000000] Linux version 3.10.0-862.el7.x86_64 ([email protected]) \\\n(gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018\n[ 2.090905] SELinux: Completing initialization.\n[ 2.090907] SELinux: Setting up existing superblocks.\n[ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms.\n[ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team\n[ 2.117429] systemd[1]: Inserted module 'ip_tables'\n[ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1\n[ 2.716874] cryptd: max_cpu_qlen set to 100\n[ 2.804152] AES CTR mode by8 optimization enabled\n\nTypical output of the ``dmesg`` command is::\n\n[ 2.939498] [TTM] Initializing pool allocator\n[ 2.939502] [TTM] Initializing DMA pool allocator\n[ 2.940800] [drm] fb mappable at 0xFC000000\n[ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device\n[ 2.957375] Console: switching to colour frame buffer device 128x48\n[ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device\n[ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0\n[ 3.062459] XFS (vda1): Ending clean mount\n[ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team\n[ 5.102434] Ebtables v2.0 registered\n\n\nExamples:\n >>> dmesg.dmesg_cmd_available\n True\n >>> dmesg.dmesg_log_available\n True\n >>> dmesg.dmesg_log_wrapped\n False\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.dmesg import DmesgLineList\nfrom insights.parsers.dmesg_log import DmesgLog\n\n\n@combiner(optional=[DmesgLineList, DmesgLog])\nclass Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n \"\"\"\n\n def __init__(self, dmesg_cmd, dmesg_log):\n if dmesg_cmd is not None:\n self.dmesg_cmd_available = True\n self.dmesg_cmd = dmesg_cmd\n self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False\n else:\n self.dmesg_cmd_available = False\n\n if dmesg_log is not None:\n self.dmesg_log_available = True\n self.dmesg_log = dmesg_log\n self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False\n else:\n self.dmesg_log_available = False\n", "path": "insights/combiners/dmesg.py"}]} | 1,753 | 144 |
gh_patches_debug_31108 | rasdani/github-patches | git_diff | python-poetry__poetry-5053 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update prompt environment variable when opening shell
<!--
Hi there! Thank you for wanting to make Poetry better.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] I have searched the [documentation](https://poetry.eustace.io/docs/) and believe that my question is not covered.
## Feature Request
<!-- Now feel free to write your idea for improvement. Thanks again 🙌 ❤️ -->
When running `poetry shell` you have no idea whether you're in the virtualenv or not. Please add the virtualenv's name to the $PROMPT or $PS1 variable.
</issue>
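Before the code: a hedged sketch of one way to honor this request on Windows (on POSIX the prompt is already rewritten once the virtualenv's activate script is sourced, as the code below does via pexpect). Function name and layout are illustrative; `-NoExit` keeps PowerShell open after the activation script runs, and cmd's `/K` runs the batch file without terminating.

```python
import subprocess
from pathlib import Path


def activate_windows(shell_name: str, shell_path: str, env_path: Path) -> int:
    """Launch the shell with its activation script so the prompt shows the env."""
    scripts = env_path / "Scripts"
    if shell_name in ("powershell", "pwsh"):
        # -NoExit keeps the session open after the activation script runs.
        args = ["-NoExit", "-File", str(scripts / "activate.ps1")]
    else:
        # /K executes the batch file and keeps the cmd process alive.
        args = ["/K", str(scripts / "activate.bat")]
    return subprocess.run([shell_path, *args]).returncode
```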
<code>
[start of src/poetry/utils/shell.py]
1 import os
2 import signal
3 import sys
4
5 from pathlib import Path
6 from typing import TYPE_CHECKING
7 from typing import Any
8 from typing import Optional
9
10 import pexpect
11
12 from cleo.terminal import Terminal
13 from shellingham import ShellDetectionFailure
14 from shellingham import detect_shell
15
16 from poetry.utils._compat import WINDOWS
17
18
19 if TYPE_CHECKING:
20 from poetry.utils.env import VirtualEnv
21
22
23 class Shell:
24 """
25 Represents the current shell.
26 """
27
28 _shell = None
29
30 def __init__(self, name: str, path: str) -> None:
31 self._name = name
32 self._path = path
33
34 @property
35 def name(self) -> str:
36 return self._name
37
38 @property
39 def path(self) -> str:
40 return self._path
41
42 @classmethod
43 def get(cls) -> "Shell":
44 """
45 Retrieve the current shell.
46 """
47 if cls._shell is not None:
48 return cls._shell
49
50 try:
51 name, path = detect_shell(os.getpid())
52 except (RuntimeError, ShellDetectionFailure):
53 shell = None
54
55 if os.name == "posix":
56 shell = os.environ.get("SHELL")
57 elif os.name == "nt":
58 shell = os.environ.get("COMSPEC")
59
60 if not shell:
61 raise RuntimeError("Unable to detect the current shell.")
62
63 name, path = Path(shell).stem, shell
64
65 cls._shell = cls(name, path)
66
67 return cls._shell
68
69 def activate(self, env: "VirtualEnv") -> Optional[int]:
70 if WINDOWS:
71 return env.execute(self.path)
72
73 import shlex
74
75 terminal = Terminal()
76 with env.temp_environ():
77 c = pexpect.spawn(
78 self._path, ["-i"], dimensions=(terminal.height, terminal.width)
79 )
80
81 if self._name == "zsh":
82 c.setecho(False)
83
84 activate_script = self._get_activate_script()
85 bin_dir = "Scripts" if WINDOWS else "bin"
86 activate_path = env.path / bin_dir / activate_script
87 c.sendline(f"{self._get_source_command()} {shlex.quote(str(activate_path))}")
88
89 def resize(sig: Any, data: Any) -> None:
90 terminal = Terminal()
91 c.setwinsize(terminal.height, terminal.width)
92
93 signal.signal(signal.SIGWINCH, resize)
94
95 # Interact with the new shell.
96 c.interact(escape_character=None)
97 c.close()
98
99 sys.exit(c.exitstatus)
100
101 def _get_activate_script(self) -> str:
102 if self._name == "fish":
103 suffix = ".fish"
104 elif self._name in ("csh", "tcsh"):
105 suffix = ".csh"
106 else:
107 suffix = ""
108
109 return "activate" + suffix
110
111 def _get_source_command(self) -> str:
112 if self._name in ("fish", "csh", "tcsh"):
113 return "source"
114 return "."
115
116 def __repr__(self) -> str:
117 return f'{self.__class__.__name__}("{self._name}", "{self._path}")'
118
[end of src/poetry/utils/shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/poetry/utils/shell.py b/src/poetry/utils/shell.py
--- a/src/poetry/utils/shell.py
+++ b/src/poetry/utils/shell.py
@@ -1,5 +1,6 @@
import os
import signal
+import subprocess
import sys
from pathlib import Path
@@ -67,8 +68,19 @@
return cls._shell
def activate(self, env: "VirtualEnv") -> Optional[int]:
+ activate_script = self._get_activate_script()
+ bin_dir = "Scripts" if WINDOWS else "bin"
+ activate_path = env.path / bin_dir / activate_script
+
if WINDOWS:
- return env.execute(self.path)
+ if self._name in ("powershell", "pwsh"):
+ args = ["-NoExit", "-File", str(activate_path)]
+ else:
+ # /K will execute the bat file and
+ # keep the cmd process from terminating
+ args = ["/K", str(activate_path)]
+ completed_proc = subprocess.run([self.path, *args])
+ return completed_proc.returncode
import shlex
@@ -81,9 +93,6 @@
if self._name == "zsh":
c.setecho(False)
- activate_script = self._get_activate_script()
- bin_dir = "Scripts" if WINDOWS else "bin"
- activate_path = env.path / bin_dir / activate_script
c.sendline(f"{self._get_source_command()} {shlex.quote(str(activate_path))}")
def resize(sig: Any, data: Any) -> None:
@@ -103,6 +112,10 @@
suffix = ".fish"
elif self._name in ("csh", "tcsh"):
suffix = ".csh"
+ elif self._name in ("powershell", "pwsh"):
+ suffix = ".ps1"
+ elif self._name == "cmd":
+ suffix = ".bat"
else:
suffix = ""
| {"golden_diff": "diff --git a/src/poetry/utils/shell.py b/src/poetry/utils/shell.py\n--- a/src/poetry/utils/shell.py\n+++ b/src/poetry/utils/shell.py\n@@ -1,5 +1,6 @@\n import os\n import signal\n+import subprocess\n import sys\n \n from pathlib import Path\n@@ -67,8 +68,19 @@\n return cls._shell\n \n def activate(self, env: \"VirtualEnv\") -> Optional[int]:\n+ activate_script = self._get_activate_script()\n+ bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n+ activate_path = env.path / bin_dir / activate_script\n+\n if WINDOWS:\n- return env.execute(self.path)\n+ if self._name in (\"powershell\", \"pwsh\"):\n+ args = [\"-NoExit\", \"-File\", str(activate_path)]\n+ else:\n+ # /K will execute the bat file and\n+ # keep the cmd process from terminating\n+ args = [\"/K\", str(activate_path)]\n+ completed_proc = subprocess.run([self.path, *args])\n+ return completed_proc.returncode\n \n import shlex\n \n@@ -81,9 +93,6 @@\n if self._name == \"zsh\":\n c.setecho(False)\n \n- activate_script = self._get_activate_script()\n- bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n- activate_path = env.path / bin_dir / activate_script\n c.sendline(f\"{self._get_source_command()} {shlex.quote(str(activate_path))}\")\n \n def resize(sig: Any, data: Any) -> None:\n@@ -103,6 +112,10 @@\n suffix = \".fish\"\n elif self._name in (\"csh\", \"tcsh\"):\n suffix = \".csh\"\n+ elif self._name in (\"powershell\", \"pwsh\"):\n+ suffix = \".ps1\"\n+ elif self._name == \"cmd\":\n+ suffix = \".bat\"\n else:\n suffix = \"\"\n", "issue": "Update prompt environment variable when opening shell\n<!--\r\n Hi there! Thank you for wanting to make Poetry better.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have searched the [documentation](https://poetry.eustace.io/docs/) and believe that my question is not covered.\r\n\r\n## Feature Request\r\n<!-- Now feel free to write your idea for improvement. Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nWhen running `poetry shell` you have no idea your in the virtualenv or not. 
Please add the virtualenv's name to the $PROMPT or $PS1 variable.\n", "before_files": [{"content": "import os\nimport signal\nimport sys\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import Optional\n\nimport pexpect\n\nfrom cleo.terminal import Terminal\nfrom shellingham import ShellDetectionFailure\nfrom shellingham import detect_shell\n\nfrom poetry.utils._compat import WINDOWS\n\n\nif TYPE_CHECKING:\n from poetry.utils.env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name: str, path: str) -> None:\n self._name = name\n self._path = path\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def path(self) -> str:\n return self._path\n\n @classmethod\n def get(cls) -> \"Shell\":\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n shell = None\n\n if os.name == \"posix\":\n shell = os.environ.get(\"SHELL\")\n elif os.name == \"nt\":\n shell = os.environ.get(\"COMSPEC\")\n\n if not shell:\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n name, path = Path(shell).stem, shell\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env: \"VirtualEnv\") -> Optional[int]:\n if WINDOWS:\n return env.execute(self.path)\n\n import shlex\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n if self._name == \"zsh\":\n c.setecho(False)\n\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n c.sendline(f\"{self._get_source_command()} {shlex.quote(str(activate_path))}\")\n\n def resize(sig: Any, data: Any) -> None:\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self) -> str:\n if self._name == \"fish\":\n suffix = \".fish\"\n elif self._name in (\"csh\", \"tcsh\"):\n suffix = \".csh\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self) -> str:\n if self._name in (\"fish\", \"csh\", \"tcsh\"):\n return \"source\"\n return \".\"\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(\"{self._name}\", \"{self._path}\")'\n", "path": "src/poetry/utils/shell.py"}]} | 1,684 | 451 |
gh_patches_debug_61040 | rasdani/github-patches | git_diff | google-research__text-to-text-transfer-transformer-480 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running hf_model.py
I am trying to run your models with [`hf_model`](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py). The current blocker is that the code uses `num_parallel_calls` [in multiple places](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py#L128); however, this function seems to be [deprecated](https://github.com/google-research/text-to-text-transfer-transformer/blob/838157d433995473e96b773c9c761b6aadf01e37/t5/data/preprocessors.py#L2651).
Wondering if there is a replacement for this function I can use as a quick fix.
</issue>
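As a possible quick fix (an assumption about how the helper is used, since the record doesn't show the call sites): in current TensorFlow the parallelism setting is passed straight to `tf.data.Dataset.map`, so a deprecated `num_parallel_calls` helper can usually be replaced by the keyword argument. `preprocess_fn` is a placeholder, not a T5 API.

```python
import tensorflow as tf


def parallel_map(ds, preprocess_fn):
    # AUTOTUNE lets tf.data choose the degree of parallelism;
    # a fixed integer also works if you want a predictable fan-out.
    return ds.map(preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
```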
<code>
[start of t5/version.py]
1 # Copyright 2020 The T5 Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Lint as: python3
16 r"""Separate file for storing the current version of T5.
17
18 Stored in a separate file so that setup.py can reference the version without
19 pulling in all the dependencies in __init__.py.
20 """
21 __version__ = '0.7.0'
22
[end of t5/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/t5/version.py b/t5/version.py
--- a/t5/version.py
+++ b/t5/version.py
@@ -18,4 +18,4 @@
Stored in a separate file so that setup.py can reference the version without
pulling in all the dependencies in __init__.py.
"""
-__version__ = '0.7.0'
+__version__ = '0.7.1'
| {"golden_diff": "diff --git a/t5/version.py b/t5/version.py\n--- a/t5/version.py\n+++ b/t5/version.py\n@@ -18,4 +18,4 @@\n Stored in a separate file so that setup.py can reference the version without\n pulling in all the dependencies in __init__.py.\n \"\"\"\n-__version__ = '0.7.0'\n+__version__ = '0.7.1'\n", "issue": "Running hf_model.py \nI am trying to run your models with [`hf_model`](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py). The current blocker issue is that the code is using `num_parallel_calls` in [in multiple places](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py#L128), however, this function seems to be [deprecated](https://github.com/google-research/text-to-text-transfer-transformer/blob/838157d433995473e96b773c9c761b6aadf01e37/t5/data/preprocessors.py#L2651).\r\n\r\nWondering if there is a replacement for this function I can use as a quick fix. \n", "before_files": [{"content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Separate file for storing the current version of T5.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n__version__ = '0.7.0'\n", "path": "t5/version.py"}]} | 949 | 92 |
gh_patches_debug_21897 | rasdani/github-patches | git_diff | weecology__retriever-1267 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fetch method should return an ordered dict (not a dict)
Hello,
I noticed that the `fetch` method returns a dict of dataframes.
To preserve the order of tables (which may be meaningful), it should return an ordered dict.
[Datapackage](https://frictionlessdata.io/specs/data-package/) resources are stored in a list, so they are ordered.
Kind regards
</issue>
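A hedged sketch of the requested change, shaped after the `fetch_tables` method in the code below (treating this as the complete fix is an assumption): build the mapping as an `OrderedDict` from `(name, frame)` pairs so iteration follows the datapackage's resource order.

```python
from collections import OrderedDict

import pandas as pd


def fetch_tables_ordered(dataset, table_names, connection):
    # (key, DataFrame) pairs are inserted in table_names order, which an
    # OrderedDict preserves on every supported Python version.
    return OrderedDict(
        (table[len(dataset) + 1:],
         pd.read_sql_query("SELECT * FROM {};".format(table), connection))
        for table in table_names
    )
```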
<code>
[start of retriever/engines/sqlite.py]
1 import os
2 import pandas as pd
3 from builtins import range
4
5 from retriever.lib.defaults import DATA_DIR
6 from retriever.lib.models import Engine, no_cleanup
7
8
9 class engine(Engine):
10 """Engine instance for SQLite."""
11
12 name = "SQLite"
13 abbreviation = "sqlite"
14 datatypes = {
15 "auto": ("INTEGER", "AUTOINCREMENT"),
16 "int": "INTEGER",
17 "bigint": "INTEGER",
18 "double": "REAL",
19 "decimal": "REAL",
20 "char": "TEXT",
21 "bool": "INTEGER",
22 }
23 placeholder = "?"
24 insert_limit = 1000
25 required_opts = [("file",
26 "Enter the filename of your SQLite database",
27 "sqlite.db"),
28 ("table_name",
29 "Format of table name",
30 "{db}_{table}"),
31 ("data_dir",
32 "Install directory",
33 DATA_DIR),
34 ]
35
36 def create_db(self):
37 """Don't create database for SQLite
38
39 SQLite doesn't create databases. Each database is a file and needs a separate
40 connection. This overloads`create_db` to do nothing in this case.
41 """
42 return None
43
44 def fetch_tables(self, dataset, table_names):
45 """Return sqlite dataset as list of pandas dataframe."""
46 connection = self.get_connection()
47 data = {table[len(dataset) + 1:]: pd.read_sql_query("SELECT * "
48 "FROM {};".format(table),
49 connection)
50 for table in table_names}
51 return data
52
53 def get_bulk_insert_statement(self):
54 """Get insert statement for bulk inserts
55
56 This places ?'s instead of the actual values so that executemany() can
57 operate as designed
58 """
59 columns = self.table.get_insert_columns()
60 column_count = len(self.table.get_insert_columns(False))
61 insert_stmt = "INSERT INTO " + self.table_name()
62 insert_stmt += " (" + columns + ")"
63 insert_stmt += " VALUES ("
64 for _ in range(0, column_count):
65 insert_stmt += "?, "
66 insert_stmt = insert_stmt.rstrip(", ") + ")"
67 return insert_stmt
68
69 def insert_data_from_file(self, filename):
70 """Perform a high speed bulk insert
71
72 Checks to see if a given file can be bulk inserted, and if so loads
73 it in chunks and inserts those chunks into the database using
74 executemany.
75 """
76 chunk_size = 1000000
77 self.get_cursor()
78
79 # Determine if the dataset includes cross-tab data
80 crosstab = len([True for c in self.table.columns if c[1][0][:3] == "ct-"]) != 0
81
82 if (([self.table.cleanup.function, self.table.header_rows] == [no_cleanup, 1])
83 and not self.table.fixed_width
84 and not crosstab
85 and (not hasattr(self.table, "do_not_bulk_insert") or not self.table.do_not_bulk_insert)):
86 filename = os.path.abspath(filename)
87 try:
88 bulk_insert_statement = self.get_bulk_insert_statement()
89 line_endings = set(['\n', '\r', '\r\n'])
90 with open(filename, 'r') as data_file:
91 data_chunk = data_file.readlines(chunk_size)
92 data_chunk = [line.rstrip('\r\n') for line in data_chunk if line not in line_endings]
93 del data_chunk[:self.table.header_rows]
94 while data_chunk:
95 data_chunk_split = [row.split(self.table.delimiter)
96 for row in data_chunk]
97 self.cursor.executemany(bulk_insert_statement, data_chunk_split)
98 data_chunk = data_file.readlines(chunk_size)
99 self.connection.commit()
100 except:
101 self.connection.rollback()
102 return Engine.insert_data_from_file(self, filename)
103 else:
104 return Engine.insert_data_from_file(self, filename)
105
106 def get_connection(self):
107 """Get db connection."""
108 import sqlite3 as dbapi
109
110 self.get_input()
111 file = self.opts["file"]
112 db_file = self.opts["data_dir"]
113 full_path = os.path.join(db_file, file)
114
115 return dbapi.connect(os.path.normpath(full_path))
116
[end of retriever/engines/sqlite.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/retriever/engines/sqlite.py b/retriever/engines/sqlite.py
--- a/retriever/engines/sqlite.py
+++ b/retriever/engines/sqlite.py
@@ -1,6 +1,7 @@
import os
import pandas as pd
from builtins import range
+from collections import OrderedDict
from retriever.lib.defaults import DATA_DIR
from retriever.lib.models import Engine, no_cleanup
@@ -44,10 +45,12 @@
def fetch_tables(self, dataset, table_names):
"""Return sqlite dataset as list of pandas dataframe."""
connection = self.get_connection()
- data = {table[len(dataset) + 1:]: pd.read_sql_query("SELECT * "
- "FROM {};".format(table),
- connection)
- for table in table_names}
+ sql_query = "SELECT * FROM {};"
+ data = OrderedDict({
+ table[len(dataset) + 1:]
+ :pd.read_sql_query(sql_query.format(table), connection)
+ for table in table_names
+ })
return data
def get_bulk_insert_statement(self):
| {"golden_diff": "diff --git a/retriever/engines/sqlite.py b/retriever/engines/sqlite.py\n--- a/retriever/engines/sqlite.py\n+++ b/retriever/engines/sqlite.py\n@@ -1,6 +1,7 @@\n import os\n import pandas as pd\n from builtins import range\n+from collections import OrderedDict\n \n from retriever.lib.defaults import DATA_DIR\n from retriever.lib.models import Engine, no_cleanup\n@@ -44,10 +45,12 @@\n def fetch_tables(self, dataset, table_names):\n \"\"\"Return sqlite dataset as list of pandas dataframe.\"\"\"\n connection = self.get_connection()\n- data = {table[len(dataset) + 1:]: pd.read_sql_query(\"SELECT * \"\n- \"FROM {};\".format(table),\n- connection)\n- for table in table_names}\n+ sql_query = \"SELECT * FROM {};\"\n+ data = OrderedDict({\n+ table[len(dataset) + 1:]\n+ :pd.read_sql_query(sql_query.format(table), connection)\n+ for table in table_names\n+ })\n return data\n \n def get_bulk_insert_statement(self):\n", "issue": "fetch method should return an ordered dict (not a dict)\nHello,\r\n\r\nI noticed that `fetch` method returns a dict of dataframes.\r\nTo preserve order of tables (which may have a meaning) it should return an ordered dict.\r\n[Datapackage](https://frictionlessdata.io/specs/data-package/) ressources are stored in a list so it's ordered.\r\n\r\nKind regards\n", "before_files": [{"content": "import os\nimport pandas as pd\nfrom builtins import range\n\nfrom retriever.lib.defaults import DATA_DIR\nfrom retriever.lib.models import Engine, no_cleanup\n\n\nclass engine(Engine):\n \"\"\"Engine instance for SQLite.\"\"\"\n\n name = \"SQLite\"\n abbreviation = \"sqlite\"\n datatypes = {\n \"auto\": (\"INTEGER\", \"AUTOINCREMENT\"),\n \"int\": \"INTEGER\",\n \"bigint\": \"INTEGER\",\n \"double\": \"REAL\",\n \"decimal\": \"REAL\",\n \"char\": \"TEXT\",\n \"bool\": \"INTEGER\",\n }\n placeholder = \"?\"\n insert_limit = 1000\n required_opts = [(\"file\",\n \"Enter the filename of your SQLite database\",\n \"sqlite.db\"),\n (\"table_name\",\n \"Format of table name\",\n \"{db}_{table}\"),\n (\"data_dir\",\n \"Install directory\",\n DATA_DIR),\n ]\n\n def create_db(self):\n \"\"\"Don't create database for SQLite\n\n SQLite doesn't create databases. Each database is a file and needs a separate\n connection. 
This overloads`create_db` to do nothing in this case.\n \"\"\"\n return None\n\n def fetch_tables(self, dataset, table_names):\n \"\"\"Return sqlite dataset as list of pandas dataframe.\"\"\"\n connection = self.get_connection()\n data = {table[len(dataset) + 1:]: pd.read_sql_query(\"SELECT * \"\n \"FROM {};\".format(table),\n connection)\n for table in table_names}\n return data\n\n def get_bulk_insert_statement(self):\n \"\"\"Get insert statement for bulk inserts\n\n This places ?'s instead of the actual values so that executemany() can\n operate as designed\n \"\"\"\n columns = self.table.get_insert_columns()\n column_count = len(self.table.get_insert_columns(False))\n insert_stmt = \"INSERT INTO \" + self.table_name()\n insert_stmt += \" (\" + columns + \")\"\n insert_stmt += \" VALUES (\"\n for _ in range(0, column_count):\n insert_stmt += \"?, \"\n insert_stmt = insert_stmt.rstrip(\", \") + \")\"\n return insert_stmt\n\n def insert_data_from_file(self, filename):\n \"\"\"Perform a high speed bulk insert\n\n Checks to see if a given file can be bulk inserted, and if so loads\n it in chunks and inserts those chunks into the database using\n executemany.\n \"\"\"\n chunk_size = 1000000\n self.get_cursor()\n\n # Determine if the dataset includes cross-tab data\n crosstab = len([True for c in self.table.columns if c[1][0][:3] == \"ct-\"]) != 0\n\n if (([self.table.cleanup.function, self.table.header_rows] == [no_cleanup, 1])\n and not self.table.fixed_width\n and not crosstab\n and (not hasattr(self.table, \"do_not_bulk_insert\") or not self.table.do_not_bulk_insert)):\n filename = os.path.abspath(filename)\n try:\n bulk_insert_statement = self.get_bulk_insert_statement()\n line_endings = set(['\\n', '\\r', '\\r\\n'])\n with open(filename, 'r') as data_file:\n data_chunk = data_file.readlines(chunk_size)\n data_chunk = [line.rstrip('\\r\\n') for line in data_chunk if line not in line_endings]\n del data_chunk[:self.table.header_rows]\n while data_chunk:\n data_chunk_split = [row.split(self.table.delimiter)\n for row in data_chunk]\n self.cursor.executemany(bulk_insert_statement, data_chunk_split)\n data_chunk = data_file.readlines(chunk_size)\n self.connection.commit()\n except:\n self.connection.rollback()\n return Engine.insert_data_from_file(self, filename)\n else:\n return Engine.insert_data_from_file(self, filename)\n\n def get_connection(self):\n \"\"\"Get db connection.\"\"\"\n import sqlite3 as dbapi\n\n self.get_input()\n file = self.opts[\"file\"]\n db_file = self.opts[\"data_dir\"]\n full_path = os.path.join(db_file, file)\n\n return dbapi.connect(os.path.normpath(full_path))\n", "path": "retriever/engines/sqlite.py"}]} | 1,752 | 249 |
gh_patches_debug_8515 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1892 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
phd throws an exception when output pipe fails, as with e.g. `head`
We should silence the BrokenPipeError exception when `pwn phd` output closes.
```
$ phd < /dev/random | head -n 1
00000000 43 18 3f 38 0e 45 9c 5d d9 b8 ed 44 7c 64 ee e3 │C·?8│·E·]│···D│|d··│
Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
BrokenPipeError: [Errno 32] Broken pipe
```
</issue>
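The conventional remedy on POSIX, sketched here with illustrative names (an assumption about placement, not a statement of pwntools' API): reset `SIGPIPE` to the default disposition so a closed pipe kills the writer silently, the way classic Unix filters behave.

```python
import signal
import sys


def dump(lines):
    # SIG_DFL restores the kernel default: terminate quietly on SIGPIPE
    # instead of letting Python translate it into BrokenPipeError.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)  # POSIX only
    for line in lines:
        print(line)


if __name__ == "__main__":
    dump(sys.stdin)
```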
<code>
[start of pwnlib/commandline/phd.py]
1 #!/usr/bin/env python2
2 from __future__ import absolute_import
3 from __future__ import division
4
5 import argparse
6 import os
7 import sys
8 import io
9
10 import pwnlib.args
11 pwnlib.args.free_form = False
12
13 from pwn import *
14 from pwnlib.commandline import common
15
16 parser = common.parser_commands.add_parser(
17 'phd',
18 help = 'Pretty hex dump',
19 description = 'Pretty hex dump'
20 )
21
22 parser.add_argument(
23 'file',
24 metavar='file',
25 nargs='?',
26 help='File to hexdump. Reads from stdin if missing.',
27 type=argparse.FileType('rb'),
28 default=getattr(sys.stdin, 'buffer', sys.stdin)
29 )
30
31 parser.add_argument(
32 "-w", "--width",
33 help="Number of bytes per line.",
34 default='16',
35 )
36
37 parser.add_argument(
38 "-l", "--highlight",
39 help="Byte to highlight.",
40 nargs="*",
41 )
42
43 parser.add_argument(
44 "-s", "--skip",
45 help="Skip this many initial bytes.",
46 default='0',
47 )
48
49 parser.add_argument(
50 "-c", "--count",
51 help="Only show this many bytes.",
52 default='-1',
53 )
54
55 parser.add_argument(
56 "-o", "--offset",
57 help="Addresses in left hand column starts at this address.",
58 default='0',
59 )
60
61 parser.add_argument(
62 "--color",
63 nargs='?',
64 help="Colorize the output. When 'auto' output is colorized exactly when stdout is a TTY. Default is 'auto'.",
65 choices = ('always', 'never', 'auto'),
66 default='auto',
67 )
68
69 def asint(s):
70 if s.startswith('0x'):
71 return int(s, 16)
72 elif s.startswith('0'):
73 return int(s, 8)
74 else:
75 return int(s, 10)
76
77 def main(args):
78 infile = args.file
79 width = asint(args.width)
80 skip = asint(args.skip)
81 count = asint(args.count)
82 offset = asint(args.offset)
83
84 # if `--color` has no argument it is `None`
85 color = args.color or 'always'
86 text.when = color
87
88 if skip:
89 try:
90 infile.seek(skip, os.SEEK_CUR)
91 except IOError:
92 infile.read(skip)
93
94 if count != -1:
95 infile = io.BytesIO(infile.read(count))
96
97 hl = []
98 if args.highlight:
99 for hs in args.highlight:
100 for h in hs.split(','):
101 hl.append(asint(h))
102
103 try:
104 for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):
105 print(line)
106 except (KeyboardInterrupt, IOError):
107 pass
108
109 if __name__ == '__main__':
110 pwnlib.commandline.common.main(__file__)
111
[end of pwnlib/commandline/phd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/commandline/phd.py b/pwnlib/commandline/phd.py
--- a/pwnlib/commandline/phd.py
+++ b/pwnlib/commandline/phd.py
@@ -4,6 +4,7 @@
import argparse
import os
+import signal
import sys
import io
@@ -100,6 +101,8 @@
for h in hs.split(','):
hl.append(asint(h))
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
try:
for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):
print(line)
| {"golden_diff": "diff --git a/pwnlib/commandline/phd.py b/pwnlib/commandline/phd.py\n--- a/pwnlib/commandline/phd.py\n+++ b/pwnlib/commandline/phd.py\n@@ -4,6 +4,7 @@\n \n import argparse\n import os\n+import signal\n import sys\n import io\n \n@@ -100,6 +101,8 @@\n for h in hs.split(','):\n hl.append(asint(h))\n \n+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n+\n try:\n for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):\n print(line)\n", "issue": "phd throws an exception when output pipe fails, as with e.g. `head`\nWe should silence the BrokenPipeError exception when `pwn phd` output closes.\r\n\r\n```\r\n$ phd < /dev/random | head -n 1\r\n00000000 43 18 3f 38 0e 45 9c 5d d9 b8 ed 44 7c 64 ee e3 \u2502C\u00b7?8\u2502\u00b7E\u00b7]\u2502\u00b7\u00b7\u00b7D\u2502|d\u00b7\u00b7\u2502\r\nException ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>\r\nBrokenPipeError: [Errno 32] Broken pipe\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport os\nimport sys\nimport io\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_commands.add_parser(\n 'phd',\n help = 'Pretty hex dump',\n description = 'Pretty hex dump'\n)\n\nparser.add_argument(\n 'file',\n metavar='file',\n nargs='?',\n help='File to hexdump. Reads from stdin if missing.',\n type=argparse.FileType('rb'),\n default=getattr(sys.stdin, 'buffer', sys.stdin)\n)\n\nparser.add_argument(\n \"-w\", \"--width\",\n help=\"Number of bytes per line.\",\n default='16',\n)\n\nparser.add_argument(\n \"-l\", \"--highlight\",\n help=\"Byte to highlight.\",\n nargs=\"*\",\n)\n\nparser.add_argument(\n \"-s\", \"--skip\",\n help=\"Skip this many initial bytes.\",\n default='0',\n)\n\nparser.add_argument(\n \"-c\", \"--count\",\n help=\"Only show this many bytes.\",\n default='-1',\n)\n\nparser.add_argument(\n \"-o\", \"--offset\",\n help=\"Addresses in left hand column starts at this address.\",\n default='0',\n)\n\nparser.add_argument(\n \"--color\",\n nargs='?',\n help=\"Colorize the output. When 'auto' output is colorized exactly when stdout is a TTY. Default is 'auto'.\",\n choices = ('always', 'never', 'auto'),\n default='auto',\n)\n\ndef asint(s):\n if s.startswith('0x'):\n return int(s, 16)\n elif s.startswith('0'):\n return int(s, 8)\n else:\n return int(s, 10)\n\ndef main(args):\n infile = args.file\n width = asint(args.width)\n skip = asint(args.skip)\n count = asint(args.count)\n offset = asint(args.offset)\n\n # if `--color` has no argument it is `None`\n color = args.color or 'always'\n text.when = color\n\n if skip:\n try:\n infile.seek(skip, os.SEEK_CUR)\n except IOError:\n infile.read(skip)\n\n if count != -1:\n infile = io.BytesIO(infile.read(count))\n\n hl = []\n if args.highlight:\n for hs in args.highlight:\n for h in hs.split(','):\n hl.append(asint(h))\n\n try:\n for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):\n print(line)\n except (KeyboardInterrupt, IOError):\n pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/phd.py"}]} | 1,555 | 144 |
gh_patches_debug_16435 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider tapetro is broken
During the global build at 2021-10-06-14-42-44, spider **tapetro** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/logs/tapetro.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson))
</issue>
<code>
[start of locations/spiders/tapetro.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3
4 from locations.items import GeojsonPointItem
5 from xlrd import open_workbook
6
7 BRANDS = {
8 'T': 'TravelCenters of America',
9 'P': 'Petro',
10 'TE': 'TA Express'
11 }
12
13
14 class TAPetroSpider(scrapy.Spider):
15 name = 'tapetro'
16 item_attributes = {'brand': "TravelCenters of America",
17 'brand_wikidata': "Q7835892"}
18 allowed_domains = ['www.ta-petro.com']
19 start_urls = (
20 'http://www.ta-petro.com/assets/ce/Documents/Master-Location-List.xls',
21 )
22
23 def parse(self, response):
24 workbook = open_workbook(file_contents=response.body)
25 sheet = workbook.sheets()[0] # Sheet1
26
27 # read header
28 nrow = 0
29 columns = []
30 for ncol in range(sheet.ncols):
31 columns.append((ncol, sheet.cell(nrow, ncol).value))
32
33 for nrow in range(1, sheet.nrows):
34 store = {}
35 for ncol, column in columns:
36 value = sheet.cell(nrow, ncol).value
37 store[column] = value
38
39 if not (store.get("LATITUDE") and store.get("LONGITUDE")):
40 continue
41
42 ref = '%s-%s-%s' % (
43 store['SITE ID#'], store['BRAND'], store['LOCATION_ID'])
44 yield GeojsonPointItem(
45 ref=ref,
46 lat=float(store['LATITUDE']),
47 lon=float(store['LONGITUDE']),
48 name=store['LOCATION'],
49 addr_full=store['ADDRESS'],
50 city=store['CITY'],
51 state=store['STATE'],
52 postcode=store['ZIPCODE'],
53 phone=store['PHONE'],
54 brand=BRANDS.get(store['BRAND'], BRANDS['T']),
55 extras={
56 'amenity:fuel': True,
57 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',
58 'fuel:diesel': True,
59 'fuel:HGV_diesel': True,
60 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0,
61 'fuel:propane': store['PROPANE'] == 'Y',
62 'hgv': True
63 }
64 )
65
[end of locations/spiders/tapetro.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/tapetro.py b/locations/spiders/tapetro.py
--- a/locations/spiders/tapetro.py
+++ b/locations/spiders/tapetro.py
@@ -57,7 +57,7 @@
'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',
'fuel:diesel': True,
'fuel:HGV_diesel': True,
- 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0,
+ 'fuel:lng': store['LNG(Liquified Natural Gas)'] == 'Y',
'fuel:propane': store['PROPANE'] == 'Y',
'hgv': True
}
| {"golden_diff": "diff --git a/locations/spiders/tapetro.py b/locations/spiders/tapetro.py\n--- a/locations/spiders/tapetro.py\n+++ b/locations/spiders/tapetro.py\n@@ -57,7 +57,7 @@\n 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',\n 'fuel:diesel': True,\n 'fuel:HGV_diesel': True,\n- 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0,\n+ 'fuel:lng': store['LNG(Liquified Natural Gas)'] == 'Y',\n 'fuel:propane': store['PROPANE'] == 'Y',\n 'hgv': True\n }\n", "issue": "Spider tapetro is broken\nDuring the global build at 2021-10-06-14-42-44, spider **tapetro** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/logs/tapetro.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom xlrd import open_workbook\n\nBRANDS = {\n 'T': 'TravelCenters of America',\n 'P': 'Petro',\n 'TE': 'TA Express'\n}\n\n\nclass TAPetroSpider(scrapy.Spider):\n name = 'tapetro'\n item_attributes = {'brand': \"TravelCenters of America\",\n 'brand_wikidata': \"Q7835892\"}\n allowed_domains = ['www.ta-petro.com']\n start_urls = (\n 'http://www.ta-petro.com/assets/ce/Documents/Master-Location-List.xls',\n )\n\n def parse(self, response):\n workbook = open_workbook(file_contents=response.body)\n sheet = workbook.sheets()[0] # Sheet1\n\n # read header\n nrow = 0\n columns = []\n for ncol in range(sheet.ncols):\n columns.append((ncol, sheet.cell(nrow, ncol).value))\n\n for nrow in range(1, sheet.nrows):\n store = {}\n for ncol, column in columns:\n value = sheet.cell(nrow, ncol).value\n store[column] = value\n\n if not (store.get(\"LATITUDE\") and store.get(\"LONGITUDE\")):\n continue\n\n ref = '%s-%s-%s' % (\n store['SITE ID#'], store['BRAND'], store['LOCATION_ID'])\n yield GeojsonPointItem(\n ref=ref,\n lat=float(store['LATITUDE']),\n lon=float(store['LONGITUDE']),\n name=store['LOCATION'],\n addr_full=store['ADDRESS'],\n city=store['CITY'],\n state=store['STATE'],\n postcode=store['ZIPCODE'],\n phone=store['PHONE'],\n brand=BRANDS.get(store['BRAND'], BRANDS['T']),\n extras={\n 'amenity:fuel': True,\n 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',\n 'fuel:diesel': True,\n 'fuel:HGV_diesel': True,\n 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0,\n 'fuel:propane': store['PROPANE'] == 'Y',\n 'hgv': True\n }\n )\n", "path": "locations/spiders/tapetro.py"}]} | 1,429 | 234 |
gh_patches_debug_3412 | rasdani/github-patches | git_diff | dynaconf__dynaconf-767 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] filter_strategy config not working
**Describe the bug**
It seems that the `filter_strategy` config (which, by the way, is misspelled in the [docs](https://github.com/rochacbruno/dynaconf/blob/master/docs/configuration.md)) is not taken into account when used in the `Dynaconf` constructor.
**To Reproduce**
Steps to reproduce the behavior:
1. Having the following folder structure
Just a plain python script.
2. Having the following config files:
No config file, just using env variables
4. Having the following app code:
<details>
<summary> Code </summary>
**/test/test.py**
```python
import dynaconf
class CustomFilter:
def __call__(self, data):
print("this is never called")
return {
k: v
for k, v in data.items()
if k.startswith("PREFIX")
}
if __name__ == "__main__":
dc = dynaconf.Dynaconf(
envvar_prefix=False,
filter_strategy=CustomFilter(),
)
print(dc.as_dict())
```
</details>
5. Executing under the following environment
<details>
<summary> Execution </summary>
```bash
PREFIX_VAR="HELLO" OTHER_VAR="WORLD" python test.py
```
</details>
**Expected behavior**
`CustomFilter` should be called ("this is never called" should be displayed) and only the `PREFIX_VAR` should be in dict, not `OTHER_VAR`
**Environment (please complete the following information):**
- OS: Linux version 5.10.60.1-microsoft-standard-WSL2
- Dynaconf Version 3.1.9
- Framework: None
**Context**
I was looking for a way to filter out empty environment variables.
</issue>
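A hedged sketch of where the hook plausibly belongs, modeled on the bulk-load path in the loader shown below; the `FILTER_STRATEGY` settings key is an assumption about Dynaconf's internal naming, mirroring how the constructor argument would be stored.

```python
def update_with_filter(obj, data, identifier="env"):
    # Pass the gathered envvars through the configured strategy (if any)
    # before they reach the settings object.
    filter_strategy = obj.get("FILTER_STRATEGY")
    if filter_strategy:
        data = filter_strategy(data)
    obj.update(data, loader_identifier=identifier)
```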
<code>
[start of dynaconf/loaders/env_loader.py]
1 from __future__ import annotations
2
3 from os import environ
4
5 from dynaconf.utils import missing
6 from dynaconf.utils import upperfy
7 from dynaconf.utils.parse_conf import parse_conf_data
8 from dynaconf.vendor.dotenv import cli as dotenv_cli
9
10
11 IDENTIFIER = "env"
12
13
14 def load(obj, env=None, silent=True, key=None):
15 """Loads envvars with prefixes:
16
17 `DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`
18 """
19 global_prefix = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
20 if global_prefix is False or global_prefix.upper() != "DYNACONF":
21 load_from_env(obj, "DYNACONF", key, silent, IDENTIFIER + "_global")
22
23 # Load the global env if exists and overwrite everything
24 load_from_env(obj, global_prefix, key, silent, IDENTIFIER + "_global")
25
26
27 def load_from_env(
28 obj,
29 prefix=False,
30 key=None,
31 silent=False,
32 identifier=IDENTIFIER,
33 env=False, # backwards compatibility bc renamed param
34 ):
35 if prefix is False and env is not False:
36 prefix = env
37
38 env_ = ""
39 if prefix is not False:
40 if not isinstance(prefix, str):
41 raise TypeError("`prefix/env` must be str or False")
42
43 prefix = prefix.upper()
44 env_ = f"{prefix}_"
45
46 # Load a single environment variable explicitly.
47 if key:
48 key = upperfy(key)
49 value = environ.get(f"{env_}{key}")
50 if value:
51 try: # obj is a Settings
52 obj.set(key, value, loader_identifier=identifier, tomlfy=True)
53 except AttributeError: # obj is a dict
54 obj[key] = parse_conf_data(
55 value, tomlfy=True, box_settings=obj
56 )
57
58 # Load environment variables in bulk (when matching).
59 else:
60 # Only known variables should be loaded from environment?
61 ignore_unknown = obj.get("IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF")
62
63 trim_len = len(env_)
64 data = {
65 key[trim_len:]: parse_conf_data(
66 data, tomlfy=True, box_settings=obj
67 )
68 for key, data in environ.items()
69 if key.startswith(env_)
70 and not (
71 # Ignore environment variables that haven't been
72 # pre-defined in settings space.
73 ignore_unknown
74 and obj.get(key[trim_len:], default=missing) is missing
75 )
76 }
77 # Update the settings space based on gathered data from environment.
78 if data:
79 obj.update(data, loader_identifier=identifier)
80
81
82 def write(settings_path, settings_data, **kwargs):
83 """Write data to .env file"""
84 for key, value in settings_data.items():
85 quote_mode = (
86 isinstance(value, str)
87 and (value.startswith("'") or value.startswith('"'))
88 ) or isinstance(value, (list, dict))
89 dotenv_cli.set_key(
90 str(settings_path),
91 key,
92 str(value),
93 quote_mode="always" if quote_mode else "none",
94 )
95
[end of dynaconf/loaders/env_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynaconf/loaders/env_loader.py b/dynaconf/loaders/env_loader.py
--- a/dynaconf/loaders/env_loader.py
+++ b/dynaconf/loaders/env_loader.py
@@ -76,6 +76,9 @@
}
# Update the settings space based on gathered data from environment.
if data:
+ filter_strategy = obj.get("FILTER_STRATEGY")
+ if filter_strategy:
+ data = filter_strategy(data)
obj.update(data, loader_identifier=identifier)
| {"golden_diff": "diff --git a/dynaconf/loaders/env_loader.py b/dynaconf/loaders/env_loader.py\n--- a/dynaconf/loaders/env_loader.py\n+++ b/dynaconf/loaders/env_loader.py\n@@ -76,6 +76,9 @@\n }\n # Update the settings space based on gathered data from environment.\n if data:\n+ filter_strategy = obj.get(\"FILTER_STRATEGY\")\n+ if filter_strategy:\n+ data = filter_strategy(data)\n obj.update(data, loader_identifier=identifier)\n", "issue": "[bug] filter_strategy config not working\n**Describe the bug**\r\nIt seems that the `filter_strategy` config (which by the way is misspelled int the [docs](https://github.com/rochacbruno/dynaconf/blob/master/docs/configuration.md)) is not taken into account when used in the `Dynaconf` constructor.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Having the following folder structure\r\nJust a plain python script.\r\n\r\n2. Having the following config files:\r\nNo config file, just using env variables\r\n\r\n4. Having the following app code:\r\n\r\n<details>\r\n<summary> Code </summary>\r\n\r\n**/test/test.py**\r\n```python\r\nimport dynaconf\r\n\r\nclass CustomFilter:\r\n def __call__(self, data):\r\n print(\"this is never called\")\r\n return {\r\n k: v\r\n for k, v in data.items()\r\n if k.startswith(\"PREFIX\")\r\n }\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dc = dynaconf.Dynaconf(\r\n envvar_prefix=False,\r\n filter_strategy=CustomFilter(),\r\n )\r\n print(dc.as_dict())\r\n```\r\n\r\n</details>\r\n\r\n5. Executing under the following environment\r\n\r\n<details>\r\n<summary> Execution </summary>\r\n\r\n```bash\r\nPREFIX_VAR=\"HELLO\" OTHER_VAR=\"WORLD\" python test.py\r\n```\r\n\r\n</details>\r\n\r\n**Expected behavior**\r\n`CustomFilter` should be called (\"this is never called\" should be displayed) and only the `PREFIX_VAR` should be in dict, not `OTHER_VAR`\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Linux version 5.10.60.1-microsoft-standard-WSL2 \r\n - Dynaconf Version 3.1.9\r\n - Framework: None\r\n\r\n**Context**\r\n\r\nI was looking for a way to filter out empty environment variables.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom os import environ\n\nfrom dynaconf.utils import missing\nfrom dynaconf.utils import upperfy\nfrom dynaconf.utils.parse_conf import parse_conf_data\nfrom dynaconf.vendor.dotenv import cli as dotenv_cli\n\n\nIDENTIFIER = \"env\"\n\n\ndef load(obj, env=None, silent=True, key=None):\n \"\"\"Loads envvars with prefixes:\n\n `DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`\n \"\"\"\n global_prefix = obj.get(\"ENVVAR_PREFIX_FOR_DYNACONF\")\n if global_prefix is False or global_prefix.upper() != \"DYNACONF\":\n load_from_env(obj, \"DYNACONF\", key, silent, IDENTIFIER + \"_global\")\n\n # Load the global env if exists and overwrite everything\n load_from_env(obj, global_prefix, key, silent, IDENTIFIER + \"_global\")\n\n\ndef load_from_env(\n obj,\n prefix=False,\n key=None,\n silent=False,\n identifier=IDENTIFIER,\n env=False, # backwards compatibility bc renamed param\n):\n if prefix is False and env is not False:\n prefix = env\n\n env_ = \"\"\n if prefix is not False:\n if not isinstance(prefix, str):\n raise TypeError(\"`prefix/env` must be str or False\")\n\n prefix = prefix.upper()\n env_ = f\"{prefix}_\"\n\n # Load a single environment variable explicitly.\n if key:\n key = upperfy(key)\n value = environ.get(f\"{env_}{key}\")\n if value:\n try: # obj is a Settings\n obj.set(key, value, 
loader_identifier=identifier, tomlfy=True)\n except AttributeError: # obj is a dict\n obj[key] = parse_conf_data(\n value, tomlfy=True, box_settings=obj\n )\n\n # Load environment variables in bulk (when matching).\n else:\n # Only known variables should be loaded from environment?\n ignore_unknown = obj.get(\"IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF\")\n\n trim_len = len(env_)\n data = {\n key[trim_len:]: parse_conf_data(\n data, tomlfy=True, box_settings=obj\n )\n for key, data in environ.items()\n if key.startswith(env_)\n and not (\n # Ignore environment variables that haven't been\n # pre-defined in settings space.\n ignore_unknown\n and obj.get(key[trim_len:], default=missing) is missing\n )\n }\n # Update the settings space based on gathered data from environment.\n if data:\n obj.update(data, loader_identifier=identifier)\n\n\ndef write(settings_path, settings_data, **kwargs):\n \"\"\"Write data to .env file\"\"\"\n for key, value in settings_data.items():\n quote_mode = (\n isinstance(value, str)\n and (value.startswith(\"'\") or value.startswith('\"'))\n ) or isinstance(value, (list, dict))\n dotenv_cli.set_key(\n str(settings_path),\n key,\n str(value),\n quote_mode=\"always\" if quote_mode else \"none\",\n )\n", "path": "dynaconf/loaders/env_loader.py"}]} | 1,794 | 112 |
gh_patches_debug_16832 | rasdani/github-patches | git_diff | pantsbuild__pants-20984 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`stevedore_namespace` documentation shows `str`'s doc string
**Describe the bug**
The `stevedore_namespace` BUILD file symbol has a doc-string, but it isn't shown in `pants help-all`. It instead shows what looks like the doc string for `str`.
https://github.com/pantsbuild/pants/blob/ec86d19cd954cd49a9562880a7c0dbc45632778c/src/python/pants/backend/python/framework/stevedore/target_types.py#L13-L30
To reproduce, enable the stevedore backend and look at `help` or `help-all`:
```shell
PANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help stevedore_namespace
```
```
`stevedore_namespace` BUILD file symbol
---------------------------------------
str(object='') -> str
str(bytes_or_buffer[, encoding[, errors]]) -> str
Create a new string object from the given object. If encoding or
errors is specified, then the object must expose a data buffer
that will be decoded using the given encoding and error handler.
Otherwise, returns the result of object.__str__() (if defined)
or repr(object).
encoding defaults to sys.getdefaultencoding().
errors defaults to 'strict'.
```
```shell
PANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help-all | \
jq .name_to_build_file_info.stevedore_namespace
```
```json
{
"documentation": "str(object='') -> str\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\n\nCreate a new string object from the given object. If encoding or\nerrors is specified, then the object must expose a data buffer\nthat will be decoded using the given encoding and error handler.\nOtherwise, returns the result of object.__str__() (if defined)\nor repr(object).\nencoding defaults to sys.getdefaultencoding().\nerrors defaults to 'strict'.",
"is_target": false,
"name": "stevedore_namespace",
"signature": null
}
```
**Pants version**
Seems to be visible in 2.16 through to the currently latest.
**OS**
both
**Additional info**
- Will appear in online docs too after https://github.com/pantsbuild/pantsbuild.org/pull/216
- Relevant issues:
- https://github.com/pantsbuild/pants/discussions/18117
- https://github.com/pantsbuild/pants/issues/14832
</issue>
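The underlying Python behavior explains the symptom: an f-string at the top of a class body is an ordinary expression statement, not a string literal, so the compiler never stores it in `__doc__`; documentation tools such as `inspect.getdoc` then walk the MRO and pick up `str.__doc__` instead. A minimal standalone sketch (illustrative only, not Pants code):
```python
import inspect

class WithFString(str):
    f"""Looks like a docstring, but the f-prefix makes it a plain expression."""

class WithPlainString(str):
    """A real docstring."""

print(WithFString.__doc__)              # None - the f-string was evaluated and discarded
print(inspect.getdoc(WithFString))      # str's own doc string, found via MRO fallback
print(inspect.getdoc(WithPlainString))  # 'A real docstring.'
```
This is also why the eventual fix simply drops the `f` prefix, hard-coding `python_distribution` where the `{PythonDistribution.alias}` placeholder would no longer interpolate.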
<code>
[start of src/python/pants/backend/python/framework/stevedore/target_types.py]
1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7
8 from pants.backend.python.target_types import PythonDistribution
9 from pants.engine.target import StringSequenceField, Targets
10 from pants.util.strutil import help_text
11
12
13 class StevedoreNamespace(str):
14 f"""Tag a namespace in entry_points as a stevedore namespace.
15
16 This is required for the entry_point to be visible to dep inference
17 based on the `stevedore_namespaces` field.
18
19 For example:
20 {PythonDistribution.alias}(
21 ...
22 entry_points={{
23 stevedore_namespace("a.b.c"): {{
24 "plugin_name": "some.entry:point",
25 }},
26 }},
27 )
28 """
29
30 alias = "stevedore_namespace"
31
32
33 # This is a lot like a SpecialCasedDependencies field, but it doesn't list targets directly.
34 class StevedoreNamespacesField(StringSequenceField):
35 alias = "stevedore_namespaces"
36 help = help_text(
37 f"""
38 List the stevedore namespaces required by this target.
39
40 Code for all `entry_points` on `{PythonDistribution.alias}` targets with
41 these namespaces will be added as dependencies so that they are
42 available on PYTHONPATH during tests. Note that this is only a subset
43 of the `{PythonDistribution.alias}`s dependencies, so the `entry_points`
44 only need to be defined on one `{PythonDistribution.alias}` even if the
45 test only needs some of the `entry_points` namespaces on it.
46
47 Plus, an `entry_points.txt` file will be generated in the sandbox so that
48 each of the `{PythonDistribution.alias}`s appear to be "installed". The
49 `entry_points.txt` file will only include the namespaces requested on this
50 field. Without this, stevedore would not be able to look up plugins in
51 the setuptools `entry_points` metadata.
52
53 NOTE: Each `{PythonDistribution.alias}` must opt-in to being included in
54 this repo-wide inference by tagging the namespaces with
55 `{StevedoreNamespace.alias}("my.stevedore.extension")`.
56
57 The stevedore namespace format (`my.stevedore.extension`) is similar
58 to a Python namespace.
59 """
60 )
61
62
63 class AllStevedoreExtensionTargets(Targets):
64 pass
65
66
67 @dataclass(frozen=True)
68 class StevedoreNamespacesProviderTargetsRequest:
69 stevedore_namespaces: StevedoreNamespacesField
70
71
72 class StevedoreExtensionTargets(Targets):
73 pass
74
[end of src/python/pants/backend/python/framework/stevedore/target_types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/python/framework/stevedore/target_types.py b/src/python/pants/backend/python/framework/stevedore/target_types.py
--- a/src/python/pants/backend/python/framework/stevedore/target_types.py
+++ b/src/python/pants/backend/python/framework/stevedore/target_types.py
@@ -11,20 +11,22 @@
class StevedoreNamespace(str):
- f"""Tag a namespace in entry_points as a stevedore namespace.
+ """Tag a namespace in entry_points as a stevedore namespace.
This is required for the entry_point to be visible to dep inference
based on the `stevedore_namespaces` field.
For example:
- {PythonDistribution.alias}(
- ...
- entry_points={{
- stevedore_namespace("a.b.c"): {{
- "plugin_name": "some.entry:point",
- }},
- }},
- )
+ ```python
+ python_distribution(
+ ...
+ entry_points={
+ stevedore_namespace("a.b.c"): {
+ "plugin_name": "some.entry:point",
+ },
+ },
+ )
+ ```
"""
alias = "stevedore_namespace"
| {"golden_diff": "diff --git a/src/python/pants/backend/python/framework/stevedore/target_types.py b/src/python/pants/backend/python/framework/stevedore/target_types.py\n--- a/src/python/pants/backend/python/framework/stevedore/target_types.py\n+++ b/src/python/pants/backend/python/framework/stevedore/target_types.py\n@@ -11,20 +11,22 @@\n \n \n class StevedoreNamespace(str):\n- f\"\"\"Tag a namespace in entry_points as a stevedore namespace.\n+ \"\"\"Tag a namespace in entry_points as a stevedore namespace.\n \n This is required for the entry_point to be visible to dep inference\n based on the `stevedore_namespaces` field.\n \n For example:\n- {PythonDistribution.alias}(\n- ...\n- entry_points={{\n- stevedore_namespace(\"a.b.c\"): {{\n- \"plugin_name\": \"some.entry:point\",\n- }},\n- }},\n- )\n+ ```python\n+ python_distribution(\n+ ...\n+ entry_points={\n+ stevedore_namespace(\"a.b.c\"): {\n+ \"plugin_name\": \"some.entry:point\",\n+ },\n+ },\n+ )\n+ ```\n \"\"\"\n \n alias = \"stevedore_namespace\"\n", "issue": "`stevedore_namespace` documentation shows `str`'s doc string\n**Describe the bug**\r\n\r\nThe `stevedore_namespace` BUILD file symbol has a doc-string, but it isn't shown in `pants help-all`. It instead shows what looks like the doc string for `str`.\r\n\r\nhttps://github.com/pantsbuild/pants/blob/ec86d19cd954cd49a9562880a7c0dbc45632778c/src/python/pants/backend/python/framework/stevedore/target_types.py#L13-L30\r\n\r\nTo reproduce, enable the stevedore backend and look at `help` or `help-all`:\r\n\r\n```shell\r\nPANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help stevedore_namespace\r\n```\r\n```\r\n`stevedore_namespace` BUILD file symbol\r\n---------------------------------------\r\n\r\nstr(object='') -> str\r\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\r\n\r\nCreate a new string object from the given object. If encoding or\r\nerrors is specified, then the object must expose a data buffer\r\nthat will be decoded using the given encoding and error handler.\r\nOtherwise, returns the result of object.__str__() (if defined)\r\nor repr(object).\r\nencoding defaults to sys.getdefaultencoding().\r\nerrors defaults to 'strict'.\r\n```\r\n\r\n```shell\r\nPANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help-all | \\\r\n jq .name_to_build_file_info.stevedore_namespace\r\n```\r\n```json\r\n{\r\n \"documentation\": \"str(object='') -> str\\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\\n\\nCreate a new string object from the given object. 
If encoding or\\nerrors is specified, then the object must expose a data buffer\\nthat will be decoded using the given encoding and error handler.\\nOtherwise, returns the result of object.__str__() (if defined)\\nor repr(object).\\nencoding defaults to sys.getdefaultencoding().\\nerrors defaults to 'strict'.\",\r\n \"is_target\": false,\r\n \"name\": \"stevedore_namespace\",\r\n \"signature\": null\r\n}\r\n```\r\n\r\n**Pants version**\r\nSeems to be visible in 2.16 through to the currently latest.\r\n\r\n**OS**\r\nboth\r\n\r\n**Additional info**\r\n\r\n- Will appear in online docs too after https://github.com/pantsbuild/pantsbuild.org/pull/216\r\n- Relevant issues:\r\n - https://github.com/pantsbuild/pants/discussions/18117\r\n - https://github.com/pantsbuild/pants/issues/14832\n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pants.backend.python.target_types import PythonDistribution\nfrom pants.engine.target import StringSequenceField, Targets\nfrom pants.util.strutil import help_text\n\n\nclass StevedoreNamespace(str):\n f\"\"\"Tag a namespace in entry_points as a stevedore namespace.\n\n This is required for the entry_point to be visible to dep inference\n based on the `stevedore_namespaces` field.\n\n For example:\n {PythonDistribution.alias}(\n ...\n entry_points={{\n stevedore_namespace(\"a.b.c\"): {{\n \"plugin_name\": \"some.entry:point\",\n }},\n }},\n )\n \"\"\"\n\n alias = \"stevedore_namespace\"\n\n\n# This is a lot like a SpecialCasedDependencies field, but it doesn't list targets directly.\nclass StevedoreNamespacesField(StringSequenceField):\n alias = \"stevedore_namespaces\"\n help = help_text(\n f\"\"\"\n List the stevedore namespaces required by this target.\n\n Code for all `entry_points` on `{PythonDistribution.alias}` targets with\n these namespaces will be added as dependencies so that they are\n available on PYTHONPATH during tests. Note that this is only a subset\n of the `{PythonDistribution.alias}`s dependencies, so the `entry_points`\n only need to be defined on one `{PythonDistribution.alias}` even if the\n test only needs some of the `entry_points` namespaces on it.\n\n Plus, an `entry_points.txt` file will be generated in the sandbox so that\n each of the `{PythonDistribution.alias}`s appear to be \"installed\". The\n `entry_points.txt` file will only include the namespaces requested on this\n field. Without this, stevedore would not be able to look up plugins in\n the setuptools `entry_points` metadata.\n\n NOTE: Each `{PythonDistribution.alias}` must opt-in to being included in\n this repo-wide inference by tagging the namespaces with\n `{StevedoreNamespace.alias}(\"my.stevedore.extension\")`.\n\n The stevedore namespace format (`my.stevedore.extension`) is similar\n to a Python namespace.\n \"\"\"\n )\n\n\nclass AllStevedoreExtensionTargets(Targets):\n pass\n\n\n@dataclass(frozen=True)\nclass StevedoreNamespacesProviderTargetsRequest:\n stevedore_namespaces: StevedoreNamespacesField\n\n\nclass StevedoreExtensionTargets(Targets):\n pass\n", "path": "src/python/pants/backend/python/framework/stevedore/target_types.py"}]} | 1,838 | 273 |
gh_patches_debug_18816 | rasdani/github-patches | git_diff | encode__uvicorn-646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for --reload to monitor additional file types.
The "reload" process currently only monitors ".py" files in various directories. I have a changes that will pass in a list of additional "reload_suffixes" that the process will monitor. This allows the service to monitor data files in addition to code files.
Any feedback on whether this is useful to others?
</issue>
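A minimal sketch of the proposed hook (the `reload_suffixes` option is the issue's suggestion and does not exist in uvicorn; the attribute name is an assumption):
```python
def iter_files(self):
    # ".py" by default, plus any user-supplied suffixes such as [".yaml", ".json"]
    suffixes = tuple(getattr(self.config, "reload_suffixes", ())) or (".py",)
    for reload_dir in self.config.reload_dirs:
        for subdir, dirs, files in os.walk(reload_dir):
            for file in files:
                if file.endswith(suffixes):  # str.endswith accepts a tuple
                    yield subdir + os.sep + file
```
The accepted change went a different way: it renames `iter_py_files` to `iter_files` and watches every non-hidden file instead of filtering by suffix.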
<code>
[start of uvicorn/supervisors/statreload.py]
1 import logging
2 import os
3 from pathlib import Path
4
5 from uvicorn.supervisors.basereload import BaseReload
6
7 logger = logging.getLogger("uvicorn.error")
8
9
10 class StatReload(BaseReload):
11 def __init__(self, config, target, sockets):
12 super().__init__(config, target, sockets)
13 self.reloader_name = "statreload"
14 self.mtimes = {}
15
16 def should_restart(self):
17 for filename in self.iter_py_files():
18 try:
19 mtime = os.path.getmtime(filename)
20 except OSError: # pragma: nocover
21 continue
22
23 old_time = self.mtimes.get(filename)
24 if old_time is None:
25 self.mtimes[filename] = mtime
26 continue
27 elif mtime > old_time:
28 display_path = os.path.normpath(filename)
29 if Path.cwd() in Path(filename).parents:
30 display_path = os.path.normpath(os.path.relpath(filename))
31 message = "Detected file change in '%s'. Reloading..."
32 logger.warning(message, display_path)
33 return True
34 return False
35
36 def iter_py_files(self):
37 for reload_dir in self.config.reload_dirs:
38 for subdir, dirs, files in os.walk(reload_dir):
39 for file in files:
40 if file.endswith(".py"):
41 yield subdir + os.sep + file
42
[end of uvicorn/supervisors/statreload.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/supervisors/statreload.py b/uvicorn/supervisors/statreload.py
--- a/uvicorn/supervisors/statreload.py
+++ b/uvicorn/supervisors/statreload.py
@@ -14,7 +14,7 @@
self.mtimes = {}
def should_restart(self):
- for filename in self.iter_py_files():
+ for filename in self.iter_files():
try:
mtime = os.path.getmtime(filename)
except OSError: # pragma: nocover
@@ -33,9 +33,9 @@
return True
return False
- def iter_py_files(self):
+ def iter_files(self):
for reload_dir in self.config.reload_dirs:
for subdir, dirs, files in os.walk(reload_dir):
for file in files:
- if file.endswith(".py"):
+ if not file.startswith("."):
yield subdir + os.sep + file
| {"golden_diff": "diff --git a/uvicorn/supervisors/statreload.py b/uvicorn/supervisors/statreload.py\n--- a/uvicorn/supervisors/statreload.py\n+++ b/uvicorn/supervisors/statreload.py\n@@ -14,7 +14,7 @@\n self.mtimes = {}\n \n def should_restart(self):\n- for filename in self.iter_py_files():\n+ for filename in self.iter_files():\n try:\n mtime = os.path.getmtime(filename)\n except OSError: # pragma: nocover\n@@ -33,9 +33,9 @@\n return True\n return False\n \n- def iter_py_files(self):\n+ def iter_files(self):\n for reload_dir in self.config.reload_dirs:\n for subdir, dirs, files in os.walk(reload_dir):\n for file in files:\n- if file.endswith(\".py\"):\n+ if not file.startswith(\".\"):\n yield subdir + os.sep + file\n", "issue": "Add support for --reload to monitor additional file types.\nThe \"reload\" process currently only monitors \".py\" files in various directories. I have a changes that will pass in a list of additional \"reload_suffixes\" that the process will monitor. This allows the service to monitor data files in addition to code files.\r\n\r\nAny feedback on whether this is useful to others?\n\n<!-- POLAR PLEDGE BADGE START -->\n> [!IMPORTANT]\n> - We're using [Polar.sh](https://polar.sh/encode) so you can upvote and help fund this issue.\n> - We receive the funding once the issue is completed & confirmed by you.\n> - Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/encode/uvicorn/issues/528\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/encode/uvicorn/issues/528/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/encode/uvicorn/issues/528/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "import logging\nimport os\nfrom pathlib import Path\n\nfrom uvicorn.supervisors.basereload import BaseReload\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass StatReload(BaseReload):\n def __init__(self, config, target, sockets):\n super().__init__(config, target, sockets)\n self.reloader_name = \"statreload\"\n self.mtimes = {}\n\n def should_restart(self):\n for filename in self.iter_py_files():\n try:\n mtime = os.path.getmtime(filename)\n except OSError: # pragma: nocover\n continue\n\n old_time = self.mtimes.get(filename)\n if old_time is None:\n self.mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n display_path = os.path.normpath(filename)\n if Path.cwd() in Path(filename).parents:\n display_path = os.path.normpath(os.path.relpath(filename))\n message = \"Detected file change in '%s'. Reloading...\"\n logger.warning(message, display_path)\n return True\n return False\n\n def iter_py_files(self):\n for reload_dir in self.config.reload_dirs:\n for subdir, dirs, files in os.walk(reload_dir):\n for file in files:\n if file.endswith(\".py\"):\n yield subdir + os.sep + file\n", "path": "uvicorn/supervisors/statreload.py"}]} | 1,168 | 211 |
gh_patches_debug_31192 | rasdani/github-patches | git_diff | meltano__meltano-6118 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Verify `meltano install` telemetry fires for malformed plugin entries
In #6109 @pnadolny13 noticed that with the following entry no events were fired:
```yaml
- name: tap-something-invalid
variant: meltanolabs
pip_url: git+https://github.com/foo/tap-something-invalid.git
```
I wasn't able to reproduce that at the time and did see two events (started/aborted) come across. We should double-check though; it's entirely possible that my local dev setup had a separate issue that was triggering the `aborted` event.
</issue>
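The property to verify is that telemetry is emitted even when plugin resolution itself raises; the pattern looks roughly like this (a sketch of the guard, not the actual Meltano code — `resolve_requested_plugins` is a placeholder):
```python
try:
    plugins = resolve_requested_plugins(plugin_type, plugin_name)  # may raise on bad entries
except Exception:
    # Fire started/aborted so malformed entries still show up in telemetry.
    tracker.track_command_event(cli_tracking.STARTED)
    tracker.track_command_event(cli_tracking.ABORTED)
    raise
```
The accepted fix applies exactly this shape in `src/meltano/cli/install.py`, widening the guard from `ValueError`/`PluginNotFoundError` to a single broad `except Exception` around all plugin resolution.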
<code>
[start of src/meltano/cli/install.py]
1 """CLI command `meltano install`."""
2 from __future__ import annotations
3
4 import click
5
6 from meltano.core.legacy_tracking import LegacyTracker
7 from meltano.core.plugin import PluginType
8 from meltano.core.plugin.error import PluginNotFoundError
9 from meltano.core.project_plugins_service import ProjectPluginsService
10 from meltano.core.tracking import PluginsTrackingContext, Tracker
11 from meltano.core.tracking import cli as cli_tracking
12 from meltano.core.tracking import cli_context_builder
13
14 from . import cli
15 from .params import pass_project
16 from .utils import CliError, install_plugins
17
18
19 @cli.command(short_help="Install project dependencies.")
20 @click.argument(
21 "plugin_type", type=click.Choice(PluginType.cli_arguments()), required=False
22 )
23 @click.argument("plugin_name", nargs=-1, required=False)
24 @click.option(
25 "--clean",
26 is_flag=True,
27 help="Completely reinstall a plugin rather than simply upgrading if necessary.",
28 )
29 @click.option(
30 "--parallelism",
31 "-p",
32 type=click.INT,
33 default=None,
34 help="Limit the number of plugins to install in parallel. Defaults to the number of cores.",
35 )
36 @pass_project(migrate=True)
37 def install(project, plugin_type, plugin_name, clean, parallelism):
38 """
39 Install all the dependencies of your project based on the meltano.yml file.
40
41 \b\nRead more at https://www.meltano.com/docs/command-line-interface.html#install
42 """
43 tracker = Tracker(project)
44 tracker.add_contexts(
45 cli_context_builder(
46 "install",
47 None,
48 clean=clean,
49 parallelism=parallelism,
50 )
51 )
52
53 plugins_service = ProjectPluginsService(project)
54
55 if plugin_type:
56 try:
57 plugin_type = PluginType.from_cli_argument(plugin_type)
58 except ValueError:
59 # if we fail because plugin_type is not valid we have no plugins to instrument
60 tracker.track_command_event(cli_tracking.STARTED)
61 tracker.track_command_event(cli_tracking.ABORTED)
62 raise
63 plugins = plugins_service.get_plugins_of_type(plugin_type)
64 if plugin_name:
65 plugins = [plugin for plugin in plugins if plugin.name in plugin_name]
66 else:
67 try:
68 plugins = list(plugins_service.plugins())
69 except PluginNotFoundError:
70 tracker.track_command_event(cli_tracking.STARTED)
71 tracker.track_command_event(cli_tracking.ABORTED)
72 raise
73
74 click.echo(f"Installing {len(plugins)} plugins...")
75 tracker.add_contexts(
76 PluginsTrackingContext([(candidate, None) for candidate in plugins])
77 )
78 tracker.track_command_event(cli_tracking.STARTED)
79
80 success = install_plugins(project, plugins, parallelism=parallelism, clean=clean)
81
82 legacy_tracker = LegacyTracker(project)
83 legacy_tracker.track_meltano_install()
84
85 if not success:
86 tracker.track_command_event(cli_tracking.FAILED)
87 raise CliError("Failed to install plugin(s)")
88 tracker.track_command_event(cli_tracking.COMPLETED)
89
[end of src/meltano/cli/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/meltano/cli/install.py b/src/meltano/cli/install.py
--- a/src/meltano/cli/install.py
+++ b/src/meltano/cli/install.py
@@ -5,7 +5,6 @@
from meltano.core.legacy_tracking import LegacyTracker
from meltano.core.plugin import PluginType
-from meltano.core.plugin.error import PluginNotFoundError
from meltano.core.project_plugins_service import ProjectPluginsService
from meltano.core.tracking import PluginsTrackingContext, Tracker
from meltano.core.tracking import cli as cli_tracking
@@ -52,24 +51,18 @@
plugins_service = ProjectPluginsService(project)
- if plugin_type:
- try:
+ try:
+ if plugin_type:
plugin_type = PluginType.from_cli_argument(plugin_type)
- except ValueError:
- # if we fail because plugin_type is not valid we have no plugins to instrument
- tracker.track_command_event(cli_tracking.STARTED)
- tracker.track_command_event(cli_tracking.ABORTED)
- raise
- plugins = plugins_service.get_plugins_of_type(plugin_type)
- if plugin_name:
- plugins = [plugin for plugin in plugins if plugin.name in plugin_name]
- else:
- try:
+ plugins = plugins_service.get_plugins_of_type(plugin_type)
+ if plugin_name:
+ plugins = [plugin for plugin in plugins if plugin.name in plugin_name]
+ else:
plugins = list(plugins_service.plugins())
- except PluginNotFoundError:
- tracker.track_command_event(cli_tracking.STARTED)
- tracker.track_command_event(cli_tracking.ABORTED)
- raise
+ except Exception:
+ tracker.track_command_event(cli_tracking.STARTED)
+ tracker.track_command_event(cli_tracking.ABORTED)
+ raise
click.echo(f"Installing {len(plugins)} plugins...")
tracker.add_contexts(
| {"golden_diff": "diff --git a/src/meltano/cli/install.py b/src/meltano/cli/install.py\n--- a/src/meltano/cli/install.py\n+++ b/src/meltano/cli/install.py\n@@ -5,7 +5,6 @@\n \n from meltano.core.legacy_tracking import LegacyTracker\n from meltano.core.plugin import PluginType\n-from meltano.core.plugin.error import PluginNotFoundError\n from meltano.core.project_plugins_service import ProjectPluginsService\n from meltano.core.tracking import PluginsTrackingContext, Tracker\n from meltano.core.tracking import cli as cli_tracking\n@@ -52,24 +51,18 @@\n \n plugins_service = ProjectPluginsService(project)\n \n- if plugin_type:\n- try:\n+ try:\n+ if plugin_type:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n- except ValueError:\n- # if we fail because plugin_type is not valid we have no plugins to instrument\n- tracker.track_command_event(cli_tracking.STARTED)\n- tracker.track_command_event(cli_tracking.ABORTED)\n- raise\n- plugins = plugins_service.get_plugins_of_type(plugin_type)\n- if plugin_name:\n- plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n- else:\n- try:\n+ plugins = plugins_service.get_plugins_of_type(plugin_type)\n+ if plugin_name:\n+ plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n+ else:\n plugins = list(plugins_service.plugins())\n- except PluginNotFoundError:\n- tracker.track_command_event(cli_tracking.STARTED)\n- tracker.track_command_event(cli_tracking.ABORTED)\n- raise\n+ except Exception:\n+ tracker.track_command_event(cli_tracking.STARTED)\n+ tracker.track_command_event(cli_tracking.ABORTED)\n+ raise\n \n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n", "issue": "Verify `meltano install` telemetry fires for malformed plugin entries\nIn #6109 @pnadolny13 noticed that with the following entry no events where fired:\r\n\r\n```yaml\r\n - name: tap-something-invalid\r\n variant: meltanolabs\r\n pip_url: git+https://github.com/foo/tap-something-invalid.git\r\n```\r\n\r\nI wasn't able to reproduce that at the time and did see two events (started/aborted) come across. We should double check though, its entirely possible that my local dev setup had a seperate issue that was triggering the `aborted` event.\n", "before_files": [{"content": "\"\"\"CLI command `meltano install`.\"\"\"\nfrom __future__ import annotations\n\nimport click\n\nfrom meltano.core.legacy_tracking import LegacyTracker\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.error import PluginNotFoundError\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.tracking import PluginsTrackingContext, Tracker\nfrom meltano.core.tracking import cli as cli_tracking\nfrom meltano.core.tracking import cli_context_builder\n\nfrom . import cli\nfrom .params import pass_project\nfrom .utils import CliError, install_plugins\n\n\[email protected](short_help=\"Install project dependencies.\")\[email protected](\n \"plugin_type\", type=click.Choice(PluginType.cli_arguments()), required=False\n)\[email protected](\"plugin_name\", nargs=-1, required=False)\[email protected](\n \"--clean\",\n is_flag=True,\n help=\"Completely reinstall a plugin rather than simply upgrading if necessary.\",\n)\[email protected](\n \"--parallelism\",\n \"-p\",\n type=click.INT,\n default=None,\n help=\"Limit the number of plugins to install in parallel. 
Defaults to the number of cores.\",\n)\n@pass_project(migrate=True)\ndef install(project, plugin_type, plugin_name, clean, parallelism):\n \"\"\"\n Install all the dependencies of your project based on the meltano.yml file.\n\n \\b\\nRead more at https://www.meltano.com/docs/command-line-interface.html#install\n \"\"\"\n tracker = Tracker(project)\n tracker.add_contexts(\n cli_context_builder(\n \"install\",\n None,\n clean=clean,\n parallelism=parallelism,\n )\n )\n\n plugins_service = ProjectPluginsService(project)\n\n if plugin_type:\n try:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n except ValueError:\n # if we fail because plugin_type is not valid we have no plugins to instrument\n tracker.track_command_event(cli_tracking.STARTED)\n tracker.track_command_event(cli_tracking.ABORTED)\n raise\n plugins = plugins_service.get_plugins_of_type(plugin_type)\n if plugin_name:\n plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n else:\n try:\n plugins = list(plugins_service.plugins())\n except PluginNotFoundError:\n tracker.track_command_event(cli_tracking.STARTED)\n tracker.track_command_event(cli_tracking.ABORTED)\n raise\n\n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n PluginsTrackingContext([(candidate, None) for candidate in plugins])\n )\n tracker.track_command_event(cli_tracking.STARTED)\n\n success = install_plugins(project, plugins, parallelism=parallelism, clean=clean)\n\n legacy_tracker = LegacyTracker(project)\n legacy_tracker.track_meltano_install()\n\n if not success:\n tracker.track_command_event(cli_tracking.FAILED)\n raise CliError(\"Failed to install plugin(s)\")\n tracker.track_command_event(cli_tracking.COMPLETED)\n", "path": "src/meltano/cli/install.py"}]} | 1,466 | 407 |
gh_patches_debug_34990 | rasdani/github-patches | git_diff | streamlink__streamlink-838 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
azubu.tv: remove plugin
http://www.azubu.tv/
`Soon a new future for Azubu and Hitbox, together as a single force in the world of eSports and competitive gaming, will be revealed. We will be launching a new brand, website, and mobile apps. There you will find the best offerings from both Azubu and Hitbox in one new place.`
</issue>
<code>
[start of src/streamlink/plugins/azubutv.py]
1 #!/usr/bin/env python
2 import json
3 import requests
4
5 import re
6
7 from io import BytesIO
8 from time import sleep
9
10 from streamlink.exceptions import PluginError
11
12 from streamlink.plugin import Plugin
13 from streamlink.plugin.api import http, validate
14 from streamlink.stream import HLSStream
15
16
17 HTTP_HEADERS = {
18 "User-Agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
19 "(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36"),
20 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV'
21
22 }
23
24 _url_re = re.compile(r"http(s)?://(\w+\.)?azubu.tv/(?P<domain>\w+)")
25
26 PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
27 stream_video_url = "http://api.azubu.tv/public/channel/{}/player"
28
29
30 class AzubuTV(Plugin):
31 @classmethod
32 def can_handle_url(cls, url):
33 return _url_re.match(url)
34
35 @classmethod
36 def stream_weight(cls, stream):
37 if stream == "source":
38 weight = 1080
39 else:
40 weight, group = Plugin.stream_weight(stream)
41
42 return weight, "azubutv"
43
44 def _parse_params(self, params):
45 rval = {}
46 matches = re.findall(PARAMS_REGEX, params)
47
48 for key, value in matches:
49 try:
50 value = ast.literal_eval(value)
51 except Exception:
52 pass
53
54 rval[key] = value
55
56 return rval
57
58 def _get_stream_url(self, o):
59
60 match = _url_re.match(self.url)
61 channel = match.group('domain')
62
63 channel_info = requests.get(stream_video_url.format(channel))
64 j = json.loads(channel_info.text)
65
66 if j["data"]["is_live"] is not True:
67 return "", False
68 else:
69 is_live = True
70
71 stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}'
72
73 r = requests.get(stream_url.format(j["data"]["stream_video"]["reference_id"]), headers=HTTP_HEADERS)
74 t = json.loads(r.text)
75
76 stream_url = t["sources"][0]["src"]
77 return stream_url, is_live
78
79 def _get_streams(self):
80 hls_url, is_live = self._get_stream_url(self)
81
82 if not is_live:
83 return
84
85 split = self.url.split(" ")
86 params = (" ").join(split[1:])
87 params = self._parse_params(params)
88
89 try:
90 streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params)
91 except IOError as err:
92 raise PluginError(err)
93
94 return streams
95
96
97 __plugin__ = AzubuTV
98
[end of src/streamlink/plugins/azubutv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/azubutv.py b/src/streamlink/plugins/azubutv.py
deleted file mode 100644
--- a/src/streamlink/plugins/azubutv.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-import json
-import requests
-
-import re
-
-from io import BytesIO
-from time import sleep
-
-from streamlink.exceptions import PluginError
-
-from streamlink.plugin import Plugin
-from streamlink.plugin.api import http, validate
-from streamlink.stream import HLSStream
-
-
-HTTP_HEADERS = {
- "User-Agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
- "(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36"),
- 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV'
-
-}
-
-_url_re = re.compile(r"http(s)?://(\w+\.)?azubu.tv/(?P<domain>\w+)")
-
-PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
-stream_video_url = "http://api.azubu.tv/public/channel/{}/player"
-
-
-class AzubuTV(Plugin):
- @classmethod
- def can_handle_url(cls, url):
- return _url_re.match(url)
-
- @classmethod
- def stream_weight(cls, stream):
- if stream == "source":
- weight = 1080
- else:
- weight, group = Plugin.stream_weight(stream)
-
- return weight, "azubutv"
-
- def _parse_params(self, params):
- rval = {}
- matches = re.findall(PARAMS_REGEX, params)
-
- for key, value in matches:
- try:
- value = ast.literal_eval(value)
- except Exception:
- pass
-
- rval[key] = value
-
- return rval
-
- def _get_stream_url(self, o):
-
- match = _url_re.match(self.url)
- channel = match.group('domain')
-
- channel_info = requests.get(stream_video_url.format(channel))
- j = json.loads(channel_info.text)
-
- if j["data"]["is_live"] is not True:
- return "", False
- else:
- is_live = True
-
- stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}'
-
- r = requests.get(stream_url.format(j["data"]["stream_video"]["reference_id"]), headers=HTTP_HEADERS)
- t = json.loads(r.text)
-
- stream_url = t["sources"][0]["src"]
- return stream_url, is_live
-
- def _get_streams(self):
- hls_url, is_live = self._get_stream_url(self)
-
- if not is_live:
- return
-
- split = self.url.split(" ")
- params = (" ").join(split[1:])
- params = self._parse_params(params)
-
- try:
- streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params)
- except IOError as err:
- raise PluginError(err)
-
- return streams
-
-
-__plugin__ = AzubuTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/azubutv.py b/src/streamlink/plugins/azubutv.py\ndeleted file mode 100644\n--- a/src/streamlink/plugins/azubutv.py\n+++ /dev/null\n@@ -1,97 +0,0 @@\n-#!/usr/bin/env python\n-import json\n-import requests\n-\n-import re\n-\n-from io import BytesIO\n-from time import sleep\n-\n-from streamlink.exceptions import PluginError\n-\n-from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http, validate\n-from streamlink.stream import HLSStream\n-\n-\n-HTTP_HEADERS = {\n- \"User-Agent\": (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n- \"(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36\"),\n- 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV'\n-\n-}\n-\n-_url_re = re.compile(r\"http(s)?://(\\w+\\.)?azubu.tv/(?P<domain>\\w+)\")\n-\n-PARAMS_REGEX = r\"(\\w+)=({.+?}|\\[.+?\\]|\\(.+?\\)|'(?:[^'\\\\]|\\\\')*'|\\\"(?:[^\\\"\\\\]|\\\\\\\")*\\\"|\\S+)\"\n-stream_video_url = \"http://api.azubu.tv/public/channel/{}/player\"\n-\n-\n-class AzubuTV(Plugin):\n- @classmethod\n- def can_handle_url(cls, url):\n- return _url_re.match(url)\n-\n- @classmethod\n- def stream_weight(cls, stream):\n- if stream == \"source\":\n- weight = 1080\n- else:\n- weight, group = Plugin.stream_weight(stream)\n-\n- return weight, \"azubutv\"\n-\n- def _parse_params(self, params):\n- rval = {}\n- matches = re.findall(PARAMS_REGEX, params)\n-\n- for key, value in matches:\n- try:\n- value = ast.literal_eval(value)\n- except Exception:\n- pass\n-\n- rval[key] = value\n-\n- return rval\n-\n- def _get_stream_url(self, o):\n-\n- match = _url_re.match(self.url)\n- channel = match.group('domain')\n-\n- channel_info = requests.get(stream_video_url.format(channel))\n- j = json.loads(channel_info.text)\n-\n- if j[\"data\"][\"is_live\"] is not True:\n- return \"\", False\n- else:\n- is_live = True\n-\n- stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}'\n-\n- r = requests.get(stream_url.format(j[\"data\"][\"stream_video\"][\"reference_id\"]), headers=HTTP_HEADERS)\n- t = json.loads(r.text)\n-\n- stream_url = t[\"sources\"][0][\"src\"]\n- return stream_url, is_live\n-\n- def _get_streams(self):\n- hls_url, is_live = self._get_stream_url(self)\n-\n- if not is_live:\n- return\n-\n- split = self.url.split(\" \")\n- params = (\" \").join(split[1:])\n- params = self._parse_params(params)\n-\n- try:\n- streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params)\n- except IOError as err:\n- raise PluginError(err)\n-\n- return streams\n-\n-\n-__plugin__ = AzubuTV\n", "issue": "azubu.tv: remove plugin\nhttp://www.azubu.tv/\r\n`Soon a new future for Azubu and Hitbox, together as a single force in the world of eSports and competitive gaming, will be revealed. We will be launching a new brand, website, and mobile apps. 
There you will find the best offerings from both Azubu and Hitbox in one new place.`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport json\nimport requests\n\nimport re\n\nfrom io import BytesIO\nfrom time import sleep\n\nfrom streamlink.exceptions import PluginError\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import HLSStream\n\n\nHTTP_HEADERS = {\n \"User-Agent\": (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36\"),\n 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV'\n\n}\n\n_url_re = re.compile(r\"http(s)?://(\\w+\\.)?azubu.tv/(?P<domain>\\w+)\")\n\nPARAMS_REGEX = r\"(\\w+)=({.+?}|\\[.+?\\]|\\(.+?\\)|'(?:[^'\\\\]|\\\\')*'|\\\"(?:[^\\\"\\\\]|\\\\\\\")*\\\"|\\S+)\"\nstream_video_url = \"http://api.azubu.tv/public/channel/{}/player\"\n\n\nclass AzubuTV(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n if stream == \"source\":\n weight = 1080\n else:\n weight, group = Plugin.stream_weight(stream)\n\n return weight, \"azubutv\"\n\n def _parse_params(self, params):\n rval = {}\n matches = re.findall(PARAMS_REGEX, params)\n\n for key, value in matches:\n try:\n value = ast.literal_eval(value)\n except Exception:\n pass\n\n rval[key] = value\n\n return rval\n\n def _get_stream_url(self, o):\n\n match = _url_re.match(self.url)\n channel = match.group('domain')\n\n channel_info = requests.get(stream_video_url.format(channel))\n j = json.loads(channel_info.text)\n\n if j[\"data\"][\"is_live\"] is not True:\n return \"\", False\n else:\n is_live = True\n\n stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}'\n\n r = requests.get(stream_url.format(j[\"data\"][\"stream_video\"][\"reference_id\"]), headers=HTTP_HEADERS)\n t = json.loads(r.text)\n\n stream_url = t[\"sources\"][0][\"src\"]\n return stream_url, is_live\n\n def _get_streams(self):\n hls_url, is_live = self._get_stream_url(self)\n\n if not is_live:\n return\n\n split = self.url.split(\" \")\n params = (\" \").join(split[1:])\n params = self._parse_params(params)\n\n try:\n streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params)\n except IOError as err:\n raise PluginError(err)\n\n return streams\n\n\n__plugin__ = AzubuTV\n", "path": "src/streamlink/plugins/azubutv.py"}]} | 1,596 | 899 |
gh_patches_debug_22072 | rasdani/github-patches | git_diff | dask__distributed-3056 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cpu cores estimate based on cgroups
I was reading the announcement for 2.4.0 and got interested in https://github.com/dask/distributed/pull/3039 by @jcrist
That did lead me to this part of the code:
https://github.com/dask/distributed/blob/7d017c467590c758fa4b8cb2b1193205fe5aa7ad/distributed/system.py#L62
Just by looking at it (and although I'm not an expert, I think I know what's going on), I have two observations -- halfway between a question and a bug report.
1. in my Docker environment here (Ubuntu 18.04), the directory name is different:
```
~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us
100000
~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us
220000
```
while the code uses this path:
```
~$ ls /sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us
ls: cannot access '/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us': No such file or directory
```
2. The actual calculation is `int(quota / period)`. I think this should round up, not down. The point is, if you have a fraction like "2.5 cores", it will report 2 cores and keep half a core unused. It would be better to report 3 cores and let cgroups limit usage to the actual 2.5.
</issue>
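Both observations fold into one helper; a sketch (assumes the cgroup v1 layout, and tries both directory spellings since the name varies across distros):
```python
import math

def cgroup_cpu_limit():
    for dirname in ("cpu,cpuacct", "cpuacct,cpu"):
        try:
            with open(f"/sys/fs/cgroup/{dirname}/cpu.cfs_quota_us") as f:
                quota = int(f.read())
            with open(f"/sys/fs/cgroup/{dirname}/cpu.cfs_period_us") as f:
                period = int(f.read())
        except OSError:
            continue
        if quota > 0 and period > 0:
            # Round up: a 2.2-core quota reports 3 and lets the kernel throttle.
            return math.ceil(quota / period)
    return None
```
The accepted fix follows the same lines: it checks both directory names and switches from `int(quota / period)` to `math.ceil(quota / period)`.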
<code>
[start of distributed/system.py]
1 import os
2 import sys
3
4 import psutil
5
6 __all__ = ("memory_limit", "cpu_count", "MEMORY_LIMIT", "CPU_COUNT")
7
8
9 def memory_limit():
10 """Get the memory limit (in bytes) for this system.
11
12 Takes the minimum value from the following locations:
13
14 - Total system host memory
15 - Cgroups limit (if set)
16 - RSS rlimit (if set)
17 """
18 limit = psutil.virtual_memory().total
19
20 # Check cgroups if available
21 if sys.platform == "linux":
22 try:
23 with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as f:
24 cgroups_limit = int(f.read())
25 if cgroups_limit > 0:
26 limit = min(limit, cgroups_limit)
27 except Exception:
28 pass
29
30 # Check rlimit if available
31 try:
32 import resource
33
34 hard_limit = resource.getrlimit(resource.RLIMIT_RSS)[1]
35 if hard_limit > 0:
36 limit = min(limit, hard_limit)
37 except (ImportError, OSError):
38 pass
39
40 return limit
41
42
43 def cpu_count():
44 """Get the available CPU count for this system.
45
46 Takes the minimum value from the following locations:
47
48 - Total system cpus available on the host.
49 - CPU Affinity (if set)
50 - Cgroups limit (if set)
51 """
52 count = os.cpu_count()
53
54 # Check CPU affinity if available
55 try:
56 affinity_count = len(psutil.Process().cpu_affinity())
57 if affinity_count > 0:
58 count = min(count, affinity_count)
59 except Exception:
60 pass
61
62 # Check cgroups if available
63 if sys.platform == "linux":
64 try:
65 with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us") as f:
66 quota = int(f.read())
67 with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us") as f:
68 period = int(f.read())
69 cgroups_count = int(quota / period)
70 if cgroups_count > 0:
71 count = min(count, cgroups_count)
72 except Exception:
73 pass
74
75 return count
76
77
78 MEMORY_LIMIT = memory_limit()
79 CPU_COUNT = cpu_count()
80
[end of distributed/system.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/system.py b/distributed/system.py
--- a/distributed/system.py
+++ b/distributed/system.py
@@ -1,3 +1,4 @@
+import math
import os
import sys
@@ -61,16 +62,20 @@
# Check cgroups if available
if sys.platform == "linux":
- try:
- with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us") as f:
- quota = int(f.read())
- with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us") as f:
- period = int(f.read())
- cgroups_count = int(quota / period)
- if cgroups_count > 0:
- count = min(count, cgroups_count)
- except Exception:
- pass
+ # The directory name isn't standardized across linux distros, check both
+ for dirname in ["cpuacct,cpu", "cpu,cpuacct"]:
+ try:
+ with open("/sys/fs/cgroup/%s/cpu.cfs_quota_us" % dirname) as f:
+ quota = int(f.read())
+ with open("/sys/fs/cgroup/%s/cpu.cfs_period_us" % dirname) as f:
+ period = int(f.read())
+ # We round up on fractional CPUs
+ cgroups_count = math.ceil(quota / period)
+ if cgroups_count > 0:
+ count = min(count, cgroups_count)
+ break
+ except Exception:
+ pass
return count
| {"golden_diff": "diff --git a/distributed/system.py b/distributed/system.py\n--- a/distributed/system.py\n+++ b/distributed/system.py\n@@ -1,3 +1,4 @@\n+import math\n import os\n import sys\n \n@@ -61,16 +62,20 @@\n \n # Check cgroups if available\n if sys.platform == \"linux\":\n- try:\n- with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us\") as f:\n- quota = int(f.read())\n- with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us\") as f:\n- period = int(f.read())\n- cgroups_count = int(quota / period)\n- if cgroups_count > 0:\n- count = min(count, cgroups_count)\n- except Exception:\n- pass\n+ # The directory name isn't standardized across linux distros, check both\n+ for dirname in [\"cpuacct,cpu\", \"cpu,cpuacct\"]:\n+ try:\n+ with open(\"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname) as f:\n+ quota = int(f.read())\n+ with open(\"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname) as f:\n+ period = int(f.read())\n+ # We round up on fractional CPUs\n+ cgroups_count = math.ceil(quota / period)\n+ if cgroups_count > 0:\n+ count = min(count, cgroups_count)\n+ break\n+ except Exception:\n+ pass\n \n return count\n", "issue": "cpu cores estimate based on cgroups\nI was reading the announcement for 2.4.0 and got interested in https://github.com/dask/distributed/pull/3039 by @jcrist \r\n\r\nThat did lead me to this part of the code:\r\nhttps://github.com/dask/distributed/blob/7d017c467590c758fa4b8cb2b1193205fe5aa7ad/distributed/system.py#L62\r\n\r\nJust by looking at it (and although I'm not an expert I think I know what's going on), I have to observations -- half way between a question and a bugreport.\r\n\r\n1. in my docker environment I have here (ubuntu 18.04), the filename is different:\r\n\r\n```\r\n~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us \r\n100000\r\n~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us \r\n220000\r\n```\r\nin the code is that path:\r\n```\r\n~$ ls /sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us\r\nls: cannot access '/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us': No such file or directory\r\n```\r\n\r\n2. The actual calculation is `int(quota / period)`. I think this should round up, not down. The point is, if you have a fraction like \"2.5 cores\", it will report 2 cores and keep half a core unused. 
It would be better if it reports 3 cores and then cgroups limits to the actual 2.5.\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\n\nimport psutil\n\n__all__ = (\"memory_limit\", \"cpu_count\", \"MEMORY_LIMIT\", \"CPU_COUNT\")\n\n\ndef memory_limit():\n \"\"\"Get the memory limit (in bytes) for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system host memory\n - Cgroups limit (if set)\n - RSS rlimit (if set)\n \"\"\"\n limit = psutil.virtual_memory().total\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\") as f:\n cgroups_limit = int(f.read())\n if cgroups_limit > 0:\n limit = min(limit, cgroups_limit)\n except Exception:\n pass\n\n # Check rlimit if available\n try:\n import resource\n\n hard_limit = resource.getrlimit(resource.RLIMIT_RSS)[1]\n if hard_limit > 0:\n limit = min(limit, hard_limit)\n except (ImportError, OSError):\n pass\n\n return limit\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\nMEMORY_LIMIT = memory_limit()\nCPU_COUNT = cpu_count()\n", "path": "distributed/system.py"}]} | 1,525 | 347 |
gh_patches_debug_13457 | rasdani/github-patches | git_diff | modin-project__modin-3382 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON dispatcher data file split correction
Originated from https://github.com/modin-project/modin/pull/2607#discussion_r571989125.
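For context, the linked discussion concerns how `JSONDispatcher._read` splits the data file. A minimal before/after sketch of the call in question — the `is_quoting` flag is derived from the CSV-style `quoting` kwarg, which arguably has no meaning for line-delimited JSON, so dropping it is one plausible (but here assumed) correction:

```python
# Current call in JSONDispatcher._read: forwards a CSV-specific flag.
splits = cls.partitioned_file(
    f,
    num_partitions=NPartitions.get(),
    is_quoting=(args.get("quoting", "") != QUOTE_NONE),
)

# Possible correction: line-delimited JSON can be split on newline
# boundaries alone, so the quoting flag is simply not passed.
splits = cls.partitioned_file(
    f,
    num_partitions=NPartitions.get(),
)
```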
</issue>
<code>
[start of modin/engines/base/io/text/json_dispatcher.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Module houses `JSONDispatcher` class, that is used for reading `.json` files."""
15
16 from modin.engines.base.io.text.text_file_dispatcher import TextFileDispatcher
17 from io import BytesIO
18 import pandas
19 import numpy as np
20 from csv import QUOTE_NONE
21
22 from modin.config import NPartitions
23
24
25 class JSONDispatcher(TextFileDispatcher):
26 """
27 Class handles utils for reading `.json` files.
28
29 Inherits some common for text files util functions from `TextFileDispatcher` class.
30 """
31
32 @classmethod
33 def _read(cls, path_or_buf, **kwargs):
34 """
35 Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.
36
37 Parameters
38 ----------
39 path_or_buf : str, path object or file-like object
40 `path_or_buf` parameter of `read_json` function.
41 **kwargs : dict
42 Parameters of `read_json` function.
43
44 Returns
45 -------
46 BaseQueryCompiler
47 Query compiler with imported data for further processing.
48 """
49 path_or_buf = cls.get_path_or_buffer(path_or_buf)
50 if isinstance(path_or_buf, str):
51 if not cls.file_exists(path_or_buf):
52 return cls.single_worker_read(path_or_buf, **kwargs)
53 path_or_buf = cls.get_path(path_or_buf)
54 elif not cls.pathlib_or_pypath(path_or_buf):
55 return cls.single_worker_read(path_or_buf, **kwargs)
56 if not kwargs.get("lines", False):
57 return cls.single_worker_read(path_or_buf, **kwargs)
58 columns = pandas.read_json(
59 BytesIO(b"" + open(path_or_buf, "rb").readline()), lines=True
60 ).columns
61 kwargs["columns"] = columns
62 empty_pd_df = pandas.DataFrame(columns=columns)
63
64 with cls.file_open(path_or_buf, "rb", kwargs.get("compression", "infer")) as f:
65 partition_ids = []
66 index_ids = []
67 dtypes_ids = []
68
69 column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)
70
71 args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs}
72
73 splits = cls.partitioned_file(
74 f,
75 num_partitions=NPartitions.get(),
76 is_quoting=(args.get("quoting", "") != QUOTE_NONE),
77 )
78 for start, end in splits:
79 args.update({"start": start, "end": end})
80 partition_id = cls.deploy(cls.parse, num_splits + 3, args)
81 partition_ids.append(partition_id[:-3])
82 index_ids.append(partition_id[-3])
83 dtypes_ids.append(partition_id[-2])
84
85 # partition_id[-1] contains the columns for each partition, which will be useful
86 # for implementing when `lines=False`.
87 row_lengths = cls.materialize(index_ids)
88 new_index = pandas.RangeIndex(sum(row_lengths))
89
90 dtypes = cls.get_dtypes(dtypes_ids)
91 partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
92
93 if isinstance(dtypes, pandas.Series):
94 dtypes.index = columns
95 else:
96 dtypes = pandas.Series(dtypes, index=columns)
97
98 new_frame = cls.frame_cls(
99 np.array(partition_ids),
100 new_index,
101 columns,
102 row_lengths,
103 column_widths,
104 dtypes=dtypes,
105 )
106 new_frame.synchronize_labels(axis=0)
107 return cls.query_compiler_cls(new_frame)
108
[end of modin/engines/base/io/text/json_dispatcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/engines/base/io/text/json_dispatcher.py b/modin/engines/base/io/text/json_dispatcher.py
--- a/modin/engines/base/io/text/json_dispatcher.py
+++ b/modin/engines/base/io/text/json_dispatcher.py
@@ -17,7 +17,6 @@
from io import BytesIO
import pandas
import numpy as np
-from csv import QUOTE_NONE
from modin.config import NPartitions
@@ -73,7 +72,6 @@
splits = cls.partitioned_file(
f,
num_partitions=NPartitions.get(),
- is_quoting=(args.get("quoting", "") != QUOTE_NONE),
)
for start, end in splits:
args.update({"start": start, "end": end})
| {"golden_diff": "diff --git a/modin/engines/base/io/text/json_dispatcher.py b/modin/engines/base/io/text/json_dispatcher.py\n--- a/modin/engines/base/io/text/json_dispatcher.py\n+++ b/modin/engines/base/io/text/json_dispatcher.py\n@@ -17,7 +17,6 @@\n from io import BytesIO\n import pandas\n import numpy as np\n-from csv import QUOTE_NONE\n \n from modin.config import NPartitions\n \n@@ -73,7 +72,6 @@\n splits = cls.partitioned_file(\n f,\n num_partitions=NPartitions.get(),\n- is_quoting=(args.get(\"quoting\", \"\") != QUOTE_NONE),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n", "issue": "JSON dispatcher data file split correction\nOriginated from https://github.com/modin-project/modin/pull/2607#discussion_r571989125.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses `JSONDispatcher` class, that is used for reading `.json` files.\"\"\"\n\nfrom modin.engines.base.io.text.text_file_dispatcher import TextFileDispatcher\nfrom io import BytesIO\nimport pandas\nimport numpy as np\nfrom csv import QUOTE_NONE\n\nfrom modin.config import NPartitions\n\n\nclass JSONDispatcher(TextFileDispatcher):\n \"\"\"\n Class handles utils for reading `.json` files.\n\n Inherits some common for text files util functions from `TextFileDispatcher` class.\n \"\"\"\n\n @classmethod\n def _read(cls, path_or_buf, **kwargs):\n \"\"\"\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n \"\"\"\n path_or_buf = cls.get_path_or_buffer(path_or_buf)\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n columns = pandas.read_json(\n BytesIO(b\"\" + open(path_or_buf, \"rb\").readline()), lines=True\n ).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with cls.file_open(path_or_buf, \"rb\", kwargs.get(\"compression\", \"infer\")) as f:\n partition_ids = []\n index_ids = []\n dtypes_ids = []\n\n column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)\n\n args = {\"fname\": path_or_buf, \"num_splits\": num_splits, **kwargs}\n\n splits = cls.partitioned_file(\n f,\n num_partitions=NPartitions.get(),\n 
is_quoting=(args.get(\"quoting\", \"\") != QUOTE_NONE),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n partition_id = cls.deploy(cls.parse, num_splits + 3, args)\n partition_ids.append(partition_id[:-3])\n index_ids.append(partition_id[-3])\n dtypes_ids.append(partition_id[-2])\n\n # partition_id[-1] contains the columns for each partition, which will be useful\n # for implementing when `lines=False`.\n row_lengths = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(row_lengths))\n\n dtypes = cls.get_dtypes(dtypes_ids)\n partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)\n\n if isinstance(dtypes, pandas.Series):\n dtypes.index = columns\n else:\n dtypes = pandas.Series(dtypes, index=columns)\n\n new_frame = cls.frame_cls(\n np.array(partition_ids),\n new_index,\n columns,\n row_lengths,\n column_widths,\n dtypes=dtypes,\n )\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n", "path": "modin/engines/base/io/text/json_dispatcher.py"}]} | 1,711 | 171 |
gh_patches_debug_33856 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[FEATURE]: Patch meta information of `torch.nn.functional.softmax()`
This is a part of issue #2628; we will patch the meta information of `torch.nn.functional.softmax()`.
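As a rough sketch of what such a generator could look like — modeled on the existing `relu_meta_info` pattern in `meta_registry/activation.py`, and assuming the module's existing imports (`meta_register`, `flop_mapping`, `TrainCycleItem`, `MemoryCost`, `activation_size`, `OperationDataType`). The aten op names and cost accounting below are reasonable guesses, not a final design:

```python
@meta_register.register(torch.nn.functional.softmax)
def softmax_meta_info(*args, **kwargs):
    """Sketch of a softmax metainfo generator (cost formulas are assumptions)."""
    input_tensor = args[0].data
    output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data

    # Forward lowers to aten._softmax, backward to aten._softmax_backward_data.
    fwd_compute = flop_mapping[torch.ops.aten._softmax.default]([input_tensor], [output_tensor])
    bwd_compute = flop_mapping[torch.ops.aten._softmax_backward_data.default]([output_tensor], [input_tensor])
    compute_cost = TrainCycleItem(fwd=fwd_compute, bwd=bwd_compute, total=fwd_compute + bwd_compute)

    # Softmax keeps its output alive for the backward pass.
    fwd_memory = MemoryCost(activation=activation_size([input_tensor, output_tensor]), parameter=0)
    bwd_memory = MemoryCost(activation=activation_size(input_tensor), parameter=0)
    total = MemoryCost(activation=fwd_memory.activation + bwd_memory.activation, parameter=0)
    memory_cost = TrainCycleItem(fwd=fwd_memory, bwd=bwd_memory, total=total)

    fwd_in, fwd_buffer = [], [torch.zeros_like(output_tensor, device='meta')]
    fwd_out = [torch.zeros_like(output_tensor, device='meta')]
    return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out
```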
</issue>
<code>
[start of colossalai/auto_parallel/meta_profiler/meta_registry/activation.py]
1 from typing import List, Tuple
2
3 import torch
4
5 from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem
6 from colossalai.fx.profiler.memory_utils import activation_size
7 from colossalai.fx.profiler.opcount import flop_mapping
8
9 from ..registry import meta_register
10
11 __all__ = ["relu_meta_info"]
12
13
14 @meta_register.register(torch.nn.ReLU)
15 def relu_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:
16 """torch.nn.ReLU metainfo generator
17 The aten graph of torch.nn.ReLU is
18 graph():
19 %input_2 : [#users=1] = placeholder[target=placeholder](default=)
20 %relu_default : [#users=2] = call_function[target=torch.ops.aten.relu.default](args = (%input_2,), kwargs = {})
21 %zeros_like_default : [#users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%relu_default,), kwargs = {dtype: None, layout: None, device: None, pin_memory: None})
22 %detach_default : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%relu_default,), kwargs = {})
23 %threshold_backward_default : [#users=1] = call_function[target=torch.ops.aten.threshold_backward.default](args = (%zeros_like_default, %detach_default, None), kwargs = {})
24 %detach_default_1 : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%threshold_backward_default,), kwargs = {})
25 %detach_default_2 : [#users=0] = call_function[target=torch.ops.aten.detach.default](args = (%detach_default_1,), kwargs = {})
26
27 Returns:
28 Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs
29 """
30
31 input_tensor = args[0].data
32 output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data
33 is_inplace = kwargs.get("inplace", False)
34
35 # construct input args for forward
36 fwd_in_args = [input_tensor]
37
38 # construct input args for backward
39 bwd_in_args = [output_tensor]
40
41 # calculate cost
42 # the fwd op with compute cost is relu.default
43 # the bwd op with compute cost is threshold_backward
44
45 # calculate compute cost
46 fwd_compute_cost = flop_mapping[torch.ops.aten.relu.default](fwd_in_args, (output_tensor,))
47 bwd_compute_cost = flop_mapping[torch.ops.aten.threshold_backward.default](bwd_in_args, (input_tensor,))
48 compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)
49
50 # calculate memory cost
51 # NOTE: the inplace ReLU don't have forward memory cost
52 # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward
53 fwd_memory_cost = MemoryCost(
54 activation=activation_size(input_tensor) if is_inplace else activation_size([output_tensor, input_tensor]),
55 parameter=0,
56 temp=0,
57 buffer=0)
58
59 bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor), parameter=0, temp=0, buffer=0)
60
61 # total cost is the sum of forward and backward cost
62 total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,
63 parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter)
64
65 memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)
66
67 # store fwd_in, fwd_buffer, fwd_out
68 # NOTE: It might seems a little bit weird here, we just want to align it with the older version
69 # of MetaInfoProp. In the future we might modify this part to make it clearer.
70 fwd_in = []
71 fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]
72 fwd_out = [torch.zeros_like(output_tensor, device='meta')]
73
74 return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out
75
[end of colossalai/auto_parallel/meta_profiler/meta_registry/activation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py
--- a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py
+++ b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py
@@ -72,3 +72,53 @@
fwd_out = [torch.zeros_like(output_tensor, device='meta')]
return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out
+
+
+@meta_register.register(torch.nn.Softmax)
+@meta_register.register(torch.nn.functional.softmax)
+def softmax_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:
+ """torch.nn.Softmax metainfo generator
+ Returns:
+ Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs
+ """
+ input_tensor = next(
+ filter(
+ lambda x:
+ (x.type == OperationDataType.ARG or x.type == OperationDataType.PARAM) and x.name != 'softmax_dim',
+ args)).data
+ output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data
+ softmax_dim = next(filter(lambda x: x.name == 'softmax_dim', args)).data
+
+ # calculate cost
+
+ # calculate compute cost
+ fwd_compute_cost = flop_mapping[torch.ops.aten._softmax.default]([input_tensor], [output_tensor])
+ bwd_compute_cost = flop_mapping[torch.ops.aten._softmax_backward_data.default]([output_tensor], [input_tensor])
+
+ compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)
+
+ # calculate memory cost
+ # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward
+ fwd_memory_cost = MemoryCost(activation=activation_size([input_tensor, output_tensor]),
+ parameter=0,
+ temp=0,
+ buffer=0)
+ bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor),
+ parameter=0,
+ temp=activation_size(input_tensor),
+ buffer=0)
+
+ # total cost is the sum of forward and backward cost
+ total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,
+ parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter,
+ temp=fwd_memory_cost.temp + bwd_memory_cost.temp,
+ buffer=fwd_memory_cost.buffer + bwd_memory_cost.buffer)
+
+ memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)
+
+ # store fwd_in, fwd_buffer, fwd_out
+ fwd_in = []
+ fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]
+ fwd_out = [torch.zeros_like(output_tensor, device='meta')]
+
+ return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out
| {"golden_diff": "diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py\n--- a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py\n+++ b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py\n@@ -72,3 +72,53 @@\n fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n \n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n+\n+\n+@meta_register.register(torch.nn.Softmax)\n+@meta_register.register(torch.nn.functional.softmax)\n+def softmax_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n+ \"\"\"torch.nn.Softmax metainfo generator\n+ Returns:\n+ Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n+ \"\"\"\n+ input_tensor = next(\n+ filter(\n+ lambda x:\n+ (x.type == OperationDataType.ARG or x.type == OperationDataType.PARAM) and x.name != 'softmax_dim',\n+ args)).data\n+ output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data\n+ softmax_dim = next(filter(lambda x: x.name == 'softmax_dim', args)).data\n+\n+ # calculate cost\n+\n+ # calculate compute cost\n+ fwd_compute_cost = flop_mapping[torch.ops.aten._softmax.default]([input_tensor], [output_tensor])\n+ bwd_compute_cost = flop_mapping[torch.ops.aten._softmax_backward_data.default]([output_tensor], [input_tensor])\n+\n+ compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n+\n+ # calculate memory cost\n+ # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward\n+ fwd_memory_cost = MemoryCost(activation=activation_size([input_tensor, output_tensor]),\n+ parameter=0,\n+ temp=0,\n+ buffer=0)\n+ bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor),\n+ parameter=0,\n+ temp=activation_size(input_tensor),\n+ buffer=0)\n+\n+ # total cost is the sum of forward and backward cost\n+ total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,\n+ parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter,\n+ temp=fwd_memory_cost.temp + bwd_memory_cost.temp,\n+ buffer=fwd_memory_cost.buffer + bwd_memory_cost.buffer)\n+\n+ memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)\n+\n+ # store fwd_in, fwd_buffer, fwd_out\n+ fwd_in = []\n+ fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]\n+ fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n+\n+ return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[FEATURE]: Patch meta information of `torch.nn.functional.softmax()`\nThis is a part of issue #2628, we will patch meta information of `torch.nn.functional.softmax()`\n", "before_files": [{"content": "from typing import List, Tuple\n\nimport torch\n\nfrom colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem\nfrom colossalai.fx.profiler.memory_utils import activation_size\nfrom colossalai.fx.profiler.opcount import flop_mapping\n\nfrom ..registry import meta_register\n\n__all__ = [\"relu_meta_info\"]\n\n\n@meta_register.register(torch.nn.ReLU)\ndef relu_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n \"\"\"torch.nn.ReLU metainfo generator\n The aten graph of torch.nn.ReLU is\n 
graph():\n %input_2 : [#users=1] = placeholder[target=placeholder](default=)\n %relu_default : [#users=2] = call_function[target=torch.ops.aten.relu.default](args = (%input_2,), kwargs = {})\n %zeros_like_default : [#users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%relu_default,), kwargs = {dtype: None, layout: None, device: None, pin_memory: None})\n %detach_default : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%relu_default,), kwargs = {})\n %threshold_backward_default : [#users=1] = call_function[target=torch.ops.aten.threshold_backward.default](args = (%zeros_like_default, %detach_default, None), kwargs = {})\n %detach_default_1 : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%threshold_backward_default,), kwargs = {})\n %detach_default_2 : [#users=0] = call_function[target=torch.ops.aten.detach.default](args = (%detach_default_1,), kwargs = {})\n\n Returns:\n Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n \"\"\"\n\n input_tensor = args[0].data\n output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data\n is_inplace = kwargs.get(\"inplace\", False)\n\n # construct input args for forward\n fwd_in_args = [input_tensor]\n\n # construct input args for backward\n bwd_in_args = [output_tensor]\n\n # calculate cost\n # the fwd op with compute cost is relu.default\n # the bwd op with compute cost is threshold_backward\n\n # calculate compute cost\n fwd_compute_cost = flop_mapping[torch.ops.aten.relu.default](fwd_in_args, (output_tensor,))\n bwd_compute_cost = flop_mapping[torch.ops.aten.threshold_backward.default](bwd_in_args, (input_tensor,))\n compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n\n # calculate memory cost\n # NOTE: the inplace ReLU don't have forward memory cost\n # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward\n fwd_memory_cost = MemoryCost(\n activation=activation_size(input_tensor) if is_inplace else activation_size([output_tensor, input_tensor]),\n parameter=0,\n temp=0,\n buffer=0)\n\n bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor), parameter=0, temp=0, buffer=0)\n\n # total cost is the sum of forward and backward cost\n total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,\n parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter)\n\n memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)\n\n # store fwd_in, fwd_buffer, fwd_out\n # NOTE: It might seems a little bit weird here, we just want to align it with the older version\n # of MetaInfoProp. In the future we might modify this part to make it clearer.\n fwd_in = []\n fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]\n fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n\n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n", "path": "colossalai/auto_parallel/meta_profiler/meta_registry/activation.py"}]} | 1,664 | 693 |
gh_patches_debug_4261 | rasdani/github-patches | git_diff | Nitrate__Nitrate-406 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix call to DurationField.from_db_value() which will be removed in Django 3.0
Lots of warnings like this are output.
```
src/tests/xmlrpc/test_testcaseplan.py::TestCasePlanGet::test_get_with_negative_case_id
/testenv/lib/python3.7/site-packages/django/db/models/sql/compiler.py:995: RemovedInDjango30Warning: Remove the context parameter from DurationField.from_db_value(). Support for it will be removed in Django 3.0.
RemovedInDjango30Warning,
```
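A minimal sketch of the signature change that addresses the warning — accepting `*args`/`**kwargs` keeps the method working both before and after Django drops the `context` parameter (one possible shape, not necessarily the final patch):

```python
class DurationField(IntegerField):
    # Django 2.0+ calls from_db_value(value, expression, connection);
    # older versions also pass `context`. *args/**kwargs absorbs both.
    def from_db_value(self, value, *args, **kwargs):
        if value is None:
            return value
        return datetime.timedelta(seconds=value)
```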
</issue>
<code>
[start of src/tcms/core/models/fields.py]
1 # -*- coding: utf-8 -*-
2 import datetime
3 import six
4
5 from django.core.exceptions import ValidationError
6 from django.db.models.fields import IntegerField
7 from django.db.models.fields import BooleanField
8
9 from tcms.core.forms.fields import DurationField as DurationFormField
10
11 try:
12 from pymysql.constants import FIELD_TYPE
13 except ImportError:
14 # Refer to tcms/__init__.py for details.
15 pass
16 else:
17 from django.db.backends.mysql.base import django_conversions
18 django_conversions.update({FIELD_TYPE.TIME: None})
19
20
21 class DurationField(IntegerField):
22 """Duration field for test run
23
24 Value is stored as number of seconds in database and presents in Nitrate in
25 timedelta type.
26
27 Value should also be able to be serialized to integer as seconds, and then
28 deserialized from value of seconds.
29 """
30
31 def to_python(self, value):
32 if isinstance(value, six.integer_types):
33 return datetime.timedelta(seconds=value)
34 elif isinstance(value, datetime.timedelta):
35 return value
36 else:
37 raise TypeError('Unable to convert %s to timedelta.' % value)
38
39 def from_db_value(self, value, expression, connection, context):
40 if value is None:
41 return value
42 return datetime.timedelta(seconds=value)
43
44 def get_db_prep_value(self, value, connection, prepared=True):
45 """convert datetime.timedelta to seconds.
46
47 1 day equal to 86400 seconds
48 """
49 if isinstance(value, datetime.timedelta):
50 return value.seconds + (86400 * value.days)
51 else:
52 value = super(DurationField, self).get_db_prep_value(
53 value, connection, prepared)
54 return value
55
56 def formfield(self, form_class=DurationFormField, **kwargs):
57 defaults = {'help_text': 'Enter duration in the format: DDHHMM'}
58 defaults.update(kwargs)
59 return form_class(**defaults)
60
61
62 class NitrateBooleanField(BooleanField):
63 """Custom boolean field to allow accepting arbitrary bool values"""
64
65 def to_python(self, value):
66 if value in (1, '1', 'true', 'True', True):
67 return True
68 if value in (0, '0', 'false', 'False', False):
69 return False
70 raise ValidationError(
71 '{} is not recognized as a bool value.'.format(value))
72
[end of src/tcms/core/models/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/tcms/core/models/fields.py b/src/tcms/core/models/fields.py
--- a/src/tcms/core/models/fields.py
+++ b/src/tcms/core/models/fields.py
@@ -36,7 +36,7 @@
else:
raise TypeError('Unable to convert %s to timedelta.' % value)
- def from_db_value(self, value, expression, connection, context):
+ def from_db_value(self, value, *args, **kwargs):
if value is None:
return value
return datetime.timedelta(seconds=value)
| {"golden_diff": "diff --git a/src/tcms/core/models/fields.py b/src/tcms/core/models/fields.py\n--- a/src/tcms/core/models/fields.py\n+++ b/src/tcms/core/models/fields.py\n@@ -36,7 +36,7 @@\n else:\n raise TypeError('Unable to convert %s to timedelta.' % value)\n \n- def from_db_value(self, value, expression, connection, context):\n+ def from_db_value(self, value, *args, **kwargs):\n if value is None:\n return value\n return datetime.timedelta(seconds=value)\n", "issue": "Fix call to DurationField.from_db_value() which will be removed in Django 3.0\nLots of such warning are output.\r\n\r\n```\r\nsrc/tests/xmlrpc/test_testcaseplan.py::TestCasePlanGet::test_get_with_negative_case_id\r\n /testenv/lib/python3.7/site-packages/django/db/models/sql/compiler.py:995: RemovedInDjango30Warning: Remove the context parameter from DurationField.from_db_value(). Support for it will be removed in Django 3.0.\r\n RemovedInDjango30Warning,\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport six\n\nfrom django.core.exceptions import ValidationError\nfrom django.db.models.fields import IntegerField\nfrom django.db.models.fields import BooleanField\n\nfrom tcms.core.forms.fields import DurationField as DurationFormField\n\ntry:\n from pymysql.constants import FIELD_TYPE\nexcept ImportError:\n # Refer to tcms/__init__.py for details.\n pass\nelse:\n from django.db.backends.mysql.base import django_conversions\n django_conversions.update({FIELD_TYPE.TIME: None})\n\n\nclass DurationField(IntegerField):\n \"\"\"Duration field for test run\n\n Value is stored as number of seconds in database and presents in Nitrate in\n timedelta type.\n\n Value should also be able to be serialized to integer as seconds, and then\n deserialized from value of seconds.\n \"\"\"\n\n def to_python(self, value):\n if isinstance(value, six.integer_types):\n return datetime.timedelta(seconds=value)\n elif isinstance(value, datetime.timedelta):\n return value\n else:\n raise TypeError('Unable to convert %s to timedelta.' % value)\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return datetime.timedelta(seconds=value)\n\n def get_db_prep_value(self, value, connection, prepared=True):\n \"\"\"convert datetime.timedelta to seconds.\n\n 1 day equal to 86400 seconds\n \"\"\"\n if isinstance(value, datetime.timedelta):\n return value.seconds + (86400 * value.days)\n else:\n value = super(DurationField, self).get_db_prep_value(\n value, connection, prepared)\n return value\n\n def formfield(self, form_class=DurationFormField, **kwargs):\n defaults = {'help_text': 'Enter duration in the format: DDHHMM'}\n defaults.update(kwargs)\n return form_class(**defaults)\n\n\nclass NitrateBooleanField(BooleanField):\n \"\"\"Custom boolean field to allow accepting arbitrary bool values\"\"\"\n\n def to_python(self, value):\n if value in (1, '1', 'true', 'True', True):\n return True\n if value in (0, '0', 'false', 'False', False):\n return False\n raise ValidationError(\n '{} is not recognized as a bool value.'.format(value))\n", "path": "src/tcms/core/models/fields.py"}]} | 1,287 | 125 |
gh_patches_debug_2642 | rasdani/github-patches | git_diff | sunpy__sunpy-3676 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Removing astropy_helpers section in CONTRIBUTING.rst
### Description
<!-- Provide a general description of the bug. -->
As of PR https://github.com/sunpy/sunpy/pull/3598, sunpy no longer needs `astropy_helpers`, and even it is removed from the package.
I think there should not be a section of Astropy Helpers in contribution guidelines as well.
</issue>
<code>
[start of sunpy/version.py]
1 # This file is for compatibility with astropy_helpers
2 version = 'unknown.dev'
3 try:
4 from importlib_metadata import version as _version, PackageNotFoundError
5 version = _version('sunpy')
6 except ImportError:
7 from pkg_resources import get_distribution, DistributionNotFound
8 try:
9 version = get_distribution("sunpy").version
10 except DistributionNotFound:
11 pass
12 except PackageNotFoundError:
13 pass
14
[end of sunpy/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/version.py b/sunpy/version.py
deleted file mode 100644
--- a/sunpy/version.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# This file is for compatibility with astropy_helpers
-version = 'unknown.dev'
-try:
- from importlib_metadata import version as _version, PackageNotFoundError
- version = _version('sunpy')
-except ImportError:
- from pkg_resources import get_distribution, DistributionNotFound
- try:
- version = get_distribution("sunpy").version
- except DistributionNotFound:
- pass
-except PackageNotFoundError:
- pass
| {"golden_diff": "diff --git a/sunpy/version.py b/sunpy/version.py\ndeleted file mode 100644\n--- a/sunpy/version.py\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-# This file is for compatibility with astropy_helpers\n-version = 'unknown.dev'\n-try:\n- from importlib_metadata import version as _version, PackageNotFoundError\n- version = _version('sunpy')\n-except ImportError:\n- from pkg_resources import get_distribution, DistributionNotFound\n- try:\n- version = get_distribution(\"sunpy\").version\n- except DistributionNotFound:\n- pass\n-except PackageNotFoundError:\n- pass\n", "issue": "Removing astropy_helpers section in CONTRIBUTING.rst\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nAs of PR https://github.com/sunpy/sunpy/pull/3598, sunpy no longer needs `astropy_helpers`, and even it is removed from the package.\r\nI think there should not be a section of Astropy Helpers in contribution guidelines as well.\n", "before_files": [{"content": "# This file is for compatibility with astropy_helpers\nversion = 'unknown.dev'\ntry:\n from importlib_metadata import version as _version, PackageNotFoundError\n version = _version('sunpy')\nexcept ImportError:\n from pkg_resources import get_distribution, DistributionNotFound\n try:\n version = get_distribution(\"sunpy\").version\n except DistributionNotFound:\n pass\nexcept PackageNotFoundError:\n pass\n", "path": "sunpy/version.py"}]} | 852 | 149 |
gh_patches_debug_6051 | rasdani/github-patches | git_diff | coala__coala-3888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aspects/meta.py: Typo
Replace `int` -> `in` in `search for tastes int the sub-aspectclass`
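i.e. the comment in `coalib/bearlib/aspects/meta.py` should read:

```python
# search for tastes in the sub-aspectclass
subtastes = {}
```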
difficulty/newcomer
</issue>
<code>
[start of coalib/bearlib/aspects/meta.py]
1 from inspect import getmembers, signature
2
3 from coala_utils.decorators import generate_repr
4
5 from .base import aspectbase
6 from .docs import Documentation
7 from .taste import Taste
8
9
10 class aspectclass(type):
11 """
12 Metaclass for aspectclasses.
13
14 Root aspectclass is :class:`coalib.bearlib.aspectclasses.Root`.
15 """
16 def __init__(cls, clsname, bases, clsattrs):
17 """
18 Initializes the ``.subaspects`` dict on new aspectclasses.
19 """
20 cls.subaspects = {}
21
22 @property
23 def tastes(cls):
24 """
25 Get a dictionary of all taste names mapped to their
26 :class:`coalib.bearlib.aspectclasses.Taste` instances.
27 """
28 if cls.parent:
29 return dict(cls.parent.tastes, **cls._tastes)
30
31 return dict(cls._tastes)
32
33 def subaspect(cls, subcls):
34 """
35 The sub-aspectclass decorator.
36
37 See :class:`coalib.bearlib.aspectclasses.Root` for description
38 and usage.
39 """
40 aspectname = subcls.__name__
41
42 docs = getattr(subcls, 'docs', None)
43 aspectdocs = Documentation(subcls.__doc__, **{
44 attr: getattr(docs, attr, '') for attr in
45 list(signature(Documentation).parameters.keys())[1:]})
46
47 # search for tastes int the sub-aspectclass
48 subtastes = {}
49 for name, member in getmembers(subcls):
50 if isinstance(member, Taste):
51 # tell the taste its own name
52 member.name = name
53 subtastes[name] = member
54
55 class Sub(subcls, aspectbase, metaclass=aspectclass):
56 __module__ = subcls.__module__
57
58 parent = cls
59
60 docs = aspectdocs
61 _tastes = subtastes
62
63 members = sorted(Sub.tastes)
64 if members:
65 Sub = generate_repr(*members)(Sub)
66
67 Sub.__name__ = aspectname
68 Sub.__qualname__ = '%s.%s' % (cls.__qualname__, aspectname)
69 cls.subaspects[aspectname] = Sub
70 setattr(cls, aspectname, Sub)
71 return Sub
72
73 def __repr__(cls):
74 return '<%s %s>' % (type(cls).__name__, repr(cls.__qualname__))
75
[end of coalib/bearlib/aspects/meta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/bearlib/aspects/meta.py b/coalib/bearlib/aspects/meta.py
--- a/coalib/bearlib/aspects/meta.py
+++ b/coalib/bearlib/aspects/meta.py
@@ -44,7 +44,7 @@
attr: getattr(docs, attr, '') for attr in
list(signature(Documentation).parameters.keys())[1:]})
- # search for tastes int the sub-aspectclass
+ # search for tastes in the sub-aspectclass
subtastes = {}
for name, member in getmembers(subcls):
if isinstance(member, Taste):
| {"golden_diff": "diff --git a/coalib/bearlib/aspects/meta.py b/coalib/bearlib/aspects/meta.py\n--- a/coalib/bearlib/aspects/meta.py\n+++ b/coalib/bearlib/aspects/meta.py\n@@ -44,7 +44,7 @@\n attr: getattr(docs, attr, '') for attr in\n list(signature(Documentation).parameters.keys())[1:]})\n \n- # search for tastes int the sub-aspectclass\n+ # search for tastes in the sub-aspectclass\n subtastes = {}\n for name, member in getmembers(subcls):\n if isinstance(member, Taste):\n", "issue": "aspects/meta.py: Typo error\n<!-- Hello! If you're filing a bug, please include every step so as to help us reproduce it on our machines. If you're unsure about how to file an issue, use the issue template. If you need any help regarding usage of coala, check out the documentation or hit us up on chat. You can ignore or delete this text, it is commented and won't appear when the issue is submitted or previewed.\r\n\r\nChat: https://coala.io/chat\r\nIssue Template: https://github.com/coala/coala/blob/master/CONTRIBUTING.rst#filing-issues\r\nDocumentation: https://docs.coala.io\r\n-->\r\nReplace `int` -> `in` in `search for tastes int the sub-aspectclass`\r\n\r\ndifficulty/newcomer\n", "before_files": [{"content": "from inspect import getmembers, signature\n\nfrom coala_utils.decorators import generate_repr\n\nfrom .base import aspectbase\nfrom .docs import Documentation\nfrom .taste import Taste\n\n\nclass aspectclass(type):\n \"\"\"\n Metaclass for aspectclasses.\n\n Root aspectclass is :class:`coalib.bearlib.aspectclasses.Root`.\n \"\"\"\n def __init__(cls, clsname, bases, clsattrs):\n \"\"\"\n Initializes the ``.subaspects`` dict on new aspectclasses.\n \"\"\"\n cls.subaspects = {}\n\n @property\n def tastes(cls):\n \"\"\"\n Get a dictionary of all taste names mapped to their\n :class:`coalib.bearlib.aspectclasses.Taste` instances.\n \"\"\"\n if cls.parent:\n return dict(cls.parent.tastes, **cls._tastes)\n\n return dict(cls._tastes)\n\n def subaspect(cls, subcls):\n \"\"\"\n The sub-aspectclass decorator.\n\n See :class:`coalib.bearlib.aspectclasses.Root` for description\n and usage.\n \"\"\"\n aspectname = subcls.__name__\n\n docs = getattr(subcls, 'docs', None)\n aspectdocs = Documentation(subcls.__doc__, **{\n attr: getattr(docs, attr, '') for attr in\n list(signature(Documentation).parameters.keys())[1:]})\n\n # search for tastes int the sub-aspectclass\n subtastes = {}\n for name, member in getmembers(subcls):\n if isinstance(member, Taste):\n # tell the taste its own name\n member.name = name\n subtastes[name] = member\n\n class Sub(subcls, aspectbase, metaclass=aspectclass):\n __module__ = subcls.__module__\n\n parent = cls\n\n docs = aspectdocs\n _tastes = subtastes\n\n members = sorted(Sub.tastes)\n if members:\n Sub = generate_repr(*members)(Sub)\n\n Sub.__name__ = aspectname\n Sub.__qualname__ = '%s.%s' % (cls.__qualname__, aspectname)\n cls.subaspects[aspectname] = Sub\n setattr(cls, aspectname, Sub)\n return Sub\n\n def __repr__(cls):\n return '<%s %s>' % (type(cls).__name__, repr(cls.__qualname__))\n", "path": "coalib/bearlib/aspects/meta.py"}]} | 1,356 | 140 |
gh_patches_debug_17530 | rasdani/github-patches | git_diff | biopython__biopython-2513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove use of Bio._py3k (Python 2 / 3 compatibility)
As of Biopython 1.76 (released December 2019), we are dropping Python 2 support and focusing on Python 3.6 or later. This means we no longer need our (internal) Python 2 vs 3 compatibility library ``Bio._py3k`` (which is a bit like the third party library ``six``).
This issue is suitable for, and specifically targets, first-time contributors.
There are lots of cases:
```
$ grep _py3k Bio*/*.py Bio/*/*.py Bio/*/*/*.py
Bio/File.py:from Bio._py3k import basestring
Bio/MarkovModel.py: from Bio._py3k import StringIO
Bio/Seq.py:from Bio._py3k import range
Bio/Seq.py:from Bio._py3k import basestring
...
```
Example One
------------
Taking the first example, ``from Bio._py3k import basestring``, we see that this is defined under Python 3 as an alias of ``str``:
https://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L56
```python
# Lots of our Python 2 code uses isinstance(x, basestring)
# which after 2to3 becomes isinstance(x, str)
basestring = str
unicode = str
```
Therefore the fix for ``Bio/File.py`` is to remove the ``from Bio._py3k import basestring`` line, and update where ``basestring`` was used to instead use ``str``, which in this case means editing one line:
```python
if isinstance(handleish, basestring):
```
with:
```python
if isinstance(handleish, str):
```
Example Two
------------
Taking the second example, ``Bio/MarkovModel.py`` has ``from Bio._py3k import StringIO``, which on Python 3 can simply be replaced with ``from io import StringIO``:
https://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L130
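Concretely, the whole change for this case is a one-line import swap:

```python
# Before (Python 2/3 compatibility shim):
from Bio._py3k import StringIO

# After (Python 3 standard library):
from io import StringIO
```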
Contributing
-----------
Could any newcomer wanting to work on this please first comment on this issue, saying which file(s) they are going to start with (e.g. ``Bio/File.py``, or ``BioSQL/``), to avoid duplication of effort.
(*Update: The consensus was to split the work by function or constant instead, since each one generally requires the same technique/fix everywhere it appears.*)
Then read https://github.com/biopython/biopython/blob/master/CONTRIBUTING.rst and setup ``flake8`` on your machine.
Then make a pull request making the necessary changes so that those files no longer import from ``Bio._py3k``. Once that's done, you could pick some more to work on.
Eventually there will be nothing using ``Bio._py3k`` and that code itself can be removed, and this issue closed.
</issue>
<code>
[start of Bio/_py3k/__init__.py]
1 # Copyright 2010-2018 by Peter Cock. All rights reserved.
2 #
3 # This file is part of the Biopython distribution and governed by your
4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
5 # Please see the LICENSE file that should have been included as part of this
6 # package.
7 """Python 3 compatibility tools (PRIVATE).
8
9 Once we drop support for Python 2, the whole of Bio._py3k will
10 go away.
11 """
12
13 # From the point of view of pep8 and flake8, there are lots of issues with
14 # this file. This line tells flake8 to ignore it for quality assurance:
15 # flake8: noqa
16
17 import sys
18
19 import codecs
20
21
22 def _bytes_bytearray_to_str(s):
23 """If s is bytes or bytearray, convert to a unicode string (PRIVATE)."""
24 if isinstance(s, (bytes, bytearray)):
25 return s.decode()
26 return s
27
28
29 import io
30 import locale
31
32 # Python 3.4 onwards, the standard library wrappers should work:
33 def _binary_to_string_handle(handle):
34 """Treat a binary (bytes) handle like a text (unicode) handle (PRIVATE)."""
35 try:
36 # If this is a network handle from urllib,
37 # the HTTP headers may tell us the encoding.
38 encoding = handle.headers.get_content_charset()
39 except AttributeError:
40 encoding = None
41 if encoding is None:
42 # The W3C recommendation is:
43 # When no explicit charset parameter is provided by the sender,
44 # media subtypes of the "text" type are defined to have a default
45 # charset value of "ISO-8859-1" when received via HTTP.
46 # "ISO-8859-1" is also known as 'latin-1'
47 # See the following for more detail:
48 # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
49 encoding = "latin-1"
50 wrapped = io.TextIOWrapper(io.BufferedReader(handle), encoding=encoding)
51 try:
52 # If wrapping an online handle, this is nice to have:
53 wrapped.url = handle.url
54 except AttributeError:
55 pass
56 return wrapped
57
58
59 # On Python 3 urllib, urllib2, and urlparse were merged:
60 from urllib.request import urlopen, Request, urlparse, urlcleanup
61 from urllib.parse import urlencode, quote
62 from urllib.error import URLError, HTTPError
63
[end of Bio/_py3k/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Bio/_py3k/__init__.py b/Bio/_py3k/__init__.py
--- a/Bio/_py3k/__init__.py
+++ b/Bio/_py3k/__init__.py
@@ -14,20 +14,7 @@
# this file. This line tells flake8 to ignore it for quality assurance:
# flake8: noqa
-import sys
-
-import codecs
-
-
-def _bytes_bytearray_to_str(s):
- """If s is bytes or bytearray, convert to a unicode string (PRIVATE)."""
- if isinstance(s, (bytes, bytearray)):
- return s.decode()
- return s
-
-
import io
-import locale
# Python 3.4 onwards, the standard library wrappers should work:
def _binary_to_string_handle(handle):
@@ -54,9 +41,3 @@
except AttributeError:
pass
return wrapped
-
-
-# On Python 3 urllib, urllib2, and urlparse were merged:
-from urllib.request import urlopen, Request, urlparse, urlcleanup
-from urllib.parse import urlencode, quote
-from urllib.error import URLError, HTTPError
| {"golden_diff": "diff --git a/Bio/_py3k/__init__.py b/Bio/_py3k/__init__.py\n--- a/Bio/_py3k/__init__.py\n+++ b/Bio/_py3k/__init__.py\n@@ -14,20 +14,7 @@\n # this file. This line tells flake8 to ignore it for quality assurance:\n # flake8: noqa\n \n-import sys\n-\n-import codecs\n-\n-\n-def _bytes_bytearray_to_str(s):\n- \"\"\"If s is bytes or bytearray, convert to a unicode string (PRIVATE).\"\"\"\n- if isinstance(s, (bytes, bytearray)):\n- return s.decode()\n- return s\n-\n-\n import io\n-import locale\n \n # Python 3.4 onwards, the standard library wrappers should work:\n def _binary_to_string_handle(handle):\n@@ -54,9 +41,3 @@\n except AttributeError:\n pass\n return wrapped\n-\n-\n-# On Python 3 urllib, urllib2, and urlparse were merged:\n-from urllib.request import urlopen, Request, urlparse, urlcleanup\n-from urllib.parse import urlencode, quote\n-from urllib.error import URLError, HTTPError\n", "issue": "Remove use of Bio._py3k (Python 2 / 3 compatibility)\nAs of Biopython 1.76 (released December 2019), we are dropping Python 2 support and focusing on Python 3.6 or later. This means we no longer need our (internal) Python 2 vs 3 compatibility library ``Bio._py3k`` (which is a bit like the third party library ``six``).\r\n\r\nThis issue is suitable and specifically targeting first time contributors.\r\n\r\nThere are lots of cases:\r\n\r\n```\r\n$ grep _py3k Bio*/*.py Bio/*/*.py Bio/*/*/*.py\r\nBio/File.py:from Bio._py3k import basestring\r\nBio/MarkovModel.py: from Bio._py3k import StringIO\r\nBio/Seq.py:from Bio._py3k import range\r\nBio/Seq.py:from Bio._py3k import basestring\r\n...\r\n```\r\n\r\nExample One\r\n------------\r\n\r\nTaking the first example, ``from Bio._py3k import basestring`` we see that this is defined under Python 3 as an alias of ``str``:\r\n\r\nhttps://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L56\r\n\r\n```python\r\n # Lots of our Python 2 code uses isinstance(x, basestring)\r\n # which after 2to3 becomes isinstance(x, str)\r\n basestring = str\r\n unicode = str\r\n```\r\n\r\nTherefore the fix for ``Bio/File.py`` is to remove the ``from Bio._py3k import basestring`` line, and update where ``basestring`` was used to instead use ``str``, which in this case means editing one line:\r\n\r\n```python\r\nif isinstance(handleish, basestring):\r\n```\r\n\r\nwith:\r\n\r\n```python\r\nif isinstance(handleish, str):\r\n```\r\n\r\nExample Two\r\n------------\r\n\r\nTaking the second example, ``Bio/MarkovModel.py`` has ``from Bio._py3k import StringIO`` which we find on Python 3 can just be replaced with ``from io import StringIO``\r\n\r\nhttps://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L130\r\n\r\nContributing\r\n-----------\r\n\r\nCould any newcomer wanting to work on this first comment on this issue saying which file(s) they are going to start with (e.g. ``Bio/File.py``, or ``BioSQL/``) to avoid duplication of effort.\r\n\r\n(*Update: The consensus was to switch to be function or constant instead, since they generally require the same technique/fix each time*)\r\n\r\nThen read https://github.com/biopython/biopython/blob/master/CONTRIBUTING.rst and setup ``flake8`` on your machine.\r\n\r\nThen make a pull request making the necessary changes so that those files no longer import from ``Bio._py3k``. 
Once that's done, you could pick some more to work on.\r\n\r\nEventually there will be nothing using ``Bio._py3k`` and that code itself can be removed, and this issue closed.\n", "before_files": [{"content": "# Copyright 2010-2018 by Peter Cock. All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Python 3 compatibility tools (PRIVATE).\n\nOnce we drop support for Python 2, the whole of Bio._py3k will\ngo away.\n\"\"\"\n\n# From the point of view of pep8 and flake8, there are lots of issues with\n# this file. This line tells flake8 to ignore it for quality assurance:\n# flake8: noqa\n\nimport sys\n\nimport codecs\n\n\ndef _bytes_bytearray_to_str(s):\n \"\"\"If s is bytes or bytearray, convert to a unicode string (PRIVATE).\"\"\"\n if isinstance(s, (bytes, bytearray)):\n return s.decode()\n return s\n\n\nimport io\nimport locale\n\n# Python 3.4 onwards, the standard library wrappers should work:\ndef _binary_to_string_handle(handle):\n \"\"\"Treat a binary (bytes) handle like a text (unicode) handle (PRIVATE).\"\"\"\n try:\n # If this is a network handle from urllib,\n # the HTTP headers may tell us the encoding.\n encoding = handle.headers.get_content_charset()\n except AttributeError:\n encoding = None\n if encoding is None:\n # The W3C recommendation is:\n # When no explicit charset parameter is provided by the sender,\n # media subtypes of the \"text\" type are defined to have a default\n # charset value of \"ISO-8859-1\" when received via HTTP.\n # \"ISO-8859-1\" is also known as 'latin-1'\n # See the following for more detail:\n # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n encoding = \"latin-1\"\n wrapped = io.TextIOWrapper(io.BufferedReader(handle), encoding=encoding)\n try:\n # If wrapping an online handle, this is nice to have:\n wrapped.url = handle.url\n except AttributeError:\n pass\n return wrapped\n\n\n# On Python 3 urllib, urllib2, and urlparse were merged:\nfrom urllib.request import urlopen, Request, urlparse, urlcleanup\nfrom urllib.parse import urlencode, quote\nfrom urllib.error import URLError, HTTPError\n", "path": "Bio/_py3k/__init__.py"}]} | 1,862 | 253 |
gh_patches_debug_6998 | rasdani/github-patches | git_diff | microsoft__hi-ml-80 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle the "v" in version numbering
Our code in `setup.py` will trigger with new tags. `setuptools.setup` will reject tags that are not release versions but we could do more to make that explicit by checking for the leading "v".
Also when we tag releases as, say, "v0.1.1" the leading "v" is carried through `setuptools.setup` so it becomes part of the pip test download
> Successfully installed pip-21.2.4
> Collecting hi-ml==v0.1.0
> Downloading hi_ml-0.1.0-py3-none-any.whl (25 kB)
(from [here](https://github.com/microsoft/hi-ml/runs/3362573497?check_suite_focus=true#step:6:29))
This works, but it would be cleaner to submit the version number using the public version identifier format mandated in [PEP 440](https://www.python.org/dev/peps/pep-0440/#public-version-identifiers), i.e. without the leading "v"
</issue>
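One way to picture the requested behavior is a minimal, hypothetical sketch (not the project's actual code) that matches only tag refs carrying the leading "v" and strips it before the string reaches `setuptools.setup`, so the published version is a plain PEP 440 identifier:

import os

# Sketch: only refs of the form "refs/tags/v<version>" are treated as releases,
# and the leading "v" is dropped to yield a PEP 440 public version identifier.
GITHUB_REF_TAG_PREFIX = "refs/tags/v"

github_ref = os.getenv("GITHUB_REF", "")
version = ""
if github_ref.startswith(GITHUB_REF_TAG_PREFIX):
    version = github_ref[len(GITHUB_REF_TAG_PREFIX):]  # "refs/tags/v0.1.1" -> "0.1.1"

With this gating, a tag that lacks the "v" prefix simply leaves `version` empty and falls through to the post-release branch of the `setup.py` below.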
<code>
[start of setup.py]
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5
6 """A setuptools based setup module.
7
8 See:
9 https://packaging.python.org/guides/distributing-packages-using-setuptools/
10 """
11
12 import os
13 from math import floor
14 import pathlib
15 from random import random
16 from setuptools import setup, find_packages # type: ignore
17
18
19 here = pathlib.Path(__file__).parent.resolve()
20
21 # Get the long description from the README file
22 long_description = (here / 'README.md').read_text(encoding='utf-8')
23
24 version = ''
25
26 # If running from a GitHub Action then a standard set of environment variables will be
27 # populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).
28 # In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.
29 # If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.
30 # Extract this tag and use it as a version string
31 # See also:
32 # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
33 # https://github.com/pypa/gh-action-pypi-publish
34 GITHUB_REF_TAG_COMMIT = 'refs/tags/'
35
36 github_ref = os.getenv('GITHUB_REF')
37 if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
38 version = github_ref[len(GITHUB_REF_TAG_COMMIT):]
39
40 # Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.
41 # Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be
42 # '0.1.2.post124'. Although this is discouraged, see:
43 # https://www.python.org/dev/peps/pep-0440/#post-releases
44 # it is necessary here to avoid duplicate packages in Test.PyPI.
45 if not version:
46 # TODO: Replace this with more principled package version management for the package wheels built during local test
47 # runs, one which circumvents AzureML's apparent package caching:
48 build_number = os.getenv('GITHUB_RUN_NUMBER')
49 if build_number:
50 version = '0.1.0.post' + build_number
51 else:
52 default_random_version_number = floor(random() * 10_000_000_000)
53 version = f'0.1.0.post{str(default_random_version_number)}'
54
55 (here / 'latest_version.txt').write_text(version)
56
57 # Read run_requirements.txt to get install_requires
58 install_requires = (here / 'run_requirements.txt').read_text().split("\n")
59 # Remove any whitespace and blank lines
60 install_requires = [line.strip() for line in install_requires if line.strip()]
61
62 description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'
63
64 setup(
65 name='hi-ml',
66 version=version,
67 description=description,
68 long_description=long_description,
69 long_description_content_type='text/markdown',
70 url='https://github.com/microsoft/hi-ml',
71 author="Microsoft Research Cambridge InnerEye Team ",
72 author_email="[email protected]",
73 classifiers=[
74 'Development Status :: 3 - Alpha',
75 'Intended Audience :: Science/Research',
76 "Topic :: Scientific/Engineering :: Medical Science Apps.",
77 'License :: OSI Approved :: MIT License',
78 'Programming Language :: Python :: 3.7'
79 ],
80 keywords='InnerEye, HealthIntelligence, AzureML',
81 license='MIT License',
82 packages=find_packages(where="src"),
83 package_dir={"": "src"},
84 include_package_data=True,
85 install_requires=install_requires,
86 scripts=['src/health/azure/run_tensorboard.py']
87 )
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,7 @@
# See also:
# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
# https://github.com/pypa/gh-action-pypi-publish
-GITHUB_REF_TAG_COMMIT = 'refs/tags/'
+GITHUB_REF_TAG_COMMIT = 'refs/tags/v'
github_ref = os.getenv('GITHUB_REF')
if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,7 @@\n # See also:\n # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n # https://github.com/pypa/gh-action-pypi-publish\n-GITHUB_REF_TAG_COMMIT = 'refs/tags/'\n+GITHUB_REF_TAG_COMMIT = 'refs/tags/v'\n \n github_ref = os.getenv('GITHUB_REF')\n if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n", "issue": "Handle the \"v\" in version numbering \nOur code in `setup.py` will trigger with new tags. `setuptools.setup` will reject tags that are not release versions but we could do more to make that explicit by checking for the leading \"v\".\r\n\r\nAlso when we tag releases as, say, \"v0.1.1\" the leading \"v\" is carried through `setuptools.setup` so it becomes part of the pip test download\r\n\r\n> Successfully installed pip-21.2.4\r\n> Collecting hi-ml==v0.1.0\r\n> Downloading hi_ml-0.1.0-py3-none-any.whl (25 kB)\r\n\r\n(from [here](https://github.com/microsoft/hi-ml/runs/3362573497?check_suite_focus=true#step:6:29))\r\n\r\nThis works, but it would be cleaner to submit the version number using the public version identifier format mandated in [PEP 440](https://www.python.org/dev/peps/pep-0440/#public-version-identifiers), i.e. without the leading \"v\"\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. 
Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\ndescription = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n\nsetup(\n name='hi-ml',\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}]} | 1,783 | 126 |
gh_patches_debug_42542 | rasdani/github-patches | git_diff | networkx__networkx-2532 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
maximal_independent_set does not work for DiGraph
Currently [maximal_independent_set](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/algorithms/mis.py#L70) returns the wrong results for a `DiGraph` because it uses the `G.neighbors` method which returns only the successor nodes in a `DiGraph`. I believe the [all_neighbors](https://github.com/networkx/networkx/blob/13b373bf6938c077d1e61adc60a48cb910a75755/networkx/classes/function.py#L540) function should be used instead to make `maximal_independent_set` work correctly for both graph types.
</issue>
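The asymmetry is easy to reproduce. The following sketch (assuming a recent networkx install) shows why `G.neighbors` misses predecessor nodes on a `DiGraph` while `nx.all_neighbors` does not:

import networkx as nx

G = nx.DiGraph([(0, 1)])             # single directed edge 0 -> 1
print(list(G.neighbors(1)))          # [] -- successors only, so node 0 is missed
print(list(nx.all_neighbors(G, 1)))  # [0] -- predecessors and successors combined

Note that switching to `all_neighbors` is only one possible fix; the patch further down instead declares the algorithm unsupported for directed graphs.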
<code>
[start of networkx/algorithms/mis.py]
1 # -*- coding: utf-8 -*-
2 # $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $
3 """
4 Algorithm to find a maximal (not maximum) independent set.
5
6 """
7 # Leo Lopes <[email protected]>
8 # Aric Hagberg <[email protected]>
9 # Dan Schult <[email protected]>
10 # Pieter Swart <[email protected]>
11 # All rights reserved.
12 # BSD license.
13
14 __author__ = "\n".join(["Leo Lopes <[email protected]>",
15 "Loïc Séguin-C. <[email protected]>"])
16
17 __all__ = ['maximal_independent_set']
18
19 import random
20 import networkx as nx
21
22 def maximal_independent_set(G, nodes=None):
23 """Return a random maximal independent set guaranteed to contain
24 a given set of nodes.
25
26 An independent set is a set of nodes such that the subgraph
27 of G induced by these nodes contains no edges. A maximal
28 independent set is an independent set such that it is not possible
29 to add a new node and still get an independent set.
30
31 Parameters
32 ----------
33 G : NetworkX graph
34
35 nodes : list or iterable
36 Nodes that must be part of the independent set. This set of nodes
37 must be independent.
38
39 Returns
40 -------
41 indep_nodes : list
42 List of nodes that are part of a maximal independent set.
43
44 Raises
45 ------
46 NetworkXUnfeasible
47 If the nodes in the provided list are not part of the graph or
48 do not form an independent set, an exception is raised.
49
50 Examples
51 --------
52 >>> G = nx.path_graph(5)
53 >>> nx.maximal_independent_set(G) # doctest: +SKIP
54 [4, 0, 2]
55 >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP
56 [1, 3]
57
58 Notes
59 -----
60 This algorithm does not solve the maximum independent set problem.
61
62 """
63 if not nodes:
64 nodes = set([random.choice(list(G))])
65 else:
66 nodes = set(nodes)
67 if not nodes.issubset(G):
68 raise nx.NetworkXUnfeasible(
69 "%s is not a subset of the nodes of G" % nodes)
70 neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])
71 if set.intersection(neighbors, nodes):
72 raise nx.NetworkXUnfeasible(
73 "%s is not an independent set of G" % nodes)
74 indep_nodes = list(nodes)
75 available_nodes = set(G.nodes()).difference(neighbors.union(nodes))
76 while available_nodes:
77 node = random.choice(list(available_nodes))
78 indep_nodes.append(node)
79 available_nodes.difference_update(list(G.neighbors(node)) + [node])
80 return indep_nodes
81
82
[end of networkx/algorithms/mis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py
--- a/networkx/algorithms/mis.py
+++ b/networkx/algorithms/mis.py
@@ -1,24 +1,26 @@
# -*- coding: utf-8 -*-
# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $
-"""
-Algorithm to find a maximal (not maximum) independent set.
-
-"""
# Leo Lopes <[email protected]>
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
+#
+# Authors: Leo Lopes <[email protected]>
+# Loïc Séguin-C. <[email protected]>
+"""
+Algorithm to find a maximal (not maximum) independent set.
-__author__ = "\n".join(["Leo Lopes <[email protected]>",
- "Loïc Séguin-C. <[email protected]>"])
+"""
+import random
+import networkx as nx
+from networkx.utils import not_implemented_for
__all__ = ['maximal_independent_set']
-import random
-import networkx as nx
+@not_implemented_for('directed')
def maximal_independent_set(G, nodes=None):
"""Return a random maximal independent set guaranteed to contain
a given set of nodes.
@@ -27,10 +29,10 @@
of G induced by these nodes contains no edges. A maximal
independent set is an independent set such that it is not possible
to add a new node and still get an independent set.
-
+
Parameters
----------
- G : NetworkX graph
+ G : NetworkX graph
nodes : list or iterable
Nodes that must be part of the independent set. This set of nodes
@@ -38,7 +40,7 @@
Returns
-------
- indep_nodes : list
+ indep_nodes : list
List of nodes that are part of a maximal independent set.
Raises
@@ -47,6 +49,9 @@
If the nodes in the provided list are not part of the graph or
do not form an independent set, an exception is raised.
+ NetworkXNotImplemented
+ If `G` is directed.
+
Examples
--------
>>> G = nx.path_graph(5)
@@ -54,7 +59,7 @@
[4, 0, 2]
>>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP
[1, 3]
-
+
Notes
-----
This algorithm does not solve the maximum independent set problem.
@@ -67,7 +72,7 @@
if not nodes.issubset(G):
raise nx.NetworkXUnfeasible(
"%s is not a subset of the nodes of G" % nodes)
- neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])
+ neighbors = set.union(*[set(G.adj[v]) for v in nodes])
if set.intersection(neighbors, nodes):
raise nx.NetworkXUnfeasible(
"%s is not an independent set of G" % nodes)
@@ -76,6 +81,5 @@
while available_nodes:
node = random.choice(list(available_nodes))
indep_nodes.append(node)
- available_nodes.difference_update(list(G.neighbors(node)) + [node])
+ available_nodes.difference_update(list(G.adj[node]) + [node])
return indep_nodes
-
| {"golden_diff": "diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py\n--- a/networkx/algorithms/mis.py\n+++ b/networkx/algorithms/mis.py\n@@ -1,24 +1,26 @@\n # -*- coding: utf-8 -*-\n # $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $\n-\"\"\"\n-Algorithm to find a maximal (not maximum) independent set.\n-\n-\"\"\"\n # Leo Lopes <[email protected]>\n # Aric Hagberg <[email protected]>\n # Dan Schult <[email protected]>\n # Pieter Swart <[email protected]>\n # All rights reserved.\n # BSD license.\n+#\n+# Authors: Leo Lopes <[email protected]>\n+# Lo\u00efc S\u00e9guin-C. <[email protected]>\n+\"\"\"\n+Algorithm to find a maximal (not maximum) independent set.\n \n-__author__ = \"\\n\".join([\"Leo Lopes <[email protected]>\",\n- \"Lo\u00efc S\u00e9guin-C. <[email protected]>\"])\n+\"\"\"\n+import random\n+import networkx as nx\n+from networkx.utils import not_implemented_for\n \n __all__ = ['maximal_independent_set']\n \n-import random\n-import networkx as nx\n \n+@not_implemented_for('directed')\n def maximal_independent_set(G, nodes=None):\n \"\"\"Return a random maximal independent set guaranteed to contain\n a given set of nodes.\n@@ -27,10 +29,10 @@\n of G induced by these nodes contains no edges. A maximal\n independent set is an independent set such that it is not possible\n to add a new node and still get an independent set.\n- \n+\n Parameters\n ----------\n- G : NetworkX graph \n+ G : NetworkX graph\n \n nodes : list or iterable\n Nodes that must be part of the independent set. This set of nodes\n@@ -38,7 +40,7 @@\n \n Returns\n -------\n- indep_nodes : list \n+ indep_nodes : list\n List of nodes that are part of a maximal independent set.\n \n Raises\n@@ -47,6 +49,9 @@\n If the nodes in the provided list are not part of the graph or\n do not form an independent set, an exception is raised.\n \n+ NetworkXNotImplemented\n+ If `G` is directed.\n+\n Examples\n --------\n >>> G = nx.path_graph(5)\n@@ -54,7 +59,7 @@\n [4, 0, 2]\n >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP\n [1, 3]\n- \n+\n Notes\n -----\n This algorithm does not solve the maximum independent set problem.\n@@ -67,7 +72,7 @@\n if not nodes.issubset(G):\n raise nx.NetworkXUnfeasible(\n \"%s is not a subset of the nodes of G\" % nodes)\n- neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])\n+ neighbors = set.union(*[set(G.adj[v]) for v in nodes])\n if set.intersection(neighbors, nodes):\n raise nx.NetworkXUnfeasible(\n \"%s is not an independent set of G\" % nodes)\n@@ -76,6 +81,5 @@\n while available_nodes:\n node = random.choice(list(available_nodes))\n indep_nodes.append(node)\n- available_nodes.difference_update(list(G.neighbors(node)) + [node])\n+ available_nodes.difference_update(list(G.adj[node]) + [node])\n return indep_nodes\n-\n", "issue": "maximal_independent_set does not work for DiGraph\nCurrently [maximal_independent_set](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/algorithms/mis.py#L70) returns the wrong results for a `DiGraph` because it uses the `G.neighbors` method which returns only the successor nodes in a `DiGraph`. 
I believe the [all_neighbors](https://github.com/networkx/networkx/blob/13b373bf6938c077d1e61adc60a48cb910a75755/networkx/classes/function.py#L540) function should be used instead to make `maximal_independent_set` work correctly for both graph types.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $\n\"\"\"\nAlgorithm to find a maximal (not maximum) independent set.\n\n\"\"\"\n# Leo Lopes <[email protected]>\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\n__author__ = \"\\n\".join([\"Leo Lopes <[email protected]>\",\n \"Lo\u00efc S\u00e9guin-C. <[email protected]>\"])\n\n__all__ = ['maximal_independent_set']\n\nimport random\nimport networkx as nx\n\ndef maximal_independent_set(G, nodes=None):\n \"\"\"Return a random maximal independent set guaranteed to contain\n a given set of nodes.\n\n An independent set is a set of nodes such that the subgraph\n of G induced by these nodes contains no edges. A maximal\n independent set is an independent set such that it is not possible\n to add a new node and still get an independent set.\n \n Parameters\n ----------\n G : NetworkX graph \n\n nodes : list or iterable\n Nodes that must be part of the independent set. This set of nodes\n must be independent.\n\n Returns\n -------\n indep_nodes : list \n List of nodes that are part of a maximal independent set.\n\n Raises\n ------\n NetworkXUnfeasible\n If the nodes in the provided list are not part of the graph or\n do not form an independent set, an exception is raised.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.maximal_independent_set(G) # doctest: +SKIP\n [4, 0, 2]\n >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP\n [1, 3]\n \n Notes\n -----\n This algorithm does not solve the maximum independent set problem.\n\n \"\"\"\n if not nodes:\n nodes = set([random.choice(list(G))])\n else:\n nodes = set(nodes)\n if not nodes.issubset(G):\n raise nx.NetworkXUnfeasible(\n \"%s is not a subset of the nodes of G\" % nodes)\n neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])\n if set.intersection(neighbors, nodes):\n raise nx.NetworkXUnfeasible(\n \"%s is not an independent set of G\" % nodes)\n indep_nodes = list(nodes)\n available_nodes = set(G.nodes()).difference(neighbors.union(nodes))\n while available_nodes:\n node = random.choice(list(available_nodes))\n indep_nodes.append(node)\n available_nodes.difference_update(list(G.neighbors(node)) + [node])\n return indep_nodes\n\n", "path": "networkx/algorithms/mis.py"}]} | 1,544 | 847 |
gh_patches_debug_27752 | rasdani/github-patches | git_diff | pyload__pyload-52 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FourChanOrg doesn't work
When I try to download a thread (e.g. http://boards.4chan.org/wg/res/5176429), nothing happens; only BasePlugin is used.
</issue>
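As a rough illustration of what a working downloader has to do (a hedged sketch, not the plugin's actual code), the thread page can be fetched and its direct image links collected with a single regular expression; the pattern below reflects 4chan's markup at the time and is an assumption:

import re
import urllib.request

url = "http://boards.4chan.org/wg/res/5176429"
html = urllib.request.urlopen(url).read().decode("utf-8", errors="replace")
# Image sources live on images.4chan.org; dedupe matches and re-add the scheme.
links = sorted("http://" + m
               for m in set(re.findall(r'(images\.4chan\.org/[^/]*/src/[^"<]*)', html)))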
<code>
[start of module/plugins/crypter/FourChanOrg.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5
6 from module.plugins.Crypter import Crypter
7
8 class FourChanOrg(Crypter):
9 __name__ = "FourChanOrg"
10 __type__ = "container"
11 __pattern__ = r"http://(www\.)?(img\.)?(zip\.)?4chan.org/\w+/(res/|imgboard\.html)"
12 __version__ = "0.1"
13 __description__ = """4chan.org Thread Download Plugin"""
14 __author_name__ = ("Spoob")
15 __author_mail__ = ("[email protected]")
16
17 def __init__(self, parent):
18 Crypter.__init__(self, parent)
19 self.parent = parent
20 self.html = None
21
22 def file_exists(self):
23 """ returns True or False
24 """
25 return True
26
27 def proceed(self, url, location):
28 url = self.parent.url
29 html = self.req.load(url)
30 link_pattern = ""
31 temp_links = []
32 if "imagebord.html" in url:
33 link_pattern = '[<a href="(res/\d*\.html)">Reply</a>]'
34 temp_links = re.findall(link_pattern, html)
35 for link in re.findall(link_pattern, html):
36 temp_links.append(link)
37 else:
38 temp_links = re.findall('File : <a href="(http://(?:img\.)?(?:zip\.)?4chan\.org/\w{,3}/src/\d*\..{3})"', html)
39 self.links = temp_links
40
[end of module/plugins/crypter/FourChanOrg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/module/plugins/crypter/FourChanOrg.py b/module/plugins/crypter/FourChanOrg.py
--- a/module/plugins/crypter/FourChanOrg.py
+++ b/module/plugins/crypter/FourChanOrg.py
@@ -6,34 +6,20 @@
from module.plugins.Crypter import Crypter
class FourChanOrg(Crypter):
+ # Based on 4chandl by Roland Beermann
+ # https://gist.github.com/enkore/3492599
__name__ = "FourChanOrg"
__type__ = "container"
- __pattern__ = r"http://(www\.)?(img\.)?(zip\.)?4chan.org/\w+/(res/|imgboard\.html)"
- __version__ = "0.1"
- __description__ = """4chan.org Thread Download Plugin"""
- __author_name__ = ("Spoob")
- __author_mail__ = ("[email protected]")
+ __version__ = "0.3"
+ __pattern__ = r"http://boards\.4chan.org/\w+/res/(\d+)"
+ __description__ = "Downloader for entire 4chan threads"
- def __init__(self, parent):
- Crypter.__init__(self, parent)
- self.parent = parent
- self.html = None
+ def decrypt(self, pyfile):
+ pagehtml = self.load(pyfile.url)
- def file_exists(self):
- """ returns True or False
- """
- return True
+ images = set(re.findall(r'(images\.4chan\.org/[^/]*/src/[^"<]*)', pagehtml))
+ urls = []
+ for image in images:
+ urls.append("http://" + image)
- def proceed(self, url, location):
- url = self.parent.url
- html = self.req.load(url)
- link_pattern = ""
- temp_links = []
- if "imagebord.html" in url:
- link_pattern = '[<a href="(res/\d*\.html)">Reply</a>]'
- temp_links = re.findall(link_pattern, html)
- for link in re.findall(link_pattern, html):
- temp_links.append(link)
- else:
- temp_links = re.findall('File : <a href="(http://(?:img\.)?(?:zip\.)?4chan\.org/\w{,3}/src/\d*\..{3})"', html)
- self.links = temp_links
+ self.core.files.addLinks(urls, self.pyfile.package().id)
| {"golden_diff": "diff --git a/module/plugins/crypter/FourChanOrg.py b/module/plugins/crypter/FourChanOrg.py\n--- a/module/plugins/crypter/FourChanOrg.py\n+++ b/module/plugins/crypter/FourChanOrg.py\n@@ -6,34 +6,20 @@\n from module.plugins.Crypter import Crypter\n \n class FourChanOrg(Crypter):\n+ # Based on 4chandl by Roland Beermann\n+ # https://gist.github.com/enkore/3492599\n __name__ = \"FourChanOrg\"\n __type__ = \"container\"\n- __pattern__ = r\"http://(www\\.)?(img\\.)?(zip\\.)?4chan.org/\\w+/(res/|imgboard\\.html)\"\n- __version__ = \"0.1\"\n- __description__ = \"\"\"4chan.org Thread Download Plugin\"\"\"\n- __author_name__ = (\"Spoob\")\n- __author_mail__ = (\"[email protected]\")\n+ __version__ = \"0.3\"\n+ __pattern__ = r\"http://boards\\.4chan.org/\\w+/res/(\\d+)\"\n+ __description__ = \"Downloader for entire 4chan threads\"\n \n- def __init__(self, parent):\n- Crypter.__init__(self, parent)\n- self.parent = parent\n- self.html = None\n+ def decrypt(self, pyfile):\n+ pagehtml = self.load(pyfile.url)\n \n- def file_exists(self):\n- \"\"\" returns True or False\n- \"\"\"\n- return True\n+ images = set(re.findall(r'(images\\.4chan\\.org/[^/]*/src/[^\"<]*)', pagehtml))\n+ urls = []\n+ for image in images:\n+ urls.append(\"http://\" + image)\n \n- def proceed(self, url, location):\n- url = self.parent.url\n- html = self.req.load(url)\n- link_pattern = \"\"\n- temp_links = []\n- if \"imagebord.html\" in url:\n- link_pattern = '[<a href=\"(res/\\d*\\.html)\">Reply</a>]'\n- temp_links = re.findall(link_pattern, html)\n- for link in re.findall(link_pattern, html):\n- temp_links.append(link)\n- else:\n- temp_links = re.findall('File : <a href=\"(http://(?:img\\.)?(?:zip\\.)?4chan\\.org/\\w{,3}/src/\\d*\\..{3})\"', html)\n- self.links = temp_links\n+ self.core.files.addLinks(urls, self.pyfile.package().id)\n", "issue": "FourChanOrg don't work\nWhen i try to download a thread (e.g. http://boards.4chan.org/wg/res/5176429) nothing happens, only BasePlugin will be used,\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\n\nfrom module.plugins.Crypter import Crypter\n\nclass FourChanOrg(Crypter):\n __name__ = \"FourChanOrg\"\n __type__ = \"container\"\n __pattern__ = r\"http://(www\\.)?(img\\.)?(zip\\.)?4chan.org/\\w+/(res/|imgboard\\.html)\"\n __version__ = \"0.1\"\n __description__ = \"\"\"4chan.org Thread Download Plugin\"\"\"\n __author_name__ = (\"Spoob\")\n __author_mail__ = (\"[email protected]\")\n\n def __init__(self, parent):\n Crypter.__init__(self, parent)\n self.parent = parent\n self.html = None\n\n def file_exists(self):\n \"\"\" returns True or False\n \"\"\"\n return True\n\n def proceed(self, url, location):\n url = self.parent.url\n html = self.req.load(url)\n link_pattern = \"\"\n temp_links = []\n if \"imagebord.html\" in url:\n link_pattern = '[<a href=\"(res/\\d*\\.html)\">Reply</a>]'\n temp_links = re.findall(link_pattern, html)\n for link in re.findall(link_pattern, html):\n temp_links.append(link)\n else:\n temp_links = re.findall('File : <a href=\"(http://(?:img\\.)?(?:zip\\.)?4chan\\.org/\\w{,3}/src/\\d*\\..{3})\"', html)\n self.links = temp_links\n", "path": "module/plugins/crypter/FourChanOrg.py"}]} | 1,007 | 583 |
gh_patches_debug_11575 | rasdani/github-patches | git_diff | mindsdb__lightwood-968 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve runtime of `LightGBMArray` for long-horizon forecasting
Two main approaches:
- Disable optuna hyperparam search past some threshold.
- Opt for a recursive strategy instead of a direct one (i.e. the same regressor trained for all timesteps vs. one regressor per step).
</issue>
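The first approach amounts to a one-line gate. The sketch below keeps the optuna search only when the horizon is short; the cutoff value is an assumption, not a number taken from the project:

HORIZON_OPTUNA_CUTOFF = 10  # assumed threshold; tune for your workload

def should_search_hyperparams(horizon: int) -> bool:
    # Past the cutoff, per-timestep optuna searches dominate training time,
    # so fall back to LightGBM's defaults instead.
    return horizon < HORIZON_OPTUNA_CUTOFF

print(should_search_hyperparams(6))   # True  -> run the optuna search
print(should_search_hyperparams(24))  # False -> train with defaults

The patch further down wires this same condition (`tss.horizon < 10`) into the per-timestep submodel constructor.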
<code>
[start of lightwood/mixer/lightgbm_array.py]
1 from copy import deepcopy
2 from typing import Dict, List, Union
3
4 import numpy as np
5 import pandas as pd
6
7 from lightwood.helpers.log import log
8 from lightwood.mixer.helpers.ts import _apply_stl_on_training, _stl_transform, _stl_inverse_transform
9 from lightwood.encoder.base import BaseEncoder
10 from lightwood.mixer.base import BaseMixer
11 from lightwood.mixer.lightgbm import LightGBM
12 from lightwood.api.types import PredictionArguments, TimeseriesSettings
13 from lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs
14
15
16 class LightGBMArray(BaseMixer):
17 """LightGBM-based model, intended for usage in time series tasks."""
18 models: List[LightGBM]
19 submodel_stop_after: float
20 target: str
21 supports_proba: bool
22 ts_analysis: Dict
23 tss: TimeseriesSettings
24
25 def __init__(
26 self,
27 stop_after: float,
28 target: str,
29 dtype_dict: Dict[str, str],
30 input_cols: List[str],
31 fit_on_dev: bool,
32 target_encoder: BaseEncoder,
33 ts_analysis: Dict[str, object],
34 use_stl: bool,
35 tss: TimeseriesSettings
36 ):
37 super().__init__(stop_after)
38 self.tss = tss
39 self.horizon = tss.horizon
40 self.submodel_stop_after = stop_after / self.horizon
41 self.target = target
42 self.offset_pred_cols = [f'{self.target}_timestep_{i}' for i in range(1, self.horizon)]
43 if set(input_cols) != {self.tss.order_by}:
44 input_cols.remove(self.tss.order_by)
45 for col in self.offset_pred_cols:
46 dtype_dict[col] = dtype_dict[self.target]
47 self.models = [LightGBM(self.submodel_stop_after,
48 target_col,
49 dtype_dict,
50 input_cols,
51 False, # fit_on_dev,
52 True, # use_optuna
53 target_encoder)
54 for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]
55 self.ts_analysis = ts_analysis
56 self.supports_proba = False
57 self.use_stl = False
58 self.stable = True
59
60 def _fit(self, train_data: EncodedDs, dev_data: EncodedDs, submodel_method='fit') -> None:
61 original_train = deepcopy(train_data.data_frame)
62 original_dev = deepcopy(dev_data.data_frame)
63
64 if self.use_stl and self.ts_analysis.get('stl_transforms', False):
65 _apply_stl_on_training(train_data, dev_data, self.target, self.tss, self.ts_analysis)
66
67 for timestep in range(self.horizon):
68 getattr(self.models[timestep], submodel_method)(train_data, dev_data)
69
70 # restore dfs
71 train_data.data_frame = original_train
72 dev_data.data_frame = original_dev
73
74 def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:
75 log.info('Started fitting LGBM models for array prediction')
76 self._fit(train_data, dev_data, submodel_method='fit')
77
78 def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:
79 log.info('Updating array of LGBM models...')
80 self._fit(train_data, dev_data, submodel_method='partial_fit')
81
82 def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],
83 args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:
84 if args.predict_proba:
85 log.warning('This model does not output probability estimates')
86
87 original_df = deepcopy(ds.data_frame)
88 length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)
89 ydf = pd.DataFrame(0, # zero-filled
90 index=np.arange(length),
91 columns=[f'prediction_{i}' for i in range(self.horizon)])
92
93 if self.use_stl and self.ts_analysis.get('stl_transforms', False):
94 ds.data_frame = _stl_transform(ydf, ds, self.target, self.tss, self.ts_analysis)
95
96 for timestep in range(self.horizon):
97 ydf[f'prediction_{timestep}'] = self.models[timestep](ds, args)['prediction'].values
98
99 if self.use_stl and self.ts_analysis.get('stl_transforms', False):
100 ydf = _stl_inverse_transform(ydf, ds, self.tss, self.ts_analysis)
101
102 if self.models[0].positive_domain:
103 ydf = ydf.clip(0)
104
105 ydf['prediction'] = ydf.values.tolist()
106 ds.data_frame = original_df
107 return ydf[['prediction']]
108
[end of lightwood/mixer/lightgbm_array.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightwood/mixer/lightgbm_array.py b/lightwood/mixer/lightgbm_array.py
--- a/lightwood/mixer/lightgbm_array.py
+++ b/lightwood/mixer/lightgbm_array.py
@@ -49,7 +49,7 @@
dtype_dict,
input_cols,
False, # fit_on_dev,
- True, # use_optuna
+ True if tss.horizon < 10 else False, # use_optuna
target_encoder)
for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]
self.ts_analysis = ts_analysis
| {"golden_diff": "diff --git a/lightwood/mixer/lightgbm_array.py b/lightwood/mixer/lightgbm_array.py\n--- a/lightwood/mixer/lightgbm_array.py\n+++ b/lightwood/mixer/lightgbm_array.py\n@@ -49,7 +49,7 @@\n dtype_dict,\n input_cols,\n False, # fit_on_dev,\n- True, # use_optuna\n+ True if tss.horizon < 10 else False, # use_optuna\n target_encoder)\n for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]\n self.ts_analysis = ts_analysis\n", "issue": "Improve runtime of `LightGBMArray` for long-horizon forecasting\nTwo main approaches:\r\n\r\n- Disable optuna hyperparam search past some threshold.\r\n- Opt for a recursive strategy instead of direct (i.e. same regressor trained for all timesteps v/s one for each step).\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import Dict, List, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom lightwood.helpers.log import log\nfrom lightwood.mixer.helpers.ts import _apply_stl_on_training, _stl_transform, _stl_inverse_transform\nfrom lightwood.encoder.base import BaseEncoder\nfrom lightwood.mixer.base import BaseMixer\nfrom lightwood.mixer.lightgbm import LightGBM\nfrom lightwood.api.types import PredictionArguments, TimeseriesSettings\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass LightGBMArray(BaseMixer):\n \"\"\"LightGBM-based model, intended for usage in time series tasks.\"\"\"\n models: List[LightGBM]\n submodel_stop_after: float\n target: str\n supports_proba: bool\n ts_analysis: Dict\n tss: TimeseriesSettings\n\n def __init__(\n self,\n stop_after: float,\n target: str,\n dtype_dict: Dict[str, str],\n input_cols: List[str],\n fit_on_dev: bool,\n target_encoder: BaseEncoder,\n ts_analysis: Dict[str, object],\n use_stl: bool,\n tss: TimeseriesSettings\n ):\n super().__init__(stop_after)\n self.tss = tss\n self.horizon = tss.horizon\n self.submodel_stop_after = stop_after / self.horizon\n self.target = target\n self.offset_pred_cols = [f'{self.target}_timestep_{i}' for i in range(1, self.horizon)]\n if set(input_cols) != {self.tss.order_by}:\n input_cols.remove(self.tss.order_by)\n for col in self.offset_pred_cols:\n dtype_dict[col] = dtype_dict[self.target]\n self.models = [LightGBM(self.submodel_stop_after,\n target_col,\n dtype_dict,\n input_cols,\n False, # fit_on_dev,\n True, # use_optuna\n target_encoder)\n for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]\n self.ts_analysis = ts_analysis\n self.supports_proba = False\n self.use_stl = False\n self.stable = True\n\n def _fit(self, train_data: EncodedDs, dev_data: EncodedDs, submodel_method='fit') -> None:\n original_train = deepcopy(train_data.data_frame)\n original_dev = deepcopy(dev_data.data_frame)\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n _apply_stl_on_training(train_data, dev_data, self.target, self.tss, self.ts_analysis)\n\n for timestep in range(self.horizon):\n getattr(self.models[timestep], submodel_method)(train_data, dev_data)\n\n # restore dfs\n train_data.data_frame = original_train\n dev_data.data_frame = original_dev\n\n def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Started fitting LGBM models for array prediction')\n self._fit(train_data, dev_data, submodel_method='fit')\n\n def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Updating array of LGBM models...')\n self._fit(train_data, dev_data, submodel_method='partial_fit')\n\n def __call__(self, ds: 
Union[EncodedDs, ConcatedEncodedDs],\n args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:\n if args.predict_proba:\n log.warning('This model does not output probability estimates')\n\n original_df = deepcopy(ds.data_frame)\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=[f'prediction_{i}' for i in range(self.horizon)])\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n ds.data_frame = _stl_transform(ydf, ds, self.target, self.tss, self.ts_analysis)\n\n for timestep in range(self.horizon):\n ydf[f'prediction_{timestep}'] = self.models[timestep](ds, args)['prediction'].values\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n ydf = _stl_inverse_transform(ydf, ds, self.tss, self.ts_analysis)\n\n if self.models[0].positive_domain:\n ydf = ydf.clip(0)\n\n ydf['prediction'] = ydf.values.tolist()\n ds.data_frame = original_df\n return ydf[['prediction']]\n", "path": "lightwood/mixer/lightgbm_array.py"}]} | 1,857 | 144 |
gh_patches_debug_56612 | rasdani/github-patches | git_diff | spacetelescope__jwql-677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Bokeh to latest version
I remember there was some reason that we were holding off on upgrading Bokeh from 1.3.4. However, Bokeh is now up to version 2.2.1 I believe. We should look into upgrading the version used for JWQL in order to take advantage of new features and so that we minimize the number of plots created under 1.3.4 which may need to be tweaked to work under the new version.
For example, one difference I ran into today was that the keyword "legend", which is used in 1.3.4 to denote the string printed in the legend for a particular element, has been changed to "legend_label" in version 2.2.1.
</issue>
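That rename is representative of the breakage the upgrade brings. A minimal before/after, assuming Bokeh 2.x is installed, looks like this:

from bokeh.plotting import figure

p = figure(title="example")

# Bokeh 1.3.4 accepted the plain keyword:
#   p.line([0, 1, 2], [3, 5, 4], legend="flux")
# Bokeh 2.x renames it:
p.line([0, 1, 2], [3, 5, 4], legend_label="flux")

Plots written against 1.3.4 therefore need a pass over their glyph calls as part of the upgrade.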
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.24.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']
13
14 REQUIRES = [
15 'asdf>=2.3.3',
16 'astropy>=3.2.1',
17 'astroquery>=0.3.9',
18 'authlib',
19 'bokeh>=1.0,<1.4',
20 'codecov',
21 'crds',
22 'cryptography',
23 'django',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'jwst',
31 'matplotlib',
32 'nodejs',
33 'numpy',
34 'numpydoc',
35 'pandas',
36 'psycopg2',
37 'pysiaf',
38 'pytest',
39 'pytest-cov',
40 'scipy',
41 'sphinx',
42 'sqlalchemy',
43 'stsci_rtd_theme',
44 'twine',
45 'wtforms'
46 ]
47
48 setup(
49 name='jwql',
50 version=VERSION,
51 description=DESCRIPTION,
52 url='https://github.com/spacetelescope/jwql.git',
53 author=AUTHORS,
54 author_email='[email protected]',
55 license='BSD',
56 keywords=['astronomy', 'python'],
57 classifiers=['Programming Language :: Python'],
58 packages=find_packages(),
59 install_requires=REQUIRES,
60 dependency_links=DEPENDENCY_LINKS,
61 include_package_data=True,
62 include_dirs=[np.get_include()],
63 )
64
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
- 'bokeh>=1.0,<1.4',
+ 'bokeh',
'codecov',
'crds',
'cryptography',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n- 'bokeh>=1.0,<1.4',\n+ 'bokeh',\n 'codecov',\n 'crds',\n 'cryptography',\n", "issue": "Update Bokeh to latest version\nI remember there was some reason that we were holding off on upgrading Bokeh from 1.3.4. However, Bokeh is now up to version 2.2.1 I believe. We should look into upgrading the version used for JWQL in order to take advantage of new features and so that we minimize the number of plots created under 1.3.4 which may need to be tweaked to work under the new version.\r\n\r\nFor example, one difference I ran into today was that the keyword \"legend\", which is used in 1.3.4 to denote the string printed in the legend for a particular element, has been changed to \"legend_label\" in version 2.2.1.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,255 | 95 |
gh_patches_debug_26224 | rasdani/github-patches | git_diff | mirumee__ariadne-24 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`add_resolve_functions_to_schema` should support Scalars parse_value and parse_literal
Currently Ariadne's scalar support is limited to serializing Python types to JSON before returning them to the client, but we also want to support using custom scalars for input.
Our `add_resolve_functions_to_scalar` utility could support the following use-cases:
Code below results in one-way only scalar:
- `type_defs = {'Scalar': {'serialize': callable}}`
And this code results in two-way scalar:
- `type_defs = {'Scalar': {'serialize': callable, 'parse_value': callable, 'parse_literal': callable}}` - explicit syntax for two-directional scalar.
</issue>
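Concretely, a two-way scalar resolver dict might look like the following sketch; the `Date` scalar and the function names here are illustrative assumptions, not part of Ariadne's API:

from datetime import date

def serialize_date(value):
    return value.isoformat()              # Python date -> JSON string

def parse_date_value(value):
    return date.fromisoformat(value)      # value from query variables -> date

def parse_date_literal(ast):
    return date.fromisoformat(ast.value)  # inline AST string literal -> date

resolvers = {
    "Date": {
        "serialize": serialize_date,      # alone, this is the one-way case
        "parse_value": parse_date_value,  # adding these two makes it two-way
        "parse_literal": parse_date_literal,
    }
}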
<code>
[start of ariadne/resolvers.py]
1 from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema
2 from graphql.execution.base import ResolveInfo
3
4
5 def resolve_parent_field(parent, name: str):
6 if isinstance(parent, dict):
7 return parent.get(name)
8 return getattr(parent, name, None)
9
10
11 def default_resolver(parent, info: ResolveInfo):
12 return resolve_parent_field(parent, info.field_name)
13
14
15 def resolve_to(name: str):
16 def resolver(parent, *_):
17 return resolve_parent_field(parent, name)
18
19 return resolver
20
21
22 def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):
23 for type_name, type_object in schema.get_type_map().items():
24 if isinstance(type_object, GraphQLObjectType):
25 add_resolve_functions_to_object(type_name, type_object, resolvers)
26 if isinstance(type_object, GraphQLScalarType):
27 add_resolve_function_to_scalar(type_name, type_object, resolvers)
28
29
30 def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):
31 type_resolver = resolvers.get(name, {})
32 for field_name, field_object in obj.fields.items():
33 field_resolver = type_resolver.get(field_name, default_resolver)
34 field_object.resolver = field_resolver
35
36
37 def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):
38 serializer = resolvers.get(name, obj.serialize)
39 obj.serialize = serializer
40
[end of ariadne/resolvers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py
--- a/ariadne/resolvers.py
+++ b/ariadne/resolvers.py
@@ -24,16 +24,24 @@
if isinstance(type_object, GraphQLObjectType):
add_resolve_functions_to_object(type_name, type_object, resolvers)
if isinstance(type_object, GraphQLScalarType):
- add_resolve_function_to_scalar(type_name, type_object, resolvers)
+ add_resolve_functions_to_scalar(type_name, type_object, resolvers)
def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):
- type_resolver = resolvers.get(name, {})
+ type_resolvers = resolvers.get(name, {})
for field_name, field_object in obj.fields.items():
- field_resolver = type_resolver.get(field_name, default_resolver)
+ field_resolver = type_resolvers.get(field_name, default_resolver)
field_object.resolver = field_resolver
-def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):
- serializer = resolvers.get(name, obj.serialize)
- obj.serialize = serializer
+def add_resolve_functions_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):
+ scalar_resolvers = resolvers.get(name, {})
+
+ serialize = scalar_resolvers.get("serialize", obj.serialize)
+ obj.serialize = serialize
+
+ parse_literal = scalar_resolvers.get("parse_literal", obj.parse_literal)
+ obj.parse_literal = parse_literal
+
+ parse_value = scalar_resolvers.get("parse_value", obj.parse_value)
+ obj.parse_value = parse_value
| {"golden_diff": "diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -24,16 +24,24 @@\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n- add_resolve_function_to_scalar(type_name, type_object, resolvers)\n+ add_resolve_functions_to_scalar(type_name, type_object, resolvers)\n \n \n def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n- type_resolver = resolvers.get(name, {})\n+ type_resolvers = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n- field_resolver = type_resolver.get(field_name, default_resolver)\n+ field_resolver = type_resolvers.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n \n \n-def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n- serializer = resolvers.get(name, obj.serialize)\n- obj.serialize = serializer\n+def add_resolve_functions_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n+ scalar_resolvers = resolvers.get(name, {})\n+\n+ serialize = scalar_resolvers.get(\"serialize\", obj.serialize)\n+ obj.serialize = serialize\n+\n+ parse_literal = scalar_resolvers.get(\"parse_literal\", obj.parse_literal)\n+ obj.parse_literal = parse_literal\n+\n+ parse_value = scalar_resolvers.get(\"parse_value\", obj.parse_value)\n+ obj.parse_value = parse_value\n", "issue": "`add_resolve_functions_to_schema` should support Scalars parse_value and parse_literal\nCurrently Ariadne Scalar support is limited to serializing python types to JSON before returning them to client, but we also want to support using custom scalars for input.\r\n\r\nOur `add_resolve_functions_to_scalar` utility could support following use-cases:\r\n\r\nCode below results in one-way only scalar:\r\n\r\n- `type_defs = {'Scalar': {'serialize': callable}}`\r\n\r\nAnd this code results in two-way scalar:\r\n\r\n- `type_defs = {'Scalar': {'serialize': callable, 'parse_value': callable, 'parse_literal': callable}}` - explicit syntax for two-directional scalar.\r\n\n", "before_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n return parent.get(name)\n return getattr(parent, name, None)\n\n\ndef default_resolver(parent, info: ResolveInfo):\n return resolve_parent_field(parent, info.field_name)\n\n\ndef resolve_to(name: str):\n def resolver(parent, *_):\n return resolve_parent_field(parent, name)\n\n return resolver\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_function_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolver = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolver.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n serializer = resolvers.get(name, obj.serialize)\n 
obj.serialize = serializer\n", "path": "ariadne/resolvers.py"}]} | 1,040 | 364 |
gh_patches_debug_36546 | rasdani/github-patches | git_diff | weecology__retriever-698 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error downloading eBird_observation data.
The URL doesn't work anymore.
</issue>
<code>
[start of try_install_all.py]
1 """Attempt to install all datasets into all database management systems
2
3 This module, when run, attempts to install datasets from all Retriever scripts
4 in the /scripts folder (except for those listed in IGNORE), for each engine in
5 ENGINE_LIST() from __init__.py. In other words, it runs trys to install using
6 all possible combinations of database platform and script and checks to
7 see if there are any errors. It does not check the values in the database.
8
9 """
10 from __future__ import print_function
11 from __future__ import absolute_import
12 import os
13 import sys
14 from imp import reload
15 from retriever.lib.tools import choose_engine
16 from retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST
17
18 reload(sys)
19 if hasattr(sys, 'setdefaultencoding'):
20 sys.setdefaultencoding('latin-1')
21
22 MODULE_LIST = MODULE_LIST()
23 ENGINE_LIST = ENGINE_LIST()
24 if len(sys.argv) > 1:
25 ENGINE_LIST = [
26 e for e in ENGINE_LIST
27 if e.name in sys.argv[1:] or
28 e.abbreviation in sys.argv[1:]
29 ]
30 SCRIPT_LIST = SCRIPT_LIST()
31 TEST_ENGINES = {}
32 IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree", "eBirdOD"]
33 IGNORE = [dataset.lower() for dataset in IGNORE]
34
35 for engine in ENGINE_LIST:
36 opts = {}
37 print("** %s **" % engine.name)
38 opts["engine"] = engine.abbreviation
39
40 try:
41 TEST_ENGINES[engine.abbreviation] = choose_engine(opts)
42 TEST_ENGINES[engine.abbreviation].get_input()
43 TEST_ENGINES[engine.abbreviation].get_cursor()
44 except:
45 TEST_ENGINES[engine.abbreviation] = None
46 pass
47
48 errors = []
49 for module in MODULE_LIST:
50 for (key, value) in list(TEST_ENGINES.items()):
51 if module.SCRIPT.shortname.lower() not in IGNORE:
52 if value != None:
53 print("==>", module.__name__, value.name, "..........", module.SCRIPT.shortname)
54 try:
55 module.SCRIPT.download(value)
56 except KeyboardInterrupt:
57 pass
58 except Exception as e:
59 print("ERROR.")
60 errors.append((key, module.__name__, e))
61 else:
62 errors.append((key, "No connection detected......" + module.SCRIPT.shortname))
63
64 print('')
65 if errors:
66 print("Engine, Dataset, Error")
67 for error in errors:
68 print(error)
69 else:
70 print("All tests passed")
71
[end of try_install_all.py]
[start of scripts/eBird_observation.py]
1 #retriever
2 """Data Retriever script for the eBird Observation Dataset"""
3
4 from retriever.lib.templates import Script
5 from retriever.lib.models import Table
6
7
8 class main(Script):
9 def __init__(self, **kwargs):
10 Script.__init__(self, **kwargs)
11 self.name = "eBird Observation Dataset"
12 self.shortname = "eBirdOD"
13 self.ref = "http://ebird.org/content/ebird/news/gbif/"
14 self.urls = {"main": "https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1"}
15 self.retriever_minimum_version = '2.0.dev'
16 self.version = '1.0'
17 self.description = "A collection of observations from birders through portals managed and maintained by local partner conservation organizations"
18
19 def download(self, engine=None, debug=False):
20 data_file_name = "eBird_Observation_Dataset_2013.csv"
21 Script.download(self, engine, debug)
22 self.engine.download_files_from_archive(self.urls["main"],
23 [data_file_name],
24 filetype='gz')
25 table = (Table("main", delimiter=","))
26 table.columns=[("BASISOFRECORD",("char", )),
27 ("INSTITUTIONCODE",("char", )),
28 ("COLLECTIONCODE",("char", )),
29 ("CATALOGNUMBER",("char", )),
30 ("OCCURRENCEID",("char", )),
31 ("RECORDEDBY",("char", )),
32 ("YEAR",("int", )),
33 ("MONTH",("int", )),
34 ("DAY",("int", )),
35 ("COUNTRY",("char", )),
36 ("STATEPROVINCE",("char", )),
37 ("COUNTY",("char", )),
38 ("DECIMALLATITUDE",("double", )),
39 ("DECIMALLONGITUDE",("double", )),
40 ("LOCALITY",("char", )),
41 ("KINGDOM",("char", )),
42 ("PHYLUM",("char", )),
43 ("CLASS",("char", )),
44 ("SPORDER",("char", )),
45 ("FAMILY",("char", )),
46 ("GENUS",("char", )),
47 ("SPECIFICEPITHET",("char", )),
48 ("SCIENTIFICNAME",("char", )),
49 ("VERNACULARNAME",("char", )),
50 ("INDIVIDUALCOUNT",("int", ))]
51 engine.table = table
52 engine.create_table()
53 engine.insert_data_from_file(engine.format_filename(data_file_name))
54 return engine
55
56 SCRIPT = main()
57
[end of scripts/eBird_observation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/eBird_observation.py b/scripts/eBird_observation.py
deleted file mode 100644
--- a/scripts/eBird_observation.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#retriever
-"""Data Retriever script for the eBird Observation Dataset"""
-
-from retriever.lib.templates import Script
-from retriever.lib.models import Table
-
-
-class main(Script):
- def __init__(self, **kwargs):
- Script.__init__(self, **kwargs)
- self.name = "eBird Observation Dataset"
- self.shortname = "eBirdOD"
- self.ref = "http://ebird.org/content/ebird/news/gbif/"
- self.urls = {"main": "https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1"}
- self.retriever_minimum_version = '2.0.dev'
- self.version = '1.0'
- self.description = "A collection of observations from birders through portals managed and maintained by local partner conservation organizations"
-
- def download(self, engine=None, debug=False):
- data_file_name = "eBird_Observation_Dataset_2013.csv"
- Script.download(self, engine, debug)
- self.engine.download_files_from_archive(self.urls["main"],
- [data_file_name],
- filetype='gz')
- table = (Table("main", delimiter=","))
- table.columns=[("BASISOFRECORD",("char", )),
- ("INSTITUTIONCODE",("char", )),
- ("COLLECTIONCODE",("char", )),
- ("CATALOGNUMBER",("char", )),
- ("OCCURRENCEID",("char", )),
- ("RECORDEDBY",("char", )),
- ("YEAR",("int", )),
- ("MONTH",("int", )),
- ("DAY",("int", )),
- ("COUNTRY",("char", )),
- ("STATEPROVINCE",("char", )),
- ("COUNTY",("char", )),
- ("DECIMALLATITUDE",("double", )),
- ("DECIMALLONGITUDE",("double", )),
- ("LOCALITY",("char", )),
- ("KINGDOM",("char", )),
- ("PHYLUM",("char", )),
- ("CLASS",("char", )),
- ("SPORDER",("char", )),
- ("FAMILY",("char", )),
- ("GENUS",("char", )),
- ("SPECIFICEPITHET",("char", )),
- ("SCIENTIFICNAME",("char", )),
- ("VERNACULARNAME",("char", )),
- ("INDIVIDUALCOUNT",("int", ))]
- engine.table = table
- engine.create_table()
- engine.insert_data_from_file(engine.format_filename(data_file_name))
- return engine
-
-SCRIPT = main()
diff --git a/try_install_all.py b/try_install_all.py
--- a/try_install_all.py
+++ b/try_install_all.py
@@ -29,7 +29,7 @@
]
SCRIPT_LIST = SCRIPT_LIST()
TEST_ENGINES = {}
-IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree", "eBirdOD"]
+IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree"]
IGNORE = [dataset.lower() for dataset in IGNORE]
for engine in ENGINE_LIST:
| {"golden_diff": "diff --git a/scripts/eBird_observation.py b/scripts/eBird_observation.py\ndeleted file mode 100644\n--- a/scripts/eBird_observation.py\n+++ /dev/null\n@@ -1,56 +0,0 @@\n-#retriever\n-\"\"\"Data Retriever script for the eBird Observation Dataset\"\"\"\n-\n-from retriever.lib.templates import Script\n-from retriever.lib.models import Table\n-\n-\n-class main(Script):\n- def __init__(self, **kwargs):\n- Script.__init__(self, **kwargs)\n- self.name = \"eBird Observation Dataset\"\n- self.shortname = \"eBirdOD\"\n- self.ref = \"http://ebird.org/content/ebird/news/gbif/\"\n- self.urls = {\"main\": \"https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1\"}\n- self.retriever_minimum_version = '2.0.dev'\n- self.version = '1.0'\n- self.description = \"A collection of observations from birders through portals managed and maintained by local partner conservation organizations\"\n-\n- def download(self, engine=None, debug=False):\n- data_file_name = \"eBird_Observation_Dataset_2013.csv\"\n- Script.download(self, engine, debug)\n- self.engine.download_files_from_archive(self.urls[\"main\"],\n- [data_file_name],\n- filetype='gz')\n- table = (Table(\"main\", delimiter=\",\"))\n- table.columns=[(\"BASISOFRECORD\",(\"char\", )),\n- (\"INSTITUTIONCODE\",(\"char\", )),\n- (\"COLLECTIONCODE\",(\"char\", )),\n- (\"CATALOGNUMBER\",(\"char\", )),\n- (\"OCCURRENCEID\",(\"char\", )),\n- (\"RECORDEDBY\",(\"char\", )),\n- (\"YEAR\",(\"int\", )),\n- (\"MONTH\",(\"int\", )),\n- (\"DAY\",(\"int\", )),\n- (\"COUNTRY\",(\"char\", )),\n- (\"STATEPROVINCE\",(\"char\", )),\n- (\"COUNTY\",(\"char\", )),\n- (\"DECIMALLATITUDE\",(\"double\", )),\n- (\"DECIMALLONGITUDE\",(\"double\", )),\n- (\"LOCALITY\",(\"char\", )),\n- (\"KINGDOM\",(\"char\", )),\n- (\"PHYLUM\",(\"char\", )),\n- (\"CLASS\",(\"char\", )),\n- (\"SPORDER\",(\"char\", )),\n- (\"FAMILY\",(\"char\", )),\n- (\"GENUS\",(\"char\", )),\n- (\"SPECIFICEPITHET\",(\"char\", )),\n- (\"SCIENTIFICNAME\",(\"char\", )),\n- (\"VERNACULARNAME\",(\"char\", )),\n- (\"INDIVIDUALCOUNT\",(\"int\", ))]\n- engine.table = table\n- engine.create_table()\n- engine.insert_data_from_file(engine.format_filename(data_file_name))\n- return engine\n-\n-SCRIPT = main()\ndiff --git a/try_install_all.py b/try_install_all.py\n--- a/try_install_all.py\n+++ b/try_install_all.py\n@@ -29,7 +29,7 @@\n ]\n SCRIPT_LIST = SCRIPT_LIST()\n TEST_ENGINES = {}\n-IGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\", \"eBirdOD\"]\n+IGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\"]\n IGNORE = [dataset.lower() for dataset in IGNORE]\n \n for engine in ENGINE_LIST:\n", "issue": "Error downloading eBird_observation data.\nThe URL doesn't work anymore.\n\n", "before_files": [{"content": "\"\"\"Attempt to install all datasets into all database management systems\n\nThis module, when run, attempts to install datasets from all Retriever scripts\nin the /scripts folder (except for those listed in IGNORE), for each engine in\nENGINE_LIST() from __init__.py. In other words, it runs trys to install using\nall possible combinations of database platform and script and checks to\nsee if there are any errors. 
It does not check the values in the database.\n\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport os\nimport sys\nfrom imp import reload\nfrom retriever.lib.tools import choose_engine\nfrom retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST\n\nreload(sys)\nif hasattr(sys, 'setdefaultencoding'):\n sys.setdefaultencoding('latin-1')\n\nMODULE_LIST = MODULE_LIST()\nENGINE_LIST = ENGINE_LIST()\nif len(sys.argv) > 1:\n ENGINE_LIST = [\n e for e in ENGINE_LIST\n if e.name in sys.argv[1:] or\n e.abbreviation in sys.argv[1:]\n ]\nSCRIPT_LIST = SCRIPT_LIST()\nTEST_ENGINES = {}\nIGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\", \"eBirdOD\"]\nIGNORE = [dataset.lower() for dataset in IGNORE]\n\nfor engine in ENGINE_LIST:\n opts = {}\n print(\"** %s **\" % engine.name)\n opts[\"engine\"] = engine.abbreviation\n\n try:\n TEST_ENGINES[engine.abbreviation] = choose_engine(opts)\n TEST_ENGINES[engine.abbreviation].get_input()\n TEST_ENGINES[engine.abbreviation].get_cursor()\n except:\n TEST_ENGINES[engine.abbreviation] = None\n pass\n\nerrors = []\nfor module in MODULE_LIST:\n for (key, value) in list(TEST_ENGINES.items()):\n if module.SCRIPT.shortname.lower() not in IGNORE:\n if value != None:\n print(\"==>\", module.__name__, value.name, \"..........\", module.SCRIPT.shortname)\n try:\n module.SCRIPT.download(value)\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(\"ERROR.\")\n errors.append((key, module.__name__, e))\n else:\n errors.append((key, \"No connection detected......\" + module.SCRIPT.shortname))\n\nprint('')\nif errors:\n print(\"Engine, Dataset, Error\")\n for error in errors:\n print(error)\nelse:\n print(\"All tests passed\")\n", "path": "try_install_all.py"}, {"content": "#retriever\n\"\"\"Data Retriever script for the eBird Observation Dataset\"\"\"\n\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.name = \"eBird Observation Dataset\"\n self.shortname = \"eBirdOD\"\n self.ref = \"http://ebird.org/content/ebird/news/gbif/\"\n self.urls = {\"main\": \"https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1\"}\n self.retriever_minimum_version = '2.0.dev'\n self.version = '1.0'\n self.description = \"A collection of observations from birders through portals managed and maintained by local partner conservation organizations\"\n\n def download(self, engine=None, debug=False):\n data_file_name = \"eBird_Observation_Dataset_2013.csv\"\n Script.download(self, engine, debug)\n self.engine.download_files_from_archive(self.urls[\"main\"],\n [data_file_name],\n filetype='gz')\n table = (Table(\"main\", delimiter=\",\"))\n table.columns=[(\"BASISOFRECORD\",(\"char\", )),\n (\"INSTITUTIONCODE\",(\"char\", )),\n (\"COLLECTIONCODE\",(\"char\", )),\n (\"CATALOGNUMBER\",(\"char\", )),\n (\"OCCURRENCEID\",(\"char\", )),\n (\"RECORDEDBY\",(\"char\", )),\n (\"YEAR\",(\"int\", )),\n (\"MONTH\",(\"int\", )),\n (\"DAY\",(\"int\", )),\n (\"COUNTRY\",(\"char\", )),\n (\"STATEPROVINCE\",(\"char\", )),\n (\"COUNTY\",(\"char\", )),\n (\"DECIMALLATITUDE\",(\"double\", )),\n (\"DECIMALLONGITUDE\",(\"double\", )),\n (\"LOCALITY\",(\"char\", )),\n (\"KINGDOM\",(\"char\", )),\n (\"PHYLUM\",(\"char\", )),\n (\"CLASS\",(\"char\", )),\n (\"SPORDER\",(\"char\", )),\n (\"FAMILY\",(\"char\", )),\n (\"GENUS\",(\"char\", )),\n 
(\"SPECIFICEPITHET\",(\"char\", )),\n (\"SCIENTIFICNAME\",(\"char\", )),\n (\"VERNACULARNAME\",(\"char\", )),\n (\"INDIVIDUALCOUNT\",(\"int\", ))]\n engine.table = table\n engine.create_table()\n engine.insert_data_from_file(engine.format_filename(data_file_name))\n return engine\n\nSCRIPT = main()\n", "path": "scripts/eBird_observation.py"}]} | 1,920 | 821 |
gh_patches_debug_29119 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3786 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BigQuery: expose public helper method to convert a list of schema fields to/from a list of schema dictionaries (JSON)
I've received some feedback asking how to take a schema from the client library and save it to a JSON file. One reason to do this is the [`bq` command-line tool](https://cloud.google.com/bigquery/bq-command-line-tool#creatingtablefromfile) accepts a schema file, formatted like
```
[
{"name": "name", "type": "string", "mode": "required"},
{"name": "gender", "type": "string", "mode": "nullable"},
{"name": "count", "type": "integer", "mode": "required"}
]
```
Note: this format is the same as the API representation.
It would be great if our client libraries could read/write in this format.
</issue>
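For concreteness, a minimal sketch of the round trip the issue asks for, assuming per-field `to_api_repr()` / `from_api_repr()` helpers of the kind the golden diff below introduces; the `schema.json` file name is illustrative, not part of the library:

```python
import json

from google.cloud.bigquery.schema import SchemaField

schema = [
    SchemaField("name", "STRING", mode="REQUIRED"),
    SchemaField("gender", "STRING", mode="NULLABLE"),
    SchemaField("count", "INTEGER", mode="REQUIRED"),
]

# Write a bq-compatible schema file: one dict per field, matching the
# API representation shown in the issue.
with open("schema.json", "w") as fh:
    json.dump([field.to_api_repr() for field in schema], fh, indent=2)

# Read the file back into SchemaField objects.
with open("schema.json") as fh:
    restored = [SchemaField.from_api_repr(entry) for entry in json.load(fh)]
```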
<code>
[start of bigquery/google/cloud/bigquery/schema.py]
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Schemas for BigQuery tables / queries."""
16
17
18 class SchemaField(object):
19 """Describe a single field within a table schema.
20
21 :type name: str
22 :param name: the name of the field.
23
24 :type field_type: str
25 :param field_type: the type of the field (one of 'STRING', 'INTEGER',
26 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD').
27
28 :type mode: str
29 :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED',
30 or 'REPEATED').
31
32 :type description: str
33 :param description: optional description for the field.
34
35 :type fields: tuple of :class:`SchemaField`
36 :param fields: subfields (requires ``field_type`` of 'RECORD').
37 """
38 def __init__(self, name, field_type, mode='NULLABLE',
39 description=None, fields=()):
40 self._name = name
41 self._field_type = field_type
42 self._mode = mode
43 self._description = description
44 self._fields = tuple(fields)
45
46 @property
47 def name(self):
48 """str: The name of the field."""
49 return self._name
50
51 @property
52 def field_type(self):
53 """str: The type of the field.
54
55 Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN',
56 'TIMESTAMP' or 'RECORD'.
57 """
58 return self._field_type
59
60 @property
61 def mode(self):
62 """str: The mode of the field.
63
64 Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'.
65 """
66 return self._mode
67
68 @property
69 def is_nullable(self):
70 """Check whether 'mode' is 'nullable'."""
71 return self._mode == 'NULLABLE'
72
73 @property
74 def description(self):
75 """Optional[str]: Description for the field."""
76 return self._description
77
78 @property
79 def fields(self):
80 """tuple: Subfields contained in this field.
81
82 If ``field_type`` is not 'RECORD', this property must be
83 empty / unset.
84 """
85 return self._fields
86
87 def _key(self):
88 """A tuple key that unique-ly describes this field.
89
90 Used to compute this instance's hashcode and evaluate equality.
91
92 Returns:
93 tuple: The contents of this :class:`SchemaField`.
94 """
95 return (
96 self._name,
97 self._field_type.lower(),
98 self._mode,
99 self._description,
100 self._fields,
101 )
102
103 def __eq__(self, other):
104 if not isinstance(other, SchemaField):
105 return NotImplemented
106 return self._key() == other._key()
107
108 def __ne__(self, other):
109 return not self == other
110
111 def __hash__(self):
112 return hash(self._key())
113
114 def __repr__(self):
115 return 'SchemaField{}'.format(self._key())
116
[end of bigquery/google/cloud/bigquery/schema.py]
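One non-obvious design choice in the listing above: `_key()` lowercases `field_type` before it feeds `__eq__` and `__hash__`, so type case never affects equality, while `name` and `mode` still compare case-sensitively. A quick sketch:

```python
from google.cloud.bigquery.schema import SchemaField

a = SchemaField("count", "INTEGER", mode="REQUIRED")
b = SchemaField("count", "integer", mode="REQUIRED")

assert a == b              # field_type is normalized inside _key()
assert hash(a) == hash(b)  # hash agrees with __eq__, so both can key a dict
```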
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bigquery/google/cloud/bigquery/schema.py b/bigquery/google/cloud/bigquery/schema.py
--- a/bigquery/google/cloud/bigquery/schema.py
+++ b/bigquery/google/cloud/bigquery/schema.py
@@ -43,6 +43,25 @@
self._description = description
self._fields = tuple(fields)
+ @classmethod
+ def from_api_repr(cls, api_repr):
+ """Return a ``SchemaField`` object deserialized from a dictionary.
+
+ Args:
+ api_repr (Mapping[str, str]): The serialized representation
+ of the SchemaField, such as what is output by
+ :meth:`to_api_repr`.
+
+ Returns:
+ SchemaField: The ``SchemaField`` object.
+ """
+ return cls(
+ field_type=api_repr['type'].upper(),
+ fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],
+ mode=api_repr['mode'].upper(),
+ name=api_repr['name'],
+ )
+
@property
def name(self):
"""str: The name of the field."""
@@ -84,6 +103,28 @@
"""
return self._fields
+ def to_api_repr(self):
+ """Return a dictionary representing this schema field.
+
+ Returns:
+ dict: A dictionary representing the SchemaField in a serialized
+ form.
+ """
+ # Put together the basic representation. See http://bit.ly/2hOAT5u.
+ answer = {
+ 'mode': self.mode.lower(),
+ 'name': self.name,
+ 'type': self.field_type.lower(),
+ }
+
+ # If this is a RECORD type, then sub-fields are also included,
+ # add this to the serialized representation.
+ if self.field_type.upper() == 'RECORD':
+ answer['fields'] = [f.to_api_repr() for f in self.fields]
+
+ # Done; return the serialized dictionary.
+ return answer
+
def _key(self):
"""A tuple key that unique-ly describes this field.
| {"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/schema.py b/bigquery/google/cloud/bigquery/schema.py\n--- a/bigquery/google/cloud/bigquery/schema.py\n+++ b/bigquery/google/cloud/bigquery/schema.py\n@@ -43,6 +43,25 @@\n self._description = description\n self._fields = tuple(fields)\n \n+ @classmethod\n+ def from_api_repr(cls, api_repr):\n+ \"\"\"Return a ``SchemaField`` object deserialized from a dictionary.\n+\n+ Args:\n+ api_repr (Mapping[str, str]): The serialized representation\n+ of the SchemaField, such as what is output by\n+ :meth:`to_api_repr`.\n+\n+ Returns:\n+ SchemaField: The ``SchemaField`` object.\n+ \"\"\"\n+ return cls(\n+ field_type=api_repr['type'].upper(),\n+ fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],\n+ mode=api_repr['mode'].upper(),\n+ name=api_repr['name'],\n+ )\n+\n @property\n def name(self):\n \"\"\"str: The name of the field.\"\"\"\n@@ -84,6 +103,28 @@\n \"\"\"\n return self._fields\n \n+ def to_api_repr(self):\n+ \"\"\"Return a dictionary representing this schema field.\n+\n+ Returns:\n+ dict: A dictionary representing the SchemaField in a serialized\n+ form.\n+ \"\"\"\n+ # Put together the basic representation. See http://bit.ly/2hOAT5u.\n+ answer = {\n+ 'mode': self.mode.lower(),\n+ 'name': self.name,\n+ 'type': self.field_type.lower(),\n+ }\n+\n+ # If this is a RECORD type, then sub-fields are also included,\n+ # add this to the serialized representation.\n+ if self.field_type.upper() == 'RECORD':\n+ answer['fields'] = [f.to_api_repr() for f in self.fields]\n+\n+ # Done; return the serialized dictionary.\n+ return answer\n+\n def _key(self):\n \"\"\"A tuple key that unique-ly describes this field.\n", "issue": "BigQuery: expose public helper method to convert a list of schema fields to/from a list of schema dictionaries (JSON)\nI've received some feedback asking how to take a schema from the client library and save it to a JSON file. 
One reason to do this is the [`bq` command-line tool](https://cloud.google.com/bigquery/bq-command-line-tool#creatingtablefromfile) accepts a schema file, formatted like\r\n\r\n```\r\n[\r\n {\"name\": \"name\", \"type\": \"string\", \"mode\": \"required\"},\r\n {\"name\": \"gender\", \"type\": \"string\", \"mode\": \"nullable\"},\r\n {\"name\": \"count\", \"type\": \"integer\", \"mode\": \"required\"}\r\n]\r\n```\r\n\r\nNote: this format is the same as the API representation.\r\n\r\nIt would be great if our client libraries could read/write in this format.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Schemas for BigQuery tables / queries.\"\"\"\n\n\nclass SchemaField(object):\n \"\"\"Describe a single field within a table schema.\n\n :type name: str\n :param name: the name of the field.\n\n :type field_type: str\n :param field_type: the type of the field (one of 'STRING', 'INTEGER',\n 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD').\n\n :type mode: str\n :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED',\n or 'REPEATED').\n\n :type description: str\n :param description: optional description for the field.\n\n :type fields: tuple of :class:`SchemaField`\n :param fields: subfields (requires ``field_type`` of 'RECORD').\n \"\"\"\n def __init__(self, name, field_type, mode='NULLABLE',\n description=None, fields=()):\n self._name = name\n self._field_type = field_type\n self._mode = mode\n self._description = description\n self._fields = tuple(fields)\n\n @property\n def name(self):\n \"\"\"str: The name of the field.\"\"\"\n return self._name\n\n @property\n def field_type(self):\n \"\"\"str: The type of the field.\n\n Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN',\n 'TIMESTAMP' or 'RECORD'.\n \"\"\"\n return self._field_type\n\n @property\n def mode(self):\n \"\"\"str: The mode of the field.\n\n Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'.\n \"\"\"\n return self._mode\n\n @property\n def is_nullable(self):\n \"\"\"Check whether 'mode' is 'nullable'.\"\"\"\n return self._mode == 'NULLABLE'\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description for the field.\"\"\"\n return self._description\n\n @property\n def fields(self):\n \"\"\"tuple: Subfields contained in this field.\n\n If ``field_type`` is not 'RECORD', this property must be\n empty / unset.\n \"\"\"\n return self._fields\n\n def _key(self):\n \"\"\"A tuple key that unique-ly describes this field.\n\n Used to compute this instance's hashcode and evaluate equality.\n\n Returns:\n tuple: The contents of this :class:`SchemaField`.\n \"\"\"\n return (\n self._name,\n self._field_type.lower(),\n self._mode,\n self._description,\n self._fields,\n )\n\n def __eq__(self, other):\n if not isinstance(other, SchemaField):\n return NotImplemented\n return self._key() == other._key()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n 
return 'SchemaField{}'.format(self._key())\n", "path": "bigquery/google/cloud/bigquery/schema.py"}]} | 1,749 | 469 |
gh_patches_debug_13145 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🧹 resync SQLoxide
AST to SQL and AST visitor appear to have been added
</issue>
<code>
[start of opteryx/third_party/sqloxide/__init__.py]
1 try:
2 from .sqloxide import parse_sql
3 except ImportError as e: # pragma: no cover
4 print(e)
5 if str(e) != "PyO3 modules may only be initialized once per interpreter process":
6 raise e
7
8 __all__ = ["parse_sql"]
9
[end of opteryx/third_party/sqloxide/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opteryx/third_party/sqloxide/__init__.py b/opteryx/third_party/sqloxide/__init__.py
--- a/opteryx/third_party/sqloxide/__init__.py
+++ b/opteryx/third_party/sqloxide/__init__.py
@@ -1,8 +1,16 @@
-try:
- from .sqloxide import parse_sql
-except ImportError as e: # pragma: no cover
- print(e)
- if str(e) != "PyO3 modules may only be initialized once per interpreter process":
- raise e
-
-__all__ = ["parse_sql"]
+"""
+This module provides an interface to the sqloxide library, which is responsible for parsing SQL,
+restoring the Abstract Syntax Tree (AST), and performing various mutations on expressions and relations.
+
+For more information about sqloxide: https://github.com/wseaton/sqloxide
+
+This module is not from sqloxide, it is written for Opteryx.
+"""
+
+from .sqloxide import mutate_expressions
+from .sqloxide import mutate_relations
+from .sqloxide import parse_sql
+from .sqloxide import restore_ast
+
+# Explicitly define the API of this module for external consumers
+__all__ = ["parse_sql", "restore_ast", "mutate_expressions", "mutate_relations"]
| {"golden_diff": "diff --git a/opteryx/third_party/sqloxide/__init__.py b/opteryx/third_party/sqloxide/__init__.py\n--- a/opteryx/third_party/sqloxide/__init__.py\n+++ b/opteryx/third_party/sqloxide/__init__.py\n@@ -1,8 +1,16 @@\n-try:\n- from .sqloxide import parse_sql\n-except ImportError as e: # pragma: no cover\n- print(e)\n- if str(e) != \"PyO3 modules may only be initialized once per interpreter process\":\n- raise e\n-\n-__all__ = [\"parse_sql\"]\n+\"\"\"\n+This module provides an interface to the sqloxide library, which is responsible for parsing SQL,\n+restoring the Abstract Syntax Tree (AST), and performing various mutations on expressions and relations.\n+\n+For more information about sqloxide: https://github.com/wseaton/sqloxide\n+\n+This module is not from sqloxide, it is written for Opteryx.\n+\"\"\"\n+\n+from .sqloxide import mutate_expressions\n+from .sqloxide import mutate_relations\n+from .sqloxide import parse_sql\n+from .sqloxide import restore_ast\n+\n+# Explicitly define the API of this module for external consumers\n+__all__ = [\"parse_sql\", \"restore_ast\", \"mutate_expressions\", \"mutate_relations\"]\n", "issue": "\ud83e\uddf9 resync SQLoxide\nAST to SQL and AST visitor appear to have been added \n", "before_files": [{"content": "try:\n from .sqloxide import parse_sql\nexcept ImportError as e: # pragma: no cover\n print(e)\n if str(e) != \"PyO3 modules may only be initialized once per interpreter process\":\n raise e\n\n__all__ = [\"parse_sql\"]\n", "path": "opteryx/third_party/sqloxide/__init__.py"}]} | 642 | 312 |